From 33c466d88c4bee67f0b4eb15b2ca1b2fc421b10b Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Thu, 28 Mar 2024 12:43:58 +0100 Subject: [PATCH 001/700] chore: add `.editorconfig` (#7369) --- .editorconfig | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 .editorconfig diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000000000..d53c0e8dded96 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,26 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# editorconfig.org + +root = true + +[*] +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +indent_style = space +indent_size = 4 + +[*.rs] +max_line_length = 100 + +[*.md] +# double whitespace at end of line +# denotes a line break in Markdown +trim_trailing_whitespace = false + +[Makefile] +indent_style = tab + +[] From bd2945a988ad2bb93dccf3967f4349e99e25da90 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 28 Mar 2024 14:27:13 +0100 Subject: [PATCH 002/700] fix: return an error if block does nit exist (#7374) --- crates/rpc/rpc-builder/tests/it/http.rs | 4 ++- crates/rpc/rpc/src/eth/filter.rs | 38 ++++++++++++++++--------- 2 files changed, 27 insertions(+), 15 deletions(-) diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index bb0ccef9b4302..c2740d880db77 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -546,7 +546,9 @@ async fn test_eth_logs_args() { let mut params = ArrayParams::default(); params.insert( serde_json::json!({"blockHash":"0x58dc57ab582b282c143424bd01e8d923cddfdcda9455bad02a29522f6274a948"})).unwrap(); - let _resp = client.request::, _>("eth_getLogs", params).await.unwrap(); + let resp = client.request::, _>("eth_getLogs", params).await; + // block does not exist + assert!(resp.is_err()); } #[tokio::test(flavor = "multi_thread")] 
diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index f69c00cc0a7dd..ad6b28f1ee50a 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -349,21 +349,31 @@ where async fn logs_for_filter(&self, filter: Filter) -> Result, FilterError> { match filter.block_option { FilterBlockOption::AtBlockHash(block_hash) => { + // all matching logs in the block + let block_number = self + .provider + .block_number_for_id(block_hash.into())? + .ok_or_else(|| EthApiError::UnknownBlockNumber)?; + + // we also need to ensure that the receipts are available and return an error if + // not, in case the block hash been reorged + let receipts = self + .eth_cache + .get_receipts(block_hash) + .await? + .ok_or_else(|| EthApiError::UnknownBlockNumber)?; + let mut all_logs = Vec::new(); - // all matching logs in the block, if it exists - if let Some(block_number) = self.provider.block_number_for_id(block_hash.into())? { - if let Some(receipts) = self.eth_cache.get_receipts(block_hash).await? 
{ - let filter = FilteredParams::new(Some(filter)); - logs_utils::append_matching_block_logs( - &mut all_logs, - &self.provider, - &filter, - (block_hash, block_number).into(), - &receipts, - false, - )?; - } - } + let filter = FilteredParams::new(Some(filter)); + logs_utils::append_matching_block_logs( + &mut all_logs, + &self.provider, + &filter, + (block_hash, block_number).into(), + &receipts, + false, + )?; + Ok(all_logs) } FilterBlockOption::Range { from_block, to_block } => { From 43c72b022cea3068edbce794fa94adefb029fd06 Mon Sep 17 00:00:00 2001 From: Nikolai Golub Date: Thu, 28 Mar 2024 16:39:00 +0100 Subject: [PATCH 003/700] Do no use feature `secp256k/rand-std` in project level Cargo.toml (#7378) --- Cargo.toml | 1 - examples/manual-p2p/Cargo.toml | 2 +- examples/polygon-p2p/Cargo.toml | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 4fb9fb7663ab0..5bfc4e0acedd0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -327,7 +327,6 @@ jsonrpsee-types = "0.20" # crypto secp256k1 = { version = "0.27.0", default-features = false, features = [ "global-context", - "rand-std", "recovery", ] } enr = { version = "=0.10.0", default-features = false, features = ["k256"] } diff --git a/examples/manual-p2p/Cargo.toml b/examples/manual-p2p/Cargo.toml index a7beb353e25bd..a9c7f251322b0 100644 --- a/examples/manual-p2p/Cargo.toml +++ b/examples/manual-p2p/Cargo.toml @@ -15,5 +15,5 @@ reth-discv4.workspace = true reth-eth-wire.workspace = true reth-ecies.workspace = true futures.workspace = true -secp256k1.workspace = true +secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } tokio.workspace = true diff --git a/examples/polygon-p2p/Cargo.toml b/examples/polygon-p2p/Cargo.toml index 80872936ca455..b1f5c98708398 100644 --- a/examples/polygon-p2p/Cargo.toml +++ b/examples/polygon-p2p/Cargo.toml @@ -8,7 +8,7 @@ license.workspace = true # See more keys and their definitions at 
https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -secp256k1.workspace = true +secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } tokio.workspace = true reth-network.workspace = true reth-primitives.workspace = true From d022b5be38c3907424f916773a12e58d3a99da10 Mon Sep 17 00:00:00 2001 From: Russel Waters <6511720+argakiig@users.noreply.github.com> Date: Thu, 28 Mar 2024 10:50:55 -0700 Subject: [PATCH 004/700] feat: `--db.exclusive` flag for nfs volumes (#7346) --- book/cli/reth/db.md | 5 +++++ book/cli/reth/db/diff.md | 5 +++++ book/cli/reth/import.md | 5 +++++ book/cli/reth/init.md | 5 +++++ book/cli/reth/node.md | 16 +++++++++------- book/cli/reth/p2p.md | 5 +++++ book/cli/reth/recover/storage-tries.md | 5 +++++ book/cli/reth/stage/drop.md | 5 +++++ book/cli/reth/stage/dump.md | 5 +++++ book/cli/reth/stage/run.md | 11 ++++++++--- book/cli/reth/stage/unwind.md | 5 +++++ crates/node-core/src/args/database_args.rs | 5 +++++ 12 files changed, 67 insertions(+), 10 deletions(-) diff --git a/book/cli/reth/db.md b/book/cli/reth/db.md index 7c72730ae16c7..77137dadb38e0 100644 --- a/book/cli/reth/db.md +++ b/book/cli/reth/db.md @@ -67,6 +67,11 @@ Database: - trace: Enables logging for trace debug-level messages - extra: Enables logging for extra debug-level messages + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/db/diff.md b/book/cli/reth/db/diff.md index 6923980fe6fde..3c0bd56413615 100644 --- a/book/cli/reth/db/diff.md +++ b/book/cli/reth/db/diff.md @@ -58,6 +58,11 @@ Database: - trace: Enables logging for trace debug-level messages - extra: Enables logging for extra debug-level messages + --db.exclusive + Open environment in exclusive/monopolistic mode. 
Makes it possible to open a database on an NFS volume + + [possible values: true, false] + --table The table name to diff. If not specified, all tables are diffed. diff --git a/book/cli/reth/import.md b/book/cli/reth/import.md index 6461e6a3fd0b4..382efb8ef9ea9 100644 --- a/book/cli/reth/import.md +++ b/book/cli/reth/import.md @@ -58,6 +58,11 @@ Database: - trace: Enables logging for trace debug-level messages - extra: Enables logging for extra debug-level messages + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + The path to a block file for import. diff --git a/book/cli/reth/init.md b/book/cli/reth/init.md index f9fb1de02844d..fc20da02be7e3 100644 --- a/book/cli/reth/init.md +++ b/book/cli/reth/init.md @@ -55,6 +55,11 @@ Database: - trace: Enables logging for trace debug-level messages - extra: Enables logging for extra debug-level messages + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index 961f71592f314..ccbc9cd3f529e 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -46,9 +46,6 @@ Options: Mutually exclusive with `--instance`. - --trusted-setup-file - Overrides the KZG trusted setup by reading from the supplied file - -h, --help Print help (see a summary with '-h') @@ -98,7 +95,7 @@ Networking: --identity Custom node identity - [default: reth/-/] + [default: reth/-/-gnu] --p2p-secret-key Secret key to use for this node. @@ -130,14 +127,14 @@ Networking: Maximum number of inbound requests. 
default: 30 --pooled-tx-response-soft-limit - Soft limit for the byte size of a [`PooledTransactions`](reth_eth_wire::PooledTransactions) response on assembling a [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions) request. Spec'd at 2 MiB. + Soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. Spec'd at 2 MiB. . [default: 2097152] --pooled-tx-pack-soft-limit - Default soft limit for the byte size of a [`PooledTransactions`](reth_eth_wire::PooledTransactions) response on assembling a [`GetPooledTransactions`](reth_eth_wire::PooledTransactions) request. This defaults to less than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when assembling a [`PooledTransactions`](reth_eth_wire::PooledTransactions) response. Default is 128 KiB + Default soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. This defaults to less than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when assembling a `PooledTransactions` response. Default is 128 KiB [default: 131072] @@ -238,7 +235,7 @@ RPC: --rpc-max-tracing-requests Maximum number of concurrent tracing requests - [default: 10] + [default: 14] --rpc-max-blocks-per-filter Maximum number of blocks that could be scanned per filter request. (0 = entire chain) @@ -434,6 +431,11 @@ Database: - trace: Enables logging for trace debug-level messages - extra: Enables logging for extra debug-level messages + --db.exclusive + Open environment in exclusive/monopolistic mode. 
Makes it possible to open a database on an NFS volume + + [possible values: true, false] + Dev testnet: --dev Start the node in dev mode diff --git a/book/cli/reth/p2p.md b/book/cli/reth/p2p.md index 4c7ecb972ab58..17cd396cf548b 100644 --- a/book/cli/reth/p2p.md +++ b/book/cli/reth/p2p.md @@ -101,6 +101,11 @@ Database: - trace: Enables logging for trace debug-level messages - extra: Enables logging for extra debug-level messages + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/recover/storage-tries.md b/book/cli/reth/recover/storage-tries.md index f3be1b573c4fc..32f135916a034 100644 --- a/book/cli/reth/recover/storage-tries.md +++ b/book/cli/reth/recover/storage-tries.md @@ -55,6 +55,11 @@ Database: - trace: Enables logging for trace debug-level messages - extra: Enables logging for extra debug-level messages + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/stage/drop.md b/book/cli/reth/stage/drop.md index 88a0197990d36..2efe9ed78fd63 100644 --- a/book/cli/reth/stage/drop.md +++ b/book/cli/reth/stage/drop.md @@ -55,6 +55,11 @@ Database: - trace: Enables logging for trace debug-level messages - extra: Enables logging for extra debug-level messages + --db.exclusive + Open environment in exclusive/monopolistic mode. 
Makes it possible to open a database on an NFS volume + + [possible values: true, false] + Possible values: - headers: The headers stage within the pipeline diff --git a/book/cli/reth/stage/dump.md b/book/cli/reth/stage/dump.md index c70eba2fcc579..2788cc40a06fd 100644 --- a/book/cli/reth/stage/dump.md +++ b/book/cli/reth/stage/dump.md @@ -62,6 +62,11 @@ Database: - trace: Enables logging for trace debug-level messages - extra: Enables logging for extra debug-level messages + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/stage/run.md b/book/cli/reth/stage/run.md index ade508237170b..f20eb3f68a8af 100644 --- a/book/cli/reth/stage/run.md +++ b/book/cli/reth/stage/run.md @@ -126,7 +126,7 @@ Networking: --identity Custom node identity - [default: reth/-/] + [default: reth/-/-gnu] --p2p-secret-key Secret key to use for this node. @@ -158,14 +158,14 @@ Networking: Maximum number of inbound requests. default: 30 --pooled-tx-response-soft-limit - Soft limit for the byte size of a [`PooledTransactions`](reth_eth_wire::PooledTransactions) response on assembling a [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions) request. Spec'd at 2 MiB. + Soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. Spec'd at 2 MiB. . [default: 2097152] --pooled-tx-pack-soft-limit - Default soft limit for the byte size of a [`PooledTransactions`](reth_eth_wire::PooledTransactions) response on assembling a [`GetPooledTransactions`](reth_eth_wire::PooledTransactions) request. This defaults to less than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when assembling a [`PooledTransactions`](reth_eth_wire::PooledTransactions) response. 
Default is 128 KiB + Default soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. This defaults to less than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when assembling a `PooledTransactions` response. Default is 128 KiB [default: 131072] @@ -183,6 +183,11 @@ Database: - trace: Enables logging for trace debug-level messages - extra: Enables logging for extra debug-level messages + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + -c, --commit Commits the changes in the database. WARNING: potentially destructive. diff --git a/book/cli/reth/stage/unwind.md b/book/cli/reth/stage/unwind.md index 712b78cf2c624..8479bca514eba 100644 --- a/book/cli/reth/stage/unwind.md +++ b/book/cli/reth/stage/unwind.md @@ -60,6 +60,11 @@ Database: - trace: Enables logging for trace debug-level messages - extra: Enables logging for extra debug-level messages + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/crates/node-core/src/args/database_args.rs b/crates/node-core/src/args/database_args.rs index 3c30d28de2a02..1c14c99db6eb6 100644 --- a/crates/node-core/src/args/database_args.rs +++ b/crates/node-core/src/args/database_args.rs @@ -12,6 +12,10 @@ pub struct DatabaseArgs { /// Database logging level. Levels higher than "notice" require a debug build. #[arg(long = "db.log-level", value_enum)] pub log_level: Option, + /// Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an + /// NFS volume. 
+ #[arg(long = "db.exclusive")] + pub exclusive: Option, } impl DatabaseArgs { @@ -19,6 +23,7 @@ impl DatabaseArgs { pub fn database_args(&self) -> reth_db::mdbx::DatabaseArguments { reth_db::mdbx::DatabaseArguments::new(default_client_version()) .with_log_level(self.log_level) + .with_exclusive(self.exclusive) } } From 79e15b1956e52ccab80179cc3b0d040216673ef3 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 29 Mar 2024 12:48:42 +0100 Subject: [PATCH 005/700] Add announcement tx types panel (#7383) --- etc/grafana/dashboards/reth-mempool.json | 5231 +++++++++++----------- 1 file changed, 2717 insertions(+), 2514 deletions(-) diff --git a/etc/grafana/dashboards/reth-mempool.json b/etc/grafana/dashboards/reth-mempool.json index bbd48ee468d72..7128dc6a398bb 100644 --- a/etc/grafana/dashboards/reth-mempool.json +++ b/etc/grafana/dashboards/reth-mempool.json @@ -1,2657 +1,2860 @@ { - "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - }, + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + }, + { + "name": "DS_EXPRESSION", + "label": "Expression", + "description": "", + "type": "datasource", + "pluginId": "__expr__" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "datasource", + "id": "__expr__", + "version": "1.0.0" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.3.3" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + "list": [ { - "name": "DS_EXPRESSION", - "label": "Expression", - "description": "", - "type": "datasource", - 
"pluginId": "__expr__" + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" } - ], - "__elements": {}, - "__requires": [ - { - "type": "datasource", - "id": "__expr__", - "version": "1.0.0" + ] + }, + "description": "Metrics for transaction P2P gossip and the local view of mempool data", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 }, - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "10.3.3" + "id": 96, + "panels": [], + "repeat": "instance", + "repeatDirection": "h", + "title": "Overview", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unitScale": true + }, + "overrides": [] }, - { - "type": "panel", - "id": "stat", - "name": "Stat", - "version": "" + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 1 }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], - "annotations": { - "list": [ + "id": 22, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ { - "builtIn": 1, "datasource": { - "type": "grafana", - "uid": "-- 
Grafana --" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{version}}", + "range": false, + "refId": "A" } - ] + ], + "title": "Version", + "transparent": true, + "type": "stat" }, - "description": "Metrics for transaction P2P gossip and the local view of mempool data", - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "id": null, - "links": [], - "liveNow": false, - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 96, - "panels": [], - "repeat": "instance", - "repeatDirection": "h", - "title": "Overview", - "type": "row" + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 0, - "y": 1 - }, - "id": 22, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "text": { - "valueSize": 20 - }, - "textMode": "name" - }, - "pluginVersion": "10.1.1", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": 
"reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{version}}", - "range": false, - "refId": "A" - } - ], - "title": "Version", - "transparent": true, - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "unitScale": true }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 3, + "y": 1 + }, + "id": 192, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 6, - "x": 3, - "y": 1 - }, - "id": 192, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "text": { - "valueSize": 20 - }, - "textMode": "name" - }, - "pluginVersion": "10.1.1", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{build_timestamp}}", - "range": false, - "refId": "A" - } - ], - "title": "Build Timestamp", - "transparent": true, - "type": "stat" + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{build_timestamp}}", + "range": false, + "refId": "A" + } + ], + "title": "Build 
Timestamp", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unitScale": true }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 1 + }, + "id": 193, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 9, - "y": 1 - }, - "id": 193, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "text": { - "valueSize": 20 - }, - "textMode": "name" - }, - "pluginVersion": "10.1.1", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{git_sha}}", - "range": false, - "refId": "A" - } - ], - "title": "Git SHA", - "transparent": true, - "type": "stat" + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + 
"instant": true, + "legendFormat": "{{git_sha}}", + "range": false, + "refId": "A" + } + ], + "title": "Git SHA", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unitScale": true }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 12, + "y": 1 + }, + "id": 195, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 2, - "x": 12, - "y": 1 - }, - "id": 195, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "text": { - "valueSize": 20 - }, - "textMode": "name" - }, - "pluginVersion": "10.1.1", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{build_profile}}", - "range": false, - "refId": "A" - } - ], - "title": "Build Profile", - "transparent": 
true, - "type": "stat" + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{build_profile}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Profile", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unitScale": true }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 14, + "y": 1 + }, + "id": 196, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 5, - "x": 14, - "y": 1 - }, - "id": 196, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "text": { - "valueSize": 20 - }, - "textMode": "name" - }, - "pluginVersion": "10.1.1", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": 
true, - "legendFormat": "{{target_triple}}", - "range": false, - "refId": "A" - } - ], - "title": "Target Triple", - "transparent": true, - "type": "stat" + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{target_triple}}", + "range": false, + "refId": "A" + } + ], + "title": "Target Triple", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unitScale": true }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 19, + "y": 1 + }, + "id": 197, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 5, - "x": 19, - "y": 1 - }, - "id": 197, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "text": { - "valueSize": 20 - }, - "textMode": "name" - }, - "pluginVersion": "10.1.1", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": 
"${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{cargo_features}}", - "range": false, - "refId": "A" - } - ], - "title": "Cargo Features", - "transparent": true, - "type": "stat" + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{cargo_features}}", + "range": false, + "refId": "A" + } + ], + "title": "Cargo Features", + "transparent": true, + "type": "stat" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 4 }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 4 - }, - "id": 89, - "panels": [], - "repeat": "instance", - "repeatDirection": "h", - "title": "Transaction Pool", - "type": "row" + "id": 89, + "panels": [], + "repeat": "instance", + "repeatDirection": "h", + "title": "Transaction Pool", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Transaction pool maintenance metrics", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + "description": "Transaction pool maintenance metrics", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": 
false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "thresholdsStyle": { - "mode": "off" + { + "color": "red", + "value": 80 } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 5 - }, - "id": 91, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + ] + }, + "unit": "bytes", + "unitScale": true }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_transaction_pool_dirty_accounts{instance=~\"$instance\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Dirty Accounts", - "range": true, - "refId": "A", - "useBackend": false + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 5 + }, + "id": 91, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" 
}, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_transaction_pool_drift_count{instance=~\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Drift Count", - "range": true, - "refId": "B", - "useBackend": false + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_dirty_accounts{instance=~\"$instance\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Dirty Accounts", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_transaction_pool_reinserted_transactions{instance=~\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Reinserted Transactions", - "range": true, - "refId": "C", - "useBackend": false + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_drift_count{instance=~\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Drift Count", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_transaction_pool_deleted_tracked_finalized_blobs{instance=~\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Deleted Tracked Finalized Blobs", - "range": true, - "refId": "D", - 
"useBackend": false - } - ], - "title": "TxPool Maintenance", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_reinserted_transactions{instance=~\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Reinserted Transactions", + "range": true, + "refId": "C", + "useBackend": false }, - "description": "Tracks a heuristic of the memory footprint of the various transaction pool sub-pools", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_deleted_tracked_finalized_blobs{instance=~\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Deleted Tracked Finalized Blobs", + "range": true, + "refId": "D", + "useBackend": false + } + ], + "title": "TxPool Maintenance", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Tracks a heuristic of the memory footprint of the various transaction pool sub-pools", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + 
"axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "thresholdsStyle": { - "mode": "off" + { + "color": "red", + "value": 80 } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 5 - }, - "id": 210, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + ] + }, + "unit": "bytes", + "unitScale": true }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "reth_transaction_pool_basefee_pool_size_bytes{instance=~\"$instance\"}", - "legendFormat": "Base fee pool size", - "range": true, - "refId": "A" + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 5 + }, + "id": 210, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": 
"${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "reth_transaction_pool_pending_pool_size_bytes{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Pending pool size", - "range": true, - "refId": "B" + "editorMode": "builder", + "expr": "reth_transaction_pool_basefee_pool_size_bytes{instance=~\"$instance\"}", + "legendFormat": "Base fee pool size", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "reth_transaction_pool_queued_pool_size_bytes{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Queued pool size", - "range": true, - "refId": "C" + "editorMode": "builder", + "expr": "reth_transaction_pool_pending_pool_size_bytes{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Pending pool size", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "reth_transaction_pool_blob_pool_size_bytes{instance=~\"$instance\"}", - "legendFormat": "Blob pool size", - "range": true, - "refId": "D" - } - ], - "title": "Subpool sizes in bytes", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "editorMode": "builder", + "expr": "reth_transaction_pool_queued_pool_size_bytes{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Queued pool size", + "range": true, + "refId": "C" }, - "description": "Currently active outgoing GetPooledTransactions requests.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - 
"gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_transaction_pool_blob_pool_size_bytes{instance=~\"$instance\"}", + "legendFormat": "Blob pool size", + "range": true, + "refId": "D" + } + ], + "title": "Subpool sizes in bytes", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Currently active outgoing GetPooledTransactions requests.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "thresholdsStyle": { - "mode": "off" + { + "color": "red", + "value": 80 } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } + ] }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 13 - }, - "id": 104, - "options": { - "legend": { - 
"calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "unitScale": true }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "reth_network_inflight_transaction_requests{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Inflight Transaction Requests", - "range": true, - "refId": "C" - } - ], - "title": "Inflight Transaction Requests", - "type": "timeseries" + "overrides": [] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Tracks the number of transactions in the various transaction pool sub-pools", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 13 + }, + "id": 104, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_network_inflight_transaction_requests{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Inflight Transaction Requests", + "range": true, + "refId": "C" + } + ], + "title": "Inflight Transaction Requests", + "type": "timeseries" + }, + { + "datasource": 
{ + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Tracks the number of transactions in the various transaction pool sub-pools", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "thresholdsStyle": { - "mode": "off" + { + "color": "red", + "value": 80 } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } + ] }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 13 - }, - "id": 92, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "unitScale": true }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "reth_transaction_pool_basefee_pool_transactions{instance=~\"$instance\"}", - "legendFormat": "Base fee pool transactions", - "range": true, - "refId": "A" + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 13 + }, + "id": 92, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + 
}, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "reth_transaction_pool_pending_pool_transactions{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Pending pool transactions", - "range": true, - "refId": "B" + "editorMode": "builder", + "expr": "reth_transaction_pool_basefee_pool_transactions{instance=~\"$instance\"}", + "legendFormat": "Base fee pool transactions", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "reth_transaction_pool_queued_pool_transactions{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Queued pool transactions", - "range": true, - "refId": "C" + "editorMode": "builder", + "expr": "reth_transaction_pool_pending_pool_transactions{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Pending pool transactions", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "reth_transaction_pool_blob_pool_transactions{instance=~\"$instance\"}", - "legendFormat": "Blob pool transactions", - "range": true, - "refId": "D" - } - ], - "title": "Subpool transaction count", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "editorMode": "builder", + "expr": "reth_transaction_pool_queued_pool_transactions{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Queued pool transactions", + "range": true, + "refId": "C" }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - 
}, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_transaction_pool_blob_pool_transactions{instance=~\"$instance\"}", + "legendFormat": "Blob pool transactions", + "range": true, + "refId": "D" + } + ], + "title": "Subpool transaction count", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "thresholdsStyle": { - "mode": "off" + { + "color": "red", + "value": 80 } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] + ] + }, + 
"unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 21 + }, + "id": 199, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_hashes_pending_fetch{instance=~\"$instance\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Hashes in Pending Fetch Cache", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_hashes_inflight_transaction_requests{instance=~\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Hashes in Inflight Requests", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "__expr__", + "uid": "${DS_EXPRESSION}" + }, + "expression": "$A + $B", + "hide": false, + "refId": "Total Hashes in Transaction Fetcher", + "type": "math" + } + ], + "title": "Transaction Fetcher Hashes", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Number of transactions about to be imported into the pool.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": 
"linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 21 - }, - "id": 199, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_network_hashes_pending_fetch{instance=~\"$instance\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Hashes in Pending Fetch Cache", - "range": true, - "refId": "A", - "useBackend": false + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 21 + }, + "id": 94, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_network_pending_pool_imports{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Transactions pending import", + "range": true, + "refId": "C" + } + ], + "title": "Pending pool imports", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Tracks the number of transaction messages in the channel from the network to the transaction pool", + "fieldConfig": { + "defaults": { + "color": { + 
"mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": true, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] }, + "unit": "reqps", + "unitScale": true + }, + "overrides": [ { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_network_hashes_inflight_transaction_requests{instance=~\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Hashes in Inflight Requests", - "range": true, - "refId": "B", - "useBackend": false + "matcher": { + "id": "byFrameRefID", + "options": "B" + }, + "properties": [ + { + "id": "custom.transform", + "value": "negative-Y" + } + ] }, { - "datasource": { - "type": "__expr__", - "uid": "${DS_EXPRESSION}" - }, - "expression": "$A + $B", - "hide": false, - "refId": "Total Hashes in Transaction Fetcher", - "type": "math" + "matcher": { + "id": "byFrameRefID", + "options": "C" + }, + "properties": [ + { + "id": "unit", + "value": "events" + } + ] } - ], - "title": "Transaction Fetcher Hashes", - "type": "timeseries" + ] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 29 + }, + "id": 95, + "options": { + "legend": { + "calcs": [], 
+ "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "rate(reth_network_pool_transactions_messages_sent{instance=~\"$instance\"}[$__rate_interval])", + "hide": false, + "instant": false, + "legendFormat": "Tx", + "range": true, + "refId": "A" }, - "description": "Number of transactions about to be imported into the pool.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "rate(reth_network_pool_transactions_messages_received{instance=~\"$instance\"}[$__rate_interval])", + "hide": false, + "legendFormat": "Rx", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_network_pool_transactions_messages_sent{instance=~\"$instance\"} - reth_network_pool_transactions_messages_received{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Messages in channel", + "range": true, + "refId": "C" + } + ], + "title": "Network transaction channel", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Tracks the number of transactions per 
second that are inserted and removed from the transaction pool, as well as the number of invalid transactions per second.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": true, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "thresholdsStyle": { - "mode": "off" + { + "color": "red", + "value": 80 } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } + ] }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 21 - }, - "id": 94, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "unit": "ops", + "unitScale": true }, - "targets": [ + "overrides": [ { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "reth_network_pending_pool_imports{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Transactions pending import", - "range": true, - "refId": "C" + "matcher": { + "id": "byFrameRefID", + "options": "B" + }, + "properties": [ + { + "id": "custom.transform", + "value": "negative-Y" + } + ] } - ], - "title": "Pending pool imports", - "type": "timeseries" + ] 
}, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 29 + }, + "id": 93, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_transaction_pool_inserted_transactions{instance=~\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "Inserted transactions", + "range": true, + "refId": "A", + "useBackend": false }, - "description": "Tracks the number of transaction messages in the channel from the network to the transaction pool", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": true, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_transaction_pool_removed_transactions{instance=~\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Removed transactions", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": 
"builder", + "expr": "rate(reth_transaction_pool_invalid_transactions{instance=~\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Invalid transactions", + "range": true, + "refId": "C", + "useBackend": false + } + ], + "title": "Inserted transactions", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Durations of nested function calls, in one call to poll `TransactionsManager` future:\n\nNetwork Events - stream peer session updates from `NetworkManager`;\nTransaction Events - stream txns gossip from `NetworkManager`;\nPending Transactions - stream hashes of txns successfully inserted into pending set in `TransactionPool`;\nPending Pool Imports - flush txns to pool from `TransactionsManager`;\nFetch Events - stream fetch txn events (success case wraps a tx) from `TransactionFetcher`;\nFetch Pending Hashes - search for hashes announced by an idle peer in cache for hashes pending fetch;\n(Transactions Commands - stream commands from testnet to fetch/serve/propagate txns)\n", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "thresholdsStyle": { - "mode": "off" + { + "color": "red", + 
"value": 80 } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "reqps" + ] }, - "overrides": [ - { - "matcher": { - "id": "byFrameRefID", - "options": "B" - }, - "properties": [ - { - "id": "custom.transform", - "value": "negative-Y" - } - ] - }, - { - "matcher": { - "id": "byFrameRefID", - "options": "C" - }, - "properties": [ - { - "id": "unit", - "value": "events" - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 29 - }, - "id": 95, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "unit": "s", + "unitScale": true }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "rate(reth_network_pool_transactions_messages_sent{instance=~\"$instance\"}[$__rate_interval])", - "hide": false, - "instant": false, - "legendFormat": "Tx", - "range": true, - "refId": "A" + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 37 + }, + "id": 200, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "rate(reth_network_pool_transactions_messages_received{instance=~\"$instance\"}[$__rate_interval])", - "hide": false, - "legendFormat": "Rx", - "range": true, - "refId": "B" + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_acc_duration_poll_network_events{instance=\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": 
true, + "instant": false, + "legendFormat": "Network Events", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "reth_network_pool_transactions_messages_sent{instance=~\"$instance\"} - reth_network_pool_transactions_messages_received{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Messages in channel", - "range": true, - "refId": "C" - } - ], - "title": "Network transaction channel", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_acc_duration_poll_transaction_events{instance=\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Transaction Events", + "range": true, + "refId": "C", + "useBackend": false }, - "description": "Tracks the number of transactions per second that are inserted and removed from the transaction pool, as well as the number of invalid transactions per second.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": true, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - 
"color": "red", - "value": 80 - } - ] - }, - "unit": "ops" + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - "overrides": [ - { - "matcher": { - "id": "byFrameRefID", - "options": "B" - }, - "properties": [ - { - "id": "custom.transform", - "value": "negative-Y" - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 29 - }, - "id": 93, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_network_acc_duration_poll_imported_transactions{instance=\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Pending Transactions", + "range": true, + "refId": "D", + "useBackend": false }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_transaction_pool_inserted_transactions{instance=~\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "Inserted transactions", - "range": true, - "refId": "A", - "useBackend": false + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_transaction_pool_removed_transactions{instance=~\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "Removed transactions", - "range": true, - "refId": "B", - "useBackend": false + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_acc_duration_poll_pending_pool_imports{instance=\"$instance\"}", + "fullMetaSearch": false, + 
"hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Pending Pool Imports", + "range": true, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_transaction_pool_invalid_transactions{instance=~\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "Invalid transactions", - "range": true, - "refId": "C", - "useBackend": false - } - ], - "title": "Inserted transactions", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_acc_duration_poll_fetch_events{instance=\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Fetch Events", + "range": true, + "refId": "F", + "useBackend": false }, - "description": "Durations of nested function calls, in one call to poll `TransactionsManager` future:\n\nNetwork Events - stream peer session updates from `NetworkManager`;\nTransaction Events - stream txns gossip from `NetworkManager`;\nPending Transactions - stream hashes of txns successfully inserted into pending set in `TransactionPool`;\nPending Pool Imports - flush txns to pool from `TransactionsManager`;\nFetch Events - stream fetch txn events (success case wraps a tx) from `TransactionFetcher`;\nFetch Pending Hashes - search for hashes announced by an idle peer in cache for hashes pending fetch;\n(Transactions Commands - stream commands from testnet to fetch/serve/propagate txns)\n", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", 
- "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_acc_duration_poll_commands{instance=\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Commands", + "range": true, + "refId": "G", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_acc_duration_fetch_pending_hashes{instance=\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Fetch Pending Hashes", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Transactions Manager Poll Duration Nested Function Calls", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Tracks the entries, byte size, failed inserts and file deletes of the blob store", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + 
"pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "thresholdsStyle": { - "mode": "off" + { + "color": "red", + "value": 80 } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 37 - }, - "id": 200, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + ] + }, + "unitScale": true }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_network_acc_duration_poll_network_events{instance=\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Network Events", - "range": true, - "refId": "B", - "useBackend": false + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 37 + }, + "id": 115, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_network_acc_duration_poll_transaction_events{instance=\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": 
false, - "legendFormat": "Transaction Events", - "range": true, - "refId": "C", - "useBackend": false + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_blobstore_entries{instance=~\"$instance\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Entries", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_network_acc_duration_poll_imported_transactions{instance=\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Pending Transactions", - "range": true, - "refId": "D", - "useBackend": false + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_blobstore_byte_size{instance=~\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Bytesize", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_network_acc_duration_poll_pending_pool_imports{instance=\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Pending Pool Imports", - "range": true, - "refId": "E", - "useBackend": false + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_blobstore_failed_inserts{instance=~\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Failed Inserts", 
+ "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_network_acc_duration_poll_fetch_events{instance=\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Fetch Events", - "range": true, - "refId": "F", - "useBackend": false + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_blobstore_failed_deletes{instance=~\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Failed Deletes", + "range": true, + "refId": "D", + "useBackend": false + } + ], + "title": "Blob store", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "All Transactions metrics", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_network_acc_duration_poll_commands{instance=\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - 
"includeNullMetadata": true, - "instant": false, - "legendFormat": "Commands", - "range": true, - "refId": "G", - "useBackend": false + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 45 + }, + "id": 116, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_all_transactions_by_hash{instance=~\"$instance\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "All transactions by hash", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_network_acc_duration_fetch_pending_hashes{instance=\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Fetch Pending Hashes", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Transactions Manager Poll Duration Nested Function Calls", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_all_transactions_by_id{instance=~\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "All transactions by id", + "range": true, + "refId": "B", 
+ "useBackend": false }, - "description": "Tracks the entries, byte size, failed inserts and file deletes of the blob store", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_all_transactions_by_all_senders{instance=~\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "All transactions by all senders", + "range": true, + "refId": "C", + "useBackend": false + } + ], + "title": "All Transactions metrics", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Duration spent inside one call to poll the `NetworkManager` future", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": 
"A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "thresholdsStyle": { - "mode": "off" + { + "color": "red", + "value": 80 } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } + ] }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 37 - }, - "id": 115, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "unit": "s", + "unitScale": true }, - "targets": [ + "overrides": [ { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "Network Manager Future" + ], + "prefix": "All except:", + "readOnly": true + } }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_transaction_pool_blobstore_entries{instance=~\"$instance\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Entries", - "range": true, - "refId": "A", - "useBackend": false + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 45 + }, + "id": 212, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": 
"reth_transaction_pool_blobstore_byte_size{instance=~\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Bytesize", - "range": true, - "refId": "B", - "useBackend": false + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_duration_poll_network_manager{instance=\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Network Manager Future", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Network Manager Future Poll Duration", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Duration spent inside one call to poll the `TransactionsManager` future", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_transaction_pool_blobstore_failed_inserts{instance=~\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Failed Inserts", - "range": true, - "refId": "C", - "useBackend": false + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + 
"color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] }, + "unit": "s", + "unitScale": true + }, + "overrides": [ { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "Transactions Manager Future" + ], + "prefix": "All except:", + "readOnly": true + } }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_transaction_pool_blobstore_failed_deletes{instance=~\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Failed Deletes", - "range": true, - "refId": "D", - "useBackend": false + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] } - ], - "title": "Blob store", - "type": "timeseries" + ] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "All Transactions metrics", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 53 + }, + "id": 201, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": 
"${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_duration_poll_tx_manager{instance=\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Transactions Manager Future", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Transactions Manager Future Poll Duration", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Frequency of a peer sending a transaction that has already been marked as seen by that peer. This could for example be the case if a transaction is sent/announced to the peer at the same time that the peer sends/announces the same transaction to us.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "thresholdsStyle": { - "mode": "off" + { + "color": "red", + "value": 80 } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } + ] }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 37 - }, - "id": 116, - "options": { - "legend": { - "calcs": [], - "displayMode": 
"list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "unit": "cps", + "unitScale": true }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_transaction_pool_all_transactions_by_hash{instance=~\"$instance\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "All transactions by hash", - "range": true, - "refId": "A", - "useBackend": false + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 53 + }, + "id": 208, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_transaction_pool_all_transactions_by_id{instance=~\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "All transactions by id", - "range": true, - "refId": "B", - "useBackend": false + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_occurrences_hash_already_seen_by_peer{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Freq Announced Transactions Already Seen by Peer", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": 
"reth_transaction_pool_all_transactions_by_all_senders{instance=~\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "All transactions by all senders", - "range": true, - "refId": "C", - "useBackend": false - } - ], - "title": "All Transactions metrics", - "type": "timeseries" + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_occurrences_of_transaction_already_seen_by_peer{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Freq Received Transactions Already Seen by Peer", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Frequency of Transactions Already Marked as Seen by Peer", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Duration spent inside one call to poll the `TransactionsManager` future", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + "description": "Durations of nested function calls, in one call to poll `NetworkManager` future:\n\nNetwork Handle Message - stream network handle messages from `TransactionsManager`;\nSwarm Events - stream transaction gossip from `Swarm`", + "fieldConfig": { + "defaults": { + "color": { 
+ "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "thresholdsStyle": { - "mode": "off" + { + "color": "red", + "value": 80 } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "Transactions Manager Future" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 45 - }, - "id": 201, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + ] + }, + "unit": "s", + "unitScale": true }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_network_duration_poll_tx_manager{instance=\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - 
"legendFormat": "Transactions Manager Future", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Transactions Manager Future Poll Duration", - "type": "timeseries" + "overrides": [] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 61 + }, + "id": 209, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_duration_poll_network_handle{instance=\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Network Handle Messages", + "range": true, + "refId": "A", + "useBackend": false }, - "description": "Duration spent inside one call to poll the `NetworkManager` future", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_duration_poll_swarm{instance=\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Swarm Events", + "range": 
true, + "refId": "B", + "useBackend": false + } + ], + "title": "Network Manager Poll Duration Nested Function Calls", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Total number of times a transaction is sent/announced that is already in the local pool.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "thresholdsStyle": { - "mode": "off" + { + "color": "red", + "value": 80 } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "Network Manager Future" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 45 - }, - "id": 212, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": 
"none" - } + ] + }, + "unit": "cps", + "unitScale": true }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_network_duration_poll_network_manager{instance=\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Network Manager Future", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Network Manager Future Poll Duration", - "type": "timeseries" + "overrides": [] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Durations of nested function calls, in one call to poll `NetworkManager` future:\n\nNetwork Handle Message - stream network handle messages from `TransactionsManager`;\nSwarm Events - stream transaction gossip from `Swarm`", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 53 - }, - "id": 209, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - 
"sort": "none" - } + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 61 + }, + "id": 213, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_occurrences_hashes_already_in_pool{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Freq Announced Transactions Already in Pool", + "range": true, + "refId": "A", + "useBackend": false }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_network_duration_poll_network_handle{instance=\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Network Handle Messages", - "range": true, - "refId": "A", - "useBackend": false + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_network_duration_poll_swarm{instance=\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Swarm Events", - "range": true, - "refId": "B", - "useBackend": false - } - ], - "title": "Network Manager Poll Duration Nested Function Calls", - "type": "timeseries" + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_occurrences_transactions_already_in_pool{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": 
false, + "legendFormat": "Freq Received Transactions Already in Pool ", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Frequency of Transactions Already in Pool", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Frequency of a peer sending a transaction that has already been marked as seen by that peer. This could for example be the case if a transaction is sent/announced to the peer at the same time that the peer sends/announces the same transaction to us.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + "description": "Frequency of transaction types seen in announcements", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + 
"mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "thresholdsStyle": { - "mode": "off" + { + "color": "red", + "value": 80 } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "cps" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 53 - }, - "id": 208, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_network_occurrences_hash_already_seen_by_peer{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Freq Announced Transactions Already Seen by Peer", - "range": true, - "refId": "A", - "useBackend": false + ] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_network_occurrences_of_transaction_already_seen_by_peer{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Freq Received Transactions Already Seen by Peer", - "range": true, - "refId": "B", - "useBackend": false - } - ], - "title": "Frequency of Transactions Already Marked as Seen by Peer", - "type": "timeseries" + "unit": "cps", + "unitScale": true + }, + "overrides": [] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 69 + }, + "id": 214, + "options": { + "legend": { + "calcs": [], + "displayMode": 
"list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_transaction_fetcher_legacy_sum{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Legacy", + "range": true, + "refId": "A", + "useBackend": false }, - "description": "Total number of times a transaction is sent/announced that is already in the local pool.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "cps" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 61 - }, - "id": 213, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": 
"rate(reth_network_transaction_fetcher_eip2930_sum{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Eip2930", + "range": true, + "refId": "B", + "useBackend": false }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_network_occurrences_hashes_already_in_pool{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Freq Announced Transactions Already in Pool", - "range": true, - "refId": "A", - "useBackend": false + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_network_occurrences_transactions_already_in_pool{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Freq Received Transactions Already in Pool ", - "range": true, - "refId": "B", - "useBackend": false - } - ], - "title": "Frequency of Transactions Already in Pool", - "type": "timeseries" - } - ], - "refresh": "30s", - "revision": 1, - "schemaVersion": 38, - "style": "dark", - "tags": [], - "templating": { - "list": [ + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_transaction_fetcher_eip1559_sum{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Eip1559", + "range": true, + "refId": "C", + "useBackend": false + }, { - "current": {}, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "definition": "query_result(reth_info)", - "hide": 
0, - "includeAll": false, - "multi": false, - "name": "instance", - "options": [], - "query": { - "query": "query_result(reth_info)", - "refId": "PrometheusVariableQueryEditor-VariableQuery" - }, - "refresh": 1, - "regex": "/.*instance=\\\"([^\\\"]*).*/", - "skipUrlSync": false, - "sort": 0, - "type": "query" + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_transaction_fetcher_eip4844_sum{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Eip4844", + "range": true, + "refId": "D", + "useBackend": false } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": {}, - "timezone": "", - "title": "reth - mempool", - "uid": "bee34f59-c79c-4669-a000-198057b3703d", - "version": 9, - "weekStart": "" - } \ No newline at end of file + ], + "title": "Announced Transactions by TxType", + "type": "timeseries" + } + ], + "refresh": "30s", + "revision": 1, + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [ + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "query_result(reth_info)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "instance", + "options": [], + "query": { + "query": "query_result(reth_info)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "/.*instance=\\\"([^\\\"]*).*/", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "reth - mempool", + "uid": "bee34f59-c79c-4669-a000-198057b3703d", + "version": 1, + "weekStart": "" +} \ No newline at end of file From 8b8830690736f213148c1d9586f2ed206ba553a9 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Fri, 29 Mar 2024 13:13:27 +0100 Subject: [PATCH 006/700] fix: handle 
optimism deposit transactions on `SenderRecovery` stage (#7376) --- crates/primitives/src/transaction/mod.rs | 29 +++++++++++++++++++++ crates/stages/src/stages/sender_recovery.rs | 6 +---- 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 92d55b8d82b07..4266075832211 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -856,10 +856,39 @@ impl TransactionSignedNoHash { /// /// Returns `None` if the transaction's signature is invalid, see also [Self::recover_signer]. pub fn recover_signer(&self) -> Option
{ + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. + #[cfg(feature = "optimism")] + if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { + return Some(from) + } + let signature_hash = self.signature_hash(); self.signature.recover_signer(signature_hash) } + /// Recover signer from signature and hash _without ensuring that the signature has a low `s` + /// value_. + /// + /// Re-uses a given buffer to avoid numerous reallocations when recovering batches. **Clears the + /// buffer before use.** + /// + /// Returns `None` if the transaction's signature is invalid, see also + /// [Signature::recover_signer_unchecked]. + pub fn encode_and_recover_unchecked(&self, buffer: &mut Vec) -> Option
{ + buffer.clear(); + self.transaction.encode_without_signature(buffer); + + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. + #[cfg(feature = "optimism")] + if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { + return Some(from) + } + + self.signature.recover_signer_unchecked(keccak256(buffer)) + } + /// Converts into a transaction type with its hash: [`TransactionSigned`]. /// /// Note: This will recalculate the hash of the transaction. diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index 4648f9732443c..afb65c560605e 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -9,7 +9,6 @@ use reth_db::{ }; use reth_interfaces::consensus; use reth_primitives::{ - keccak256, stage::{EntitiesCheckpoint, StageCheckpoint, StageId}, Address, PruneSegment, StaticFileSegment, TransactionSignedNoHash, TxNumber, }; @@ -229,16 +228,13 @@ fn recover_sender( (tx_id, tx): (TxNumber, TransactionSignedNoHash), rlp_buf: &mut Vec, ) -> Result<(u64, Address), Box> { - tx.transaction.encode_without_signature(rlp_buf); - // We call [Signature::recover_signer_unchecked] because transactions run in the pipeline are // known to be valid - this means that we do not need to check whether or not the `s` value is // greater than `secp256k1n / 2` if past EIP-2. There are transactions pre-homestead which have // large `s` values, so using [Signature::recover_signer] here would not be // backwards-compatible. let sender = tx - .signature - .recover_signer_unchecked(keccak256(rlp_buf)) + .encode_and_recover_unchecked(rlp_buf) .ok_or(SenderRecoveryStageError::FailedRecovery(FailedSenderRecoveryError { tx: tx_id }))?; Ok((tx_id, sender)) From 00a4555b7d808be2a96b760d3fd558bf2b58c03f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 29 Mar 2024 16:22:50 +0100 Subject: [PATCH 007/700] chore: downgrade dns debug! 
to trace (#7384) --- crates/net/dns/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/net/dns/src/lib.rs b/crates/net/dns/src/lib.rs index f1056d841d59c..07c39e231d5e4 100644 --- a/crates/net/dns/src/lib.rs +++ b/crates/net/dns/src/lib.rs @@ -255,7 +255,7 @@ impl DnsDiscoveryService { debug!(target: "disc::dns",%err, domain=%link.domain, ?hash, "Failed to lookup entry") } None => { - debug!(target: "disc::dns",domain=%link.domain, ?hash, "No dns entry") + trace!(target: "disc::dns",domain=%link.domain, ?hash, "No dns entry") } Some(Ok(entry)) => { // cache entry From be16072728abae152fd04a83f0f22e8b08ecbe04 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 29 Mar 2024 17:06:48 +0100 Subject: [PATCH 008/700] chore: simplify examples recipient matching (#7385) --- examples/custom-inspector/src/main.rs | 81 ++++++++++++---------- examples/trace-transaction-cli/src/main.rs | 14 ++-- 2 files changed, 53 insertions(+), 42 deletions(-) diff --git a/examples/custom-inspector/src/main.rs b/examples/custom-inspector/src/main.rs index e8c9c52463484..b0fe4fbb8c2ed 100644 --- a/examples/custom-inspector/src/main.rs +++ b/examples/custom-inspector/src/main.rs @@ -28,7 +28,6 @@ use reth::{ transaction_pool::TransactionPool, }; use reth_node_ethereum::node::EthereumNode; -use std::collections::HashSet; fn main() { Cli::::parse() @@ -37,8 +36,6 @@ fn main() { let NodeHandle { mut node, node_exit_future } = builder.node(EthereumNode::default()).launch().await?; - let recipients = args.recipients.iter().copied().collect::>(); - // create a new subscription to pending transactions let mut pending_transactions = node.pool.new_pending_pool_transactions_listener(); @@ -54,42 +51,45 @@ fn main() { let tx = event.transaction; println!("Transaction received: {tx:?}"); - if recipients.is_empty() { - // convert the pool transaction - let call_request = - transaction_to_call_request(tx.to_recovered_transaction()); + if let Some(recipient) = tx.to() { + 
if args.is_match(&recipient) { + // convert the pool transaction + let call_request = + transaction_to_call_request(tx.to_recovered_transaction()); - let result = eth_api - .spawn_with_call_at( - call_request, - BlockNumberOrTag::Latest.into(), - EvmOverrides::default(), - move |db, env| { - let mut dummy_inspector = DummyInspector::default(); - { - // configure the evm with the custom inspector - let mut evm = Evm::builder() - .with_db(db) - .with_external_context(&mut dummy_inspector) - .with_env_with_handler_cfg(env) - .append_handler_register(inspector_handle_register) - .build(); - // execute the transaction on a blocking task and await the - // inspector result - let _ = evm.transact()?; - } - Ok(dummy_inspector) - }, - ) - .await; + let result = eth_api + .spawn_with_call_at( + call_request, + BlockNumberOrTag::Latest.into(), + EvmOverrides::default(), + move |db, env| { + let mut dummy_inspector = DummyInspector::default(); + { + // configure the evm with the custom inspector + let mut evm = Evm::builder() + .with_db(db) + .with_external_context(&mut dummy_inspector) + .with_env_with_handler_cfg(env) + .append_handler_register(inspector_handle_register) + .build(); + // execute the transaction on a blocking task and await + // the + // inspector result + let _ = evm.transact()?; + } + Ok(dummy_inspector) + }, + ) + .await; - if let Ok(ret_val) = result { - let hash = tx.hash(); - println!( - "Inspector result for transaction {}: \n {}", - hash, - ret_val.ret_val.join("\n") - ); + if let Ok(ret_val) = result { + let hash = tx.hash(); + println!( + "Inspector result for transaction {}: \n {}", + hash, + ret_val.ret_val.join("\n") + ); + } } } } @@ -108,6 +108,13 @@ struct RethCliTxpoolExt { pub recipients: Vec
, } +impl RethCliTxpoolExt { + /// Check if the recipient is in the list of recipients to trace. + pub fn is_match(&self, recipient: &Address) -> bool { + self.recipients.is_empty() || self.recipients.contains(recipient) + } +} + /// A dummy inspector that logs the opcodes and their corresponding program counter for a /// transaction #[derive(Default, Debug, Clone)] diff --git a/examples/trace-transaction-cli/src/main.rs b/examples/trace-transaction-cli/src/main.rs index 7a4fde6af5ac4..ab72c272006ab 100644 --- a/examples/trace-transaction-cli/src/main.rs +++ b/examples/trace-transaction-cli/src/main.rs @@ -23,7 +23,6 @@ use reth::{ transaction_pool::TransactionPool, }; use reth_node_ethereum::node::EthereumNode; -use std::collections::HashSet; fn main() { Cli::::parse() @@ -32,8 +31,6 @@ fn main() { let NodeHandle { mut node, node_exit_future } = builder.node(EthereumNode::default()).launch().await?; - let recipients = args.recipients.iter().copied().collect::>(); - // create a new subscription to pending transactions let mut pending_transactions = node.pool.new_pending_pool_transactions_listener(); @@ -48,8 +45,8 @@ fn main() { let tx = event.transaction; println!("Transaction received: {tx:?}"); - if let Some(tx_recipient_address) = tx.to() { - if recipients.is_empty() || recipients.contains(&tx_recipient_address) { + if let Some(recipient) = tx.to() { + if args.is_match(&recipient) { // trace the transaction with `trace_call` let callrequest = transaction_to_call_request(tx.to_recovered_transaction()); @@ -76,3 +73,10 @@ struct RethCliTxpoolExt { #[arg(long, value_delimiter = ',')] pub recipients: Vec
, } + +impl RethCliTxpoolExt { + /// Check if the recipient is in the list of recipients to trace. + pub fn is_match(&self, recipient: &Address) -> bool { + self.recipients.is_empty() || self.recipients.contains(recipient) + } +} From b1026e0e23dd458e2ca507cbca824b3af25310e0 Mon Sep 17 00:00:00 2001 From: jn Date: Fri, 29 Mar 2024 11:16:32 -0700 Subject: [PATCH 009/700] Fix body stage insufficient backpressure (#7350) Co-authored-by: Matthias Seitz --- crates/net/downloaders/src/bodies/bodies.rs | 40 +++++++++++++-------- crates/stages/Cargo.toml | 4 ++- crates/stages/benches/criterion.rs | 10 ++++++ 3 files changed, 38 insertions(+), 16 deletions(-) diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 11095f8eca064..d45c9b191ca3e 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -97,7 +97,7 @@ where max_non_empty: u64, ) -> DownloadResult>> { if range.is_empty() || max_non_empty == 0 { - return Ok(None) + return Ok(None); } // Collect headers while @@ -146,7 +146,7 @@ where // if we're only connected to a few peers, we keep it low if num_peers < *self.concurrent_requests_range.start() { - return max_requests + return max_requests; } max_requests.min(*self.concurrent_requests_range.end()) @@ -240,7 +240,7 @@ where .skip_while(|b| b.block_number() < expected) .take_while(|b| self.download_range.contains(&b.block_number())) .collect() - }) + }); } // Drop buffered response since we passed that range @@ -259,10 +259,23 @@ where self.queued_bodies.shrink_to_fit(); self.metrics.total_flushed.increment(next_batch.len() as u64); self.metrics.queued_blocks.set(self.queued_bodies.len() as f64); - return Some(next_batch) + return Some(next_batch); } None } + + /// Check if a new request can be submitted, it implements back pressure to prevent overwhelming + /// the system and causing memory overload. 
+ /// + /// Returns true if a new request can be submitted + fn can_submit_new_request(&self) -> bool { + // requests are issued in order but not necessarily finished in order, so the queued bodies + // can grow large if a certain request is slow, so we limit the followup requests if the + // queued bodies grew too large + self.queued_bodies.len() < 4 * self.stream_batch_size && + self.has_buffer_capacity() && + self.in_progress_queue.len() < self.concurrent_request_limit() + } } impl BodiesDownloader @@ -343,13 +356,13 @@ where fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); if this.is_terminated() { - return Poll::Ready(None) + return Poll::Ready(None); } // Submit new requests and poll any in progress loop { // Yield next batch if ready if let Some(next_batch) = this.try_split_next_batch() { - return Poll::Ready(Some(Ok(next_batch))) + return Poll::Ready(Some(Ok(next_batch))); } // Poll requests @@ -362,7 +375,7 @@ where Err(error) => { tracing::debug!(target: "downloaders::bodies", %error, "Request failed"); this.clear(); - return Poll::Ready(Some(Err(error))) + return Poll::Ready(Some(Err(error))); } }; } @@ -370,10 +383,7 @@ where // Loop exit condition let mut new_request_submitted = false; // Submit new requests - let concurrent_requests_limit = this.concurrent_request_limit(); - 'inner: while this.in_progress_queue.len() < concurrent_requests_limit && - this.has_buffer_capacity() - { + 'inner: while this.can_submit_new_request() { match this.next_headers_request() { Ok(Some(request)) => { this.metrics.in_flight_requests.increment(1.); @@ -388,7 +398,7 @@ where Err(error) => { tracing::error!(target: "downloaders::bodies", %error, "Failed to download from next request"); this.clear(); - return Poll::Ready(Some(Err(error))) + return Poll::Ready(Some(Err(error))); } }; } @@ -401,21 +411,21 @@ where this.buffered_responses.shrink_to_fit(); if !new_request_submitted { - break + break; } } // All requests are 
handled, stream is finished if this.in_progress_queue.is_empty() { if this.queued_bodies.is_empty() { - return Poll::Ready(None) + return Poll::Ready(None); } let batch_size = this.stream_batch_size.min(this.queued_bodies.len()); let next_batch = this.queued_bodies.drain(..batch_size).collect::>(); this.queued_bodies.shrink_to_fit(); this.metrics.total_flushed.increment(next_batch.len() as u64); this.metrics.queued_blocks.set(this.queued_bodies.len() as f64); - return Poll::Ready(Some(Ok(next_batch))) + return Poll::Ready(Some(Ok(next_batch))); } Poll::Pending diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 9d8e72fda8a9a..84ef28e0ff097 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -69,12 +69,14 @@ rand.workspace = true paste.workspace = true # Stage benchmarks -pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] } criterion = { workspace = true, features = ["async_futures"] } # io serde_json.workspace = true +[target.'cfg(not(target_os = "windows"))'.dev-dependencies] +pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] } + [features] test-utils = ["reth-interfaces/test-utils", "reth-db/test-utils", "reth-provider/test-utils"] diff --git a/crates/stages/benches/criterion.rs b/crates/stages/benches/criterion.rs index 59ad5916789c8..03cb52383aaed 100644 --- a/crates/stages/benches/criterion.rs +++ b/crates/stages/benches/criterion.rs @@ -3,6 +3,7 @@ use criterion::{ async_executor::FuturesExecutor, criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, }; +#[cfg(not(target_os = "windows"))] use pprof::criterion::{Output, PProfProfiler}; use reth_config::config::EtlConfig; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; @@ -18,11 +19,20 @@ use std::{ops::RangeInclusive, sync::Arc}; mod setup; use setup::StageRange; +#[cfg(not(target_os = "windows"))] criterion_group! 
{ name = benches; config = Criterion::default().with_profiler(PProfProfiler::new(1000, Output::Flamegraph(None))); targets = transaction_lookup, account_hashing, senders, merkle } + +#[cfg(target_os = "windows")] +criterion_group! { + name = benches; + config = Criterion::default(); + targets = transaction_lookup, account_hashing, senders, merkle +} + criterion_main!(benches); const DEFAULT_NUM_BLOCKS: u64 = 10_000; From c13d7da68d83404ec385ed106fe309db5472e086 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 29 Mar 2024 17:43:52 -0400 Subject: [PATCH 010/700] chore: move pk2id and id2pk to primitives (#7382) --- crates/net/discv4/src/lib.rs | 4 +-- crates/net/discv4/src/proto.rs | 6 ++-- crates/net/discv4/src/test_utils.rs | 6 ++-- crates/net/dns/src/lib.rs | 4 +-- crates/net/ecies/src/algorithm.rs | 4 +-- crates/net/ecies/src/stream.rs | 2 +- crates/net/ecies/src/util.rs | 35 +------------------ crates/net/eth-wire/src/ethstream.rs | 7 ++-- crates/net/eth-wire/src/hello.rs | 6 ++-- crates/net/eth-wire/src/muxdemux.rs | 39 +++++++++------------ crates/net/eth-wire/src/test_utils.rs | 3 +- crates/net/network/src/config.rs | 3 +- crates/net/network/src/session/active.rs | 5 ++- crates/primitives/src/lib.rs | 2 +- crates/primitives/src/peer.rs | 44 +++++++++++++++++++++--- crates/rpc/rpc-types/src/net.rs | 2 +- examples/manual-p2p/src/main.rs | 4 +-- 17 files changed, 84 insertions(+), 92 deletions(-) diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 8c5f0394ba02a..d144cdef86eca 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -201,14 +201,14 @@ impl Discv4 { /// # use std::io; /// use rand::thread_rng; /// use reth_discv4::{Discv4, Discv4Config}; - /// use reth_primitives::{NodeRecord, PeerId}; + /// use reth_primitives::{pk2id, NodeRecord, PeerId}; /// use secp256k1::SECP256K1; /// use std::{net::SocketAddr, str::FromStr}; /// # async fn t() -> io::Result<()> { 
/// // generate a (random) keypair /// let mut rng = thread_rng(); /// let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng); - /// let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]); + /// let id = pk2id(&pk); /// /// let socket = SocketAddr::from_str("0.0.0.0:0").unwrap(); /// let local_enr = diff --git a/crates/net/discv4/src/proto.rs b/crates/net/discv4/src/proto.rs index 864ad0068c3a4..8bbb84b62964d 100644 --- a/crates/net/discv4/src/proto.rs +++ b/crates/net/discv4/src/proto.rs @@ -7,7 +7,7 @@ use alloy_rlp::{ use enr::{Enr, EnrKey}; use reth_primitives::{ bytes::{Buf, BufMut, Bytes, BytesMut}, - keccak256, ForkId, NodeRecord, B256, + keccak256, pk2id, ForkId, NodeRecord, B256, }; use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, @@ -161,7 +161,7 @@ impl Message { let msg = secp256k1::Message::from_slice(keccak256(&packet[97..]).as_slice())?; let pk = SECP256K1.recover_ecdsa(&msg, &recoverable_sig)?; - let node_id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]); + let node_id = pk2id(&pk); let msg_type = packet[97]; let payload = &mut &packet[98..]; @@ -724,7 +724,7 @@ mod tests { for _ in 0..100 { let msg = rng_message(&mut rng); let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng); - let sender_id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]); + let sender_id = pk2id(&pk); let (buf, _) = msg.encode(&secret_key); diff --git a/crates/net/discv4/src/test_utils.rs b/crates/net/discv4/src/test_utils.rs index c9ec95f805ee4..ccd4f9a039608 100644 --- a/crates/net/discv4/src/test_utils.rs +++ b/crates/net/discv4/src/test_utils.rs @@ -6,7 +6,7 @@ use crate::{ IngressReceiver, PeerId, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS, }; use rand::{thread_rng, Rng, RngCore}; -use reth_primitives::{hex, ForkHash, ForkId, NodeRecord, B256}; +use reth_primitives::{hex, pk2id, ForkHash, ForkId, NodeRecord, B256}; use secp256k1::{SecretKey, SECP256K1}; use std::{ collections::{HashMap, HashSet}, @@ -49,7 +49,7 @@ impl MockDiscovery { 
let mut rng = thread_rng(); let socket = SocketAddr::from_str("0.0.0.0:0").unwrap(); let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng); - let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]); + let id = pk2id(&pk); let socket = Arc::new(UdpSocket::bind(socket).await?); let local_addr = socket.local_addr()?; let local_enr = NodeRecord { @@ -241,7 +241,7 @@ pub async fn create_discv4_with_config(config: Discv4Config) -> (Discv4, Discv4S let mut rng = thread_rng(); let socket = SocketAddr::from_str("0.0.0.0:0").unwrap(); let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng); - let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]); + let id = pk2id(&pk); let local_enr = NodeRecord { address: socket.ip(), tcp_port: socket.port(), udp_port: socket.port(), id }; Discv4::bind(socket, local_enr, secret_key, config).await.unwrap() diff --git a/crates/net/dns/src/lib.rs b/crates/net/dns/src/lib.rs index 07c39e231d5e4..d5ca9da32c656 100644 --- a/crates/net/dns/src/lib.rs +++ b/crates/net/dns/src/lib.rs @@ -22,7 +22,7 @@ use crate::{ pub use config::DnsDiscoveryConfig; use enr::Enr; use error::ParseDnsEntryError; -use reth_primitives::{ForkId, NodeRecord, PeerId}; +use reth_primitives::{pk2id, ForkId, NodeRecord}; use schnellru::{ByLength, LruMap}; use secp256k1::SecretKey; use std::{ @@ -398,7 +398,7 @@ fn convert_enr_node_record(enr: &Enr) -> Option address: enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from))?, tcp_port: enr.tcp4().or_else(|| enr.tcp6())?, udp_port: enr.udp4().or_else(|| enr.udp6())?, - id: PeerId::from_slice(&enr.public_key().serialize_uncompressed()[1..]), + id: pk2id(&enr.public_key()), } .into_ipv4_mapped(); diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index 8d685c93b16f7..5dce7fee69024 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -3,7 +3,7 @@ use crate::{ error::ECIESErrorImpl, mac::{HeaderBytes, MAC}, - 
util::{hmac_sha256, id2pk, pk2id, sha256}, + util::{hmac_sha256, sha256}, ECIESError, }; use aes::{cipher::StreamCipher, Aes128, Aes256}; @@ -15,7 +15,7 @@ use educe::Educe; use rand::{thread_rng, Rng}; use reth_primitives::{ bytes::{BufMut, Bytes, BytesMut}, - B128, B256, B512 as PeerId, + id2pk, pk2id, B128, B256, B512 as PeerId, }; use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, diff --git a/crates/net/ecies/src/stream.rs b/crates/net/ecies/src/stream.rs index 53f784b14e203..47518aa2575ce 100644 --- a/crates/net/ecies/src/stream.rs +++ b/crates/net/ecies/src/stream.rs @@ -175,7 +175,7 @@ where #[cfg(test)] mod tests { use super::*; - use crate::util::pk2id; + use reth_primitives::pk2id; use secp256k1::SECP256K1; use tokio::net::{TcpListener, TcpStream}; diff --git a/crates/net/ecies/src/util.rs b/crates/net/ecies/src/util.rs index 84968918838eb..1984a37665433 100644 --- a/crates/net/ecies/src/util.rs +++ b/crates/net/ecies/src/util.rs @@ -1,8 +1,7 @@ //! Utility functions for hashing and encoding. use hmac::{Hmac, Mac}; -use reth_primitives::{B256, B512 as PeerId}; -use secp256k1::PublicKey; +use reth_primitives::B256; use sha2::{Digest, Sha256}; /// Hashes the input data with SHA256. @@ -21,35 +20,3 @@ pub(crate) fn hmac_sha256(key: &[u8], input: &[&[u8]], auth_data: &[u8]) -> B256 hmac.update(auth_data); B256::from_slice(&hmac.finalize().into_bytes()) } - -/// Converts a [secp256k1::PublicKey] to a [PeerId] by stripping the -/// SECP256K1_TAG_PUBKEY_UNCOMPRESSED tag and storing the rest of the slice in the [PeerId]. -pub fn pk2id(pk: &PublicKey) -> PeerId { - PeerId::from_slice(&pk.serialize_uncompressed()[1..]) -} - -/// Converts a [PeerId] to a [secp256k1::PublicKey] by prepending the [PeerId] bytes with the -/// SECP256K1_TAG_PUBKEY_UNCOMPRESSED tag. -pub(crate) fn id2pk(id: PeerId) -> Result { - // NOTE: B512 is used as a PeerId not because it represents a hash, but because 512 bits is - // enough to represent an uncompressed public key. 
- let mut s = [0u8; 65]; - // SECP256K1_TAG_PUBKEY_UNCOMPRESSED = 0x04 - // see: https://github.com/bitcoin-core/secp256k1/blob/master/include/secp256k1.h#L211 - s[0] = 4; - s[1..].copy_from_slice(id.as_slice()); - PublicKey::from_slice(&s) -} - -#[cfg(test)] -mod tests { - use super::*; - use secp256k1::{SecretKey, SECP256K1}; - - #[test] - fn pk2id2pk() { - let prikey = SecretKey::new(&mut secp256k1::rand::thread_rng()); - let pubkey = PublicKey::from_secret_key(SECP256K1, &prikey); - assert_eq!(pubkey, id2pk(pk2id(&pubkey)).unwrap()); - } -} diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index 95b2d1ef65162..a157ce52ebf6f 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -345,8 +345,6 @@ where #[cfg(test)] mod tests { - use std::time::Duration; - use super::UnauthedEthStream; use crate::{ errors::{EthHandshakeError, EthStreamError}, @@ -357,9 +355,10 @@ mod tests { use alloy_chains::NamedChain; use futures::{SinkExt, StreamExt}; use reth_discv4::DEFAULT_DISCOVERY_PORT; - use reth_ecies::{stream::ECIESStream, util::pk2id}; - use reth_primitives::{ForkFilter, Head, B256, U256}; + use reth_ecies::stream::ECIESStream; + use reth_primitives::{pk2id, ForkFilter, Head, B256, U256}; use secp256k1::{SecretKey, SECP256K1}; + use std::time::Duration; use tokio::net::{TcpListener, TcpStream}; use tokio_util::codec::Decoder; diff --git a/crates/net/eth-wire/src/hello.rs b/crates/net/eth-wire/src/hello.rs index 8621e8bf6bcbd..6ca8d9d99d806 100644 --- a/crates/net/eth-wire/src/hello.rs +++ b/crates/net/eth-wire/src/hello.rs @@ -37,8 +37,8 @@ impl HelloMessageWithProtocols { /// Starts a new `HelloMessageProtocolsBuilder` /// /// ``` - /// use reth_ecies::util::pk2id; /// use reth_eth_wire::HelloMessageWithProtocols; + /// use reth_primitives::pk2id; /// use secp256k1::{SecretKey, SECP256K1}; /// let secret_key = SecretKey::new(&mut rand::thread_rng()); /// let id = 
pk2id(&secret_key.public_key(SECP256K1)); @@ -119,8 +119,8 @@ impl HelloMessage { /// Starts a new `HelloMessageBuilder` /// /// ``` - /// use reth_ecies::util::pk2id; /// use reth_eth_wire::HelloMessage; + /// use reth_primitives::pk2id; /// use secp256k1::{SecretKey, SECP256K1}; /// let secret_key = SecretKey::new(&mut rand::thread_rng()); /// let id = pk2id(&secret_key.public_key(SECP256K1)); @@ -209,7 +209,7 @@ impl HelloMessageBuilder { mod tests { use alloy_rlp::{Decodable, Encodable, EMPTY_STRING_CODE}; use reth_discv4::DEFAULT_DISCOVERY_PORT; - use reth_ecies::util::pk2id; + use reth_primitives::pk2id; use secp256k1::{SecretKey, SECP256K1}; use crate::{ diff --git a/crates/net/eth-wire/src/muxdemux.rs b/crates/net/eth-wire/src/muxdemux.rs index 4cf8c82cdf7c2..3aa7bc1dd6f9f 100644 --- a/crates/net/eth-wire/src/muxdemux.rs +++ b/crates/net/eth-wire/src/muxdemux.rs @@ -21,24 +21,22 @@ //! [`StreamClone`] buffers egress bytes for [`MuxDemuxer`] that are read and sent to the network //! when [`MuxDemuxStream`] is polled. 
+use crate::{ + capability::{Capability, SharedCapabilities, SharedCapability}, + errors::MuxDemuxError, + CanDisconnect, DisconnectP2P, DisconnectReason, +}; +use derive_more::{Deref, DerefMut}; +use futures::{Sink, SinkExt, StreamExt}; +use reth_primitives::bytes::{Bytes, BytesMut}; use std::{ collections::HashMap, pin::Pin, task::{ready, Context, Poll}, }; - -use derive_more::{Deref, DerefMut}; -use futures::{Sink, SinkExt, StreamExt}; -use reth_primitives::bytes::{Bytes, BytesMut}; use tokio::sync::mpsc; use tokio_stream::Stream; -use crate::{ - capability::{Capability, SharedCapabilities, SharedCapability}, - errors::MuxDemuxError, - CanDisconnect, DisconnectP2P, DisconnectReason, -}; - use MuxDemuxError::*; /// Stream MUX/DEMUX acts like a regular stream and sink for the owning stream, and handles bytes @@ -351,29 +349,26 @@ impl CanDisconnect for StreamClone { #[cfg(test)] mod tests { - use std::{net::SocketAddr, pin::Pin}; - + use crate::{ + capability::{Capability, SharedCapabilities}, + muxdemux::MuxDemuxStream, + protocol::Protocol, + EthVersion, HelloMessageWithProtocols, Status, StatusBuilder, StreamClone, + UnauthedEthStream, UnauthedP2PStream, + }; use futures::{Future, SinkExt, StreamExt}; - use reth_ecies::util::pk2id; use reth_primitives::{ bytes::{BufMut, Bytes, BytesMut}, - ForkFilter, Hardfork, MAINNET, + pk2id, ForkFilter, Hardfork, MAINNET, }; use secp256k1::{SecretKey, SECP256K1}; + use std::{net::SocketAddr, pin::Pin}; use tokio::{ net::{TcpListener, TcpStream}, task::JoinHandle, }; use tokio_util::codec::{Decoder, Framed, LengthDelimitedCodec}; - use crate::{ - capability::{Capability, SharedCapabilities}, - muxdemux::MuxDemuxStream, - protocol::Protocol, - EthVersion, HelloMessageWithProtocols, Status, StatusBuilder, StreamClone, - UnauthedEthStream, UnauthedP2PStream, - }; - const ETH_68_CAP: Capability = Capability::eth(EthVersion::Eth68); const ETH_68_PROTOCOL: Protocol = Protocol::new(ETH_68_CAP, 13); const CUSTOM_CAP: Capability = 
Capability::new_static("snap", 1); diff --git a/crates/net/eth-wire/src/test_utils.rs b/crates/net/eth-wire/src/test_utils.rs index a085cfcc08f3f..27ed78784e480 100644 --- a/crates/net/eth-wire/src/test_utils.rs +++ b/crates/net/eth-wire/src/test_utils.rs @@ -5,8 +5,7 @@ use crate::{ }; use alloy_chains::Chain; use reth_discv4::DEFAULT_DISCOVERY_PORT; -use reth_ecies::util::pk2id; -use reth_primitives::{ForkFilter, Head, B256, U256}; +use reth_primitives::{pk2id, ForkFilter, Head, B256, U256}; use secp256k1::{SecretKey, SECP256K1}; use std::net::SocketAddr; use tokio::net::TcpStream; diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 760b5182f1f39..39f12b2981f24 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -10,10 +10,9 @@ use crate::{ }; use reth_discv4::{Discv4Config, Discv4ConfigBuilder, DEFAULT_DISCOVERY_ADDRESS}; use reth_dns_discovery::DnsDiscoveryConfig; -use reth_ecies::util::pk2id; use reth_eth_wire::{HelloMessage, HelloMessageWithProtocols, Status}; use reth_primitives::{ - mainnet_nodes, sepolia_nodes, ChainSpec, ForkFilter, Head, NodeRecord, PeerId, MAINNET, + mainnet_nodes, pk2id, sepolia_nodes, ChainSpec, ForkFilter, Head, NodeRecord, PeerId, MAINNET, }; use reth_provider::{BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 1c8c0b735cdbb..33c0a66e3f29b 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -20,7 +20,6 @@ use reth_eth_wire::{ }; use reth_interfaces::p2p::error::RequestError; use reth_metrics::common::mpsc::MeteredPollSender; - use reth_primitives::PeerId; use std::{ collections::VecDeque, @@ -764,13 +763,13 @@ mod tests { config::PROTOCOL_BREACH_REQUEST_TIMEOUT, handle::PendingSessionEvent, start_pending_incoming_session, }; - use reth_ecies::{stream::ECIESStream, util::pk2id}; + 
use reth_ecies::stream::ECIESStream; use reth_eth_wire::{ EthStream, GetBlockBodies, HelloMessageWithProtocols, P2PStream, Status, StatusBuilder, UnauthedEthStream, UnauthedP2PStream, }; use reth_net_common::bandwidth_meter::{BandwidthMeter, MeteredStream}; - use reth_primitives::{ForkFilter, Hardfork, MAINNET}; + use reth_primitives::{pk2id, ForkFilter, Hardfork, MAINNET}; use secp256k1::{SecretKey, SECP256K1}; use tokio::{ net::{TcpListener, TcpStream}, diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index c19bd444ee16e..7cb3d054c83ed 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -74,7 +74,7 @@ pub use net::{ goerli_nodes, holesky_nodes, mainnet_nodes, parse_nodes, sepolia_nodes, NodeRecord, GOERLI_BOOTNODES, HOLESKY_BOOTNODES, MAINNET_BOOTNODES, SEPOLIA_BOOTNODES, }; -pub use peer::{AnyNode, PeerId, WithPeerId}; +pub use peer::{id2pk, pk2id, AnyNode, PeerId, WithPeerId}; pub use prune::{ PruneCheckpoint, PruneMode, PruneModes, PruneProgress, PrunePurpose, PruneSegment, PruneSegmentError, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE, diff --git a/crates/primitives/src/peer.rs b/crates/primitives/src/peer.rs index 73b445d6b659d..0f6a814c3abb2 100644 --- a/crates/primitives/src/peer.rs +++ b/crates/primitives/src/peer.rs @@ -1,10 +1,38 @@ use enr::Enr; use reth_rpc_types::NodeRecord; -use secp256k1::SecretKey; +use secp256k1::{constants::UNCOMPRESSED_PUBLIC_KEY_SIZE, PublicKey, SecretKey}; use std::{net::IpAddr, str::FromStr}; + // Re-export PeerId for ease of use. pub use reth_rpc_types::PeerId; +/// This tag should be set to indicate to libsecp256k1 that the following bytes denote an +/// uncompressed pubkey. +/// +/// `SECP256K1_TAG_PUBKEY_UNCOMPRESSED` = `0x04` +/// +/// See: +const SECP256K1_TAG_PUBKEY_UNCOMPRESSED: u8 = 4; + +/// Converts a [secp256k1::PublicKey] to a [PeerId] by stripping the +/// `SECP256K1_TAG_PUBKEY_UNCOMPRESSED` tag and storing the rest of the slice in the [PeerId]. 
+#[inline] +pub fn pk2id(pk: &PublicKey) -> PeerId { + PeerId::from_slice(&pk.serialize_uncompressed()[1..]) +} + +/// Converts a [PeerId] to a [secp256k1::PublicKey] by prepending the [PeerId] bytes with the +/// SECP256K1_TAG_PUBKEY_UNCOMPRESSED tag. +#[inline] +pub fn id2pk(id: PeerId) -> Result { + // NOTE: B512 is used as a PeerId because 512 bits is enough to represent an uncompressed + // public key. + let mut s = [0u8; UNCOMPRESSED_PUBLIC_KEY_SIZE]; + s[0] = SECP256K1_TAG_PUBKEY_UNCOMPRESSED; + s[1..].copy_from_slice(id.as_slice()); + PublicKey::from_slice(&s) +} + /// A peer that can come in ENR or [NodeRecord] form. #[derive( Debug, Clone, Eq, PartialEq, Hash, serde_with::SerializeDisplay, serde_with::DeserializeFromStr, @@ -23,9 +51,7 @@ impl AnyNode { pub fn peer_id(&self) -> PeerId { match self { AnyNode::NodeRecord(record) => record.id, - AnyNode::Enr(enr) => { - PeerId::from_slice(&enr.public_key().serialize_uncompressed()[1..]) - } + AnyNode::Enr(enr) => pk2id(&enr.public_key()), AnyNode::PeerId(peer_id) => *peer_id, } } @@ -39,7 +65,7 @@ impl AnyNode { address: enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from))?, tcp_port: enr.tcp4().or_else(|| enr.tcp6())?, udp_port: enr.udp4().or_else(|| enr.udp6())?, - id: PeerId::from_slice(&enr.public_key().serialize_uncompressed()[1..]), + id: pk2id(&enr.public_key()), } .into_ipv4_mapped(); Some(node_record) @@ -151,6 +177,7 @@ impl WithPeerId> { #[cfg(test)] mod tests { use super::*; + use secp256k1::SECP256K1; #[test] fn test_node_record_parse() { @@ -190,4 +217,11 @@ mod tests { ); assert_eq!(node.to_string(), url); } + + #[test] + fn pk2id2pk() { + let prikey = SecretKey::new(&mut secp256k1::rand::thread_rng()); + let pubkey = PublicKey::from_secret_key(SECP256K1, &prikey); + assert_eq!(pubkey, id2pk(pk2id(&pubkey)).unwrap()); + } } diff --git a/crates/rpc/rpc-types/src/net.rs b/crates/rpc/rpc-types/src/net.rs index 885920162075c..c5d2f72e392c4 100644 --- 
a/crates/rpc/rpc-types/src/net.rs +++ b/crates/rpc/rpc-types/src/net.rs @@ -304,7 +304,7 @@ mod tests { let cases = vec![ // IPv4 ( - "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"", + "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"", NodeRecord{ address: IpAddr::V4([10, 3, 58, 6].into()), tcp_port: 30303u16, diff --git a/examples/manual-p2p/src/main.rs b/examples/manual-p2p/src/main.rs index 3ff90bf75dcab..737daf728653a 100644 --- a/examples/manual-p2p/src/main.rs +++ b/examples/manual-p2p/src/main.rs @@ -11,13 +11,13 @@ use std::time::Duration; use futures::StreamExt; use once_cell::sync::Lazy; use reth_discv4::{DiscoveryUpdate, Discv4, Discv4ConfigBuilder, DEFAULT_DISCOVERY_ADDRESS}; -use reth_ecies::{stream::ECIESStream, util::pk2id}; +use reth_ecies::stream::ECIESStream; use reth_eth_wire::{ EthMessage, EthStream, HelloMessage, P2PStream, Status, UnauthedEthStream, UnauthedP2PStream, }; use reth_network::config::rng_secret_key; use reth_primitives::{ - mainnet_nodes, Chain, Hardfork, Head, NodeRecord, MAINNET, MAINNET_GENESIS_HASH, + mainnet_nodes, pk2id, Chain, Hardfork, Head, NodeRecord, MAINNET, MAINNET_GENESIS_HASH, }; use secp256k1::{SecretKey, SECP256K1}; use tokio::net::TcpStream; From 87c4ae744ae8430ef64e8055d9531a7cdcc2f164 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 30 Mar 2024 03:47:38 +0100 Subject: [PATCH 011/700] Clean up log messages (#7390) --- .../net/network/src/transactions/fetcher.rs | 76 +++++++++++-------- crates/net/network/src/transactions/mod.rs | 24 +++--- 2 files changed, 58 insertions(+), 42 deletions(-) diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 45260ba7b0750..4e708b02167e2 100644 --- 
a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -393,8 +393,8 @@ impl TransactionFetcher { } else { if *retries >= DEFAULT_MAX_RETRIES { trace!(target: "net::tx", - hash=%hash, - retries=retries, + %hash, + retries, "retry limit for `GetPooledTransactions` requests reached for hash, dropping hash" ); @@ -481,7 +481,7 @@ impl TransactionFetcher { trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), hashes=?*hashes_to_request, - conn_eth_version=%conn_eth_version, + %conn_eth_version, "requesting hashes that were stored pending fetch from peer" ); @@ -491,8 +491,8 @@ impl TransactionFetcher { { debug!(target: "net::tx", peer_id=format!("{peer_id:#}"), - failed_to_request_hashes=?failed_to_request_hashes, - conn_eth_version=%conn_eth_version, + ?failed_to_request_hashes, + %conn_eth_version, "failed sending request to peer's session, buffering hashes" ); @@ -530,10 +530,10 @@ impl TransactionFetcher { if size != prev_size { trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), - hash=%hash, - size=size, - previously_seen_size=previously_seen_size, - client_version=%client_version, + %hash, + size, + previously_seen_size, + %client_version, "peer announced a different size for tx, this is especially worrying if one size is much bigger..." 
); } @@ -585,9 +585,9 @@ impl TransactionFetcher { debug!(target: "net::tx", peer_id=format!("{peer_id:#}"), - hash=%hash, - msg_version=?msg_version, - client_version=%client_version, + %hash, + ?msg_version, + %client_version, "failed to cache new announced hash from peer in schnellru::LruMap, dropping hash" ); @@ -608,10 +608,10 @@ impl TransactionFetcher { #[cfg(debug_assertions)] trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), - msg_version=?msg_version, - client_version=%client_version, - previously_unseen_hashes_len=?previously_unseen_hashes.len(), - previously_unseen_hashes=?previously_unseen_hashes, + ?msg_version, + %client_version, + previously_unseen_hashes_len=previously_unseen_hashes.len(), + ?previously_unseen_hashes, "received previously unseen hashes in announcement from peer" ); } @@ -634,8 +634,8 @@ impl TransactionFetcher { if self.active_peers.len() >= self.info.max_inflight_requests { trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), - new_announced_hashes=?*new_announced_hashes, - conn_eth_version=%conn_eth_version, + hashes=?*new_announced_hashes, + %conn_eth_version, max_inflight_transaction_requests=self.info.max_inflight_requests, "limit for concurrent `GetPooledTransactions` requests reached, dropping request for hashes to peer" ); @@ -645,7 +645,7 @@ impl TransactionFetcher { let Some(inflight_count) = self.active_peers.get_or_insert(peer_id, || 0) else { debug!(target: "net::tx", peer_id=format!("{peer_id:#}"), - new_announced_hashes=?*new_announced_hashes, + hashes=?*new_announced_hashes, conn_eth_version=%conn_eth_version, "failed to cache active peer in schnellru::LruMap, dropping request to peer" ); @@ -655,9 +655,9 @@ impl TransactionFetcher { if *inflight_count >= DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER { trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), - new_announced_hashes=?*new_announced_hashes, - conn_eth_version=%conn_eth_version, - 
MAX_CONCURRENT_TX_REQUESTS_PER_PEER=DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER, + hashes=?*new_announced_hashes, + %conn_eth_version, + max_concurrent_tx_reqs_per_peer=DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER, "limit for concurrent `GetPooledTransactions` requests per peer reached" ); return Some(new_announced_hashes) @@ -837,7 +837,7 @@ impl TransactionFetcher { inflight_requests=self.inflight_requests.len(), max_inflight_transaction_requests=info.max_inflight_requests, hashes_pending_fetch=self.hashes_pending_fetch.len(), - limit=limit, + limit, "search breadth limited in search for idle fallback peer for some hash pending fetch" ); @@ -948,7 +948,7 @@ impl TransactionFetcher { // todo: report peer for sending hashes that weren't requested trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), - unverified_len=unverified_len, + unverified_len, verified_payload_len=verified_payload.len(), "received `PooledTransactions` response from peer with entries that didn't verify against request, filtered out transactions" ); @@ -974,7 +974,7 @@ impl TransactionFetcher { if validation_outcome == FilterOutcome::ReportPeer { trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), - unvalidated_payload_len=unvalidated_payload_len, + unvalidated_payload_len, valid_payload_len=valid_payload.len(), "received invalid `PooledTransactions` response from peer, filtered out duplicate entries" ); @@ -1237,6 +1237,8 @@ impl VerifyPooledTransactionsResponse for UnverifiedPooledTransactions { #[cfg(debug_assertions)] let mut tx_hashes_not_requested: SmallVec<[TxHash; 16]> = smallvec!(); + #[cfg(not(debug_assertions))] + let mut tx_hashes_not_requested_count = 0; txns.0.retain(|tx| { if !requested_hashes.contains(tx.hash()) { @@ -1244,6 +1246,10 @@ impl VerifyPooledTransactionsResponse for UnverifiedPooledTransactions { #[cfg(debug_assertions)] tx_hashes_not_requested.push(*tx.hash()); + #[cfg(not(debug_assertions))] + { + tx_hashes_not_requested_count += 1; + } return false 
} @@ -1251,11 +1257,21 @@ impl VerifyPooledTransactionsResponse for UnverifiedPooledTransactions { }); #[cfg(debug_assertions)] - trace!(target: "net::tx", - peer_id=format!("{_peer_id:#}"), - tx_hashes_not_requested=?tx_hashes_not_requested, - "transactions in `PooledTransactions` response from peer were not requested" - ); + if !tx_hashes_not_requested.is_empty() { + trace!(target: "net::tx", + peer_id=format!("{_peer_id:#}"), + ?tx_hashes_not_requested, + "transactions in `PooledTransactions` response from peer were not requested" + ); + } + #[cfg(not(debug_assertions))] + if tx_hashes_not_requested_count != 0 { + trace!(target: "net::tx", + peer_id=format!("{_peer_id:#}"), + tx_hashes_not_requested_count, + "transactions in `PooledTransactions` response from peer were not requested" + ); + } (verification_outcome, VerifiedPooledTransactions::new(txns)) } diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index c81763e60e31c..32d855ca1099d 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -609,8 +609,8 @@ where // get handle to peer's session, if the session is still active let Some(peer) = self.peers.get_mut(&peer_id) else { trace!( - peer_id=format!("{peer_id:#}"), - msg=?msg, + peer_id = format!("{peer_id:#}"), + ?msg, "discarding announcement from inactive peer" ); @@ -636,9 +636,9 @@ where .increment(count_txns_already_seen_by_peer); trace!(target: "net::tx", - count_txns_already_seen_by_peer=%count_txns_already_seen_by_peer, + %count_txns_already_seen_by_peer, peer_id=format!("{peer_id:#}"), - client=?client, + ?client, "Peer sent hashes that have already been marked as seen by peer" ); @@ -746,8 +746,8 @@ where trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), hashes=?*hashes, - msg_version=%msg_version, - client_version=%client, + %msg_version, + %client, "buffering hashes announced by busy peer" ); @@ -773,8 +773,8 @@ where trace!(target: 
"net::tx", peer_id=format!("{peer_id:#}"), surplus_hashes=?*surplus_hashes, - msg_version=%msg_version, - client_version=%client, + %msg_version, + %client, "some hashes in announcement from peer didn't fit in `GetPooledTransactions` request, buffering surplus hashes" ); @@ -784,8 +784,8 @@ where trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), hashes=?*hashes_to_request, - msg_version=%msg_version, - client_version=%client, + %msg_version, + %client, "sending hashes in `GetPooledTransactions` request to peer's session" ); @@ -801,8 +801,8 @@ where debug!(target: "net::tx", peer_id=format!("{peer_id:#}"), failed_to_request_hashes=?*failed_to_request_hashes, - conn_eth_version=%conn_eth_version, - client_version=%client, + %conn_eth_version, + %client, "sending `GetPooledTransactions` request to peer's session failed, buffering hashes" ); self.transaction_fetcher.buffer_hashes(failed_to_request_hashes, Some(peer_id)); From 16456ccc86bbfa4563fbb0d7d396a3733c0eb10b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 30 Mar 2024 15:03:36 +0100 Subject: [PATCH 012/700] chore: add spawn helpers (#7396) --- crates/rpc/rpc/src/eth/api/mod.rs | 2 ++ crates/rpc/rpc/src/eth/api/transactions.rs | 35 ++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index 2828cef0047dd..6f643b32874c6 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -166,6 +166,8 @@ where /// /// This accepts a closure that creates a new future using a clone of this type and spawns the /// future onto a new task that is allowed to block. + /// + /// Note: This is expected for futures that are dominated by blocking IO operations. 
pub(crate) async fn on_blocking_task(&self, c: C) -> EthResult where C: FnOnce(Self) -> F, diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 14681558c0588..12580a321bf94 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -47,6 +47,7 @@ use revm::{ }, Inspector, }; +use std::future::Future; #[cfg(feature = "optimism")] use crate::eth::api::optimism::OptimismTxMeta; @@ -88,6 +89,24 @@ pub trait EthTransactions: Send + Sync { /// Returns default gas limit to use for `eth_call` and tracing RPC methods. fn call_gas_limit(&self) -> u64; + /// Executes the future on a new blocking task. + /// + /// Note: This is expected for futures that are dominated by blocking IO operations, for tracing + /// or CPU bound operations in general use [Self::spawn_blocking]. + async fn spawn_blocking_future(&self, c: F) -> EthResult + where + F: Future> + Send + 'static, + R: Send + 'static; + + /// Executes a blocking on the tracing pol. + /// + /// Note: This is expected for futures that are predominantly CPU bound, for blocking IO futures + /// use [Self::spawn_blocking_future]. 
+ async fn spawn_blocking(&self, c: F) -> EthResult + where + F: FnOnce() -> EthResult + Send + 'static, + R: Send + 'static; + /// Returns the state at the given [BlockId] fn state_at(&self, at: BlockId) -> EthResult; @@ -464,6 +483,22 @@ where self.inner.gas_cap } + async fn spawn_blocking_future(&self, c: F) -> EthResult + where + F: Future> + Send + 'static, + R: Send + 'static, + { + self.on_blocking_task(|_| c).await + } + + async fn spawn_blocking(&self, c: F) -> EthResult + where + F: FnOnce() -> EthResult + Send + 'static, + R: Send + 'static, + { + self.spawn_tracing_task_with(move |_| c()).await + } + fn state_at(&self, at: BlockId) -> EthResult { self.state_at_block_id(at) } From a81e7e41de28f1254700c5a68829e6c332e9d0fc Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 30 Mar 2024 15:03:46 +0100 Subject: [PATCH 013/700] chore: add EthApiError::Other (#7398) --- crates/rpc/rpc/src/eth/api/transactions.rs | 10 ++--- crates/rpc/rpc/src/eth/error.rs | 46 +++++++--------------- crates/rpc/rpc/src/eth/mod.rs | 3 ++ crates/rpc/rpc/src/eth/optimism.rs | 46 ++++++++++++++++++++++ 4 files changed, 69 insertions(+), 36 deletions(-) create mode 100644 crates/rpc/rpc/src/eth/optimism.rs diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 12580a321bf94..f1016e475e7d9 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -52,7 +52,7 @@ use std::future::Future; #[cfg(feature = "optimism")] use crate::eth::api::optimism::OptimismTxMeta; #[cfg(feature = "optimism")] -use crate::eth::error::OptimismEthApiError; +use crate::eth::optimism::OptimismEthApiError; #[cfg(feature = "optimism")] use reth_revm::optimism::RethL1BlockInfo; #[cfg(feature = "optimism")] @@ -1321,10 +1321,10 @@ where &envelope_buf, tx.is_deposit(), ) - .map_err(|_| EthApiError::Optimism(OptimismEthApiError::L1BlockFeeError))?; + .map_err(|_| OptimismEthApiError::L1BlockFeeError)?; let 
inner_l1_data_gas = l1_block_info .l1_data_gas(&self.inner.provider.chain_spec(), block_timestamp, &envelope_buf) - .map_err(|_| EthApiError::Optimism(OptimismEthApiError::L1BlockGasError))?; + .map_err(|_| OptimismEthApiError::L1BlockGasError)?; (Some(inner_l1_fee), Some(inner_l1_data_gas)) } else { (None, None) @@ -1351,7 +1351,7 @@ where target = "rpc::eth", "Failed to serialize transaction for forwarding to sequencer" ); - EthApiError::Optimism(OptimismEthApiError::InvalidSequencerTransaction) + OptimismEthApiError::InvalidSequencerTransaction })?; self.inner @@ -1361,7 +1361,7 @@ where .body(body) .send() .await - .map_err(|err| EthApiError::Optimism(OptimismEthApiError::HttpError(err)))?; + .map_err(OptimismEthApiError::HttpError)?; } Ok(()) } diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index a4e8d4d6ea08c..3e54c20930327 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -20,6 +20,12 @@ use std::time::Duration; /// Result alias pub type EthResult = Result; +/// A tait for custom rpc errors used by [EthApiError::Other]. +pub trait ToRpcError: std::error::Error + Send + Sync + 'static { + /// Converts the error to a JSON-RPC error object. + fn to_rpc_error(&self) -> ErrorObject<'static>; +} + /// Errors that can occur when interacting with the `eth_` namespace #[derive(Debug, thiserror::Error)] pub enum EthApiError { @@ -106,10 +112,6 @@ pub enum EthApiError { #[error(transparent)] /// Call Input error when both `data` and `input` fields are set and not equal. TransactionInputError(#[from] TransactionInputError), - /// Optimism related error - #[error(transparent)] - #[cfg(feature = "optimism")] - Optimism(#[from] OptimismEthApiError), /// Evm generic purpose error. 
#[error("Revm error: {0}")]
     EvmCustom(String),
@@ -119,27 +121,16 @@ pub enum EthApiError {
     /// Error thrown when tracing with a muxTracer fails
     #[error(transparent)]
     MuxTracerError(#[from] MuxError),
+    /// Any other error
+    #[error("{0}")]
+    Other(Box),
 }
 
-/// Eth Optimism Api Error
-#[cfg(feature = "optimism")]
-#[derive(Debug, thiserror::Error)]
-pub enum OptimismEthApiError {
-    /// Wrapper around a [hyper::Error].
-    #[error(transparent)]
-    HyperError(#[from] hyper::Error),
-    /// Wrapper around an [reqwest::Error].
-    #[error(transparent)]
-    HttpError(#[from] reqwest::Error),
-    /// Thrown when serializing transaction to forward to sequencer
-    #[error("invalid sequencer transaction")]
-    InvalidSequencerTransaction,
-    /// Thrown when calculating L1 gas fee
-    #[error("failed to calculate l1 gas fee")]
-    L1BlockFeeError,
-    /// Thrown when calculating L1 gas used
-    #[error("failed to calculate l1 gas used")]
-    L1BlockGasError,
+impl EthApiError {
+    /// Creates a new [EthApiError::Other] variant.
+ pub fn other(err: E) -> Self { + EthApiError::Other(Box::new(err)) + } } impl From for ErrorObject<'static> { @@ -178,14 +169,7 @@ impl From for ErrorObject<'static> { err @ EthApiError::InternalBlockingTaskError => internal_rpc_err(err.to_string()), err @ EthApiError::InternalEthError => internal_rpc_err(err.to_string()), err @ EthApiError::TransactionInputError(_) => invalid_params_rpc_err(err.to_string()), - #[cfg(feature = "optimism")] - EthApiError::Optimism(err) => match err { - OptimismEthApiError::HyperError(err) => internal_rpc_err(err.to_string()), - OptimismEthApiError::HttpError(err) => internal_rpc_err(err.to_string()), - OptimismEthApiError::InvalidSequencerTransaction | - OptimismEthApiError::L1BlockFeeError | - OptimismEthApiError::L1BlockGasError => internal_rpc_err(err.to_string()), - }, + EthApiError::Other(err) => err.to_rpc_error(), EthApiError::MuxTracerError(msg) => internal_rpc_err(msg.to_string()), } } diff --git a/crates/rpc/rpc/src/eth/mod.rs b/crates/rpc/rpc/src/eth/mod.rs index 97f57af68855a..3183050937528 100644 --- a/crates/rpc/rpc/src/eth/mod.rs +++ b/crates/rpc/rpc/src/eth/mod.rs @@ -13,6 +13,9 @@ pub mod revm_utils; mod signer; pub(crate) mod utils; +#[cfg(feature = "optimism")] +pub mod optimism; + pub use api::{ fee_history::{fee_history_cache_new_blocks_task, FeeHistoryCache, FeeHistoryCacheConfig}, EthApi, EthApiSpec, EthTransactions, TransactionSource, RPC_DEFAULT_GAS_CAP, diff --git a/crates/rpc/rpc/src/eth/optimism.rs b/crates/rpc/rpc/src/eth/optimism.rs new file mode 100644 index 0000000000000..2dedf2625cd9d --- /dev/null +++ b/crates/rpc/rpc/src/eth/optimism.rs @@ -0,0 +1,46 @@ +//! Optimism specific types. + +use crate::{ + eth::error::{EthApiError, ToRpcError}, + result::internal_rpc_err, +}; +use jsonrpsee::types::ErrorObject; + +/// Eth Optimism Api Error +#[cfg(feature = "optimism")] +#[derive(Debug, thiserror::Error)] +pub enum OptimismEthApiError { + /// Wrapper around a [hyper::Error]. 
+ #[error(transparent)] + HyperError(#[from] hyper::Error), + /// Wrapper around an [reqwest::Error]. + #[error(transparent)] + HttpError(#[from] reqwest::Error), + /// Thrown when serializing transaction to forward to sequencer + #[error("invalid sequencer transaction")] + InvalidSequencerTransaction, + /// Thrown when calculating L1 gas fee + #[error("failed to calculate l1 gas fee")] + L1BlockFeeError, + /// Thrown when calculating L1 gas used + #[error("failed to calculate l1 gas used")] + L1BlockGasError, +} + +impl ToRpcError for OptimismEthApiError { + fn to_rpc_error(&self) -> ErrorObject<'static> { + match self { + OptimismEthApiError::HyperError(err) => internal_rpc_err(err.to_string()), + OptimismEthApiError::HttpError(err) => internal_rpc_err(err.to_string()), + OptimismEthApiError::InvalidSequencerTransaction | + OptimismEthApiError::L1BlockFeeError | + OptimismEthApiError::L1BlockGasError => internal_rpc_err(self.to_string()), + } + } +} + +impl From for EthApiError { + fn from(err: OptimismEthApiError) -> Self { + EthApiError::other(err) + } +} From 9de7b4152e79ec635c4db956252d4e7046921e2f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 30 Mar 2024 15:03:56 +0100 Subject: [PATCH 014/700] chore: enable 4844 support by default in validator (#7399) --- crates/transaction-pool/src/validate/eth.rs | 33 +++++++++++++++------ 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 51de1fe958278..a07e6fc97cefb 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -445,10 +445,14 @@ pub struct EthTransactionValidatorBuilder { impl EthTransactionValidatorBuilder { /// Creates a new builder for the given [ChainSpec] + /// + /// By default this assumes the network is on the `Cancun` hardfork and the following + /// transactions are allowed: + /// - Legacy + /// - EIP-2718 + /// - EIP-1559 + /// - 
EIP-4844
     pub fn new(chain_spec: Arc) -> Self {
-        // If cancun is enabled at genesis, enable it
-        let cancun = chain_spec.is_cancun_active_at_timestamp(chain_spec.genesis_timestamp());
-
         Self {
             chain_spec,
             block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT,
@@ -466,8 +470,8 @@ impl EthTransactionValidatorBuilder {
             // shanghai is activated by default
             shanghai: true,
 
-            // TODO: can hard enable by default once mainnet transitioned
-            cancun,
+            // cancun is activated by default
+            cancun: true,
         }
     }
 
@@ -502,28 +506,39 @@ impl EthTransactionValidatorBuilder {
         self
     }
 
-    /// Disables the eip2718 support.
+    /// Disables the support for EIP-2718 transactions.
     pub const fn no_eip2718(self) -> Self {
         self.set_eip2718(false)
     }
 
-    /// Set eip2718 support.
+    /// Set the support for EIP-2718 transactions.
     pub const fn set_eip2718(mut self, eip2718: bool) -> Self {
         self.eip2718 = eip2718;
         self
     }
 
-    /// Disables the eip1559 support.
+    /// Disables the support for EIP-1559 transactions.
     pub const fn no_eip1559(self) -> Self {
         self.set_eip1559(false)
     }
 
-    /// Set the eip1559 support.
+    /// Set the support for EIP-1559 transactions.
     pub const fn set_eip1559(mut self, eip1559: bool) -> Self {
         self.eip1559 = eip1559;
         self
     }
 
+    /// Disables the support for EIP-4844 transactions.
+    pub const fn no_eip4844(self) -> Self {
+        self.set_eip4844(false)
+    }
+
+    /// Set the support for EIP-4844 transactions.
+    pub const fn set_eip4844(mut self, eip4844: bool) -> Self {
+        self.eip4844 = eip4844;
+        self
+    }
+
     /// Sets the [KzgSettings] to use for validating KZG proofs.
pub fn kzg_settings(mut self, kzg_settings: Arc) -> Self { self.kzg_settings = kzg_settings; From bfadc26b37c24128c14323c7f99078a0c2dd965a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 30 Mar 2024 22:17:54 +0100 Subject: [PATCH 015/700] Add metrics for observing tx fetch outcome (#7401) --- crates/net/network/src/metrics.rs | 7 ++++++- crates/net/network/src/transactions/fetcher.rs | 5 +++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/crates/net/network/src/metrics.rs b/crates/net/network/src/metrics.rs index b87eadb271ccb..a758ec59e8692 100644 --- a/crates/net/network/src/metrics.rs +++ b/crates/net/network/src/metrics.rs @@ -200,7 +200,12 @@ pub struct TransactionFetcherMetrics { pub(crate) egress_peer_channel_full: Counter, /// Total number of hashes pending fetch. pub(crate) hashes_pending_fetch: Gauge, - + /// Total number of fetched transactions. + pub(crate) fetched_transactions: Counter, + /// Total number of transactions that were received in + /// [`PooledTransactions`](reth_eth_wire::PooledTransactions) responses, that weren't + /// requested. + pub(crate) unsolicited_transactions: Counter, /* ================ SEARCH DURATION ================ */ /// Time spent searching for an idle peer in call to /// [`TransactionFetcher::find_any_idle_fallback_peer_for_any_pending_hash`](crate::transactions::TransactionFetcher::find_any_idle_fallback_peer_for_any_pending_hash). 
diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 4e708b02167e2..831a633453dff 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -944,6 +944,10 @@ impl TransactionFetcher { let (verification_outcome, verified_payload) = payload.verify(&requested_hashes, &peer_id); + let unsolicited = unverified_len - verified_payload.len(); + if unsolicited > 0 { + self.metrics.unsolicited_transactions.increment(unsolicited as u64); + } if verification_outcome == VerificationOutcome::ReportPeer { // todo: report peer for sending hashes that weren't requested trace!(target: "net::tx", @@ -997,6 +1001,7 @@ impl TransactionFetcher { true }); fetched.shrink_to_fit(); + self.metrics.fetched_transactions.increment(fetched.len() as u64); if fetched.len() < requested_hashes_len { trace!(target: "net::tx", From 4c82d271ee2fd038aa68f40d47180ffc9f00f771 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 31 Mar 2024 13:12:13 +0200 Subject: [PATCH 016/700] chore(deps): weekly `cargo update` (#7403) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 144 ++++++++++++++++++++++++++--------------------------- 1 file changed, 72 insertions(+), 72 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ae1d61fa9d547..cfdea68fe0b5b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -270,7 +270,7 @@ checksum = "1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -344,7 +344,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", "syn-solidity", "tiny-keccak", ] @@ -476,7 +476,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -693,7 +693,7 @@ checksum 
= "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -704,7 +704,7 @@ checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -747,7 +747,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -855,7 +855,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -1051,7 +1051,7 @@ checksum = "6be9c93793b60dac381af475b98634d4b451e28336e72218cad9a20176218dbc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", "synstructure", ] @@ -1150,7 +1150,7 @@ checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -1253,9 +1253,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.35" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eaf5903dcbc0a39312feb77df2ff4c76387d591b9fc7b04a238dcf8bb62639a" +checksum = "8a0d04d43504c61aa6c7531f1871dd0d418d91130162063b789da00fd7057a5e" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1354,7 +1354,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -1720,7 +1720,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -1879,7 +1879,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -1912,7 +1912,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" 
dependencies = [ "darling_core 0.20.8", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -1998,7 +1998,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -2149,7 +2149,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -2346,7 +2346,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -2359,7 +2359,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -2370,7 +2370,7 @@ checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -2755,7 +2755,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -3367,7 +3367,7 @@ checksum = "d2abdd3a62551e8337af119c5899e600ca0c88ec8f23a46c60ba216c803dcf1a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -3999,9 +3999,9 @@ dependencies = [ [[package]] name = "lz4_flex" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "912b45c753ff5f7f5208307e8ace7d2a2e30d024e26d3509f3dce546c044ce15" +checksum = "75761162ae2b0e580d7e7c390558127e5f01b4194debd6221fd8c207fc80e3f5" [[package]] name = "mach2" @@ -4051,9 +4051,9 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = 
"6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "memmap2" @@ -4119,7 +4119,7 @@ checksum = "38b4faf00617defe497754acde3024865bc143d44a86799b24e191ecff91354f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -4243,7 +4243,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -4448,7 +4448,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -4743,7 +4743,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -4772,14 +4772,14 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -4952,7 +4952,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d3928fb5db768cb86f891ff014f0144589297e3c6a1aba6ed7cecfdace270c7" dependencies = [ "proc-macro2", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -5633,7 +5633,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -5968,7 +5968,7 @@ dependencies = [ "quote", "regex", "serial_test", - "syn 2.0.55", + "syn 2.0.57", "trybuild", ] @@ -7274,9 +7274,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.2" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" 
dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -7287,9 +7287,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" dependencies = [ "core-foundation-sys", "libc", @@ -7360,7 +7360,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -7434,7 +7434,7 @@ dependencies = [ "darling 0.20.8", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -7459,7 +7459,7 @@ checksum = "b93fb4adc70021ac1b47f7d45e8cc4169baaa7ea58483bc5b721d19a26202212" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -7583,9 +7583,9 @@ dependencies = [ [[package]] name = "similar" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32fea41aca09ee824cc9724996433064c89f7777e60762749a4170a14abbfa21" +checksum = "fa42c91313f1d05da9b26f267f931cf178d4aba455b4c4622dd7355eb80c6640" dependencies = [ "bstr", "unicode-segmentation", @@ -7788,7 +7788,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -7801,7 +7801,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -7869,9 +7869,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.55" +version = "2.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "002a1b3dbf967edfafc32655d0f377ab0bb7b994aa1d32c8cc7e9b8bf3ebb8f0" +checksum = "11a6ae1e52eb25aab8f3fb9fca13be982a373b8f1157ca14b897a825ba4a2d35" dependencies = [ "proc-macro2", "quote", @@ -7887,7 +7887,7 @@ dependencies = [ "paste", 
"proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -7904,7 +7904,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -7996,7 +7996,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -8035,7 +8035,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -8168,9 +8168,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.36.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", @@ -8193,7 +8193,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -8418,7 +8418,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -8617,9 +8617,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.90" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aa6f84ec205ebf87fb7a0abdbcd1467fa5af0e86878eb6d888b78ecbb10b6d5" +checksum = "8ad7eb6319ebadebca3dacf1f85a93bc54b73dd81b9036795f73de7ddfe27d5a" dependencies = [ "glob", "once_cell", @@ -8888,7 +8888,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", "wasm-bindgen-shared", ] @@ -8922,7 +8922,7 @@ checksum = 
"e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9262,7 +9262,7 @@ checksum = "9e6936f0cce458098a201c245a11bef556c6a0181129c7034d10d76d1ec3a2b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", "synstructure", ] @@ -9283,7 +9283,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -9303,7 +9303,7 @@ checksum = "e6a647510471d372f2e6c2e6b7219e44d8c574d24fdc11c610a61455782f18c3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", "synstructure", ] @@ -9324,7 +9324,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] @@ -9346,32 +9346,32 @@ checksum = "7b4e5997cbf58990550ef1f0e5124a05e47e1ebd33a84af25739be6031a62c20" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.57", ] [[package]] name = "zstd" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bffb3309596d527cfcba7dfc6ed6052f1d39dfbd7c867aa2e865e4a449c10110" +checksum = "2d789b1514203a1120ad2429eae43a7bd32b90976a7bb8a05f7ec02fa88cc23a" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "7.0.0" +version = "7.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43747c7422e2924c11144d5229878b98180ef8b06cca4ab5af37afc8a8d8ea3e" +checksum = "1cd99b45c6bc03a018c8b8a86025678c87e55526064e38f9df301989dce7ec0a" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.9+zstd.1.5.5" +version = "2.0.10+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656"
+checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa"
 dependencies = [
  "cc",
  "pkg-config",

From 283d51c2f1fc932086de51e0ab0d68e17403edf2 Mon Sep 17 00:00:00 2001
From: Emilia Hane
Date: Sun, 31 Mar 2024 14:53:14 +0200
Subject: [PATCH 017/700] Add fix lint to make file (#7393)

---
 Makefile | 47 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+)

diff --git a/Makefile b/Makefile
index ac2a25bb5a2e5..c8adf4ff91333 100644
--- a/Makefile
+++ b/Makefile
@@ -309,6 +309,53 @@ lint:
 	make lint-op-reth && \
 	make lint-other-targets
 
+fix-lint-reth:
+	cargo +nightly clippy \
+	--workspace \
+	--bin "reth" \
+	--lib \
+	--examples \
+	--tests \
+	--benches \
+	--features "ethereum $(BIN_OTHER_FEATURES)" \
+	--fix \
+	--allow-staged \
+	--allow-dirty \
+	-- -D warnings
+
+fix-lint-op-reth:
+	cargo +nightly clippy \
+	--workspace \
+	--bin "op-reth" \
+	--lib \
+	--examples \
+	--tests \
+	--benches \
+	--features "optimism $(BIN_OTHER_FEATURES)" \
+	--fix \
+	--allow-staged \
+	--allow-dirty \
+	-- -D warnings
+
+fix-lint-other-targets:
+	cargo +nightly clippy \
+	--workspace \
+	--lib \
+	--examples \
+	--tests \
+	--benches \
+	--all-features \
+	--fix \
+	--allow-staged \
+	--allow-dirty \
+	-- -D warnings
+
+fix-lint:
+	make fix-lint-reth && \
+	make fix-lint-op-reth && \
+	make fix-lint-other-targets && \
+	make fmt
+
 .PHONY: rustdocs
 rustdocs: ## Runs `cargo docs` to generate the Rust documents in the `target/doc` directory
 	RUSTDOCFLAGS="\

From 0c363ea010c13fa0250439c7e017997eaf3cbf71 Mon Sep 17 00:00:00 2001
From: Emilia Hane
Date: Sun, 31 Mar 2024 14:53:51 +0200
Subject: [PATCH 018/700] fix(tx-mgr): report peers that send known bad
 transactions (#7400)

---
 crates/net/network/src/metrics.rs          |  4 +-
 crates/net/network/src/transactions/mod.rs | 62 ++++++++++++++--------
 2 files changed, 44 insertions(+), 22 deletions(-)

diff --git a/crates/net/network/src/metrics.rs
b/crates/net/network/src/metrics.rs index a758ec59e8692..d976560ab8eac 100644 --- a/crates/net/network/src/metrics.rs +++ b/crates/net/network/src/metrics.rs @@ -118,7 +118,9 @@ pub struct TransactionsManagerMetrics { /* ================ POOL IMPORTS ================ */ /// Number of transactions about to be imported into the pool. pub(crate) pending_pool_imports: Gauge, - /// Total number of bad imports. + /// Total number of bad imports, imports that fail because the transaction is badly formed + /// (i.e. have no chance of passing validation, unlike imports that fail due to e.g. nonce + /// gaps). pub(crate) bad_imports: Counter, /// Number of inflight requests at which the /// [`TransactionPool`](reth_transaction_pool::TransactionPool) is considered to be at diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 32d855ca1099d..04851f42bc5dc 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -31,8 +31,9 @@ use reth_primitives::{ B256, }; use reth_transaction_pool::{ - error::PoolResult, GetPooledTransactionLimit, PoolTransaction, PropagateKind, - PropagatedTransactions, TransactionPool, ValidPoolTransaction, + error::{PoolError, PoolResult}, + GetPooledTransactionLimit, PoolTransaction, PropagateKind, PropagatedTransactions, + TransactionPool, ValidPoolTransaction, }; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, @@ -1013,9 +1014,9 @@ where peer_id=format!("{peer_id:#}"), hash=%tx.hash(), client_version=%peer.client_version, - "received an invalid transaction from peer" + "received a known bad transaction from peer" ); - self.metrics.bad_imports.increment(1); + has_bad_transactions = true; } } } @@ -1079,18 +1080,7 @@ where self.on_good_import(hash); } Err(err) => { - // If we're _currently_ syncing and the transaction is bad we - // ignore it, otherwise we penalize the peer that sent the bad - // transaction with the assumption that the 
peer should have - // known that this transaction is bad. (e.g. consensus rules). - // Note: nonce gaps are not considered bad transactions. - // - if err.is_bad_transaction() && !self.network.is_syncing() { - debug!(target: "net::tx", %err, "bad pool transaction import"); - self.on_bad_import(err.hash); - continue - } - self.on_good_import(err.hash); + self.on_bad_import(err); } } } @@ -1157,15 +1147,45 @@ where self.transactions_by_peers.remove(&hash); } - /// Penalize the peers that sent the bad transaction and cache it to avoid fetching or - /// importing it again. - fn on_bad_import(&mut self, hash: TxHash) { - if let Some(peers) = self.transactions_by_peers.remove(&hash) { + /// Penalize the peers that intentionally sent the bad transaction, and cache it to avoid + /// fetching or importing it again. + /// + /// Errors that count as bad transactions are: + /// + /// - intrinsic gas too low + /// - exceeds gas limit + /// - gas uint overflow + /// - exceeds max init code size + /// - oversized data + /// - signer account has bytecode + /// - chain id mismatch + /// - old legacy chain id + /// - tx type not supported + /// + /// (and additionally for blobs txns...) + /// + /// - no blobs + /// - too many blobs + /// - invalid kzg proof + /// - kzg error + /// - not blob transaction (tx type mismatch) + /// - wrong versioned kzg commitment hash + fn on_bad_import(&mut self, err: PoolError) { + let peers = self.transactions_by_peers.remove(&err.hash); + + // if we're _currently_ syncing, we ignore a bad transaction + if !err.is_bad_transaction() || self.network.is_syncing() { + return + } + // otherwise we penalize the peer that sent the bad transaction, with the assumption that + // the peer should have known that this transaction is bad (e.g. 
violating consensus rules) + if let Some(peers) = peers { for peer_id in peers { self.report_peer_bad_transactions(peer_id); } } - self.bad_imports.insert(hash); + self.metrics.bad_imports.increment(1); + self.bad_imports.insert(err.hash); } /// Returns `true` if [`TransactionsManager`] has capacity to request pending hashes. Returns From 51aea14484d3f65144da95ed035a8419fc419403 Mon Sep 17 00:00:00 2001 From: rakita Date: Mon, 1 Apr 2024 12:52:50 +0200 Subject: [PATCH 019/700] chore(docs): fix database mermaid graph (#7409) --- docs/design/database.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/design/database.md b/docs/design/database.md index db5da983f51e1..485c183e8cbf0 100644 --- a/docs/design/database.md +++ b/docs/design/database.md @@ -113,7 +113,7 @@ HashedStorages { U256 StorageValue } AccountsTrie { - Nibbles "PK" + StoredNibbles Nibbles "PK" BranchNodeCompact Node } StoragesTrie { From 1b9a7687544cf85863b3ba13b52b978e07a7d200 Mon Sep 17 00:00:00 2001 From: Luca Provini Date: Mon, 1 Apr 2024 07:56:26 -0400 Subject: [PATCH 020/700] e2e eth node tests (#7075) Co-authored-by: Matthias Seitz --- .github/workflows/integration.yml | 10 +- Cargo.lock | 2 + crates/node-e2e-tests/Cargo.toml | 2 + crates/node-e2e-tests/tests/it/dev.rs | 2 +- crates/node-e2e-tests/tests/it/eth.rs | 123 +++++++++++++++++ crates/node-e2e-tests/tests/it/main.rs | 4 + crates/node-e2e-tests/tests/it/test_suite.rs | 131 +++++++++++++++++++ crates/payload/builder/src/lib.rs | 1 + 8 files changed, 273 insertions(+), 2 deletions(-) create mode 100644 crates/node-e2e-tests/tests/it/eth.rs create mode 100644 crates/node-e2e-tests/tests/it/test_suite.rs diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index cff3ec64e73f9..7d01b0030c511 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -36,12 +36,20 @@ jobs: - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - - name: Run tests + - if: 
matrix.network == 'ethereum'
+        name: Run tests
         run: |
           cargo nextest run \
             --locked --features "asm-keccak ${{ matrix.network }}" \
             --workspace --exclude examples --exclude ef-tests \
             -E "kind(test)"
+      - if: matrix.network == 'optimism'
+        name: Run tests
+        run: |
+          cargo nextest run \
+            --locked --features "asm-keccak ${{ matrix.network }}" \
+            --workspace --exclude examples --exclude ef-tests --exclude node-e2e-tests \
+            -E "kind(test)"
 
   sync:
     name: sync / 100k blocks
diff --git a/Cargo.lock b/Cargo.lock
index cfdea68fe0b5b..72861c1bbdf39 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4299,10 +4299,12 @@ version = "0.0.0"
 dependencies = [
  "eyre",
  "futures-util",
+ "rand 0.8.5",
  "reth",
  "reth-node-core",
  "reth-node-ethereum",
  "reth-primitives",
+ "secp256k1 0.27.0",
  "serde_json",
  "tokio",
 ]
diff --git a/crates/node-e2e-tests/Cargo.toml b/crates/node-e2e-tests/Cargo.toml
index 6210eaa02877a..9072d4ce1a50a 100644
--- a/crates/node-e2e-tests/Cargo.toml
+++ b/crates/node-e2e-tests/Cargo.toml
@@ -16,3 +16,5 @@ futures-util.workspace = true
 eyre.workspace = true
 tokio.workspace = true
 serde_json.workspace = true
+rand.workspace = true
+secp256k1.workspace = true
diff --git a/crates/node-e2e-tests/tests/it/dev.rs b/crates/node-e2e-tests/tests/it/dev.rs
index 9237630385ef3..ef579b1a71f4c 100644
--- a/crates/node-e2e-tests/tests/it/dev.rs
+++ b/crates/node-e2e-tests/tests/it/dev.rs
@@ -17,7 +17,7 @@ async fn can_run_dev_node() -> eyre::Result<()> {
     // create node config
     let node_config = NodeConfig::test()
         .dev()
-        .with_rpc(RpcServerArgs::default().with_http())
+        .with_rpc(RpcServerArgs::default().with_http().with_unused_ports())
         .with_chain(custom_chain());
 
     let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config)
diff --git a/crates/node-e2e-tests/tests/it/eth.rs b/crates/node-e2e-tests/tests/it/eth.rs
new file mode 100644
index 0000000000000..633541508dedb
--- /dev/null
+++ b/crates/node-e2e-tests/tests/it/eth.rs
@@ -0,0 +1,123 @@
+use crate::test_suite::TestSuite;
+use futures_util::StreamExt; +use reth::{ + builder::{NodeBuilder, NodeHandle}, + payload::EthPayloadBuilderAttributes, + providers::{BlockReaderIdExt, CanonStateSubscriptions}, + rpc::{ + api::EngineApiClient, + eth::EthTransactions, + types::engine::{ExecutionPayloadEnvelopeV3, ForkchoiceState, PayloadAttributes}, + }, + tasks::TaskManager, +}; +use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; +use reth_node_ethereum::{EthEngineTypes, EthereumNode}; +use reth_primitives::{Address, BlockNumberOrTag, B256}; +use std::time::{SystemTime, UNIX_EPOCH}; + +#[tokio::test] +async fn can_run_eth_node() -> eyre::Result<()> { + let tasks = TaskManager::current(); + let test_suite = TestSuite::new(); + + // Node setup + let node_config = NodeConfig::test() + .with_chain(test_suite.chain_spec.clone()) + .with_rpc(RpcServerArgs::default().with_http()); + + let NodeHandle { mut node, node_exit_future: _ } = NodeBuilder::new(node_config) + .testing_node(tasks.executor()) + .node(EthereumNode::default()) + .launch() + .await?; + + // setup engine api events and payload service events + let mut notifications = node.provider.canonical_state_stream(); + let payload_events = node.payload_builder.subscribe().await?; + + // push tx into pool via RPC server + let eth_api = node.rpc_registry.eth_api(); + let transfer_tx = test_suite.transfer_tx(); + eth_api.send_raw_transaction(transfer_tx.envelope_encoded()).await?; + + // trigger new payload building draining the pool + let eth_attr = eth_payload_attributes(); + let payload_id = node.payload_builder.new_payload(eth_attr.clone()).await?; + + // resolve best payload via engine api + let client = node.auth_server_handle().http_client(); + EngineApiClient::::get_payload_v3(&client, payload_id).await?; + + let mut payload_event_stream = payload_events.into_stream(); + + // first event is the payload attributes + let first_event = payload_event_stream.next().await.unwrap()?; + if let reth::payload::Events::Attributes(attr) 
= first_event { + assert_eq!(eth_attr.timestamp, attr.timestamp); + } else { + panic!("Expect first event as payload attributes.") + } + + // second event is built payload + let second_event = payload_event_stream.next().await.unwrap()?; + if let reth::payload::Events::BuiltPayload(payload) = second_event { + // setup payload for submission + let envelope_v3 = ExecutionPayloadEnvelopeV3::from(payload); + let payload_v3 = envelope_v3.execution_payload; + + // submit payload to engine api + let submission = EngineApiClient::::new_payload_v3( + &client, + payload_v3, + vec![], + eth_attr.parent_beacon_block_root.unwrap(), + ) + .await?; + assert!(submission.is_valid()); + + // get latest valid hash from blockchain tree + let hash = submission.latest_valid_hash.unwrap(); + + // trigger forkchoice update via engine api to commit the block to the blockchain + let fcu = EngineApiClient::::fork_choice_updated_v2( + &client, + ForkchoiceState { + head_block_hash: hash, + safe_block_hash: hash, + finalized_block_hash: hash, + }, + None, + ) + .await?; + assert!(fcu.is_valid()); + + // get head block from notifications stream and verify the tx has been pushed to the pool + // is actually present in the canonical block + let head = notifications.next().await.unwrap(); + let tx = head.tip().transactions().next().unwrap(); + assert_eq!(tx.hash(), transfer_tx.hash); + + // make sure the block hash we submitted via FCU engine api is the new latest block using an + // RPC call + let latest_block = node.provider.block_by_number_or_tag(BlockNumberOrTag::Latest)?.unwrap(); + assert_eq!(latest_block.hash_slow(), hash); + } else { + panic!("Expect a built payload event."); + } + + Ok(()) +} + +fn eth_payload_attributes() -> EthPayloadBuilderAttributes { + let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(); + + let attributes = PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: Some(vec![]), + 
timestamp: 0, + extra_data: fixed_bytes!("00").into(), + gas_limit: 30_000_000, + difficulty: U256::from(0), + mix_hash: B256::ZERO, + coinbase: Address::ZERO, + alloc, + number: Some(0), + config: TestSuite::chain_config(), + base_fee_per_gas: None, + blob_gas_used: None, + excess_blob_gas: None, + }; + + Arc::new(genesis.into()) + } + + fn chain_config() -> ChainConfig { + let chain_config = r#" +{ + "chainId": 1, + "homesteadBlock": 0, + "daoForkSupport": true, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "arrowGlacierBlock": 0, + "grayGlacierBlock": 0, + "shanghaiTime": 0, + "cancunTime":0, + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true +} +"#; + serde_json::from_str(chain_config).unwrap() + } +} + +/// The main account used for the e2e tests +pub struct Account { + pubkey: Address, + secret_key: SecretKey, +} + +impl Account { + /// Creates a new account from a random secret key and pub key + fn new() -> Self { + let (secret_key, pubkey) = Account::random(); + Self { pubkey, secret_key } + } + + /// Generates a random secret key and pub key + fn random() -> (SecretKey, Address) { + let secret_key = SecretKey::new(&mut rand::thread_rng()); + let secp = Secp256k1::new(); + let public_key = PublicKey::from_secret_key(&secp, &secret_key); + let hash = keccak256(&public_key.serialize_uncompressed()[1..]); + let pubkey = Address::from_slice(&hash[12..]); + (secret_key, pubkey) + } + + /// Creates a new transfer transaction + pub fn transfer_tx(&self) -> TransactionSigned { + let tx = Transaction::Eip1559(TxEip1559 { + chain_id: 1, + nonce: 0, + gas_limit: 21000, + to: TransactionKind::Call(Address::random()), + value: U256::from(1000), + input: Bytes::default(), + max_fee_per_gas: 875000000, + max_priority_fee_per_gas: 0, + access_list: AccessList::default(), + }); 
+ Account::sign_transaction(&self.secret_key, tx) + } + /// Helper function to sign a transaction + fn sign_transaction(secret_key: &SecretKey, transaction: Transaction) -> TransactionSigned { + let tx_signature_hash = transaction.signature_hash(); + let signature = + sign_message(B256::from_slice(secret_key.as_ref()), tx_signature_hash).unwrap(); + TransactionSigned::from_transaction_and_signature(transaction, signature) + } +} diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index 7982946cbccdf..3455775454877 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -114,6 +114,7 @@ pub mod noop; #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; +pub use events::Events; pub use payload::{EthBuiltPayload, EthPayloadBuilderAttributes}; pub use reth_rpc_types::engine::PayloadId; pub use service::{PayloadBuilderHandle, PayloadBuilderService, PayloadStore}; From 0be61cd5f12936c9138158e61b4d8d9653163c5a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 1 Apr 2024 15:24:25 +0200 Subject: [PATCH 021/700] fix(grafana): time frame (#7407) --- etc/grafana/dashboards/overview.json | 4 ++-- etc/grafana/dashboards/reth-mempool.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 2301c99b9cab0..c91b9bafaf8c8 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -8387,7 +8387,7 @@ "type": "timeseries" } ], - "refresh": "5s", + "refresh": "30s", "revision": 1, "schemaVersion": 39, "tags": [], @@ -8464,7 +8464,7 @@ ] }, "time": { - "from": "now-3h", + "from": "now-1h", "to": "now" }, "timepicker": {}, diff --git a/etc/grafana/dashboards/reth-mempool.json b/etc/grafana/dashboards/reth-mempool.json index 7128dc6a398bb..8e26df446edd7 100644 --- a/etc/grafana/dashboards/reth-mempool.json +++ b/etc/grafana/dashboards/reth-mempool.json @@ -2848,7 +2848,7 @@ ] }, 
"time": { - "from": "now-6h", + "from": "now-1h", "to": "now" }, "timepicker": {}, From ab8a4f1ff62b195c03a387d8b7f2705a531b827a Mon Sep 17 00:00:00 2001 From: lmittmann <3458786+lmittmann@users.noreply.github.com> Date: Mon, 1 Apr 2024 18:18:57 +0200 Subject: [PATCH 022/700] CI: Disable hive job on forked repositories (#7405) Co-authored-by: lmittmann --- .github/workflows/hive.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index 9247901aa554c..eb8bb79454634 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -17,6 +17,7 @@ concurrency: jobs: prepare: + if: github.repository == 'paradigmxyz/reth' timeout-minutes: 45 runs-on: group: Reth From d6933bad999e6001e615919e8cb61b603284b36d Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 1 Apr 2024 12:27:34 -0400 Subject: [PATCH 023/700] fix: shrink_to_fit PrefixSetMut on freeze (#7342) --- crates/trie/src/prefix_set/mod.rs | 42 +++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/crates/trie/src/prefix_set/mod.rs b/crates/trie/src/prefix_set/mod.rs index a6bdfa9d8b0c9..b556dd3790752 100644 --- a/crates/trie/src/prefix_set/mod.rs +++ b/crates/trie/src/prefix_set/mod.rs @@ -121,6 +121,9 @@ impl PrefixSetMut { self.keys.dedup(); } + // we need to shrink in both the sorted and non-sorted cases because deduping may have + // occurred either on `freeze`, or during `contains`. 
+ self.keys.shrink_to_fit(); PrefixSet { keys: Arc::new(self.keys), index: self.index } } } @@ -186,4 +189,43 @@ mod tests { assert!(!prefix_set.contains(&[7, 8])); assert_eq!(prefix_set.len(), 3); // Length should be 3 (excluding duplicate) } + + #[test] + fn test_freeze_shrinks_capacity() { + let mut prefix_set = PrefixSetMut::default(); + prefix_set.insert(Nibbles::from_nibbles([1, 2, 3])); + prefix_set.insert(Nibbles::from_nibbles([1, 2, 4])); + prefix_set.insert(Nibbles::from_nibbles([4, 5, 6])); + prefix_set.insert(Nibbles::from_nibbles([1, 2, 3])); // Duplicate + + assert!(prefix_set.contains(&[1, 2])); + assert!(prefix_set.contains(&[4, 5])); + assert!(!prefix_set.contains(&[7, 8])); + assert_eq!(prefix_set.keys.len(), 3); // Length should be 3 (excluding duplicate) + assert_eq!(prefix_set.keys.capacity(), 4); // Capacity should be 4 (including duplicate) + + let frozen = prefix_set.freeze(); + assert_eq!(frozen.keys.len(), 3); // Length should be 3 (excluding duplicate) + assert_eq!(frozen.keys.capacity(), 3); // Capacity should be 3 after shrinking + } + + #[test] + fn test_freeze_shrinks_existing_capacity() { + // do the above test but with preallocated capacity + let mut prefix_set = PrefixSetMut::with_capacity(101); + prefix_set.insert(Nibbles::from_nibbles([1, 2, 3])); + prefix_set.insert(Nibbles::from_nibbles([1, 2, 4])); + prefix_set.insert(Nibbles::from_nibbles([4, 5, 6])); + prefix_set.insert(Nibbles::from_nibbles([1, 2, 3])); // Duplicate + + assert!(prefix_set.contains(&[1, 2])); + assert!(prefix_set.contains(&[4, 5])); + assert!(!prefix_set.contains(&[7, 8])); + assert_eq!(prefix_set.keys.len(), 3); // Length should be 3 (excluding duplicate) + assert_eq!(prefix_set.keys.capacity(), 101); // Capacity should be 101 (including duplicate) + + let frozen = prefix_set.freeze(); + assert_eq!(frozen.keys.len(), 3); // Length should be 3 (excluding duplicate) + assert_eq!(frozen.keys.capacity(), 3); // Capacity should be 3 after shrinking + } } From 
2de0bc49765f264b94ac1c314f11c890977a4386 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 1 Apr 2024 18:31:02 +0200 Subject: [PATCH 024/700] feat: add reth-evm crate (#7397) --- Cargo.lock | 14 +++++++++++--- Cargo.toml | 4 +++- crates/evm/Cargo.toml | 18 ++++++++++++++++++ .../src/evm/traits.rs => evm/src/lib.rs} | 10 ++++++++++ crates/node-api/Cargo.toml | 7 +++---- crates/node-api/src/evm/mod.rs | 5 ----- crates/node-api/src/lib.rs | 3 +-- crates/storage/provider/Cargo.toml | 2 +- .../provider/src/providers/database/mod.rs | 2 +- .../src/providers/database/provider.rs | 2 +- crates/storage/provider/src/providers/mod.rs | 2 +- crates/storage/provider/src/test_utils/mock.rs | 2 +- crates/storage/provider/src/test_utils/noop.rs | 2 +- crates/storage/provider/src/traits/evm_env.rs | 2 +- 14 files changed, 53 insertions(+), 22 deletions(-) create mode 100644 crates/evm/Cargo.toml rename crates/{node-api/src/evm/traits.rs => evm/src/lib.rs} (86%) delete mode 100644 crates/node-api/src/evm/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 72861c1bbdf39..53577ee619129 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5880,6 +5880,15 @@ dependencies = [ "tempfile", ] +[[package]] +name = "reth-evm" +version = "0.2.0-beta.4" +dependencies = [ + "reth-primitives", + "revm", + "revm-primitives", +] + [[package]] name = "reth-interfaces" version = "0.2.0-beta.4" @@ -6090,10 +6099,9 @@ dependencies = [ name = "reth-node-api" version = "0.2.0-beta.4" dependencies = [ + "reth-evm", "reth-primitives", "reth-rpc-types", - "revm", - "revm-primitives", "serde", "thiserror", ] @@ -6366,10 +6374,10 @@ dependencies = [ "rayon", "reth-codecs", "reth-db", + "reth-evm", "reth-interfaces", "reth-metrics", "reth-nippy-jar", - "reth-node-api", "reth-primitives", "reth-trie", "revm", diff --git a/Cargo.toml b/Cargo.toml index 5bfc4e0acedd0..33d3e45f407be 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,7 +8,8 @@ members = [ "crates/consensus/beacon-core/", "crates/consensus/common/", 
"crates/ethereum-forks/", - "crates/etl", + "crates/etl/", + "crates/evm/", "crates/interfaces/", "crates/metrics/", "crates/metrics/metrics-derive/", @@ -209,6 +210,7 @@ reth-eth-wire = { path = "crates/net/eth-wire" } reth-ethereum-forks = { path = "crates/ethereum-forks" } reth-ethereum-payload-builder = { path = "crates/payload/ethereum" } reth-etl = { path = "crates/etl" } +reth-evm = { path = "crates/evm" } reth-optimism-payload-builder = { path = "crates/payload/optimism" } reth-interfaces = { path = "crates/interfaces" } reth-ipc = { path = "crates/rpc/ipc" } diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml new file mode 100644 index 0000000000000..b100c83b7b668 --- /dev/null +++ b/crates/evm/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "reth-evm" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-primitives.workspace = true +revm-primitives.workspace = true +revm.workspace = true + diff --git a/crates/node-api/src/evm/traits.rs b/crates/evm/src/lib.rs similarity index 86% rename from crates/node-api/src/evm/traits.rs rename to crates/evm/src/lib.rs index bc0b900719a35..2bf1f72a1cc84 100644 --- a/crates/node-api/src/evm/traits.rs +++ b/crates/evm/src/lib.rs @@ -1,3 +1,13 @@ +//! Traits for configuring an EVM specifics. 
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + use reth_primitives::{revm::env::fill_block_env, Address, ChainSpec, Header, Transaction, U256}; use revm::{Database, Evm, EvmBuilder}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId, TxEnv}; diff --git a/crates/node-api/Cargo.toml b/crates/node-api/Cargo.toml index 6b01dd8c07542..0dbd20adee83f 100644 --- a/crates/node-api/Cargo.toml +++ b/crates/node-api/Cargo.toml @@ -14,9 +14,8 @@ workspace = true # reth reth-primitives.workspace = true reth-rpc-types.workspace = true -revm-primitives.workspace = true -thiserror.workspace = true -revm.workspace = true +reth-evm.workspace = true -# io +# misc serde.workspace = true +thiserror.workspace = true diff --git a/crates/node-api/src/evm/mod.rs b/crates/node-api/src/evm/mod.rs deleted file mode 100644 index e93d66480f6f5..0000000000000 --- a/crates/node-api/src/evm/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -//! Traits and structs for working with a configurable EVM. - -/// Traits for working with a configurable EVM. -mod traits; -pub use traits::{ConfigureEvm, ConfigureEvmEnv}; diff --git a/crates/node-api/src/lib.rs b/crates/node-api/src/lib.rs index e103f59ff6f31..d3e08fc9be914 100644 --- a/crates/node-api/src/lib.rs +++ b/crates/node-api/src/lib.rs @@ -20,8 +20,7 @@ pub use engine::{ }; /// Traits and helper types used to abstract over EVM methods and types. 
-pub mod evm; -pub use evm::{ConfigureEvm, ConfigureEvmEnv}; +pub use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; pub mod primitives; diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 26e6a84b5df21..c54adc050c5e9 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -19,7 +19,7 @@ reth-db.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true reth-codecs.workspace = true -reth-node-api.workspace = true +reth-evm.workspace = true revm.workspace = true diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index b6ea00cd4ea24..5d9d8ae6b25ef 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -8,8 +8,8 @@ use crate::{ TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use reth_db::{database::Database, init_db, models::StoredBlockBodyIndices, DatabaseEnv}; +use reth_evm::ConfigureEvmEnv; use reth_interfaces::{provider::ProviderResult, RethError, RethResult}; -use reth_node_api::ConfigureEvmEnv; use reth_primitives::{ stage::{StageCheckpoint, StageId}, Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, ChainInfo, diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 85b46550d993e..df51b19b8c6ff 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -26,12 +26,12 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, BlockNumberList, DatabaseError, }; +use reth_evm::ConfigureEvmEnv; use reth_interfaces::{ p2p::headers::downloader::SyncTarget, provider::{ProviderResult, RootMismatch}, RethResult, }; -use reth_node_api::ConfigureEvmEnv; use reth_primitives::{ keccak256, revm::{config::revm_spec, 
env::fill_block_env}, diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 4af73fcbad4e4..611aca349591c 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -10,6 +10,7 @@ use reth_db::{ database::Database, models::{AccountBeforeTx, StoredBlockBodyIndices}, }; +use reth_evm::ConfigureEvmEnv; use reth_interfaces::{ blockchain_tree::{ error::{CanonicalError, InsertBlockError}, @@ -20,7 +21,6 @@ use reth_interfaces::{ provider::ProviderResult, RethResult, }; -use reth_node_api::ConfigureEvmEnv; use reth_primitives::{ stage::{StageCheckpoint, StageId}, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumber, diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 26e09e5a1a59f..13e4a5a8ff595 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -7,8 +7,8 @@ use crate::{ }; use parking_lot::Mutex; use reth_db::models::{AccountBeforeTx, StoredBlockBodyIndices}; +use reth_evm::ConfigureEvmEnv; use reth_interfaces::provider::{ProviderError, ProviderResult}; -use reth_node_api::ConfigureEvmEnv; use reth_primitives::{ keccak256, trie::AccountProof, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumber, BlockWithSenders, Bytecode, Bytes, ChainInfo, ChainSpec, Header, Receipt, diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 2272f09a03b9a..13a2f3b4018f5 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -7,8 +7,8 @@ use crate::{ WithdrawalsProvider, }; use reth_db::models::{AccountBeforeTx, StoredBlockBodyIndices}; +use reth_evm::ConfigureEvmEnv; use reth_interfaces::provider::ProviderResult; -use reth_node_api::ConfigureEvmEnv; use reth_primitives::{ 
stage::{StageCheckpoint, StageId}, trie::AccountProof, diff --git a/crates/storage/provider/src/traits/evm_env.rs b/crates/storage/provider/src/traits/evm_env.rs index 2eb8f3e5d889b..8c821984601d9 100644 --- a/crates/storage/provider/src/traits/evm_env.rs +++ b/crates/storage/provider/src/traits/evm_env.rs @@ -1,5 +1,5 @@ +use reth_evm::ConfigureEvmEnv; use reth_interfaces::provider::ProviderResult; -use reth_node_api::ConfigureEvmEnv; use reth_primitives::{BlockHashOrNumber, Header}; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; From 9e55ba6d1307cb2a4a209c67efadf91e53920241 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 1 Apr 2024 19:25:49 +0200 Subject: [PATCH 025/700] feat: add helpers to obtain the engine API client (#7413) --- crates/node-builder/src/node.rs | 19 +++++++++++++- crates/node-e2e-tests/tests/it/eth.rs | 38 +++++++++++++-------------- 2 files changed, 36 insertions(+), 21 deletions(-) diff --git a/crates/node-builder/src/node.rs b/crates/node-builder/src/node.rs index 1768cdb5abed4..ccc765c8f5816 100644 --- a/crates/node-builder/src/node.rs +++ b/crates/node-builder/src/node.rs @@ -9,7 +9,10 @@ pub use reth_node_api::NodeTypes; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, - rpc::builder::{auth::AuthServerHandle, RpcServerHandle}, + rpc::{ + api::EngineApiClient, + builder::{auth::AuthServerHandle, RpcServerHandle}, + }, }; use reth_payload_builder::PayloadBuilderHandle; use reth_primitives::ChainSpec; @@ -124,6 +127,20 @@ impl FullNode { pub fn auth_server_handle(&self) -> &AuthServerHandle { &self.rpc_server_handles.auth } + + /// Returns the [EngineApiClient] interface for the authenticated engine API. + /// + /// This will send authenticated http requests to the node's auth server. + pub fn engine_http_client(&self) -> impl EngineApiClient { + self.auth_server_handle().http_client() + } + + /// Returns the [EngineApiClient] interface for the authenticated engine API. 
+ /// + /// This will send authenticated ws requests to the node's auth server. + pub async fn engine_ws_client(&self) -> impl EngineApiClient { + self.auth_server_handle().ws_client().await + } } impl Clone for FullNode { diff --git a/crates/node-e2e-tests/tests/it/eth.rs b/crates/node-e2e-tests/tests/it/eth.rs index 633541508dedb..763b6af1fbc8c 100644 --- a/crates/node-e2e-tests/tests/it/eth.rs +++ b/crates/node-e2e-tests/tests/it/eth.rs @@ -12,7 +12,7 @@ use reth::{ tasks::TaskManager, }; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; -use reth_node_ethereum::{EthEngineTypes, EthereumNode}; +use reth_node_ethereum::EthereumNode; use reth_primitives::{Address, BlockNumberOrTag, B256}; use std::time::{SystemTime, UNIX_EPOCH}; @@ -46,8 +46,10 @@ async fn can_run_eth_node() -> eyre::Result<()> { let payload_id = node.payload_builder.new_payload(eth_attr.clone()).await?; // resolve best payload via engine api - let client = node.auth_server_handle().http_client(); - EngineApiClient::::get_payload_v3(&client, payload_id).await?; + let client = node.engine_http_client(); + + // ensure we can get the payload over the engine api + let _payload = client.get_payload_v3(payload_id).await?; let mut payload_event_stream = payload_events.into_stream(); @@ -67,29 +69,25 @@ async fn can_run_eth_node() -> eyre::Result<()> { let payload_v3 = envelope_v3.execution_payload; // submit payload to engine api - let submission = EngineApiClient::::new_payload_v3( - &client, - payload_v3, - vec![], - eth_attr.parent_beacon_block_root.unwrap(), - ) - .await?; + let submission = client + .new_payload_v3(payload_v3, vec![], eth_attr.parent_beacon_block_root.unwrap()) + .await?; assert!(submission.is_valid()); // get latest valid hash from blockchain tree let hash = submission.latest_valid_hash.unwrap(); // trigger forkchoice update via engine api to commit the block to the blockchain - let fcu = EngineApiClient::::fork_choice_updated_v2( - &client, - ForkchoiceState { - 
head_block_hash: hash, - safe_block_hash: hash, - finalized_block_hash: hash, - }, - None, - ) - .await?; + let fcu = client + .fork_choice_updated_v2( + ForkchoiceState { + head_block_hash: hash, + safe_block_hash: hash, + finalized_block_hash: hash, + }, + None, + ) + .await?; assert!(fcu.is_valid()); // get head block from notifications stream and verify the tx has been pushed to the pool From 1ad13b95d8f258b8af7516cdf878c22fa53c10af Mon Sep 17 00:00:00 2001 From: 0xAtreides <103257861+JackG-eth@users.noreply.github.com> Date: Mon, 1 Apr 2024 18:29:15 +0100 Subject: [PATCH 026/700] feature: eth-wire types standalone crate (#7373) Co-authored-by: Matthias Seitz --- Cargo.lock | 27 ++++++- Cargo.toml | 2 + crates/interfaces/Cargo.toml | 5 +- crates/interfaces/src/p2p/headers/client.rs | 2 +- crates/net/eth-wire-types/Cargo.toml | 51 ++++++++++++ .../types => eth-wire-types/src}/blocks.rs | 4 +- .../types => eth-wire-types/src}/broadcast.rs | 0 crates/net/eth-wire-types/src/lib.rs | 33 ++++++++ .../types => eth-wire-types/src}/message.rs | 34 ++++---- .../types => eth-wire-types/src}/receipts.rs | 5 +- .../types => eth-wire-types/src}/response.rs | 0 .../src/types => eth-wire-types/src}/state.rs | 0 .../types => eth-wire-types/src}/status.rs | 81 ++++++++++++++++++- .../src}/transactions.rs | 0 .../types => eth-wire-types/src}/version.rs | 7 +- crates/net/eth-wire/Cargo.toml | 1 + crates/net/eth-wire/src/errors/eth.rs | 19 ++--- crates/net/eth-wire/src/ethstream.rs | 11 ++- crates/net/eth-wire/src/lib.rs | 9 +-- crates/net/eth-wire/src/p2pstream.rs | 2 +- crates/net/eth-wire/src/types/mod.rs | 25 ------ 21 files changed, 237 insertions(+), 81 deletions(-) create mode 100644 crates/net/eth-wire-types/Cargo.toml rename crates/net/{eth-wire/src/types => eth-wire-types/src}/blocks.rs (99%) rename crates/net/{eth-wire/src/types => eth-wire-types/src}/broadcast.rs (100%) create mode 100644 crates/net/eth-wire-types/src/lib.rs rename crates/net/{eth-wire/src/types => 
eth-wire-types/src}/message.rs (96%) rename crates/net/{eth-wire/src/types => eth-wire-types/src}/receipts.rs (99%) rename crates/net/{eth-wire/src/types => eth-wire-types/src}/response.rs (100%) rename crates/net/{eth-wire/src/types => eth-wire-types/src}/state.rs (100%) rename crates/net/{eth-wire/src/types => eth-wire-types/src}/status.rs (85%) rename crates/net/{eth-wire/src/types => eth-wire-types/src}/transactions.rs (100%) rename crates/net/{eth-wire/src/types => eth-wire-types/src}/version.rs (94%) delete mode 100644 crates/net/eth-wire/src/types/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 53577ee619129..f6e2268067504 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5826,6 +5826,7 @@ dependencies = [ "reth-codecs", "reth-discv4", "reth-ecies", + "reth-eth-wire-types", "reth-metrics", "reth-net-common", "reth-primitives", @@ -5841,6 +5842,30 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-eth-wire-types" +version = "0.2.0-beta.4" +dependencies = [ + "alloy-chains", + "alloy-rlp", + "arbitrary", + "async-stream", + "bytes", + "derive_more", + "proptest", + "proptest-derive", + "rand 0.8.5", + "reth-codecs", + "reth-net-common", + "reth-primitives", + "reth-tracing", + "secp256k1 0.27.0", + "serde", + "test-fuzz", + "thiserror", + "tokio-util", +] + [[package]] name = "reth-ethereum-forks" version = "0.2.0-beta.4" @@ -5898,7 +5923,7 @@ dependencies = [ "futures", "parking_lot 0.12.1", "rand 0.8.5", - "reth-eth-wire", + "reth-eth-wire-types", "reth-network-api", "reth-primitives", "reth-rpc-types", diff --git a/Cargo.toml b/Cargo.toml index 33d3e45f407be..492a5f85b8de2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,6 +19,7 @@ members = [ "crates/net/downloaders/", "crates/net/ecies/", "crates/net/eth-wire/", + "crates/net/eth-wire-types", "crates/net/nat/", "crates/net/network/", "crates/net/network-api/", @@ -207,6 +208,7 @@ reth-node-api = { path = "crates/node-api" } reth-downloaders = { path = "crates/net/downloaders" } reth-ecies = { path = 
"crates/net/ecies" } reth-eth-wire = { path = "crates/net/eth-wire" } +reth-eth-wire-types = { path = "crates/net/eth-wire-types" } reth-ethereum-forks = { path = "crates/ethereum-forks" } reth-ethereum-payload-builder = { path = "crates/payload/ethereum" } reth-etl = { path = "crates/etl" } diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index 971e80ae0185e..2dde42801e3ab 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -14,8 +14,7 @@ workspace = true reth-primitives.workspace = true reth-rpc-types.workspace = true reth-network-api.workspace = true -# TODO(onbjerg): We only need this for [BlockBody] -reth-eth-wire.workspace = true +reth-eth-wire-types.workspace = true # async futures.workspace = true @@ -43,4 +42,4 @@ secp256k1 = { workspace = true, features = ["alloc", "recovery", "rand"] } [features] test-utils = ["secp256k1", "rand", "parking_lot"] cli = ["clap"] -optimism = ["reth-eth-wire/optimism"] +optimism = ["reth-eth-wire-types/optimism"] diff --git a/crates/interfaces/src/p2p/headers/client.rs b/crates/interfaces/src/p2p/headers/client.rs index 845240cabe9ab..cf5355308695b 100644 --- a/crates/interfaces/src/p2p/headers/client.rs +++ b/crates/interfaces/src/p2p/headers/client.rs @@ -1,6 +1,6 @@ use crate::p2p::{download::DownloadClient, error::PeerRequestResult, priority::Priority}; use futures::{Future, FutureExt}; -pub use reth_eth_wire::BlockHeaders; +pub use reth_eth_wire_types::BlockHeaders; use reth_primitives::{BlockHashOrNumber, Header, HeadersDirection}; use std::{ fmt::Debug, diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml new file mode 100644 index 0000000000000..2d54eb1879b81 --- /dev/null +++ b/crates/net/eth-wire-types/Cargo.toml @@ -0,0 +1,51 @@ +[package] +name = "reth-eth-wire-types" +description = "types for eth-wire" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true 
+homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-codecs.workspace = true +reth-primitives.workspace = true +alloy-rlp = { workspace = true, features = ["derive"] } +alloy-chains = { workspace = true, features = ["serde", "rlp", "arbitrary"] } + +bytes.workspace = true +derive_more.workspace = true +thiserror.workspace = true +serde = { workspace = true, optional = true } + +# arbitrary utils +arbitrary = { workspace = true, features = ["derive"], optional = true } +proptest = { workspace = true, optional = true } +proptest-derive = { workspace = true, optional = true } + +[dev-dependencies] +reth-net-common.workspace = true +reth-primitives = { workspace = true, features = ["arbitrary"] } +reth-tracing.workspace = true + +test-fuzz.workspace = true +tokio-util = { workspace = true, features = ["io", "codec"] } +rand.workspace = true +secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } + +arbitrary = { workspace = true, features = ["derive"] } +proptest.workspace = true +proptest-derive.workspace = true +async-stream = "0.3" + +[features] +default = ["serde"] +serde = ["dep:serde"] +arbitrary = ["reth-primitives/arbitrary", "dep:arbitrary", "dep:proptest", "dep:proptest-derive"] +optimism = ["reth-primitives/optimism"] diff --git a/crates/net/eth-wire/src/types/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs similarity index 99% rename from crates/net/eth-wire/src/types/blocks.rs rename to crates/net/eth-wire-types/src/blocks.rs index 6e442f0b564b2..fa6365c206230 100644 --- a/crates/net/eth-wire/src/types/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -128,9 +128,7 @@ impl From> for BlockBodies { #[cfg(test)] mod tests { - use crate::types::{ - message::RequestPair, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, - }; + use crate::{message::RequestPair, BlockBodies, BlockHeaders, GetBlockBodies, 
GetBlockHeaders}; use alloy_rlp::{Decodable, Encodable}; use reth_primitives::{ hex, BlockHashOrNumber, Header, HeadersDirection, Signature, Transaction, TransactionKind, diff --git a/crates/net/eth-wire/src/types/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs similarity index 100% rename from crates/net/eth-wire/src/types/broadcast.rs rename to crates/net/eth-wire-types/src/broadcast.rs diff --git a/crates/net/eth-wire-types/src/lib.rs b/crates/net/eth-wire-types/src/lib.rs new file mode 100644 index 0000000000000..ab18f4d84a54e --- /dev/null +++ b/crates/net/eth-wire-types/src/lib.rs @@ -0,0 +1,33 @@ +//! Types for the eth wire protocol: + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +mod status; +pub use status::{Status, StatusBuilder}; + +pub mod version; +pub use version::EthVersion; + +pub mod message; +pub use message::{EthMessage, EthMessageID, ProtocolMessage}; + +pub mod blocks; +pub use blocks::*; + +pub mod broadcast; +pub use broadcast::*; + +pub mod transactions; +pub use transactions::*; + +pub mod state; +pub use state::*; + +pub mod receipts; +pub use receipts::*; diff --git a/crates/net/eth-wire/src/types/message.rs b/crates/net/eth-wire-types/src/message.rs similarity index 96% rename from crates/net/eth-wire/src/types/message.rs rename to crates/net/eth-wire-types/src/message.rs index d367a8b613a48..dc8011879ba1f 100644 --- a/crates/net/eth-wire/src/types/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -11,7 +11,8 @@ use super::{ GetNodeData, GetPooledTransactions, GetReceipts, NewBlock, NewPooledTransactionHashes66, NewPooledTransactionHashes68, NodeData, PooledTransactions, Receipts, Status, 
Transactions, }; -use crate::{errors::EthStreamError, EthVersion, SharedTransactions}; +use crate::{EthVersion, SharedTransactions}; + use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; use reth_primitives::bytes::{Buf, BufMut}; #[cfg(feature = "serde")] @@ -22,6 +23,17 @@ use std::{fmt::Debug, sync::Arc}; // https://github.com/ethereum/go-ethereum/blob/30602163d5d8321fbc68afdcbbaf2362b2641bde/eth/protocols/eth/protocol.go#L50 pub const MAX_MESSAGE_SIZE: usize = 10 * 1024 * 1024; +/// Error when sending/receiving a message +#[derive(thiserror::Error, Debug)] +pub enum MessageError { + /// Flags an unrecognized message ID for a given protocol version. + #[error("message id {1:?} is invalid for version {0:?}")] + Invalid(EthVersion, EthMessageID), + /// Thrown when rlp decoding a message message failed. + #[error("RLP error: {0}")] + RlpError(#[from] alloy_rlp::Error), +} + /// An `eth` protocol message, containing a message ID and payload. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] @@ -34,7 +46,7 @@ pub struct ProtocolMessage { impl ProtocolMessage { /// Create a new ProtocolMessage from a message type and message rlp bytes. 
- pub fn decode_message(version: EthVersion, buf: &mut &[u8]) -> Result { + pub fn decode_message(version: EthVersion, buf: &mut &[u8]) -> Result { let message_type = EthMessageID::decode(buf)?; let message = match message_type { @@ -81,20 +93,14 @@ impl ProtocolMessage { } EthMessageID::GetNodeData => { if version >= EthVersion::Eth67 { - return Err(EthStreamError::EthInvalidMessageError( - version, - EthMessageID::GetNodeData, - )) + return Err(MessageError::Invalid(version, EthMessageID::GetNodeData)) } let request_pair = RequestPair::::decode(buf)?; EthMessage::GetNodeData(request_pair) } EthMessageID::NodeData => { if version >= EthVersion::Eth67 { - return Err(EthStreamError::EthInvalidMessageError( - version, - EthMessageID::GetNodeData, - )) + return Err(MessageError::Invalid(version, EthMessageID::GetNodeData)) } let request_pair = RequestPair::::decode(buf)?; EthMessage::NodeData(request_pair) @@ -487,9 +493,9 @@ where #[cfg(test)] mod tests { + use super::MessageError; use crate::{ - errors::EthStreamError, types::message::RequestPair, EthMessage, EthMessageID, GetNodeData, - NodeData, ProtocolMessage, + message::RequestPair, EthMessage, EthMessageID, GetNodeData, NodeData, ProtocolMessage, }; use alloy_rlp::{Decodable, Encodable, Error}; use reth_primitives::hex; @@ -509,14 +515,14 @@ mod tests { message: get_node_data, }); let msg = ProtocolMessage::decode_message(crate::EthVersion::Eth67, &mut &buf[..]); - assert!(matches!(msg, Err(EthStreamError::EthInvalidMessageError(..)))); + assert!(matches!(msg, Err(MessageError::Invalid(..)))); let node_data = EthMessage::NodeData(RequestPair { request_id: 1337, message: NodeData(vec![]) }); let buf = encode(ProtocolMessage { message_type: EthMessageID::NodeData, message: node_data }); let msg = ProtocolMessage::decode_message(crate::EthVersion::Eth67, &mut &buf[..]); - assert!(matches!(msg, Err(EthStreamError::EthInvalidMessageError(..)))); + assert!(matches!(msg, Err(MessageError::Invalid(..)))); } #[test] 
diff --git a/crates/net/eth-wire/src/types/receipts.rs b/crates/net/eth-wire-types/src/receipts.rs similarity index 99% rename from crates/net/eth-wire/src/types/receipts.rs rename to crates/net/eth-wire-types/src/receipts.rs index 41d40d9c4c7df..3f260de0ef1fe 100644 --- a/crates/net/eth-wire/src/types/receipts.rs +++ b/crates/net/eth-wire-types/src/receipts.rs @@ -34,10 +34,7 @@ pub struct Receipts( #[cfg(test)] mod tests { - use crate::{ - types::{message::RequestPair, GetReceipts}, - Receipts, - }; + use crate::{message::RequestPair, GetReceipts, Receipts}; use alloy_rlp::{Decodable, Encodable}; use reth_primitives::{hex, Log, Receipt, ReceiptWithBloom, TxType}; diff --git a/crates/net/eth-wire/src/types/response.rs b/crates/net/eth-wire-types/src/response.rs similarity index 100% rename from crates/net/eth-wire/src/types/response.rs rename to crates/net/eth-wire-types/src/response.rs diff --git a/crates/net/eth-wire/src/types/state.rs b/crates/net/eth-wire-types/src/state.rs similarity index 100% rename from crates/net/eth-wire/src/types/state.rs rename to crates/net/eth-wire-types/src/state.rs diff --git a/crates/net/eth-wire/src/types/status.rs b/crates/net/eth-wire-types/src/status.rs similarity index 85% rename from crates/net/eth-wire/src/types/status.rs rename to crates/net/eth-wire-types/src/status.rs index 5b498b220ff36..0ba17ef68745c 100644 --- a/crates/net/eth-wire/src/types/status.rs +++ b/crates/net/eth-wire-types/src/status.rs @@ -1,4 +1,4 @@ -use crate::{EthVersion, StatusBuilder}; +use crate::EthVersion; use alloy_chains::{Chain, NamedChain}; use alloy_rlp::{RlpDecodable, RlpEncodable}; use reth_codecs::derive_arbitrary; @@ -147,9 +147,86 @@ impl Default for Status { } } +/// Builder for [`Status`] messages. +/// +/// # Example +/// ``` +/// use reth_eth_wire_types::{EthVersion, Status}; +/// use reth_primitives::{Chain, Hardfork, B256, MAINNET, MAINNET_GENESIS_HASH, U256}; +/// +/// // this is just an example status message! 
+/// let status = Status::builder() +/// .version(EthVersion::Eth66.into()) +/// .chain(Chain::mainnet()) +/// .total_difficulty(U256::from(100)) +/// .blockhash(B256::from(MAINNET_GENESIS_HASH)) +/// .genesis(B256::from(MAINNET_GENESIS_HASH)) +/// .forkid(MAINNET.hardfork_fork_id(Hardfork::Paris).unwrap()) +/// .build(); +/// +/// assert_eq!( +/// status, +/// Status { +/// version: EthVersion::Eth66.into(), +/// chain: Chain::mainnet(), +/// total_difficulty: U256::from(100), +/// blockhash: B256::from(MAINNET_GENESIS_HASH), +/// genesis: B256::from(MAINNET_GENESIS_HASH), +/// forkid: MAINNET.hardfork_fork_id(Hardfork::Paris).unwrap(), +/// } +/// ); +/// ``` +#[derive(Debug, Default)] +pub struct StatusBuilder { + status: Status, +} + +impl StatusBuilder { + /// Consumes the type and creates the actual [`Status`] message. + pub fn build(self) -> Status { + self.status + } + + /// Sets the protocol version. + pub fn version(mut self, version: u8) -> Self { + self.status.version = version; + self + } + + /// Sets the chain id. + pub fn chain(mut self, chain: Chain) -> Self { + self.status.chain = chain; + self + } + + /// Sets the total difficulty. + pub fn total_difficulty(mut self, total_difficulty: U256) -> Self { + self.status.total_difficulty = total_difficulty; + self + } + + /// Sets the block hash. + pub fn blockhash(mut self, blockhash: B256) -> Self { + self.status.blockhash = blockhash; + self + } + + /// Sets the genesis hash. + pub fn genesis(mut self, genesis: B256) -> Self { + self.status.genesis = genesis; + self + } + + /// Sets the fork id. 
+ pub fn forkid(mut self, forkid: ForkId) -> Self { + self.status.forkid = forkid; + self + } +} + #[cfg(test)] mod tests { - use crate::types::{EthVersion, Status}; + use crate::{EthVersion, Status}; use alloy_chains::{Chain, NamedChain}; use alloy_rlp::{Decodable, Encodable}; use rand::Rng; diff --git a/crates/net/eth-wire/src/types/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs similarity index 100% rename from crates/net/eth-wire/src/types/transactions.rs rename to crates/net/eth-wire-types/src/transactions.rs diff --git a/crates/net/eth-wire/src/types/version.rs b/crates/net/eth-wire-types/src/version.rs similarity index 94% rename from crates/net/eth-wire/src/types/version.rs rename to crates/net/eth-wire-types/src/version.rs index c7d49cabcd4b3..e121ea6d7ab2f 100644 --- a/crates/net/eth-wire/src/types/version.rs +++ b/crates/net/eth-wire-types/src/version.rs @@ -1,5 +1,4 @@ -//! Support for representing the version of the `eth`. [`Capability`](crate::capability::Capability) -//! and [Protocol](crate::protocol::Protocol). +//! 
Support for representing the version of the `eth` use std::str::FromStr; @@ -59,7 +58,7 @@ impl EthVersion { /// /// # Example /// ``` -/// use reth_eth_wire::types::EthVersion; +/// use reth_eth_wire_types::EthVersion; /// /// let version = EthVersion::try_from("67").unwrap(); /// assert_eq!(version, EthVersion::Eth67); @@ -82,7 +81,7 @@ impl TryFrom<&str> for EthVersion { /// /// # Example /// ``` -/// use reth_eth_wire::types::EthVersion; +/// use reth_eth_wire_types::EthVersion; /// /// let version = EthVersion::try_from(67).unwrap(); /// assert_eq!(version, EthVersion::Eth67); diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index dba0d168ed1ea..fc55a684cb055 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -19,6 +19,7 @@ reth-ecies.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } alloy-chains = { workspace = true, features = ["serde", "rlp", "arbitrary"] } reth-discv4.workspace = true +reth-eth-wire-types.workspace = true # metrics reth-metrics.workspace = true diff --git a/crates/net/eth-wire/src/errors/eth.rs b/crates/net/eth-wire/src/errors/eth.rs index d392d8e48e9bb..3a3d46eb0b122 100644 --- a/crates/net/eth-wire/src/errors/eth.rs +++ b/crates/net/eth-wire/src/errors/eth.rs @@ -2,8 +2,9 @@ use crate::{ errors::{MuxDemuxError, P2PStreamError}, + message::MessageError, version::ParseVersionError, - DisconnectReason, EthMessageID, EthVersion, + DisconnectReason, }; use alloy_chains::Chain; use reth_primitives::{GotExpected, GotExpectedBoxed, ValidationError, B256}; @@ -24,9 +25,9 @@ pub enum EthStreamError { #[error(transparent)] /// Failed Ethereum handshake. EthHandshakeError(#[from] EthHandshakeError), - #[error("message id {1:?} is invalid for version {0:?}")] - /// Flags an unrecognized message ID for a given protocol version. - EthInvalidMessageError(EthVersion, EthMessageID), + /// Thrown when decoding a message message failed. 
+ #[error(transparent)] + InvalidMessage(#[from] MessageError), #[error("message size ({0}) exceeds max length (10MB)")] /// Received a message whose size exceeds the standard limit. MessageTooBig(usize), @@ -40,8 +41,8 @@ pub enum EthStreamError { /// The number of transaction sizes. sizes_len: usize, }, - /// Error when data is not recieved from peer for a prolonged period. - #[error("never recieved data from remote peer")] + /// Error when data is not received from peer for a prolonged period. + #[error("never received data from remote peer")] StreamTimeout, } @@ -74,12 +75,6 @@ impl From for EthStreamError { } } -impl From for EthStreamError { - fn from(err: alloy_rlp::Error) -> Self { - P2PStreamError::from(err).into() - } -} - /// Error that can occur during the `eth` sub-protocol handshake. #[derive(thiserror::Error, Debug)] pub enum EthHandshakeError { diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index a157ce52ebf6f..df46b3db83d50 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -2,8 +2,7 @@ use crate::{ errors::{EthHandshakeError, EthStreamError}, message::{EthBroadcastMessage, ProtocolBroadcastMessage}, p2pstream::HANDSHAKE_TIMEOUT, - types::{EthMessage, ProtocolMessage, Status}, - CanDisconnect, DisconnectReason, EthVersion, + CanDisconnect, DisconnectReason, EthMessage, EthVersion, ProtocolMessage, Status, }; use futures::{ready, Sink, SinkExt, StreamExt}; use pin_project::pin_project; @@ -111,7 +110,7 @@ where Err(err) => { debug!("decode error in eth handshake: msg={their_msg:x}"); self.inner.disconnect(DisconnectReason::DisconnectRequested).await?; - return Err(err) + return Err(EthStreamError::InvalidMessage(err)) } }; @@ -278,7 +277,7 @@ where %msg, "failed to decode protocol message" ); - return Poll::Ready(Some(Err(err))) + return Poll::Ready(Some(Err(EthStreamError::InvalidMessage(err)))) } }; @@ -347,10 +346,10 @@ where mod tests { use 
super::UnauthedEthStream; use crate::{ + broadcast::BlockHashNumber, errors::{EthHandshakeError, EthStreamError}, p2pstream::{ProtocolVersion, UnauthedP2PStream}, - types::{broadcast::BlockHashNumber, EthMessage, EthVersion, Status}, - EthStream, HelloMessageWithProtocols, PassthroughCodec, + EthMessage, EthStream, EthVersion, HelloMessageWithProtocols, PassthroughCodec, Status, }; use alloy_chains::NamedChain; use futures::{SinkExt, StreamExt}; diff --git a/crates/net/eth-wire/src/lib.rs b/crates/net/eth-wire/src/lib.rs index ed347439f763b..e09ba95188071 100644 --- a/crates/net/eth-wire/src/lib.rs +++ b/crates/net/eth-wire/src/lib.rs @@ -15,7 +15,6 @@ #![allow(unknown_lints, non_local_definitions)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -pub mod builder; pub mod capability; mod disconnect; pub mod errors; @@ -26,9 +25,6 @@ pub mod muxdemux; mod p2pstream; mod pinger; pub mod protocol; -pub use builder::*; -pub mod types; -pub use types::*; #[cfg(test)] pub mod test_utils; @@ -48,5 +44,8 @@ pub use crate::{ DisconnectP2P, P2PMessage, P2PMessageID, P2PStream, ProtocolVersion, UnauthedP2PStream, MAX_RESERVED_MESSAGE_ID, }, - types::EthVersion, }; + +// Re-export wire types +#[doc(inline)] +pub use reth_eth_wire_types::*; diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index 82cbcb4f1bb6d..3e8bb096a74fa 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -226,7 +226,7 @@ where /// This stream emits _non-empty_ Bytes that start with the normalized message id, so that the first /// byte of each message starts from 0. If this stream only supports a single capability, for /// example `eth` then the first byte of each message will match -/// [EthMessageID](crate::types::EthMessageID). +/// [EthMessageID](reth_eth_wire_types::message::EthMessageID). 
#[pin_project] #[derive(Debug)] pub struct P2PStream { diff --git a/crates/net/eth-wire/src/types/mod.rs b/crates/net/eth-wire/src/types/mod.rs deleted file mode 100644 index f330958ee479f..0000000000000 --- a/crates/net/eth-wire/src/types/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -//! Types for the eth wire protocol. - -mod status; -pub use status::Status; - -pub mod version; -pub use version::EthVersion; - -pub mod message; -pub use message::{EthMessage, EthMessageID, ProtocolMessage}; - -pub mod blocks; -pub use blocks::*; - -pub mod broadcast; -pub use broadcast::*; - -pub mod transactions; -pub use transactions::*; - -pub mod state; -pub use state::*; - -pub mod receipts; -pub use receipts::*; From 4fb5889aa9cad0028dda803c657f7b42cdd54ed4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 1 Apr 2024 22:01:09 +0200 Subject: [PATCH 027/700] fix: check finalized sync target is not zero (#7412) --- crates/consensus/beacon/src/engine/mod.rs | 13 +++++++++---- crates/consensus/beacon/src/engine/sync.rs | 6 ++++++ 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 3ae1a5fd4aba4..d014b69011253 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -622,7 +622,7 @@ where } /// Returns the finalized hash to sync to if the distance from the local tip to the block is - /// greater than the configured threshold and we're not synced to the finalized block yet block + /// greater than the configured threshold and we're not synced to the finalized block yet /// yet (if we've seen that block already). 
/// /// If this is invoked after a new block has been downloaded, the downloaded block could be the @@ -671,9 +671,12 @@ where warn!(target: "consensus::engine", %err, "Failed to get finalized block header"); } Ok(None) => { - // we don't have the block yet and the distance exceeds the allowed - // threshold - return Some(state.finalized_block_hash) + // ensure the finalized block is known (not the zero hash) + if !state.finalized_block_hash.is_zero() { + // we don't have the block yet and the distance exceeds the allowed + // threshold + return Some(state.finalized_block_hash) + } } Ok(Some(_)) => { // we're fully synced to the finalized block @@ -1708,6 +1711,7 @@ where ) .is_none() { + // get the block number of the finalized block, if we have it let newest_finalized = self .forkchoice_state_tracker .sync_target_state() @@ -2086,6 +2090,7 @@ mod tests { let _ = env .send_forkchoice_updated(ForkchoiceState { head_block_hash: rng.gen(), + finalized_block_hash: rng.gen(), ..Default::default() }) .await; diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index e6e954efec9c3..1c41c9ffe323d 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -214,7 +214,13 @@ where } /// Sets a new target to sync the pipeline to. + /// + /// But ensures the target is not the zero hash. 
pub(crate) fn set_pipeline_sync_target(&mut self, target: B256) { + if target.is_zero() { + // precaution to never sync to the zero hash + return + } self.pending_pipeline_target = Some(target); } From e087e7ef8099042634be40cd171d6e977ca6035d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 1 Apr 2024 23:47:58 +0200 Subject: [PATCH 028/700] Remove outdated debug assertion (#7417) --- .../net/network/src/transactions/fetcher.rs | 33 +++++++++---------- 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 831a633453dff..781ba0ac5d300 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -375,11 +375,10 @@ impl TransactionFetcher { let mut max_retried_and_evicted_hashes = vec![]; for hash in hashes.into_iter() { - debug_assert!( - self.hashes_fetch_inflight_and_pending_fetch.peek(&hash).is_some(), - "`%hash` in `@buffered_hashes` that's not in `@hashes_fetch_inflight_and_pending_fetch`, `@buffered_hashes` should be a subset of keys in `@hashes_fetch_inflight_and_pending_fetch`, broken invariant `@buffered_hashes` and `@hashes_fetch_inflight_and_pending_fetch`, -`%hash`: {hash}" - ); + // hash could have been evicted from bounded lru map + if self.hashes_fetch_inflight_and_pending_fetch.peek(&hash).is_none() { + continue + } let Some(TxFetchMetadata { retries, fallback_peers, .. 
}) = self.hashes_fetch_inflight_and_pending_fetch.get(&hash) @@ -665,20 +664,18 @@ impl TransactionFetcher { *inflight_count += 1; - debug_assert!( - || -> bool { - for hash in new_announced_hashes.iter() { - if self.hashes_pending_fetch.contains(hash) { - return false - } - } - true - }(), - "`%new_announced_hashes` should been taken out of buffer before packing in a request, breaks invariant `@buffered_hashes` and `@inflight_requests`, + #[cfg(debug_assertions)] + { + for hash in new_announced_hashes.iter() { + if self.hashes_pending_fetch.contains(hash) { + panic!("`%new_announced_hashes` should been taken out of buffer before packing in a request, breaks invariant `@hashes_pending_fetch` and +`@inflight_requests`, `@hashes_fetch_inflight_and_pending_fetch` for `%new_announced_hashes`: {:?}", - new_announced_hashes.iter().map(|hash| - (*hash, self.hashes_fetch_inflight_and_pending_fetch.get(hash).cloned()) - ).collect::)>>()); + new_announced_hashes.iter().map(|hash| + (*hash, self.hashes_fetch_inflight_and_pending_fetch.get(hash).cloned())).collect::)>>()) + } + } + } let (response, rx) = oneshot::channel(); let req: PeerRequest = PeerRequest::GetPooledTransactions { From 8fc7a13ccd5237cf61f6dd3f955f3c35f53db632 Mon Sep 17 00:00:00 2001 From: Nil Medvedev Date: Tue, 2 Apr 2024 13:01:09 +0100 Subject: [PATCH 029/700] feat: make more descriptive errors instead of ProviderError (#7380) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- crates/consensus/auto-seal/src/lib.rs | 6 ++++-- crates/interfaces/src/blockchain_tree/error.rs | 2 +- crates/interfaces/src/error.rs | 4 ++-- crates/interfaces/src/executor.rs | 9 ++++++--- crates/revm/src/optimism/processor.rs | 7 ++++++- 5 files changed, 19 insertions(+), 9 deletions(-) diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index bcb1b0d102362..f48c5cea1360d 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ 
-409,7 +409,7 @@ impl StorageInner { // calculate the state root let state_root = client .latest() - .map_err(|_| BlockExecutionError::ProviderError)? + .map_err(BlockExecutionError::LatestBlock)? .state_root(bundle_state.state()) .unwrap(); header.state_root = state_root; @@ -439,7 +439,9 @@ impl StorageInner { // now execute the block let db = State::builder() - .with_database_boxed(Box::new(StateProviderDatabase::new(client.latest().unwrap()))) + .with_database_boxed(Box::new(StateProviderDatabase::new( + client.latest().map_err(BlockExecutionError::LatestBlock)?, + ))) .with_bundle_update() .build(); let mut executor = EVMProcessor::new_with_state(chain_spec.clone(), db, evm_config); diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index 3dd352b3b2eb9..4f4261ab6ca8e 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -248,7 +248,7 @@ impl InsertBlockErrorKind { true } // these are internal errors, not caused by an invalid block - BlockExecutionError::ProviderError | + BlockExecutionError::LatestBlock(_) | BlockExecutionError::Pruning(_) | BlockExecutionError::CanonicalRevert { .. } | BlockExecutionError::CanonicalCommit { .. 
} | diff --git a/crates/interfaces/src/error.rs b/crates/interfaces/src/error.rs index 9f7aef55b9207..c49323595dceb 100644 --- a/crates/interfaces/src/error.rs +++ b/crates/interfaces/src/error.rs @@ -70,8 +70,8 @@ mod size_asserts { }; } - static_assert_size!(RethError, 56); - static_assert_size!(BlockExecutionError, 48); + static_assert_size!(RethError, 64); + static_assert_size!(BlockExecutionError, 56); static_assert_size!(ConsensusError, 48); static_assert_size!(DatabaseError, 40); static_assert_size!(ProviderError, 48); diff --git a/crates/interfaces/src/executor.rs b/crates/interfaces/src/executor.rs index cc493e0fd3fc0..25e2f5710e47b 100644 --- a/crates/interfaces/src/executor.rs +++ b/crates/interfaces/src/executor.rs @@ -88,9 +88,6 @@ pub enum BlockExecutionError { /// Pruning error, transparently wrapping `PruneSegmentError` #[error(transparent)] Pruning(#[from] PruneSegmentError), - /// Error representing a provider error - #[error("provider error")] - ProviderError, /// Transaction error on revert with inner details #[error("transaction error on revert: {inner}")] CanonicalRevert { @@ -118,6 +115,9 @@ pub enum BlockExecutionError { /// Note: this is not feature gated for convenience. #[error("execution unavailable for tests")] UnavailableForTest, + /// Error when fetching latest block state. + #[error(transparent)] + LatestBlock(#[from] ProviderError), /// Optimism Block Executor Errors #[cfg(feature = "optimism")] @@ -141,6 +141,9 @@ pub enum OptimismBlockExecutionError { /// Thrown when a blob transaction is included in a sequencer's block. #[error("blob transaction included in sequencer block")] BlobTransactionRejected, + /// Thrown when a database account could not be loaded. 
+ #[error("failed to load account {0}")] + AccountLoadFailed(reth_primitives::Address), } impl BlockExecutionError { diff --git a/crates/revm/src/optimism/processor.rs b/crates/revm/src/optimism/processor.rs index 734fe898c56ba..f59da9abf1138 100644 --- a/crates/revm/src/optimism/processor.rs +++ b/crates/revm/src/optimism/processor.rs @@ -2,6 +2,7 @@ use crate::processor::{compare_receipts_root_and_logs_bloom, EVMProcessor}; use reth_interfaces::executor::{ BlockExecutionError, BlockValidationError, OptimismBlockExecutionError, }; + use reth_node_api::ConfigureEvm; use reth_primitives::{ proofs::calculate_receipt_root_optimism, revm_primitives::ResultAndState, BlockWithSenders, @@ -145,7 +146,11 @@ where .map(|acc| acc.account_info().unwrap_or_default()) }) .transpose() - .map_err(|_| BlockExecutionError::ProviderError)?; + .map_err(|_| { + BlockExecutionError::OptimismBlockExecution( + OptimismBlockExecutionError::AccountLoadFailed(*sender), + ) + })?; // Execute transaction. let ResultAndState { result, state } = self.transact(transaction, *sender)?; From afa33a2a4683c26024ac3c8ea675a2a2fd4d6883 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 2 Apr 2024 14:01:24 +0200 Subject: [PATCH 030/700] fix: dont unwind genesis block in stage run (#7418) --- bin/reth/src/commands/stage/run.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/reth/src/commands/stage/run.rs b/bin/reth/src/commands/stage/run.rs index a055f5a4e0dcc..8887a7ae02e1c 100644 --- a/bin/reth/src/commands/stage/run.rs +++ b/bin/reth/src/commands/stage/run.rs @@ -157,7 +157,7 @@ impl Command { .await?; } - let batch_size = self.batch_size.unwrap_or(self.to - self.from + 1); + let batch_size = self.batch_size.unwrap_or(self.to.saturating_sub(self.from) + 1); let etl_config = EtlConfig::new( Some( From a2d53a6f7b5e06b80fd44ec966c9c3b5b9f9a3d4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 2 Apr 2024 16:00:33 +0200 Subject: [PATCH 031/700] chore: move FullProvider 
trait to reth-provider (#7423) --- Cargo.lock | 1 + crates/node-api/Cargo.toml | 1 + crates/node-api/src/lib.rs | 3 +++ crates/node-builder/src/lib.rs | 1 - crates/node-builder/src/node.rs | 2 +- .../src/provider.rs => storage/provider/src/traits/full.rs} | 4 ++-- crates/storage/provider/src/traits/mod.rs | 3 +++ 7 files changed, 11 insertions(+), 4 deletions(-) rename crates/{node-builder/src/provider.rs => storage/provider/src/traits/full.rs} (97%) diff --git a/Cargo.lock b/Cargo.lock index f6e2268067504..5a6e5c80eb720 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6126,6 +6126,7 @@ version = "0.2.0-beta.4" dependencies = [ "reth-evm", "reth-primitives", + "reth-provider", "reth-rpc-types", "serde", "thiserror", diff --git a/crates/node-api/Cargo.toml b/crates/node-api/Cargo.toml index 0dbd20adee83f..2d8cf84ddcd30 100644 --- a/crates/node-api/Cargo.toml +++ b/crates/node-api/Cargo.toml @@ -15,6 +15,7 @@ workspace = true reth-primitives.workspace = true reth-rpc-types.workspace = true reth-evm.workspace = true +reth-provider.workspace = true # misc serde.workspace = true diff --git a/crates/node-api/src/lib.rs b/crates/node-api/src/lib.rs index d3e08fc9be914..15240e21b09f5 100644 --- a/crates/node-api/src/lib.rs +++ b/crates/node-api/src/lib.rs @@ -26,3 +26,6 @@ pub mod primitives; pub mod node; pub use node::NodeTypes; + +// re-export for convenience +pub use reth_provider::FullProvider; diff --git a/crates/node-builder/src/lib.rs b/crates/node-builder/src/lib.rs index 757e37f043e37..5dae4d96828bf 100644 --- a/crates/node-builder/src/lib.rs +++ b/crates/node-builder/src/lib.rs @@ -24,7 +24,6 @@ pub use builder::*; mod handle; pub use handle::NodeHandle; -pub mod provider; pub mod rpc; /// Support for installing the ExExs (execution extensions) in a node. 
diff --git a/crates/node-builder/src/node.rs b/crates/node-builder/src/node.rs index ccc765c8f5816..0cf6132690f8c 100644 --- a/crates/node-builder/src/node.rs +++ b/crates/node-builder/src/node.rs @@ -1,7 +1,7 @@ use crate::{ components::{ComponentsBuilder, FullNodeComponents}, - provider::FullProvider, rpc::{RethRpcServerHandles, RpcRegistry}, + FullProvider, }; use reth_db::database::Database; use reth_network::NetworkHandle; diff --git a/crates/node-builder/src/provider.rs b/crates/storage/provider/src/traits/full.rs similarity index 97% rename from crates/node-builder/src/provider.rs rename to crates/storage/provider/src/traits/full.rs index 62235d1a104cb..e73357f4a7cd4 100644 --- a/crates/node-builder/src/provider.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -1,10 +1,10 @@ //! Helper provider traits to encapsulate all provider traits for simplicity. -use reth_db::database::Database; -use reth_provider::{ +use crate::{ AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, StateProviderFactory, }; +use reth_db::database::Database; /// Helper trait to unify all provider traits for simplicity. 
pub trait FullProvider: diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 1d841b025a398..7b64d09d556b2 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -77,3 +77,6 @@ pub use database_provider::DatabaseProviderFactory; mod stats; pub use stats::StatsReader; + +mod full; +pub use full::FullProvider; From bd2099a11218befe2898b5684bb27e11891cd9db Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 2 Apr 2024 16:37:53 +0200 Subject: [PATCH 032/700] fix: off by one trace_filter (#7422) --- crates/rpc/rpc/src/eth/api/transactions.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index f1016e475e7d9..67e80408690b1 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -445,6 +445,8 @@ pub trait EthTransactions: Send + Sync { /// transactions, in other words, it will stop executing transactions after the /// `highest_index`th transaction. /// + /// Note: This expect tx index to be 0-indexed, so the first transaction is at index 0. + /// /// This accepts a `inspector_setup` closure that returns the inspector to be used for tracing /// the transactions. 
async fn trace_block_until_with_inspector( @@ -1109,8 +1111,10 @@ where let base_fee = block_env.basefee.saturating_to::(); // prepare transactions, we do everything upfront to reduce time spent with open state - let max_transactions = - highest_index.map_or(block.body.len(), |highest| highest as usize); + let max_transactions = highest_index.map_or(block.body.len(), |highest| { + // we need + 1 because the index is 0-based + highest as usize + 1 + }); let mut results = Vec::with_capacity(max_transactions); let mut transactions = block From 41049af99a845fc191f8f744538e17945ed39daa Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 2 Apr 2024 16:57:05 +0200 Subject: [PATCH 033/700] chore: move FullNodeTypes to node-api (#7425) --- Cargo.lock | 1 + crates/node-api/Cargo.toml | 1 + crates/node-api/src/lib.rs | 2 +- crates/node-api/src/node.rs | 55 ++++++++++++++++++ crates/node-builder/src/builder.rs | 4 +- crates/node-builder/src/components/builder.rs | 3 +- crates/node-builder/src/components/mod.rs | 2 +- crates/node-builder/src/components/network.rs | 2 +- crates/node-builder/src/components/payload.rs | 2 +- crates/node-builder/src/components/pool.rs | 2 +- crates/node-builder/src/components/traits.rs | 4 +- crates/node-builder/src/node.rs | 56 ++----------------- 12 files changed, 71 insertions(+), 63 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5a6e5c80eb720..c18802de01fe8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6124,6 +6124,7 @@ dependencies = [ name = "reth-node-api" version = "0.2.0-beta.4" dependencies = [ + "reth-db", "reth-evm", "reth-primitives", "reth-provider", diff --git a/crates/node-api/Cargo.toml b/crates/node-api/Cargo.toml index 2d8cf84ddcd30..323f730d923d4 100644 --- a/crates/node-api/Cargo.toml +++ b/crates/node-api/Cargo.toml @@ -16,6 +16,7 @@ reth-primitives.workspace = true reth-rpc-types.workspace = true reth-evm.workspace = true reth-provider.workspace = true +reth-db.workspace = true # misc serde.workspace = true diff 
--git a/crates/node-api/src/lib.rs b/crates/node-api/src/lib.rs index 15240e21b09f5..2f189732b60a7 100644 --- a/crates/node-api/src/lib.rs +++ b/crates/node-api/src/lib.rs @@ -25,7 +25,7 @@ pub use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; pub mod primitives; pub mod node; -pub use node::NodeTypes; +pub use node::*; // re-export for convenience pub use reth_provider::FullProvider; diff --git a/crates/node-api/src/node.rs b/crates/node-api/src/node.rs index e1a0038969549..d1ca897cb0cef 100644 --- a/crates/node-api/src/node.rs +++ b/crates/node-api/src/node.rs @@ -1,6 +1,9 @@ //! Traits for configuring a node use crate::{primitives::NodePrimitives, ConfigureEvm, EngineTypes}; +use reth_db::database::Database; +use reth_provider::FullProvider; +use std::marker::PhantomData; /// The type that configures the essential types of an ethereum like node. /// @@ -17,3 +20,55 @@ pub trait NodeTypes: Send + Sync + 'static { /// Returns the node's evm config. fn evm_config(&self) -> Self::Evm; } + +/// A helper type that is downstream of the [NodeTypes] trait and adds stateful components to the +/// node. +pub trait FullNodeTypes: NodeTypes + 'static { + /// Underlying database type. + type DB: Database + Clone + 'static; + /// The provider type used to interact with the node. + type Provider: FullProvider; +} + +/// An adapter type that adds the builtin provider type to the user configured node types. +#[derive(Debug)] +pub struct FullNodeTypesAdapter { + /// An instance of the user configured node types. + pub types: Types, + /// The database type used by the node. + pub db: PhantomData, + /// The provider type used by the node. + pub provider: PhantomData, +} + +impl FullNodeTypesAdapter { + /// Create a new adapter from the given node types. 
+ pub fn new(types: Types) -> Self { + Self { types, db: Default::default(), provider: Default::default() } + } +} + +impl NodeTypes for FullNodeTypesAdapter +where + Types: NodeTypes, + DB: Send + Sync + 'static, + Provider: Send + Sync + 'static, +{ + type Primitives = Types::Primitives; + type Engine = Types::Engine; + type Evm = Types::Evm; + + fn evm_config(&self) -> Self::Evm { + self.types.evm_config() + } +} + +impl FullNodeTypes for FullNodeTypesAdapter +where + Types: NodeTypes, + Provider: FullProvider, + DB: Database + Clone + 'static, +{ + type DB = DB; + type Provider = Provider; +} diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index 74a0564d6218e..72372d15e49b0 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -9,7 +9,7 @@ use crate::{ }, exex::{BoxedLaunchExEx, ExExContext}, hooks::NodeHooks, - node::{FullNode, FullNodeTypes, FullNodeTypesAdapter}, + node::FullNode, rpc::{RethRpcServerHandles, RpcContext, RpcHooks}, Node, NodeHandle, }; @@ -30,7 +30,7 @@ use reth_db::{ }; use reth_interfaces::p2p::either::EitherDownloader; use reth_network::{NetworkBuilder, NetworkConfig, NetworkEvents, NetworkHandle}; -use reth_node_api::NodeTypes; +use reth_node_api::{FullNodeTypes, FullNodeTypesAdapter, NodeTypes}; use reth_node_core::{ cli::config::{PayloadBuilderConfig, RethRpcConfig, RethTransactionPoolConfig}, dirs::{ChainPath, DataDirPath, MaybePlatformPath}, diff --git a/crates/node-builder/src/components/builder.rs b/crates/node-builder/src/components/builder.rs index c60a0204851b3..f145846035fc5 100644 --- a/crates/node-builder/src/components/builder.rs +++ b/crates/node-builder/src/components/builder.rs @@ -4,8 +4,7 @@ use crate::{ components::{ NetworkBuilder, NodeComponents, NodeComponentsBuilder, PayloadServiceBuilder, PoolBuilder, }, - node::FullNodeTypes, - BuilderContext, + BuilderContext, FullNodeTypes, }; use std::marker::PhantomData; diff --git 
a/crates/node-builder/src/components/mod.rs b/crates/node-builder/src/components/mod.rs index 9cb8f800de33c..c0d2bc8906d0c 100644 --- a/crates/node-builder/src/components/mod.rs +++ b/crates/node-builder/src/components/mod.rs @@ -7,7 +7,7 @@ //! //! Components depend on a fully type configured node: [FullNodeTypes](crate::node::FullNodeTypes). -use crate::node::FullNodeTypes; +use crate::FullNodeTypes; pub use builder::*; pub use network::*; pub use payload::*; diff --git a/crates/node-builder/src/components/network.rs b/crates/node-builder/src/components/network.rs index e6bda16c02fee..f899162205818 100644 --- a/crates/node-builder/src/components/network.rs +++ b/crates/node-builder/src/components/network.rs @@ -1,6 +1,6 @@ //! Network component for the node builder. -use crate::{node::FullNodeTypes, BuilderContext}; +use crate::{BuilderContext, FullNodeTypes}; use reth_network::NetworkHandle; use reth_transaction_pool::TransactionPool; use std::future::Future; diff --git a/crates/node-builder/src/components/payload.rs b/crates/node-builder/src/components/payload.rs index 7c50299bcfba9..bf5eb13eded79 100644 --- a/crates/node-builder/src/components/payload.rs +++ b/crates/node-builder/src/components/payload.rs @@ -1,6 +1,6 @@ //! Payload service component for the node builder. -use crate::{node::FullNodeTypes, BuilderContext}; +use crate::{BuilderContext, FullNodeTypes}; use reth_payload_builder::PayloadBuilderHandle; use reth_transaction_pool::TransactionPool; use std::future::Future; diff --git a/crates/node-builder/src/components/pool.rs b/crates/node-builder/src/components/pool.rs index 8a6bd62c81fa0..029079a99947c 100644 --- a/crates/node-builder/src/components/pool.rs +++ b/crates/node-builder/src/components/pool.rs @@ -1,5 +1,5 @@ //! Pool component for the node builder. 
-use crate::{node::FullNodeTypes, BuilderContext}; +use crate::{BuilderContext, FullNodeTypes}; use reth_transaction_pool::TransactionPool; use std::future::Future; diff --git a/crates/node-builder/src/components/traits.rs b/crates/node-builder/src/components/traits.rs index 1c2da88df5483..f73de22304518 100644 --- a/crates/node-builder/src/components/traits.rs +++ b/crates/node-builder/src/components/traits.rs @@ -1,8 +1,8 @@ //! Traits for the builder -use crate::{components::NodeComponents, node::FullNodeTypes, BuilderContext}; +use crate::{components::NodeComponents, BuilderContext}; use reth_network::NetworkHandle; -use reth_node_api::NodeTypes; +use reth_node_api::{FullNodeTypes, NodeTypes}; use reth_payload_builder::PayloadBuilderHandle; use reth_tasks::TaskExecutor; use reth_transaction_pool::TransactionPool; diff --git a/crates/node-builder/src/node.rs b/crates/node-builder/src/node.rs index 0cf6132690f8c..dd32e8c0caf97 100644 --- a/crates/node-builder/src/node.rs +++ b/crates/node-builder/src/node.rs @@ -1,11 +1,8 @@ use crate::{ components::{ComponentsBuilder, FullNodeComponents}, rpc::{RethRpcServerHandles, RpcRegistry}, - FullProvider, }; -use reth_db::database::Database; use reth_network::NetworkHandle; -pub use reth_node_api::NodeTypes; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, @@ -18,7 +15,10 @@ use reth_payload_builder::PayloadBuilderHandle; use reth_primitives::ChainSpec; use reth_provider::ChainSpecProvider; use reth_tasks::TaskExecutor; -use std::{marker::PhantomData, sync::Arc}; +use std::sync::Arc; + +// re-export the node api types +pub use reth_node_api::{FullNodeTypes, NodeTypes}; /// A [Node] is a [NodeTypes] that comes with preconfigured components. /// @@ -37,54 +37,6 @@ pub trait Node: NodeTypes + Clone { ) -> ComponentsBuilder; } -/// A helper type that is downstream of the node types and adds stateful components to the node. 
-pub trait FullNodeTypes: NodeTypes + 'static { - /// Underlying database type. - type DB: Database + Clone + 'static; - /// The provider type used to interact with the node. - type Provider: FullProvider; -} - -/// An adapter type that adds the builtin provider type to the user configured node types. -#[derive(Debug)] -pub struct FullNodeTypesAdapter { - pub(crate) types: Types, - _db: PhantomData, - _provider: PhantomData, -} - -impl FullNodeTypesAdapter { - /// Create a new adapter from the given node types. - pub fn new(types: Types) -> Self { - Self { types, _db: Default::default(), _provider: Default::default() } - } -} - -impl NodeTypes for FullNodeTypesAdapter -where - Types: NodeTypes, - DB: Send + Sync + 'static, - Provider: Send + Sync + 'static, -{ - type Primitives = Types::Primitives; - type Engine = Types::Engine; - type Evm = Types::Evm; - - fn evm_config(&self) -> Self::Evm { - self.types.evm_config() - } -} - -impl FullNodeTypes for FullNodeTypesAdapter -where - Types: NodeTypes, - Provider: FullProvider, - DB: Database + Clone + 'static, -{ - type DB = DB; - type Provider = Provider; -} - /// The launched node with all components including RPC handlers. /// /// This can be used to interact with the launched node. 
From 112c8a3f1e1bcab1a1418e6150400012bc195548 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 2 Apr 2024 17:20:51 +0200 Subject: [PATCH 034/700] chore(node-core): unify stage progress log style (#7426) --- crates/node-core/src/events/node.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/node-core/src/events/node.rs b/crates/node-core/src/events/node.rs index 35d2c2b3ee912..c1a9c1b0f364e 100644 --- a/crates/node-core/src/events/node.rs +++ b/crates/node-core/src/events/node.rs @@ -167,8 +167,7 @@ impl NodeState { .and_then(|entities| entities.fmt_percentage()); let stage_eta = current_stage.eta.fmt_for_stage(stage_id); - let message = - if done { "Stage finished executing" } else { "Stage committed progress" }; + let message = if done { "Finished stage" } else { "Committed stage progress" }; match (stage_progress, stage_eta) { (Some(stage_progress), Some(stage_eta)) => { From b653b3de15fb9e13eeae4936eef68e357e1649bf Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 2 Apr 2024 18:59:38 +0200 Subject: [PATCH 035/700] fix(static-file): run producer only if passed non-empty targets (#7424) --- crates/static-file/src/static_file_producer.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/static-file/src/static_file_producer.rs b/crates/static-file/src/static_file_producer.rs index aa77ec1a9e2cc..2af4f8cac0651 100644 --- a/crates/static-file/src/static_file_producer.rs +++ b/crates/static-file/src/static_file_producer.rs @@ -124,6 +124,11 @@ impl StaticFileProducerInner { /// NOTE: it doesn't delete the data from database, and the actual deleting (aka pruning) logic /// lives in the `prune` crate. 
pub fn run(&mut self, targets: StaticFileTargets) -> StaticFileProducerResult { + // If there are no targets, do not produce any static files and return early + if !targets.any() { + return Ok(targets) + } + debug_assert!(targets.is_contiguous_to_highest_static_files( self.static_file_provider.get_highest_static_files() )); From f63cebbdb675a12da983e08611d8a6319a9a4085 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 2 Apr 2024 13:37:05 -0400 Subject: [PATCH 036/700] fix: reduce merkle clean_threshold to 5000 (#7364) --- book/run/config.md | 2 +- crates/config/src/config.rs | 2 +- crates/stages/src/stages/merkle.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/book/run/config.md b/book/run/config.md index ec559e901cce4..f2da90ea4f95f 100644 --- a/book/run/config.md +++ b/book/run/config.md @@ -179,7 +179,7 @@ The merkle stage uses the indexes built in the hashing stages (storage and accou # The threshold in number of blocks before the stage starts from scratch # and re-computes the state root, discarding the trie that has already been built, # as opposed to incrementally updating the trie. -clean_threshold = 50000 +clean_threshold = 5000 ``` ### `transaction_lookup` diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 7c43c27831f94..7ce947b508689 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -222,7 +222,7 @@ pub struct MerkleConfig { impl Default for MerkleConfig { fn default() -> Self { - Self { clean_threshold: 50_000 } + Self { clean_threshold: 5_000 } } } diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index cb61e99314561..e1d651169100f 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -21,7 +21,7 @@ use tracing::*; /// The default threshold (in number of blocks) for switching from incremental trie building /// of changes to whole rebuild. 
-pub const MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD: u64 = 50_000; +pub const MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD: u64 = 5_000; /// The merkle hashing stage uses input from /// [`AccountHashingStage`][crate::stages::AccountHashingStage] and From 16c76b6ce6beec6411a82168ad05b8c2217adb6f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 2 Apr 2024 19:44:21 +0200 Subject: [PATCH 037/700] fix: use modify specid instead (#7427) --- crates/revm/src/processor.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs index c91257bb55910..198c80b041406 100644 --- a/crates/revm/src/processor.rs +++ b/crates/revm/src/processor.rs @@ -19,7 +19,7 @@ use revm::{ inspector_handle_register, interpreter::Host, primitives::{CfgEnvWithHandlerCfg, ResultAndState}, - Evm, Handler, State, StateBuilder, + Evm, State, StateBuilder, }; use std::{sync::Arc, time::Instant}; @@ -180,7 +180,9 @@ where total_difficulty, ); *self.evm.cfg_mut() = cfg.cfg_env; - self.evm.handler = Handler::new(cfg.handler_cfg); + + // This will update the spec in case it changed + self.evm.modify_spec_id(cfg.handler_cfg.spec_id); } /// Applies the pre-block call to the EIP-4788 beacon block root contract. 
From 3726cd17e88bcf567cf76eb683d8805faffbd1b7 Mon Sep 17 00:00:00 2001 From: Panagiotis Ganelis <50522617+PanGan21@users.noreply.github.com> Date: Tue, 2 Apr 2024 20:50:05 +0300 Subject: [PATCH 038/700] feat: use pipeline for `reth stage unwind` (#7085) Co-authored-by: joshieDo Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com> Co-authored-by: Alexey Shekhirin --- bin/reth/src/commands/stage/unwind.rs | 197 ++++++++++++++++++++--- crates/primitives/src/static_file/mod.rs | 5 + crates/stages/src/pipeline/mod.rs | 2 +- crates/stages/src/stages/bodies.rs | 70 +++++--- crates/stages/src/stages/execution.rs | 25 ++- 5 files changed, 248 insertions(+), 51 deletions(-) diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index 17847b16168b3..c7483870a8c7b 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -8,10 +8,37 @@ use crate::{ dirs::{DataDirPath, MaybePlatformPath}, }; use clap::{Parser, Subcommand}; -use reth_db::{cursor::DbCursorRO, database::Database, open_db, tables, transaction::DbTx}; -use reth_primitives::{BlockHashOrNumber, ChainSpec}; -use reth_provider::{BlockExecutionWriter, ProviderFactory}; +use reth_beacon_consensus::BeaconConsensus; +use reth_config::{Config, PruneConfig}; +use reth_db::{database::Database, open_db}; +use reth_downloaders::{ + bodies::bodies::BodiesDownloaderBuilder, + headers::reverse_headers::ReverseHeadersDownloaderBuilder, +}; +use reth_interfaces::consensus::Consensus; +use reth_node_core::{ + args::{get_secret_key, NetworkArgs}, + dirs::ChainPath, +}; +use reth_node_ethereum::EthEvmConfig; +use reth_primitives::{BlockHashOrNumber, ChainSpec, PruneModes, B256}; +use reth_provider::{ + BlockExecutionWriter, BlockNumReader, ChainSpecProvider, HeaderSyncMode, ProviderFactory, +}; +use reth_prune::PrunerBuilder; +use reth_stages::{ + sets::DefaultStages, + stages::{ + AccountHashingStage, ExecutionStage, ExecutionStageThresholds, 
IndexAccountHistoryStage, + IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage, + TransactionLookupStage, + }, + Pipeline, StageSet, +}; +use reth_static_file::StaticFileProducer; use std::{ops::RangeInclusive, sync::Arc}; +use tokio::sync::watch; +use tracing::info; /// `reth stage unwind` command #[derive(Debug, Parser)] @@ -42,6 +69,9 @@ pub struct Command { #[command(flatten)] db: DatabaseArgs, + #[command(flatten)] + network: NetworkArgs, + #[command(subcommand)] command: Subcommands, } @@ -55,28 +85,150 @@ impl Command { if !db_path.exists() { eyre::bail!("Database {db_path:?} does not exist.") } + let config_path = data_dir.config_path(); + let config: Config = confy::load_path(config_path).unwrap_or_default(); - let db = open_db(db_path.as_ref(), self.db.database_args())?; - - let range = self.command.unwind_range(&db)?; + let db = Arc::new(open_db(db_path.as_ref(), self.db.database_args())?); + let provider_factory = + ProviderFactory::new(db, self.chain.clone(), data_dir.static_files_path())?; + let range = self.command.unwind_range(provider_factory.clone())?; if *range.start() == 0 { eyre::bail!("Cannot unwind genesis block") } - let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?; - let provider = factory.provider_rw()?; + // Only execute a pipeline unwind if the start of the range overlaps the existing static + // files. If that's the case, then copy all available data from MDBX to static files, and + // only then, proceed with the unwind. 
+ if let Some(highest_static_block) = provider_factory + .static_file_provider() + .get_highest_static_files() + .max() + .filter(|highest_static_file_block| highest_static_file_block >= range.start()) + { + info!(target: "reth::cli", ?range, ?highest_static_block, "Executing a pipeline unwind."); + let mut pipeline = + self.build_pipeline(data_dir, config, provider_factory.clone()).await?; - let blocks_and_execution = provider - .take_block_and_execution_range(&self.chain, range) - .map_err(|err| eyre::eyre!("Transaction error on unwind: {err}"))?; + // Move all applicable data from database to static files. + pipeline.produce_static_files()?; - provider.commit()?; + // Run the pruner so we don't potentially end up with higher height in the database vs + // static files. + let mut pruner = PrunerBuilder::new(PruneConfig::default()) + .prune_delete_limit(usize::MAX) + .build(provider_factory); + pruner.run(*range.end())?; - println!("Unwound {} blocks", blocks_and_execution.len()); + pipeline.unwind((*range.start()).saturating_sub(1), None)?; + } else { + info!(target: "reth::cli", ?range, "Executing a database unwind."); + let provider = provider_factory.provider_rw()?; + + let _ = provider + .take_block_and_execution_range(&self.chain, range.clone()) + .map_err(|err| eyre::eyre!("Transaction error on unwind: {err}"))?; + + provider.commit()?; + } + + println!("Unwound {} blocks", range.count()); Ok(()) } + + async fn build_pipeline( + self, + data_dir: ChainPath, + config: Config, + provider_factory: ProviderFactory>, + ) -> Result>, eyre::Error> { + // Even though we are not planning to download anything, we need to initialize Body and + // Header stage with a network client + let network_secret_path = + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + let p2p_secret_key = get_secret_key(&network_secret_path)?; + let default_peers_path = data_dir.known_peers_path(); + let network = self + .network + .network_config( + &config, 
+ provider_factory.chain_spec(), + p2p_secret_key, + default_peers_path, + ) + .build(provider_factory.clone()) + .start_network() + .await?; + + let consensus: Arc = + Arc::new(BeaconConsensus::new(provider_factory.chain_spec())); + + // building network downloaders using the fetch client + let fetch_client = network.fetch_client().await?; + let header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) + .build(fetch_client.clone(), Arc::clone(&consensus)); + let body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies).build( + fetch_client, + Arc::clone(&consensus), + provider_factory.clone(), + ); + let stage_conf = &config.stages; + + let (tip_tx, tip_rx) = watch::channel(B256::ZERO); + let factory = reth_revm::EvmProcessorFactory::new( + provider_factory.chain_spec(), + EthEvmConfig::default(), + ); + + let header_mode = HeaderSyncMode::Tip(tip_rx); + let pipeline = Pipeline::builder() + .with_tip_sender(tip_tx) + .add_stages( + DefaultStages::new( + provider_factory.clone(), + header_mode, + Arc::clone(&consensus), + header_downloader, + body_downloader, + factory.clone(), + stage_conf.etl.clone(), + ) + .set(SenderRecoveryStage { + commit_threshold: stage_conf.sender_recovery.commit_threshold, + }) + .set(ExecutionStage::new( + factory, + ExecutionStageThresholds { + max_blocks: None, + max_changes: None, + max_cumulative_gas: None, + max_duration: None, + }, + stage_conf + .merkle + .clean_threshold + .max(stage_conf.account_hashing.clean_threshold) + .max(stage_conf.storage_hashing.clean_threshold), + config.prune.clone().map(|prune| prune.segments).unwrap_or_default(), + )) + .set(AccountHashingStage::default()) + .set(StorageHashingStage::default()) + .set(MerkleStage::default_unwind()) + .set(TransactionLookupStage::default()) + .set(IndexAccountHistoryStage::default()) + .set(IndexStorageHistoryStage::default()), + ) + .build( + provider_factory.clone(), + StaticFileProducer::new( + provider_factory.clone(), + 
provider_factory.static_file_provider(), + PruneModes::default(), + ), + ); + Ok(pipeline) + } } /// `reth stage unwind` subcommand @@ -94,21 +246,22 @@ impl Subcommands { /// Returns the block range to unwind. /// /// This returns an inclusive range: [target..=latest] - fn unwind_range(&self, db: DB) -> eyre::Result> { - let tx = db.tx()?; - let mut cursor = tx.cursor_read::()?; - let last = cursor.last()?.ok_or_else(|| eyre::eyre!("No blocks in database"))?; - + fn unwind_range( + &self, + factory: ProviderFactory, + ) -> eyre::Result> { + let provider = factory.provider()?; + let last = provider.last_block_number()?; let target = match self { Subcommands::ToBlock { target } => match target { - BlockHashOrNumber::Hash(hash) => tx - .get::(*hash)? + BlockHashOrNumber::Hash(hash) => provider + .block_number(*hash)? .ok_or_else(|| eyre::eyre!("Block hash not found in database: {hash:?}"))?, BlockHashOrNumber::Number(num) => *num, }, - Subcommands::NumBlocks { amount } => last.0.saturating_sub(*amount), + Subcommands::NumBlocks { amount } => last.saturating_sub(*amount), } + 1; - Ok(target..=last.0) + Ok(target..=last) } } diff --git a/crates/primitives/src/static_file/mod.rs b/crates/primitives/src/static_file/mod.rs index fe15bd1c759ab..e7e9e47fd2588 100644 --- a/crates/primitives/src/static_file/mod.rs +++ b/crates/primitives/src/static_file/mod.rs @@ -44,6 +44,11 @@ impl HighestStaticFiles { StaticFileSegment::Receipts => &mut self.receipts, } } + + /// Returns the maximum block of all segments. + pub fn max(&self) -> Option { + [self.headers, self.transactions, self.receipts].iter().filter_map(|&option| option).max() + } } /// Each static file has a fixed number of blocks. 
This gives out the range where the requested diff --git a/crates/stages/src/pipeline/mod.rs b/crates/stages/src/pipeline/mod.rs index e4ad70fac86b1..eb1f40cbd17c2 100644 --- a/crates/stages/src/pipeline/mod.rs +++ b/crates/stages/src/pipeline/mod.rs @@ -232,7 +232,7 @@ where /// /// CAUTION: This method locks the static file producer Mutex, hence can block the thread if the /// lock is occupied. - fn produce_static_files(&mut self) -> RethResult<()> { + pub fn produce_static_files(&mut self) -> RethResult<()> { let mut static_file_producer = self.static_file_producer.lock(); let provider = self.provider_factory.provider()?; diff --git a/crates/stages/src/stages/bodies.rs b/crates/stages/src/stages/bodies.rs index 8d4519af2c7e9..b52274b1e3dfb 100644 --- a/crates/stages/src/stages/bodies.rs +++ b/crates/stages/src/stages/bodies.rs @@ -13,9 +13,12 @@ use reth_interfaces::{ }; use reth_primitives::{ stage::{EntitiesCheckpoint, StageCheckpoint, StageId}, - StaticFileSegment, + StaticFileSegment, TxNumber, +}; +use reth_provider::{ + providers::{StaticFileProvider, StaticFileWriter}, + BlockReader, DatabaseProviderRW, HeaderProvider, ProviderError, StatsReader, }; -use reth_provider::{providers::StaticFileWriter, DatabaseProviderRW, HeaderProvider, StatsReader}; use std::{ cmp::Ordering, task::{ready, Context, Poll}, @@ -145,17 +148,11 @@ impl Stage for BodyStage { // error will trigger an unwind, that will bring the database to the same height as the // static files. Ordering::Less => { - let last_block = static_file_provider - .get_highest_static_file_block(StaticFileSegment::Transactions) - .unwrap_or_default(); - - let missing_block = - Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); - - return Err(StageError::MissingStaticFileData { - block: missing_block, - segment: StaticFileSegment::Transactions, - }) + return Err(missing_static_data_error( + next_static_file_tx_num.saturating_sub(1), + static_file_provider, + provider, + )?) 
} Ordering::Equal => {} } @@ -311,17 +308,11 @@ impl Stage for BodyStage { // If there are more transactions on database, then we are missing static file data and we // need to unwind further. if db_tx_num > static_file_tx_num { - let last_block = static_file_provider - .get_highest_static_file_block(StaticFileSegment::Transactions) - .unwrap_or_default(); - - let missing_block = - Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); - - return Err(StageError::MissingStaticFileData { - block: missing_block, - segment: StaticFileSegment::Transactions, - }) + return Err(missing_static_data_error( + static_file_tx_num, + static_file_provider, + provider, + )?) } // Unwinds static file @@ -335,6 +326,37 @@ impl Stage for BodyStage { } } +fn missing_static_data_error( + last_tx_num: TxNumber, + static_file_provider: &StaticFileProvider, + provider: &DatabaseProviderRW, +) -> Result { + let mut last_block = static_file_provider + .get_highest_static_file_block(StaticFileSegment::Transactions) + .unwrap_or_default(); + + // To be extra safe, we make sure that the last tx num matches the last block from its indices. + // If not, get it. + loop { + if let Some(indices) = provider.block_body_indices(last_block)? { + if indices.last_tx_num() <= last_tx_num { + break + } + } + if last_block == 0 { + break + } + last_block -= 1; + } + + let missing_block = Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); + + Ok(StageError::MissingStaticFileData { + block: missing_block, + segment: StaticFileSegment::Transactions, + }) +} + // TODO(alexey): ideally, we want to measure Bodies stage progress in bytes, but it's hard to know // beforehand how many bytes we need to download. So the good solution would be to measure the // progress in gas as a proxy to size. Execution stage uses a similar approach. 
diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 95f5cc2d45fcf..81aef2ad3aa2b 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -578,13 +578,30 @@ where start_block.saturating_sub(1), )?, Ordering::Less => { - let last_block = static_file_provider + let mut last_block = static_file_provider .get_highest_static_file_block(StaticFileSegment::Receipts) .unwrap_or(0); - let missing_block = Box::new( - tx.get::(last_block + 1)?.unwrap_or_default().seal_slow(), - ); + let last_receipt_num = static_file_provider + .get_highest_static_file_tx(StaticFileSegment::Receipts) + .unwrap_or(0); + + // To be extra safe, we make sure that the last receipt num matches the last block from + // its indices. If not, get it. + loop { + if let Some(indices) = provider.block_body_indices(last_block)? { + if indices.last_tx_num() <= last_receipt_num { + break + } + } + if last_block == 0 { + break + } + last_block -= 1; + } + + let missing_block = + Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); return Err(StageError::MissingStaticFileData { block: missing_block, From ebc4bc893cc0f87ab7121e9a6bdd898472300473 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 2 Apr 2024 21:42:20 +0200 Subject: [PATCH 039/700] feat(discv5): add crate for interfacing reth network and sigp/discv5 (#7336) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> Co-authored-by: Matthias Seitz --- Cargo.lock | 407 +++++++++++++- Cargo.toml | 4 +- crates/net/discv5/Cargo.toml | 45 ++ crates/net/discv5/README.md | 3 + crates/net/discv5/src/config.rs | 326 ++++++++++++ crates/net/discv5/src/enr.rs | 113 ++++ crates/net/discv5/src/error.rs | 38 ++ crates/net/discv5/src/filter.rs | 123 +++++ crates/net/discv5/src/lib.rs | 788 ++++++++++++++++++++++++++++ crates/net/discv5/src/metrics.rs | 117 +++++ crates/primitives/src/chain/spec.rs | 24 + crates/primitives/src/lib.rs | 5 +- 
crates/primitives/src/net.rs | 2 +- crates/rpc/rpc-types/src/net.rs | 31 +- crates/rpc/rpc-types/src/peer.rs | 5 + deny.toml | 1 + 16 files changed, 2012 insertions(+), 20 deletions(-) create mode 100644 crates/net/discv5/Cargo.toml create mode 100644 crates/net/discv5/README.md create mode 100644 crates/net/discv5/src/config.rs create mode 100644 crates/net/discv5/src/enr.rs create mode 100644 crates/net/discv5/src/error.rs create mode 100644 crates/net/discv5/src/filter.rs create mode 100644 crates/net/discv5/src/lib.rs create mode 100644 crates/net/discv5/src/metrics.rs diff --git a/Cargo.lock b/Cargo.lock index c18802de01fe8..9a7f4d4bc61c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -151,7 +151,7 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", - "sha2", + "sha2 0.10.8", ] [[package]] @@ -612,12 +612,24 @@ dependencies = [ "rand 0.8.5", ] +[[package]] +name = "arrayref" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" + [[package]] name = "arrayvec" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "asn1_der" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "155a5a185e42c6b77ac7b88a15143d930a9e9727a5b7b77eed417404ab15c247" + [[package]] name = "assert_matches" version = "1.5.0" @@ -783,6 +795,12 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "base-x" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" + [[package]] name = "base16ct" version = "0.2.0" @@ -1110,6 +1128,15 @@ dependencies = [ "alloc-stdlib", ] +[[package]] +name = "bs58" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" +dependencies = [ + "tinyvec", +] + [[package]] name = "bstr" version = "0.2.17" @@ -1487,6 +1514,15 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +[[package]] +name = "core2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505" +dependencies = [ + "memchr", +] + [[package]] name = "cpp_demangle" version = "0.4.3" @@ -1654,6 +1690,16 @@ dependencies = [ "typenum", ] +[[package]] +name = "crypto-mac" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" +dependencies = [ + "generic-array", + "subtle", +] + [[package]] name = "ctr" version = "0.7.0" @@ -1934,6 +1980,26 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" +[[package]] +name = "data-encoding-macro" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20c01c06f5f429efdf2bae21eb67c28b3df3cf85b7dd2d8ef09c0838dac5d33e" +dependencies = [ + "data-encoding", + "data-encoding-macro-internal", +] + +[[package]] +name = "data-encoding-macro-internal" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0047d07f2c89b17dd631c80450d69841a6b5d7fb17278cbc43d7e4cfcf2576f3" +dependencies = [ + "data-encoding", + "syn 1.0.109", +] + [[package]] name = "debug-helper" version = "0.3.13" @@ -2114,8 +2180,7 @@ dependencies = [ [[package]] name = "discv5" version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bac33cb3f99889a57e56a8c6ccb77aaf0cfc7787602b7af09783f736d77314e1" +source = 
"git+https://github.com/sigp/discv5?rev=04ac004#04ac0042a345a9edf93b090007e5d31c008261ed" dependencies = [ "aes 0.7.5", "aes-gcm", @@ -2128,6 +2193,7 @@ dependencies = [ "hex", "hkdf", "lazy_static", + "libp2p", "lru", "more-asserts", "parking_lot 0.11.2", @@ -2222,7 +2288,7 @@ dependencies = [ "ed25519", "rand_core 0.6.4", "serde", - "sha2", + "sha2 0.10.8", "subtle", "zeroize", ] @@ -2724,6 +2790,7 @@ dependencies = [ "futures-core", "futures-task", "futures-util", + "num_cpus", ] [[package]] @@ -3064,7 +3131,17 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" dependencies = [ - "hmac", + "hmac 0.12.1", +] + +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac", + "digest 0.9.0", ] [[package]] @@ -3076,6 +3153,17 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "hmac-drbg" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" +dependencies = [ + "digest 0.9.0", + "generic-array", + "hmac 0.8.1", +] + [[package]] name = "hostname" version = "0.3.1" @@ -3835,7 +3923,7 @@ dependencies = [ "ecdsa", "elliptic-curve", "once_cell", - "sha2", + "sha2 0.10.8", "signature", ] @@ -3914,6 +4002,122 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +[[package]] +name = "libp2p" +version = "0.53.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "681fb3f183edfbedd7a57d32ebe5dcdc0b9f94061185acf3c30249349cc6fc99" +dependencies = [ + "bytes", + "either", + "futures", + "futures-timer", + "getrandom 0.2.12", + "instant", + 
"libp2p-allow-block-list", + "libp2p-connection-limits", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "multiaddr", + "pin-project", + "rw-stream-sink", + "thiserror", +] + +[[package]] +name = "libp2p-allow-block-list" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "107b238b794cb83ab53b74ad5dcf7cca3200899b72fe662840cfb52f5b0a32e6" +dependencies = [ + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "void", +] + +[[package]] +name = "libp2p-connection-limits" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7cd50a78ccfada14de94cbacd3ce4b0138157f376870f13d3a8422cd075b4fd" +dependencies = [ + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "void", +] + +[[package]] +name = "libp2p-core" +version = "0.41.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8130a8269e65a2554d55131c770bdf4bcd94d2b8d4efb24ca23699be65066c05" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "instant", + "libp2p-identity", + "multiaddr", + "multihash", + "multistream-select", + "once_cell", + "parking_lot 0.12.1", + "pin-project", + "quick-protobuf", + "rand 0.8.5", + "rw-stream-sink", + "smallvec", + "thiserror", + "tracing", + "unsigned-varint 0.8.0", + "void", +] + +[[package]] +name = "libp2p-identity" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "999ec70441b2fb35355076726a6bc466c932e9bdc66f6a11c6c0aa17c7ab9be0" +dependencies = [ + "asn1_der", + "bs58", + "ed25519-dalek", + "hkdf", + "libsecp256k1", + "multihash", + "quick-protobuf", + "rand 0.8.5", + "sha2 0.10.8", + "thiserror", + "tracing", + "zeroize", +] + +[[package]] +name = "libp2p-swarm" +version = "0.44.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e92532fc3c4fb292ae30c371815c9b10103718777726ea5497abc268a4761866" +dependencies = [ + "either", + "fnv", + "futures", 
+ "futures-timer", + "instant", + "libp2p-core", + "libp2p-identity", + "multistream-select", + "once_cell", + "rand 0.8.5", + "smallvec", + "tracing", + "void", +] + [[package]] name = "libproc" version = "0.14.6" @@ -3936,6 +4140,54 @@ dependencies = [ "redox_syscall 0.4.1", ] +[[package]] +name = "libsecp256k1" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" +dependencies = [ + "arrayref", + "base64 0.13.1", + "digest 0.9.0", + "hmac-drbg", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", + "rand 0.8.5", + "serde", + "sha2 0.9.9", + "typenum", +] + +[[package]] +name = "libsecp256k1-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" +dependencies = [ + "crunchy", + "digest 0.9.0", + "subtle", +] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" +dependencies = [ + "libsecp256k1-core", +] + [[package]] name = "linked-hash-map" version = "0.5.6" @@ -4273,6 +4525,60 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fafa6961cabd9c63bcd77a45d7e3b7f3b552b70417831fb0f56db717e72407e" +[[package]] +name = "multiaddr" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b852bc02a2da5feed68cd14fa50d0774b92790a5bdbfa932a813926c8472070" +dependencies = [ + "arrayref", + "byteorder", + "data-encoding", + 
"libp2p-identity", + "multibase", + "multihash", + "percent-encoding", + "serde", + "static_assertions", + "unsigned-varint 0.7.2", + "url", +] + +[[package]] +name = "multibase" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404" +dependencies = [ + "base-x", + "data-encoding", + "data-encoding-macro", +] + +[[package]] +name = "multihash" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" +dependencies = [ + "core2", + "unsigned-varint 0.7.2", +] + +[[package]] +name = "multistream-select" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea0df8e5eec2298a62b326ee4f0d7fe1a6b90a09dfcf9df37b38f947a8c42f19" +dependencies = [ + "bytes", + "futures", + "log", + "pin-project", + "smallvec", + "unsigned-varint 0.7.2", +] + [[package]] name = "nibble_vec" version = "0.1.0" @@ -5132,6 +5438,15 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +[[package]] +name = "quick-protobuf" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d6da84cc204722a989e01ba2f6e1e276e190f22263d0cb6ce8526fcdb0d2e1f" +dependencies = [ + "byteorder", +] + [[package]] name = "quick-xml" version = "0.26.0" @@ -5724,6 +6039,30 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-discv5" +version = "0.2.0-beta.4" +dependencies = [ + "alloy-rlp", + "derive_more", + "discv5", + "enr", + "futures", + "itertools 0.12.1", + "libp2p-identity", + "metrics", + "multiaddr", + "rand 0.8.5", + "reth-metrics", + "reth-primitives", + "reth-tracing", + "rlp", + "secp256k1 0.27.0", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "reth-dns-discovery" version = 
"0.2.0-beta.4" @@ -5791,13 +6130,13 @@ dependencies = [ "educe", "futures", "generic-array", - "hmac", + "hmac 0.12.1", "pin-project", "rand 0.8.5", "reth-net-common", "reth-primitives", "secp256k1 0.27.0", - "sha2", + "sha2 0.10.8", "sha3", "thiserror", "tokio", @@ -6293,7 +6632,7 @@ dependencies = [ "reth-rpc-types-compat", "reth-transaction-pool", "revm", - "sha2", + "sha2 0.10.8", "thiserror", "tracing", ] @@ -6316,7 +6655,7 @@ dependencies = [ "revm", "revm-primitives", "serde_json", - "sha2", + "sha2 0.10.8", "thiserror", "tokio", "tokio-stream", @@ -6374,7 +6713,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "sha2", + "sha2 0.10.8", "strum 0.26.2", "sucds", "tempfile", @@ -6881,7 +7220,7 @@ dependencies = [ "revm-primitives", "ripemd", "secp256k1 0.28.2", - "sha2", + "sha2 0.10.8", "substrate-bn", ] @@ -6912,7 +7251,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" dependencies = [ - "hmac", + "hmac 0.12.1", "subtle", ] @@ -7175,6 +7514,17 @@ dependencies = [ "wait-timeout", ] +[[package]] +name = "rw-stream-sink" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8c9026ff5d2f23da5e45bbc283f156383001bfb09c4e44256d02c1a685fe9a1" +dependencies = [ + "futures", + "pin-project", + "static_assertions", +] + [[package]] name = "ryu" version = "1.0.17" @@ -7523,6 +7873,19 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "sha2" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if", + "cpufeatures", + "digest 0.9.0", + "opaque-debug", +] + [[package]] name = "sha2" version = "0.10.8" @@ -8775,6 +9138,18 @@ dependencies = [ "subtle", ] +[[package]] +name = "unsigned-varint" +version = "0.7.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" + +[[package]] +name = "unsigned-varint" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" + [[package]] name = "untrusted" version = "0.7.1" @@ -8858,6 +9233,12 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" + [[package]] name = "wait-timeout" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index 492a5f85b8de2..52251dff289da 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,6 +15,7 @@ members = [ "crates/metrics/metrics-derive/", "crates/net/common/", "crates/net/discv4/", + "crates/net/discv5/", "crates/net/dns/", "crates/net/downloaders/", "crates/net/ecies/", @@ -199,6 +200,7 @@ reth-config = { path = "crates/config" } reth-consensus-common = { path = "crates/consensus/common" } reth-db = { path = "crates/storage/db" } reth-discv4 = { path = "crates/net/discv4" } +reth-discv5 = { path = "crates/net/discv5" } reth-dns-discovery = { path = "crates/net/dns" } reth-node-builder = { path = "crates/node-builder" } reth-node-ethereum = { path = "crates/node-ethereum" } @@ -320,7 +322,7 @@ tower = "0.4" tower-http = "0.4" # p2p -discv5 = "0.4" +discv5 = { git = "https://github.com/sigp/discv5", rev = "04ac004" } igd-next = "0.14.3" # rpc diff --git a/crates/net/discv5/Cargo.toml b/crates/net/discv5/Cargo.toml new file mode 100644 index 0000000000000..03b856be9a00f --- /dev/null +++ b/crates/net/discv5/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "reth-discv5" +version.workspace = true +edition.workspace = true 
+rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Ethereum peer discovery V5" + +[lints] +workspace = true + +[dependencies] +# reth +reth-primitives.workspace = true +reth-metrics.workspace = true + +# ethereum +alloy-rlp.workspace = true +rlp = "0.5.2" +discv5 = { workspace = true, features = ["libp2p"] } +enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } +multiaddr = { version = "0.18", default-features = false } +libp2p-identity = "0.2" +secp256k1.workspace = true + +# async/futures +tokio.workspace = true +futures.workspace = true + +# io +rand.workspace = true + +# misc +derive_more.workspace = true +tracing.workspace = true +thiserror.workspace = true +itertools.workspace = true +metrics.workspace = true + +[dev-dependencies] +reth-tracing.workspace = true +tokio = { workspace = true, features = ["rt-multi-thread"] } +secp256k1 = { workspace = true, features = ["rand-std"] } diff --git a/crates/net/discv5/README.md b/crates/net/discv5/README.md new file mode 100644 index 0000000000000..03c031e7924dc --- /dev/null +++ b/crates/net/discv5/README.md @@ -0,0 +1,3 @@ +# Discv5 + +Thin wrapper around sigp/discv5. \ No newline at end of file diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs new file mode 100644 index 0000000000000..1fe25c482da97 --- /dev/null +++ b/crates/net/discv5/src/config.rs @@ -0,0 +1,326 @@ +//! Wrapper around [`discv5::Config`]. 
+ +use std::{ + collections::HashSet, + net::{IpAddr, SocketAddr}, +}; + +use derive_more::Display; +use discv5::ListenConfig; +use multiaddr::{Multiaddr, Protocol}; +use reth_primitives::{Bytes, ForkId, NodeRecord, MAINNET}; + +use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys}; + +/// L1 EL +pub const ETH: &[u8] = b"eth"; +/// L1 CL +pub const ETH2: &[u8] = b"eth2"; +/// Optimism +pub const OPSTACK: &[u8] = b"opstack"; + +/// Default interval in seconds at which to run a self-lookup up query. +/// +/// Default is 60 seconds. +const DEFAULT_SECONDS_LOOKUP_INTERVAL: u64 = 60; + +/// Optimism mainnet and base mainnet boot nodes. +const BOOT_NODES_OP_MAINNET_AND_BASE_MAINNET: &[&str] = &["enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305", "enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305", "enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305", "enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301", "enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301", "enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301", "enode://8a5a5006159bf079d06a04e5eceab2a1ce6e0f721875b2a9c96905336219dbe14203d38f70f3754686a6324f786c2f9852d8c0dd3adac2d080f4db35efc678c5@3.231.11.52:30301", "enode://cdadbe835308ad3557f9a1de8db411da1a260a98f8421d62da90e71da66e55e98aaa8e90aa7ce01b408a54e4bd2253d701218081ded3dbe5efbbc7b41d7cef79@54.198.153.150:30301"]; + 
+/// Optimism sepolia and base sepolia boot nodes. +const BOOT_NODES_OP_SEPOLIA_AND_BASE_SEPOLIA: &[&str] = &["enode://09d1a6110757b95628cc54ab6cc50a29773075ed00e3a25bd9388807c9a6c007664e88646a6fefd82baad5d8374ba555e426e8aed93f0f0c517e2eb5d929b2a2@34.65.21.188:30304?discport=30303"]; + +/// Builds a [`Config`]. +#[derive(Debug, Default)] +pub struct ConfigBuilder { + /// Config used by [`discv5::Discv5`]. Contains the discovery listen socket. + discv5_config: Option, + /// Nodes to boot from. + bootstrap_nodes: HashSet, + /// [`ForkId`] to set in local node record. + fork: Option<(&'static [u8], ForkId)>, + /// RLPx TCP port to advertise. Note: so long as `reth_network` handles [`NodeRecord`]s as + /// opposed to [`Enr`](enr::Enr)s, TCP is limited to same IP address as UDP, since + /// [`NodeRecord`] doesn't supply an extra field for and alternative TCP address. + tcp_port: u16, + /// Additional kv-pairs that should be advertised to peers by including in local node record. + other_enr_data: Vec<(&'static str, Bytes)>, + /// Interval in seconds at which to run a lookup up query to populate kbuckets. + lookup_interval: Option, + /// Custom filter rules to apply to a discovered peer in order to determine if it should be + /// passed up to rlpx or dropped. + discovered_peer_filter: Option, +} + +impl ConfigBuilder { + /// Returns a new builder, with all fields set like given instance. + pub fn new_from(discv5_config: Config) -> Self { + let Config { + discv5_config, + bootstrap_nodes, + fork: fork_id, + tcp_port, + other_enr_data, + lookup_interval, + discovered_peer_filter, + } = discv5_config; + + Self { + discv5_config: Some(discv5_config), + bootstrap_nodes, + fork: Some(fork_id), + tcp_port, + other_enr_data, + lookup_interval: Some(lookup_interval), + discovered_peer_filter: Some(discovered_peer_filter), + } + } + + /// Set [`discv5::Config`], which contains the [`discv5::Discv5`] listen socket. 
+ pub fn discv5_config(mut self, discv5_config: discv5::Config) -> Self { + self.discv5_config = Some(discv5_config); + self + } + + /// Adds multiple boot nodes from a list of [`Enr`](discv5::Enr)s. + pub fn add_signed_boot_nodes(mut self, nodes: impl IntoIterator) -> Self { + self.bootstrap_nodes.extend(nodes.into_iter().map(BootNode::Enr)); + self + } + + /// Parses a comma-separated list of serialized [`Enr`](discv5::Enr)s, signed node records, and + /// adds any successfully deserialized records to boot nodes. Note: this type is serialized in + /// CL format since [`discv5`] is originally a CL library. + pub fn add_cl_serialized_signed_boot_nodes(mut self, enrs: &str) -> Self { + let bootstrap_nodes = &mut self.bootstrap_nodes; + for node in enrs.split(&[',']).flat_map(|record| record.trim().parse::()) { + bootstrap_nodes.insert(BootNode::Enr(node)); + } + self + } + + /// Adds boot nodes in the form a list of [`NodeRecord`]s, parsed enodes. + pub fn add_unsigned_boot_nodes(mut self, enodes: Vec) -> Self { + for node in enodes { + if let Ok(node) = BootNode::from_unsigned(node) { + self.bootstrap_nodes.insert(node); + } + } + + self + } + + /// Adds a comma-separated list of enodes, serialized unsigned node records, to boot nodes. + pub fn add_serialized_unsigned_boot_nodes(mut self, enodes: &[&str]) -> Self { + for node in enodes { + if let Ok(node) = node.parse() { + if let Ok(node) = BootNode::from_unsigned(node) { + self.bootstrap_nodes.insert(node); + } + } + } + + self + } + + /// Add optimism mainnet boot nodes. + pub fn add_optimism_mainnet_boot_nodes(self) -> Self { + self.add_serialized_unsigned_boot_nodes(BOOT_NODES_OP_MAINNET_AND_BASE_MAINNET) + } + + /// Add optimism sepolia boot nodes. + pub fn add_optimism_sepolia_boot_nodes(self) -> Self { + self.add_serialized_unsigned_boot_nodes(BOOT_NODES_OP_SEPOLIA_AND_BASE_SEPOLIA) + } + + /// Set [`ForkId`], and key used to identify it, to set in local [`Enr`](discv5::enr::Enr). 
+ pub fn fork(mut self, key: &'static [u8], value: ForkId) -> Self { + self.fork = Some((key, value)); + self + } + + /// Sets the tcp port to advertise in the local [`Enr`](discv5::enr::Enr). + fn tcp_port(mut self, port: u16) -> Self { + self.tcp_port = port; + self + } + + /// Adds an additional kv-pair to include in the local [`Enr`](discv5::enr::Enr). + pub fn add_enr_kv_pair(mut self, kv_pair: (&'static str, Bytes)) -> Self { + self.other_enr_data.push(kv_pair); + self + } + + /// Adds keys to disallow when filtering a discovered peer, to determine whether or not it + /// should be passed to rlpx. The discovered node record is scanned for any kv-pairs where the + /// key matches the disallowed keys. If not explicitly set, b"eth2" key will be disallowed. + pub fn must_not_include_keys(mut self, not_keys: &[&'static [u8]]) -> Self { + let mut filter = self.discovered_peer_filter.unwrap_or_default(); + filter.add_disallowed_keys(not_keys); + self.discovered_peer_filter = Some(filter); + self + } + + /// Returns a new [`Config`]. + pub fn build(self) -> Config { + let Self { + discv5_config, + bootstrap_nodes, + fork, + tcp_port, + other_enr_data, + lookup_interval, + discovered_peer_filter, + } = self; + + let discv5_config = discv5_config + .unwrap_or_else(|| discv5::ConfigBuilder::new(ListenConfig::default()).build()); + + let fork = fork.unwrap_or((ETH, MAINNET.latest_fork_id())); + + let lookup_interval = lookup_interval.unwrap_or(DEFAULT_SECONDS_LOOKUP_INTERVAL); + + let discovered_peer_filter = + discovered_peer_filter.unwrap_or_else(|| MustNotIncludeKeys::new(&[ETH2])); + + Config { + discv5_config, + bootstrap_nodes, + fork, + tcp_port, + other_enr_data, + lookup_interval, + discovered_peer_filter, + } + } +} + +/// Config used to bootstrap [`discv5::Discv5`]. +#[derive(Debug)] +pub struct Config { + /// Config used by [`discv5::Discv5`]. Contains the [`ListenConfig`], with the discovery listen + /// socket. 
+ pub(super) discv5_config: discv5::Config, + /// Nodes to boot from. + pub(super) bootstrap_nodes: HashSet, + /// [`ForkId`] to set in local node record. + pub(super) fork: (&'static [u8], ForkId), + /// RLPx TCP port to advertise. + pub(super) tcp_port: u16, + /// Additional kv-pairs to include in local node record. + pub(super) other_enr_data: Vec<(&'static str, Bytes)>, + /// Interval in seconds at which to run a lookup up query with to populate kbuckets. + pub(super) lookup_interval: u64, + /// Custom filter rules to apply to a discovered peer in order to determine if it should be + /// passed up to rlpx or dropped. + pub(super) discovered_peer_filter: MustNotIncludeKeys, +} + +impl Config { + /// Returns a new [`ConfigBuilder`], with the RLPx TCP port set to the given port. + pub fn builder(rlpx_tcp_port: u16) -> ConfigBuilder { + ConfigBuilder::default().tcp_port(rlpx_tcp_port) + } +} + +impl Config { + /// Returns the discovery (UDP) socket contained in the [`discv5::Config`]. Returns the IPv6 + /// socket, if both IPv4 and v6 are configured. This socket will be advertised to peers in the + /// local [`Enr`](discv5::enr::Enr). + pub fn discovery_socket(&self) -> SocketAddr { + match self.discv5_config.listen_config { + ListenConfig::Ipv4 { ip, port } => (ip, port).into(), + ListenConfig::Ipv6 { ip, port } => (ip, port).into(), + ListenConfig::DualStack { ipv6, ipv6_port, .. } => (ipv6, ipv6_port).into(), + } + } + + /// Returns the RLPx (TCP) socket contained in the [`discv5::Config`]. This socket will be + /// advertised to peers in the local [`Enr`](discv5::enr::Enr). + pub fn rlpx_socket(&self) -> SocketAddr { + let port = self.tcp_port; + match self.discv5_config.listen_config { + ListenConfig::Ipv4 { ip, .. } => (ip, port).into(), + ListenConfig::Ipv6 { ip, .. } => (ip, port).into(), + ListenConfig::DualStack { ipv4, .. 
} => (ipv4, port).into(), + } + } +} + +/// A boot node can be added either as a string in either 'enode' URL scheme or serialized from +/// [`Enr`](discv5::Enr) type. +#[derive(Debug, PartialEq, Eq, Hash, Display)] +pub enum BootNode { + /// An unsigned node record. + #[display(fmt = "{_0}")] + Enode(Multiaddr), + /// A signed node record. + #[display(fmt = "{_0:?}")] + Enr(discv5::Enr), +} + +impl BootNode { + /// Parses a [`NodeRecord`] and serializes according to CL format. Note: [`discv5`] is + /// originally a CL library hence needs this format to add the node. + pub fn from_unsigned(node_record: NodeRecord) -> Result { + let NodeRecord { address, udp_port, id, .. } = node_record; + let mut multi_address = Multiaddr::empty(); + match address { + IpAddr::V4(ip) => multi_address.push(Protocol::Ip4(ip)), + IpAddr::V6(ip) => multi_address.push(Protocol::Ip6(ip)), + } + + multi_address.push(Protocol::Udp(udp_port)); + let id = discv4_id_to_multiaddr_id(id)?; + multi_address.push(Protocol::P2p(id)); + + Ok(Self::Enode(multi_address)) + } +} + +#[cfg(test)] +mod test { + use std::net::SocketAddrV4; + + use reth_primitives::hex; + + use super::*; + + const MULTI_ADDRESSES: &str = "/ip4/184.72.129.189/udp/30301/p2p/16Uiu2HAmSG2hdLwyQHQmG4bcJBgD64xnW63WMTLcrNq6KoZREfGb,/ip4/3.231.11.52/udp/30301/p2p/16Uiu2HAmMy4V8bi3XP7KDfSLQcLACSvTLroRRwEsTyFUKo8NCkkp,/ip4/54.198.153.150/udp/30301/p2p/16Uiu2HAmSVsb7MbRf1jg3Dvd6a3n5YNqKQwn1fqHCFgnbqCsFZKe,/ip4/3.220.145.177/udp/30301/p2p/16Uiu2HAm74pBDGdQ84XCZK27GRQbGFFwQ7RsSqsPwcGmCR3Cwn3B,/ip4/3.231.138.188/udp/30301/p2p/16Uiu2HAmMnTiJwgFtSVGV14ZNpwAvS1LUoF4pWWeNtURuV6C3zYB"; + + #[test] + fn parse_boot_nodes() { + const OP_SEPOLIA_CL_BOOTNODES: &str 
="enr:-J64QBwRIWAco7lv6jImSOjPU_W266lHXzpAS5YOh7WmgTyBZkgLgOwo_mxKJq3wz2XRbsoBItbv1dCyjIoNq67mFguGAYrTxM42gmlkgnY0gmlwhBLSsHKHb3BzdGFja4S0lAUAiXNlY3AyNTZrMaEDmoWSi8hcsRpQf2eJsNUx-sqv6fH4btmo2HsAzZFAKnKDdGNwgiQGg3VkcIIkBg,enr:-J64QFa3qMsONLGphfjEkeYyF6Jkil_jCuJmm7_a42ckZeUQGLVzrzstZNb1dgBp1GGx9bzImq5VxJLP-BaptZThGiWGAYrTytOvgmlkgnY0gmlwhGsV-zeHb3BzdGFja4S0lAUAiXNlY3AyNTZrMaEDahfSECTIS_cXyZ8IyNf4leANlZnrsMEWTkEYxf4GMCmDdGNwgiQGg3VkcIIkBg"; + + let config = Config::builder(30303) + .add_cl_serialized_signed_boot_nodes(OP_SEPOLIA_CL_BOOTNODES) + .build(); + + let socket_1 = "18.210.176.114:9222".parse::().unwrap(); + let socket_2 = "107.21.251.55:9222".parse::().unwrap(); + + for node in config.bootstrap_nodes { + let BootNode::Enr(node) = node else { panic!() }; + assert!( + socket_1 == node.udp4_socket().unwrap() && socket_1 == node.tcp4_socket().unwrap() || + socket_2 == node.udp4_socket().unwrap() && + socket_2 == node.tcp4_socket().unwrap() + ); + assert_eq!("84b4940500", hex::encode(node.get_raw_rlp("opstack").unwrap())); + } + } + + #[test] + fn parse_enodes() { + let config = Config::builder(30303) + .add_serialized_unsigned_boot_nodes(BOOT_NODES_OP_MAINNET_AND_BASE_MAINNET) + .build(); + + let bootstrap_nodes = + config.bootstrap_nodes.into_iter().map(|node| format!("{node}")).collect::>(); + + for node in MULTI_ADDRESSES.split(&[',']) { + assert!(bootstrap_nodes.contains(&node.to_string())); + } + } +} diff --git a/crates/net/discv5/src/enr.rs b/crates/net/discv5/src/enr.rs new file mode 100644 index 0000000000000..51323b040a956 --- /dev/null +++ b/crates/net/discv5/src/enr.rs @@ -0,0 +1,113 @@ +//! Interface between node identification on protocol version 5 and 4. Specifically, between types +//! [`discv5::enr::NodeId`] and [`PeerId`]. 
+ +use discv5::enr::{CombinedPublicKey, Enr, EnrPublicKey, NodeId}; +use reth_primitives::{id2pk, pk2id, PeerId}; +use secp256k1::{PublicKey, SecretKey}; + +/// Extracts a [`CombinedPublicKey::Secp256k1`] from a [`discv5::Enr`] and converts it to a +/// [`PeerId`]. Note: conversion from discv5 ID to discv4 ID is not possible. +pub fn enr_to_discv4_id(enr: &discv5::Enr) -> Option { + let pk = enr.public_key(); + if !matches!(pk, CombinedPublicKey::Secp256k1(_)) { + return None + } + + let pk = PublicKey::from_slice(&pk.encode()).unwrap(); + + Some(pk2id(&pk)) +} + +/// Converts a [`PeerId`] to a [`discv5::enr::NodeId`]. +pub fn discv4_id_to_discv5_id(peer_id: PeerId) -> Result { + Ok(id2pk(peer_id)?.into()) +} + +/// Converts a [`PeerId`] to a [`libp2p_identity::PeerId `]. +pub fn discv4_id_to_multiaddr_id( + peer_id: PeerId, +) -> Result { + let pk = id2pk(peer_id)?.encode(); + let pk: libp2p_identity::PublicKey = + libp2p_identity::secp256k1::PublicKey::try_from_bytes(&pk).unwrap().into(); + + Ok(pk.to_peer_id()) +} + +/// Wrapper around [`discv5::Enr`] ([`Enr`]). 
+#[derive(Debug, Clone)] +pub struct EnrCombinedKeyWrapper(pub discv5::Enr); + +impl From> for EnrCombinedKeyWrapper { + fn from(value: Enr) -> Self { + let encoded_enr = rlp::encode(&value); + let enr = rlp::decode::(&encoded_enr).unwrap(); + + Self(enr) + } +} + +impl From for Enr { + fn from(val: EnrCombinedKeyWrapper) -> Self { + let EnrCombinedKeyWrapper(enr) = val; + let encoded_enr = rlp::encode(&enr); + + rlp::decode::>(&encoded_enr).unwrap() + } +} + +#[cfg(test)] +mod tests { + use alloy_rlp::Encodable; + use discv5::enr::{CombinedKey, EnrKey}; + use reth_primitives::{pk_to_id, Hardfork, NodeRecord, MAINNET}; + + use super::*; + + #[test] + fn discv5_discv4_id_conversion() { + let discv5_pk = CombinedKey::generate_secp256k1().public(); + let discv5_peer_id = NodeId::from(discv5_pk.clone()); + + // convert to discv4 id + let pk = secp256k1::PublicKey::from_slice(&discv5_pk.encode()).unwrap(); + let discv4_peer_id = pk2id(&pk); + // convert back to discv5 id + let discv5_peer_id_from_discv4_peer_id = discv4_id_to_discv5_id(discv4_peer_id).unwrap(); + + assert_eq!(discv5_peer_id, discv5_peer_id_from_discv4_peer_id) + } + + #[test] + fn conversion_to_node_record_from_enr() { + const IP: &str = "::"; + const TCP_PORT: u16 = 30303; + const UDP_PORT: u16 = 9000; + + let key = CombinedKey::generate_secp256k1(); + + let mut buf = Vec::new(); + let fork_id = MAINNET.hardfork_fork_id(Hardfork::Frontier); + fork_id.unwrap().encode(&mut buf); + + let enr = Enr::builder() + .ip6(IP.parse().unwrap()) + .udp6(UDP_PORT) + .tcp6(TCP_PORT) + .build(&key) + .unwrap(); + + let enr = EnrCombinedKeyWrapper(enr).into(); + let node_record = NodeRecord::try_from(&enr).unwrap(); + + assert_eq!( + NodeRecord { + address: IP.parse().unwrap(), + tcp_port: TCP_PORT, + udp_port: UDP_PORT, + id: pk_to_id(&enr.public_key()) + }, + node_record + ) + } +} diff --git a/crates/net/discv5/src/error.rs b/crates/net/discv5/src/error.rs new file mode 100644 index 0000000000000..96929b7936346 --- 
/dev/null +++ b/crates/net/discv5/src/error.rs @@ -0,0 +1,38 @@ +//! Errors interfacing with [`discv5::Discv5`]. + +use discv5::IpMode; + +/// Errors interfacing with [`discv5::Discv5`]. +#[derive(thiserror::Error, Debug)] +pub enum Error { + /// Failure adding node to [`discv5::Discv5`]. + #[error("failed adding node to discv5, {0}")] + AddNodeToDiscv5Failed(&'static str), + /// Node record has incompatible key type. + #[error("incompatible key type (not secp256k1)")] + IncompatibleKeyType, + /// Missing key used to identify rlpx network. + #[error("fork missing on enr, 'eth' key missing")] + ForkMissing, + /// Failed to decode [`ForkId`](reth_primitives::ForkId) rlp value. + #[error("failed to decode fork id, 'eth': {0:?}")] + ForkIdDecodeError(#[from] alloy_rlp::Error), + /// Peer is unreachable over discovery. + #[error("discovery socket missing")] + UnreachableDiscovery, + /// Peer is unreachable over rlpx. + #[error("RLPx TCP socket missing")] + UnreachableRlpx, + /// Peer is not using same IP version as local node in rlpx. + #[error("RLPx TCP socket is unsupported IP version, local ip mode: {0:?}")] + IpVersionMismatchRlpx(IpMode), + /// Failed to initialize [`discv5::Discv5`]. + #[error("init failed, {0}")] + InitFailure(&'static str), + /// An error from underlying [`discv5::Discv5`] node. + #[error("{0}")] + Discv5Error(discv5::Error), + /// An error from underlying [`discv5::Discv5`] node. + #[error("{0}")] + Discv5ErrorStr(&'static str), +} diff --git a/crates/net/discv5/src/filter.rs b/crates/net/discv5/src/filter.rs new file mode 100644 index 0000000000000..5cb7be18c60a4 --- /dev/null +++ b/crates/net/discv5/src/filter.rs @@ -0,0 +1,123 @@ +//! Predicates to constraint peer lookups. + +use std::collections::HashSet; + +use derive_more::Constructor; +use itertools::Itertools; + +/// Outcome of applying filtering rules on node record. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum FilterOutcome { + /// ENR passes filter rules. 
+ Ok, + /// ENR doesn't pass filter rules, for the given reason. + Ignore { + /// Reason for filtering out node record. + reason: String, + }, +} + +impl FilterOutcome { + /// Returns `true` for [`FilterOutcome::Ok`]. + pub fn is_ok(&self) -> bool { + matches!(self, FilterOutcome::Ok) + } +} + +/// Filter requiring that peers advertise that they belong to some fork of a certain key. +#[derive(Debug, Constructor, Clone, Copy, PartialEq, Eq, Hash)] +pub struct MustIncludeKey { + /// Kv-pair key which node record must advertise. + key: &'static [u8], +} + +impl MustIncludeKey { + /// Returns [`FilterOutcome::Ok`] if [`Enr`](discv5::Enr) contains the configured kv-pair key. + pub fn filter(&self, enr: &discv5::Enr) -> FilterOutcome { + if enr.get_raw_rlp(self.key).is_none() { + return FilterOutcome::Ignore { reason: self.ignore_reason() } + } + FilterOutcome::Ok + } + + fn ignore_reason(&self) -> String { + format!("{} fork required", String::from_utf8_lossy(self.key)) + } +} + +/// Filter requiring that peers not advertise kv-pairs using certain keys, e.g. b"eth2". +#[derive(Debug, Clone, Default)] +pub struct MustNotIncludeKeys { + keys: HashSet, +} + +impl MustNotIncludeKeys { + /// Returns a new instance that disallows node records with a kv-pair that has any of the given + /// keys. + pub fn new(disallow_keys: &[&'static [u8]]) -> Self { + let mut keys = HashSet::with_capacity(disallow_keys.len()); + for key in disallow_keys { + _ = keys.insert(MustIncludeKey::new(key)); + } + + MustNotIncludeKeys { keys } + } +} + +impl MustNotIncludeKeys { + /// Returns `true` if [`Enr`](discv5::Enr) passes filtering rules. 
+ pub fn filter(&self, enr: &discv5::Enr) -> FilterOutcome { + for key in self.keys.iter() { + if matches!(key.filter(enr), FilterOutcome::Ok) { + return FilterOutcome::Ignore { reason: self.ignore_reason() } + } + } + + FilterOutcome::Ok + } + + fn ignore_reason(&self) -> String { + format!( + "{} forks not allowed", + self.keys.iter().map(|key| String::from_utf8_lossy(key.key)).format(",") + ) + } + + /// Adds a key that must not be present for any kv-pair in a node record. + pub fn add_disallowed_keys(&mut self, keys: &[&'static [u8]]) { + for key in keys { + self.keys.insert(MustIncludeKey::new(key)); + } + } +} + +#[cfg(test)] +mod tests { + use alloy_rlp::Bytes; + use discv5::enr::{CombinedKey, Enr}; + + use crate::config::{ETH, ETH2}; + + use super::*; + + #[test] + fn must_not_include_key_filter() { + // rig test + + let filter = MustNotIncludeKeys::new(&[ETH, ETH2]); + + // enr_1 advertises a fork from one of the keys configured in filter + let sk = CombinedKey::generate_secp256k1(); + let enr_1 = + Enr::builder().add_value_rlp(ETH as &[u8], Bytes::from("cancun")).build(&sk).unwrap(); + + // enr_2 advertises a fork from one the other key configured in filter + let sk = CombinedKey::generate_secp256k1(); + let enr_2 = Enr::builder().add_value_rlp(ETH2, Bytes::from("deneb")).build(&sk).unwrap(); + + // test + + assert!(matches!(filter.filter(&enr_1), FilterOutcome::Ignore { .. })); + assert!(matches!(filter.filter(&enr_2), FilterOutcome::Ignore { .. })); + } +} diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs new file mode 100644 index 0000000000000..360bed68bb99d --- /dev/null +++ b/crates/net/discv5/src/lib.rs @@ -0,0 +1,788 @@ +//! Wrapper around [`discv5::Discv5`]. 
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +use std::{ + collections::HashSet, + fmt, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::Arc, + time::Duration, +}; + +use ::enr::Enr; +use alloy_rlp::Decodable; +use derive_more::Deref; +use discv5::ListenConfig; +use enr::{discv4_id_to_discv5_id, EnrCombinedKeyWrapper}; +use futures::future::join_all; +use itertools::Itertools; +use reth_primitives::{bytes::Bytes, ForkId, NodeRecord, PeerId}; +use secp256k1::SecretKey; +use tokio::{sync::mpsc, task}; +use tracing::{debug, error, trace}; + +pub mod config; +pub mod enr; +pub mod error; +pub mod filter; +pub mod metrics; + +pub use discv5::{self, IpMode}; + +pub use config::{BootNode, Config, ConfigBuilder}; +pub use enr::enr_to_discv4_id; +pub use error::Error; +pub use filter::{FilterOutcome, MustNotIncludeKeys}; +use metrics::Discv5Metrics; + +/// The max log2 distance, is equivalent to the index of the last bit in a discv5 node id. +const MAX_LOG2_DISTANCE: usize = 255; + +/// Transparent wrapper around [`discv5::Discv5`]. +#[derive(Deref, Clone)] +pub struct Discv5 { + #[deref] + /// sigp/discv5 node. + discv5: Arc, + /// [`IpMode`] of the the node. + ip_mode: IpMode, + /// Key used in kv-pair to ID chain. + fork_id_key: &'static [u8], + /// Filter applied to a discovered peers before passing it up to app. + discovered_peer_filter: MustNotIncludeKeys, + /// Metrics for underlying [`discv5::Discv5`] node and filtered discovered peers. 
+ metrics: Discv5Metrics, +} + +impl Discv5 { + //////////////////////////////////////////////////////////////////////////////////////////////// + // Minimal interface with `reth_network::discovery` + //////////////////////////////////////////////////////////////////////////////////////////////// + + /// Adds the node to the table, if it is not already present. + pub fn add_node_to_routing_table(&self, node_record: Enr) -> Result<(), Error> { + let EnrCombinedKeyWrapper(enr) = node_record.into(); + self.add_enr(enr).map_err(Error::AddNodeToDiscv5Failed) + } + + /// Sets the pair in the EIP-868 [`Enr`] of the node. + /// + /// If the key already exists, this will update it. + /// + /// CAUTION: The value **must** be rlp encoded + pub fn set_eip868_in_local_enr(&self, key: Vec, rlp: Bytes) { + let Ok(key_str) = std::str::from_utf8(&key) else { + error!(target: "discv5", + err="key not utf-8", + "failed to update local enr" + ); + return + }; + if let Err(err) = self.enr_insert(key_str, &rlp) { + error!(target: "discv5", + %err, + "failed to update local enr" + ); + } + } + + /// Sets the pair in the EIP-868 [`Enr`] of the node. + /// + /// If the key already exists, this will update it. + pub fn encode_and_set_eip868_in_local_enr( + &self, + key: Vec, + value: impl alloy_rlp::Encodable, + ) { + let mut buf = Vec::new(); + value.encode(&mut buf); + self.set_eip868_in_local_enr(key, buf.into()) + } + + /// Adds the peer and id to the ban list. + /// + /// This will prevent any future inclusion in the table + pub fn ban_peer_by_ip_and_node_id(&self, peer_id: PeerId, ip: IpAddr) { + match discv4_id_to_discv5_id(peer_id) { + Ok(node_id) => { + self.ban_node(&node_id, None); + self.ban_peer_by_ip(ip); + } + Err(err) => error!(target: "discv5", + %err, + "failed to ban peer" + ), + } + } + + /// Adds the ip to the ban list. 
+ /// + /// This will prevent any future inclusion in the table + pub fn ban_peer_by_ip(&self, ip: IpAddr) { + self.ban_ip(ip, None); + } + + /// Returns the [`NodeRecord`] of the local node. + /// + /// This includes the currently tracked external IP address of the node. + pub fn node_record(&self) -> NodeRecord { + let enr: Enr<_> = EnrCombinedKeyWrapper(self.local_enr()).into(); + (&enr).try_into().unwrap() + } + + /// Spawns [`discv5::Discv5`]. Returns [`discv5::Discv5`] handle in reth compatible wrapper type + /// [`Discv5`], a receiver of [`discv5::Event`]s from the underlying node, and the local + /// [`Enr`](discv5::Enr) converted into the reth compatible [`NodeRecord`] type. + pub async fn start( + sk: &SecretKey, + discv5_config: Config, + ) -> Result<(Self, mpsc::Receiver, NodeRecord), Error> { + // + // 1. make local enr from listen config + // + let Config { + discv5_config, + bootstrap_nodes, + fork, + tcp_port, + other_enr_data, + lookup_interval, + discovered_peer_filter, + } = discv5_config; + + let (enr, bc_enr, ip_mode, fork_id_key) = { + let mut builder = discv5::enr::Enr::builder(); + + let (ip_mode, socket) = match discv5_config.listen_config { + ListenConfig::Ipv4 { ip, port } => { + if ip != Ipv4Addr::UNSPECIFIED { + builder.ip4(ip); + } + builder.udp4(port); + builder.tcp4(tcp_port); + + (IpMode::Ip4, (ip, port).into()) + } + ListenConfig::Ipv6 { ip, port } => { + if ip != Ipv6Addr::UNSPECIFIED { + builder.ip6(ip); + } + builder.udp6(port); + builder.tcp6(tcp_port); + + (IpMode::Ip6, (ip, port).into()) + } + ListenConfig::DualStack { ipv4, ipv4_port, ipv6, ipv6_port } => { + if ipv4 != Ipv4Addr::UNSPECIFIED { + builder.ip4(ipv4); + } + builder.udp4(ipv4_port); + builder.tcp4(tcp_port); + + if ipv6 != Ipv6Addr::UNSPECIFIED { + builder.ip6(ipv6); + } + builder.udp6(ipv6_port); + + (IpMode::DualStack, (ipv6, ipv6_port).into()) + } + }; + + // add fork id + let (chain, fork_id) = fork; + builder.add_value_rlp(chain, 
alloy_rlp::encode(fork_id).into()); + + // add other data + for (key, value) in other_enr_data { + builder.add_value_rlp(key, alloy_rlp::encode(value).into()); + } + + // enr v4 not to get confused with discv4, independent versioning enr and + // discovery + let enr = builder.build(sk).expect("should build enr v4"); + let EnrCombinedKeyWrapper(enr) = enr.into(); + + trace!(target: "net::discv5", + ?enr, + "local ENR" + ); + + // backwards compatible enr + let bc_enr = NodeRecord::from_secret_key(socket, sk); + + (enr, bc_enr, ip_mode, chain) + }; + + // + // 3. start discv5 + // + let sk = discv5::enr::CombinedKey::secp256k1_from_bytes(&mut sk.secret_bytes()).unwrap(); + let mut discv5 = match discv5::Discv5::new(enr, sk, discv5_config) { + Ok(discv5) => discv5, + Err(err) => return Err(Error::InitFailure(err)), + }; + discv5.start().await.map_err(Error::Discv5Error)?; + + // start discv5 updates stream + let discv5_updates = discv5.event_stream().await.map_err(Error::Discv5Error)?; + + let discv5 = Arc::new(discv5); + + // + // 4. add boot nodes + // + Self::bootstrap(bootstrap_nodes, &discv5)?; + + let metrics = Discv5Metrics::default(); + + // + // 5. bg kbuckets maintenance + // + Self::spawn_populate_kbuckets_bg(lookup_interval, metrics.clone(), discv5.clone()); + + Ok(( + Self { discv5, ip_mode, fork_id_key, discovered_peer_filter, metrics }, + discv5_updates, + bc_enr, + )) + } + + /// Bootstraps underlying [`discv5::Discv5`] node with configured peers. + fn bootstrap( + bootstrap_nodes: HashSet, + discv5: &Arc, + ) -> Result<(), Error> { + trace!(target: "net::discv5", + ?bootstrap_nodes, + "adding bootstrap nodes .." 
+ ); + + let mut enr_requests = vec![]; + for node in bootstrap_nodes { + match node { + BootNode::Enr(node) => { + if let Err(err) = discv5.add_enr(node) { + return Err(Error::Discv5ErrorStr(err)) + } + } + BootNode::Enode(enode) => { + let discv5 = discv5.clone(); + enr_requests.push(async move { + if let Err(err) = discv5.request_enr(enode.to_string()).await { + debug!(target: "net::discv5", + ?enode, + %err, + "failed adding boot node" + ); + } + }) + } + } + } + _ = join_all(enr_requests); + + debug!(target: "net::discv5", + nodes=format!("[{:#}]", discv5.with_kbuckets(|kbuckets| kbuckets + .write() + .iter() + .map(|peer| format!("enr: {:?}, status: {:?}", peer.node.value, peer.status)).collect::>() + ).into_iter().format(", ")), + "added boot nodes" + ); + + Ok(()) + } + + /// Backgrounds regular look up queries, in order to keep kbuckets populated. + fn spawn_populate_kbuckets_bg( + lookup_interval: u64, + metrics: Discv5Metrics, + discv5: Arc, + ) { + // initiate regular lookups to populate kbuckets + task::spawn({ + let local_node_id = discv5.local_enr().node_id(); + let lookup_interval = Duration::from_secs(lookup_interval); + let mut metrics = metrics.discovered_peers; + let mut log2_distance = 0usize; + // todo: graceful shutdown + + async move { + loop { + metrics.set_total_sessions(discv5.metrics().active_sessions); + metrics.set_total_kbucket_peers( + discv5.with_kbuckets(|kbuckets| kbuckets.read().iter_ref().count()), + ); + + trace!(target: "net::discv5", + lookup_interval=format!("{:#?}", lookup_interval), + "starting periodic lookup query" + ); + // make sure node is connected to each subtree in the network by target + // selection (ref kademlia) + let target = get_lookup_target(log2_distance, local_node_id); + if log2_distance < MAX_LOG2_DISTANCE { + // try to populate bucket one step further away + log2_distance += 1 + } else { + // start over with self lookup + log2_distance = 0 + } + match discv5.find_node(target).await { + Err(err) => 
trace!(target: "net::discv5", + lookup_interval=format!("{:#?}", lookup_interval), + %err, + "periodic lookup query failed" + ), + Ok(peers) => trace!(target: "net::discv5", + lookup_interval=format!("{:#?}", lookup_interval), + peers_count=peers.len(), + peers=format!("[{:#}]", peers.iter() + .map(|enr| enr.node_id() + ).format(", ")), + "peers returned by periodic lookup query" + ), + } + + // `Discv5::connected_peers` can be subset of sessions, not all peers make it + // into kbuckets, e.g. incoming sessions from peers with + // unreachable enrs + debug!(target: "net::discv5", + connected_peers=discv5.connected_peers(), + "connected peers in routing table" + ); + tokio::time::sleep(lookup_interval).await; + } + } + }); + } + + /// Process an event from the underlying [`discv5::Discv5`] node. + pub fn on_discv5_update(&mut self, update: discv5::Event) -> Option { + match update { + discv5::Event::SocketUpdated(_) | discv5::Event::TalkRequest(_) | + // `EnrAdded` not used in discv5 codebase + discv5::Event::EnrAdded { .. } | + // `Discovered` not unique discovered peers + discv5::Event::Discovered(_) => None, + discv5::Event::NodeInserted { replaced: _, .. } => { + + // node has been inserted into kbuckets + + // `replaced` covers `reth_discv4::DiscoveryUpdate::Removed(_)` .. but we can't get + // a `PeerId` from a `NodeId` + + self.metrics.discovered_peers.increment_kbucket_insertions(1); + + None + } + discv5::Event::SessionEstablished(enr, remote_socket) => { + // covers `reth_discv4::DiscoveryUpdate` equivalents `DiscoveryUpdate::Added(_)` + // and `DiscoveryUpdate::DiscoveredAtCapacity(_) + + // peer has been discovered as part of query, or, by incoming session (peer has + // discovered us) + + self.metrics.discovered_peers_advertised_networks.increment_once_by_network_type(&enr); + + self.metrics.discovered_peers.increment_established_sessions_raw(1); + + self.on_discovered_peer(&enr, remote_socket) + } + } + } + + /// Processes a discovered peer. 
Returns `true` if peer is added to + fn on_discovered_peer( + &mut self, + enr: &discv5::Enr, + socket: SocketAddr, + ) -> Option { + let node_record = match self.try_into_reachable(enr, socket) { + Ok(enr_bc) => enr_bc, + Err(err) => { + trace!(target: "net::discovery::discv5", + %err, + "discovered peer is unreachable" + ); + + self.metrics.discovered_peers.increment_established_sessions_unreachable_enr(1); + + return None + } + }; + let fork_id = match self.filter_discovered_peer(enr) { + FilterOutcome::Ok => self.get_fork_id(enr).ok(), + FilterOutcome::Ignore { reason } => { + trace!(target: "net::discovery::discv5", + ?enr, + reason, + "filtered out discovered peer" + ); + + self.metrics.discovered_peers.increment_established_sessions_filtered(1); + + return None + } + }; + + trace!(target: "net::discovery::discv5", + ?fork_id, + ?enr, + "discovered peer" + ); + + Some(DiscoveredPeer { node_record, fork_id }) + } + + /// Tries to convert an [`Enr`](discv5::Enr) into the backwards compatible type [`NodeRecord`], + /// w.r.t. local [`IpMode`]. Tries the socket from which the ENR was sent, if socket is missing + /// from ENR. + /// + /// Note: [`discv5::Discv5`] won't initiate a session with any peer with a malformed node + /// record, that advertises a reserved IP address on a WAN network. + fn try_into_reachable( + &self, + enr: &discv5::Enr, + socket: SocketAddr, + ) -> Result { + let id = enr_to_discv4_id(enr).ok_or(Error::IncompatibleKeyType)?; + + let udp_socket = self.ip_mode().get_contactable_addr(enr).unwrap_or(socket); + + // since we, on bootstrap, set tcp4 in local ENR for `IpMode::Dual`, we prefer tcp4 here + // too + let Some(tcp_port) = (match self.ip_mode() { + IpMode::Ip4 | IpMode::DualStack => enr.tcp4(), + IpMode::Ip6 => enr.tcp6(), + }) else { + return Err(Error::IpVersionMismatchRlpx(self.ip_mode())) + }; + + Ok(NodeRecord { address: udp_socket.ip(), tcp_port, udp_port: udp_socket.port(), id }) + } + + /// Applies filtering rules on an ENR. 
Returns [`Ok`](FilterOutcome::Ok) if peer should be + /// passed up to app, and [`Ignore`](FilterOutcome::Ignore) if peer should instead be dropped. + fn filter_discovered_peer(&self, enr: &discv5::Enr) -> FilterOutcome { + self.discovered_peer_filter.filter(enr) + } + + /// Returns the [`ForkId`] of the given [`Enr`](discv5::Enr), if field is set. + fn get_fork_id( + &self, + enr: &discv5::enr::Enr, + ) -> Result { + let mut fork_id_bytes = enr.get_raw_rlp(self.fork_id_key()).ok_or(Error::ForkMissing)?; + + Ok(ForkId::decode(&mut fork_id_bytes)?) + } + + //////////////////////////////////////////////////////////////////////////////////////////////// + // Interface with sigp/discv5 + //////////////////////////////////////////////////////////////////////////////////////////////// + + /// Exposes API of [`discv5::Discv5`]. + pub fn with_discv5(&self, f: F) -> R + where + F: FnOnce(&Self) -> R, + { + f(self) + } + + //////////////////////////////////////////////////////////////////////////////////////////////// + // Complementary + //////////////////////////////////////////////////////////////////////////////////////////////// + + /// Returns the [`IpMode`] of the local node. + pub fn ip_mode(&self) -> IpMode { + self.ip_mode + } + + /// Returns the key to use to identify the [`ForkId`] kv-pair on the [`Enr`](discv5::Enr). + pub fn fork_id_key(&self) -> &[u8] { + self.fork_id_key + } +} + +impl fmt::Debug for Discv5 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + "{ .. }".fmt(f) + } +} + +/// Result of successfully processing a peer discovered by [`discv5::Discv5`]. +#[derive(Debug)] +pub struct DiscoveredPeer { + /// A discovery v4 backwards compatible ENR. + pub node_record: NodeRecord, + /// [`ForkId`] extracted from ENR w.r.t. configured + pub fork_id: Option, +} + +/// Gets the next lookup target, based on which distance is currently being targeted. 
+pub fn get_lookup_target( + log2_distance: usize, + local_node_id: discv5::enr::NodeId, +) -> discv5::enr::NodeId { + let mut target = local_node_id.raw(); + //make sure target has a 'distance'-long suffix that differs from local node id + if log2_distance != 0 { + let suffix_bit_offset = MAX_LOG2_DISTANCE.saturating_sub(log2_distance); + let suffix_byte_offset = suffix_bit_offset / 8; + // todo: flip the precise bit + // let rel_suffix_bit_offset = suffix_bit_offset % 8; + target[suffix_byte_offset] = !target[suffix_byte_offset]; + + if suffix_byte_offset != 31 { + for b in target.iter_mut().take(31).skip(suffix_byte_offset + 1) { + *b = rand::random::(); + } + } + } + + target.into() +} + +#[cfg(test)] +mod tests { + use ::enr::{CombinedKey, EnrKey}; + use rand::Rng; + use secp256k1::rand::thread_rng; + use tracing::trace; + + use super::*; + + fn discv5_noop() -> Discv5 { + let sk = CombinedKey::generate_secp256k1(); + Discv5 { + discv5: Arc::new( + discv5::Discv5::new( + Enr::empty(&sk).unwrap(), + sk, + discv5::ConfigBuilder::new(ListenConfig::default()).build(), + ) + .unwrap(), + ), + ip_mode: IpMode::Ip4, + fork_id_key: b"noop", + discovered_peer_filter: MustNotIncludeKeys::default(), + metrics: Discv5Metrics::default(), + } + } + + async fn start_discovery_node( + udp_port_discv5: u16, + ) -> (Discv5, mpsc::Receiver, NodeRecord) { + let secret_key = SecretKey::new(&mut thread_rng()); + + let discv5_addr: SocketAddr = format!("127.0.0.1:{udp_port_discv5}").parse().unwrap(); + + let discv5_listen_config = ListenConfig::from(discv5_addr); + let discv5_config = Config::builder(30303) + .discv5_config(discv5::ConfigBuilder::new(discv5_listen_config).build()) + .build(); + + Discv5::start(&secret_key, discv5_config).await.expect("should build discv5") + } + + #[tokio::test(flavor = "multi_thread")] + async fn discv5() { + reth_tracing::init_test_tracing(); + + // rig test + + // rig node_1 + let (node_1, mut stream_1, _) = start_discovery_node(30344).await; + 
let node_1_enr = node_1.with_discv5(|discv5| discv5.local_enr()); + + // rig node_2 + let (node_2, mut stream_2, _) = start_discovery_node(30355).await; + let node_2_enr = node_2.with_discv5(|discv5| discv5.local_enr()); + + trace!(target: "net::discovery::tests", + node_1_node_id=format!("{:#}", node_1_enr.node_id()), + node_2_node_id=format!("{:#}", node_2_enr.node_id()), + "started nodes" + ); + + // test + + // add node_2 to discovery handle of node_1 (should add node to discv5 kbuckets) + let node_2_enr_reth_compatible_ty: Enr = + EnrCombinedKeyWrapper(node_2_enr.clone()).into(); + node_1.add_node_to_routing_table(node_2_enr_reth_compatible_ty).unwrap(); + + // verify node_2 is in KBuckets of node_1:discv5 + assert!( + node_1.with_discv5(|discv5| discv5.table_entries_id().contains(&node_2_enr.node_id())) + ); + + // manually trigger connection from node_1 to node_2 + node_1.with_discv5(|discv5| discv5.send_ping(node_2_enr.clone())).await.unwrap(); + + // verify node_1:discv5 is connected to node_2:discv5 and vv + let event_2_v5 = stream_2.recv().await.unwrap(); + let event_1_v5 = stream_1.recv().await.unwrap(); + matches!( + event_1_v5, + discv5::Event::SessionEstablished(node, socket) if node == node_2_enr && socket == node_2_enr.udp4_socket().unwrap().into() + ); + matches!( + event_2_v5, + discv5::Event::SessionEstablished(node, socket) if node == node_1_enr && socket == node_1_enr.udp4_socket().unwrap().into() + ); + + // verify node_1 is in KBuckets of node_2:discv5 + let event_2_v5 = stream_2.recv().await.unwrap(); + matches!( + event_2_v5, + discv5::Event::NodeInserted { node_id, replaced } if node_id == node_1_enr.node_id() && replaced.is_none() + ); + } + + #[test] + fn discovered_enr_disc_socket_missing() { + reth_tracing::init_test_tracing(); + + // rig test + const REMOTE_RLPX_PORT: u16 = 30303; + let remote_socket = "104.28.44.25:9000".parse().unwrap(); + let remote_key = CombinedKey::generate_secp256k1(); + let remote_enr = 
Enr::builder().tcp4(REMOTE_RLPX_PORT).build(&remote_key).unwrap(); + + let mut discv5 = discv5_noop(); + + // test + let filtered_peer = discv5.on_discovered_peer(&remote_enr, remote_socket); + + assert_eq!( + NodeRecord { + address: remote_socket.ip(), + udp_port: remote_socket.port(), + tcp_port: REMOTE_RLPX_PORT, + id: enr_to_discv4_id(&remote_enr).unwrap(), + }, + filtered_peer.unwrap().node_record + ) + } + + // Copied from sigp/discv5 with slight modification (U256 type) + // + #[allow(unreachable_pub)] + #[allow(unused)] + #[allow(clippy::assign_op_pattern)] + mod sigp { + use enr::{ + k256::sha2::digest::generic_array::{typenum::U32, GenericArray}, + NodeId, + }; + use reth_primitives::U256; + + /// A `Key` is a cryptographic hash, identifying both the nodes participating in + /// the Kademlia DHT, as well as records stored in the DHT. + /// + /// The set of all `Key`s defines the Kademlia keyspace. + /// + /// `Key`s have an XOR metric as defined in the Kademlia paper, i.e. the bitwise XOR of + /// the hash digests, interpreted as an integer. See [`Key::distance`]. + /// + /// A `Key` preserves the preimage of type `T` of the hash function. See [`Key::preimage`]. + #[derive(Clone, Debug)] + pub struct Key { + preimage: T, + hash: GenericArray, + } + + impl PartialEq for Key { + fn eq(&self, other: &Key) -> bool { + self.hash == other.hash + } + } + + impl Eq for Key {} + + impl AsRef> for Key { + fn as_ref(&self) -> &Key { + self + } + } + + impl Key { + /// Construct a new `Key` by providing the raw 32 byte hash. + pub fn new_raw(preimage: T, hash: GenericArray) -> Key { + Key { preimage, hash } + } + + /// Borrows the preimage of the key. + pub fn preimage(&self) -> &T { + &self.preimage + } + + /// Converts the key into its preimage. + pub fn into_preimage(self) -> T { + self.preimage + } + + /// Computes the distance of the keys according to the XOR metric. 
+ pub fn distance(&self, other: &Key) -> Distance { + let a = U256::from_be_slice(self.hash.as_slice()); + let b = U256::from_be_slice(other.hash.as_slice()); + Distance(a ^ b) + } + + // Used in the FINDNODE query outside of the k-bucket implementation. + /// Computes the integer log-2 distance between two keys, assuming a 256-bit + /// key. The output returns None if the key's are identical. The range is 1-256. + pub fn log2_distance(&self, other: &Key) -> Option { + let xor_dist = self.distance(other); + let log_dist = (256 - xor_dist.0.leading_zeros() as u64); + if log_dist == 0 { + None + } else { + Some(log_dist) + } + } + } + + impl From for Key { + fn from(node_id: NodeId) -> Self { + Key { preimage: node_id, hash: *GenericArray::from_slice(&node_id.raw()) } + } + } + + /// A distance between two `Key`s. + #[derive(Copy, Clone, PartialEq, Eq, Default, PartialOrd, Ord, Debug)] + pub struct Distance(pub(super) U256); + } + + #[test] + fn select_lookup_target() { + // distance ceiled to the next byte + const fn expected_log2_distance(log2_distance: usize) -> u64 { + let log2_distance = log2_distance / 8; + ((log2_distance + 1) * 8) as u64 + } + + let log2_distance = rand::thread_rng().gen_range(0..=MAX_LOG2_DISTANCE); + + let sk = CombinedKey::generate_secp256k1(); + let local_node_id = discv5::enr::NodeId::from(sk.public()); + let target = get_lookup_target(log2_distance, local_node_id); + + let local_node_id = sigp::Key::from(local_node_id); + let target = sigp::Key::from(target); + + assert_eq!( + expected_log2_distance(log2_distance), + local_node_id.log2_distance(&target).unwrap() + ); + } +} diff --git a/crates/net/discv5/src/metrics.rs b/crates/net/discv5/src/metrics.rs new file mode 100644 index 0000000000000..e38fa0fae17f8 --- /dev/null +++ b/crates/net/discv5/src/metrics.rs @@ -0,0 +1,117 @@ +//! Tracks peer discovery for [`Discv5`](crate::Discv5). 
+use metrics::{Counter, Gauge}; +use reth_metrics::Metrics; + +use crate::config::{ETH, ETH2, OPSTACK}; + +/// Information tracked by [`Discv5`](crate::Discv5). +#[derive(Debug, Default, Clone)] +pub struct Discv5Metrics { + /// Frequency of networks advertised in discovered peers' node records. + pub discovered_peers_advertised_networks: AdvertisedChainMetrics, + /// Tracks discovered peers. + pub discovered_peers: DiscoveredPeersMetrics, +} + +/// Tracks discovered peers. +#[derive(Metrics, Clone)] +#[metrics(scope = "discv5")] +pub struct DiscoveredPeersMetrics { + //////////////////////////////////////////////////////////////////////////////////////////////// + // Kbuckets + //////////////////////////////////////////////////////////////////////////////////////////////// + /// Total peers currently in [`discv5::Discv5`]'s kbuckets. + total_kbucket_peers_raw: Gauge, + /// Total discovered peers that are inserted into [`discv5::Discv5`]'s kbuckets. + /// + /// This is a subset of the total established sessions, in which all peers advertise a udp + /// socket in their node record which is reachable from the local node. Only these peers make + /// it into [`discv5::Discv5`]'s kbuckets and will hence be included in queries. + /// + /// Note: the definition of 'discovered' is not exactly synonymous in `reth_discv4::Discv4`. + total_inserted_kbucket_peers_raw: Counter, + + //////////////////////////////////////////////////////////////////////////////////////////////// + // Sessions + //////////////////////////////////////////////////////////////////////////////////////////////// + /// Total peers currently connected to [`discv5::Discv5`]. + total_sessions_raw: Gauge, + /// Total number of sessions established by [`discv5::Discv5`]. + total_established_sessions_raw: Counter, + /// Total number of sessions established by [`discv5::Discv5`], with peers that don't advertise + /// a socket which is reachable from the local node in their node record. 
+ /// + /// These peers can't make it into [`discv5::Discv5`]'s kbuckets, and hence won't be part of + /// queries (neither shared with peers in NODES responses, nor queried for peers with FINDNODE + /// requests). + total_established_sessions_unreachable_enr: Counter, + /// Total number of sessions established by [`discv5::Discv5`], that pass configured + /// [`filter`](crate::filter) rules. + total_established_sessions_custom_filtered: Counter, +} + +impl DiscoveredPeersMetrics { + /// Sets current total number of peers in [`discv5::Discv5`]'s kbuckets. + pub fn set_total_kbucket_peers(&mut self, num: usize) { + self.total_kbucket_peers_raw.set(num as f64) + } + + /// Increments the number of kbucket insertions in [`discv5::Discv5`]. + pub fn increment_kbucket_insertions(&mut self, num: u64) { + self.total_inserted_kbucket_peers_raw.increment(num) + } + + /// Sets current total number of peers connected to [`discv5::Discv5`]. + pub fn set_total_sessions(&mut self, num: usize) { + self.total_sessions_raw.set(num as f64) + } + + /// Increments number of sessions established by [`discv5::Discv5`]. + pub fn increment_established_sessions_raw(&mut self, num: u64) { + self.total_established_sessions_raw.increment(num) + } + + /// Increments number of sessions established by [`discv5::Discv5`], with peers that don't have + /// a reachable node record. + pub fn increment_established_sessions_unreachable_enr(&mut self, num: u64) { + self.total_established_sessions_unreachable_enr.increment(num) + } + + /// Increments number of sessions established by [`discv5::Discv5`], that pass configured + /// [`filter`](crate::filter) rules. + pub fn increment_established_sessions_filtered(&mut self, num: u64) { + self.total_established_sessions_custom_filtered.increment(num) + } +} + +/// Tracks frequency of networks that are advertised by discovered peers. +/// +/// Peers advertise the chain they belong to as a kv-pair in their node record, using the network +/// as key. 
+#[derive(Metrics, Clone)] +#[metrics(scope = "discv5")] +pub struct AdvertisedChainMetrics { + /// Frequency of node records with a kv-pair with [`OPSTACK`] as key. + opstack: Counter, + + /// Frequency of node records with a kv-pair with [`ETH`] as key. + eth: Counter, + + /// Frequency of node records with a kv-pair with [`ETH2`] as key. + eth2: Counter, +} + +impl AdvertisedChainMetrics { + /// Counts each recognised network type that is advertised on node record, once. + pub fn increment_once_by_network_type(&mut self, enr: &discv5::Enr) { + if enr.get_raw_rlp(OPSTACK).is_some() { + self.opstack.increment(1u64) + } + if enr.get_raw_rlp(ETH).is_some() { + self.eth.increment(1u64) + } + if enr.get_raw_rlp(ETH2).is_some() { + self.eth2.increment(1u64) + } + } +} diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index c583b7c212e4c..cb7ef8522b4ad 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -738,6 +738,13 @@ impl ChainSpec { self.hardfork_fork_id(Hardfork::Cancun) } + /// Convenience method to get the latest fork id from the chainspec. Panics if chainspec has no + /// hardforks. + #[inline] + pub fn latest_fork_id(&self) -> ForkId { + self.hardfork_fork_id(*self.hardforks().last_key_value().unwrap().0).unwrap() + } + /// Get the fork condition for the given fork. 
pub fn fork(&self, fork: Hardfork) -> ForkCondition { self.hardforks.get(&fork).copied().unwrap_or(ForkCondition::Never) @@ -3158,4 +3165,21 @@ Post-merge hard forks (timestamp based): // assert_eq!(base_fee, 980000000); } + + #[test] + fn latest_eth_mainnet_fork_id() { + assert_eq!( + ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 0 }, + MAINNET.latest_fork_id() + ) + } + + #[cfg(feature = "optimism")] + #[test] + fn latest_op_mainnet_fork_id() { + assert_eq!( + ForkId { hash: ForkHash([0x51, 0xcc, 0x98, 0xb3]), next: 0 }, + BASE_MAINNET.latest_fork_id() + ) + } } diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 7cb3d054c83ed..8e548a233f3c5 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -71,8 +71,9 @@ pub use header::{Header, HeaderValidationError, HeadersDirection, SealedHeader}; pub use integer_list::IntegerList; pub use log::{logs_bloom, Log}; pub use net::{ - goerli_nodes, holesky_nodes, mainnet_nodes, parse_nodes, sepolia_nodes, NodeRecord, - GOERLI_BOOTNODES, HOLESKY_BOOTNODES, MAINNET_BOOTNODES, SEPOLIA_BOOTNODES, + goerli_nodes, holesky_nodes, mainnet_nodes, parse_nodes, pk_to_id, sepolia_nodes, NodeRecord, + NodeRecordParseError, GOERLI_BOOTNODES, HOLESKY_BOOTNODES, MAINNET_BOOTNODES, + SEPOLIA_BOOTNODES, }; pub use peer::{id2pk, pk2id, AnyNode, PeerId, WithPeerId}; pub use prune::{ diff --git a/crates/primitives/src/net.rs b/crates/primitives/src/net.rs index 7d122f71a18fd..2e0b77d5099f9 100644 --- a/crates/primitives/src/net.rs +++ b/crates/primitives/src/net.rs @@ -1,4 +1,4 @@ -pub use reth_rpc_types::NodeRecord; +pub use reth_rpc_types::{pk_to_id, NodeRecord, NodeRecordParseError}; // diff --git a/crates/rpc/rpc-types/src/net.rs b/crates/rpc/rpc-types/src/net.rs index c5d2f72e392c4..3fc3d74991a23 100644 --- a/crates/rpc/rpc-types/src/net.rs +++ b/crates/rpc/rpc-types/src/net.rs @@ -1,5 +1,6 @@ -use crate::PeerId; +use crate::{pk_to_id, PeerId}; use alloy_rlp::{RlpDecodable, 
RlpEncodable}; +use enr::Enr; use secp256k1::{SecretKey, SECP256K1}; use serde_with::{DeserializeFromStr, SerializeDisplay}; use std::{ @@ -9,6 +10,7 @@ use std::{ num::ParseIntError, str::FromStr, }; +use thiserror::Error; use url::{Host, Url}; /// Represents a ENR in discovery. @@ -114,8 +116,8 @@ impl fmt::Display for NodeRecord { } } -/// Possible error types when parsing a `NodeRecord` -#[derive(Debug, thiserror::Error)] +/// Possible error types when parsing a [`NodeRecord`] +#[derive(Debug, Error)] pub enum NodeRecordParseError { /// Invalid url #[error("Failed to parse url: {0}")] @@ -165,6 +167,29 @@ impl FromStr for NodeRecord { } } +impl TryFrom<&Enr> for NodeRecord { + type Error = NodeRecordParseError; + + fn try_from(enr: &Enr) -> Result { + let Some(address) = enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from)) + else { + return Err(NodeRecordParseError::InvalidUrl("ip missing".to_string())) + }; + + let Some(udp_port) = enr.udp4().or_else(|| enr.udp6()) else { + return Err(NodeRecordParseError::InvalidUrl("udp port missing".to_string())) + }; + + let Some(tcp_port) = enr.tcp4().or_else(|| enr.tcp6()) else { + return Err(NodeRecordParseError::InvalidUrl("tcp port missing".to_string())) + }; + + let id = pk_to_id(&enr.public_key()); + + Ok(NodeRecord { address, tcp_port, udp_port, id }.into_ipv4_mapped()) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/rpc/rpc-types/src/peer.rs b/crates/rpc/rpc-types/src/peer.rs index a07e61d00285d..44dbe5d71f24c 100644 --- a/crates/rpc/rpc-types/src/peer.rs +++ b/crates/rpc/rpc-types/src/peer.rs @@ -2,3 +2,8 @@ use alloy_primitives::B512; /// Alias for a peer identifier pub type PeerId = B512; + +/// Converts a [`secp256k1::PublicKey`] to a [`PeerId`]. 
+pub fn pk_to_id(pk: &secp256k1::PublicKey) -> PeerId { + PeerId::from_slice(&pk.serialize_uncompressed()[1..]) +} diff --git a/deny.toml b/deny.toml index c0fc53921510b..347b609651fca 100644 --- a/deny.toml +++ b/deny.toml @@ -90,4 +90,5 @@ allow-git = [ # TODO: remove, see ./Cargo.toml "https://github.com/alloy-rs/alloy", "https://github.com/paradigmxyz/evm-inspectors", + "https://github.com/sigp/discv5", ] From 68727699db5d5979def23ff93af6f39d5d284b12 Mon Sep 17 00:00:00 2001 From: Mourad Kejji Date: Wed, 3 Apr 2024 11:53:30 +0200 Subject: [PATCH 040/700] chore: update kurtosis config in assertoor and book (#7261) (#7262) --- book/run/private-testnet.md | 16 +++++----- etc/assertoor/assertoor-template.yaml | 44 +++++++++++++-------------- 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/book/run/private-testnet.md b/book/run/private-testnet.md index afa58449efce9..958d769e348d3 100644 --- a/book/run/private-testnet.md +++ b/book/run/private-testnet.md @@ -18,17 +18,17 @@ First, in your home directory, create a file with the name `network_params.json` { "participants": [ { - "el_client_type": "reth", - "el_client_image": "ghcr.io/paradigmxyz/reth", - "cl_client_type": "lighthouse", - "cl_client_image": "sigp/lighthouse:latest", + "el_type": "reth", + "el_image": "ghcr.io/paradigmxyz/reth", + "cl_type": "lighthouse", + "cl_image": "sigp/lighthouse:latest", "count": 1 }, { - "el_client_type": "reth", - "el_client_image": "ghcr.io/paradigmxyz/reth", - "cl_client_type": "teku", - "cl_client_image": "consensys/teku:latest", + "el_type": "reth", + "el_image": "ghcr.io/paradigmxyz/reth", + "cl_type": "teku", + "cl_image": "consensys/teku:latest", "count": 1 } ], diff --git a/etc/assertoor/assertoor-template.yaml b/etc/assertoor/assertoor-template.yaml index bf3e903cc1d0a..16e4be914e7ee 100644 --- a/etc/assertoor/assertoor-template.yaml +++ b/etc/assertoor/assertoor-template.yaml @@ -1,30 +1,30 @@ participants: -- el_client_type: reth - el_client_image: 
ghcr.io/paradigmxyz/reth - cl_client_type: lighthouse - cl_client_image: sigp/lighthouse:latest +- el_type: reth + el_image: ghcr.io/paradigmxyz/reth + cl_type: lighthouse + cl_image: sigp/lighthouse:latest count: 1 -- el_client_type: reth - el_client_image: ghcr.io/paradigmxyz/reth - cl_client_type: teku - cl_client_image: consensys/teku:latest +- el_type: reth + el_image: ghcr.io/paradigmxyz/reth + cl_type: teku + cl_image: consensys/teku:latest count: 1 -- el_client_type: reth - el_client_image: ghcr.io/paradigmxyz/reth - cl_client_type: prysm - cl_client_image: gcr.io/prysmaticlabs/prysm/beacon-chain:stable - validator_client_type: prysm - validator_client_image: gcr.io/prysmaticlabs/prysm/validator:stable +- el_type: reth + el_image: ghcr.io/paradigmxyz/reth + cl_type: prysm + cl_image: gcr.io/prysmaticlabs/prysm/beacon-chain:stable + vc_type: prysm + vc_image: gcr.io/prysmaticlabs/prysm/validator:stable count: 1 -- el_client_type: reth - el_client_image: ghcr.io/paradigmxyz/reth - cl_client_type: nimbus - cl_client_image: statusim/nimbus-eth2:amd64-latest +- el_type: reth + el_image: ghcr.io/paradigmxyz/reth + cl_type: nimbus + cl_image: statusim/nimbus-eth2:amd64-latest count: 1 -- el_client_type: reth - el_client_image: ghcr.io/paradigmxyz/reth - cl_client_type: lodestar - cl_client_image: chainsafe/lodestar:latest +- el_type: reth + el_image: ghcr.io/paradigmxyz/reth + cl_type: lodestar + cl_image: chainsafe/lodestar:latest count: 1 network_params: genesis_delay: 120 From f71d9c000368af25eda73017cff8d985acb84c78 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 3 Apr 2024 12:28:49 +0200 Subject: [PATCH 041/700] feat(prune): timeout (#6958) Co-authored-by: Alexey Shekhirin Co-authored-by: Matthias Seitz --- Cargo.lock | 2 + .../consensus/beacon/src/engine/test_utils.rs | 1 + crates/net/network/src/transactions/mod.rs | 2 +- crates/node-builder/src/builder.rs | 1 + crates/primitives/src/lib.rs | 5 +- crates/primitives/src/peer.rs | 6 +- 
crates/primitives/src/prune/limiter.rs | 122 +++++++++ crates/primitives/src/prune/mod.rs | 54 +++- crates/prune/Cargo.toml | 2 + crates/prune/src/builder.rs | 18 ++ crates/prune/src/pruner.rs | 72 +++-- crates/prune/src/segments/account_history.rs | 248 ++++++++++-------- crates/prune/src/segments/headers.rs | 248 +++++++++++++----- crates/prune/src/segments/mod.rs | 57 ++-- crates/prune/src/segments/receipts.rs | 45 +++- crates/prune/src/segments/receipts_by_logs.rs | 27 +- crates/prune/src/segments/sender_recovery.rs | 52 ++-- crates/prune/src/segments/storage_history.rs | 73 ++++-- .../prune/src/segments/transaction_lookup.rs | 57 ++-- crates/prune/src/segments/transactions.rs | 45 +++- crates/rpc/rpc/src/debug.rs | 2 +- crates/rpc/rpc/src/eth/api/transactions.rs | 2 +- .../src/providers/database/provider.rs | 117 ++++++--- 23 files changed, 907 insertions(+), 351 deletions(-) create mode 100644 crates/primitives/src/prune/limiter.rs diff --git a/Cargo.lock b/Cargo.lock index 9a7f4d4bc61c6..7d74ed3ae3502 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6759,6 +6759,7 @@ name = "reth-prune" version = "0.2.0-beta.4" dependencies = [ "assert_matches", + "derive_more", "itertools 0.12.1", "metrics", "rayon", @@ -6770,6 +6771,7 @@ dependencies = [ "reth-provider", "reth-stages", "reth-tokio-util", + "reth-tracing", "thiserror", "tokio-stream", "tracing", diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 55b37f812a6d7..7aeb8d746d339 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -434,6 +434,7 @@ where 5, self.base_config.chain_spec.prune_delete_limit, config.max_reorg_depth() as usize, + None, ); let mut hooks = EngineHooks::new(); diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 04851f42bc5dc..913001f6ba276 100644 --- a/crates/net/network/src/transactions/mod.rs +++ 
b/crates/net/network/src/transactions/mod.rs @@ -1515,7 +1515,7 @@ impl PeerMetadata { fn new(request_tx: PeerRequestSender, version: EthVersion, client_version: Arc) -> Self { Self { seen_transactions: LruCache::new( - NonZeroUsize::new(DEFAULT_CAPACITY_CACHE_SEEN_BY_PEER).expect("infallible"), + NonZeroUsize::new(DEFAULT_CAPACITY_CACHE_SEEN_BY_PEER).unwrap(), ), request_tx, version, diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index 72372d15e49b0..ee194386d3832 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -677,6 +677,7 @@ where let mut pruner = PrunerBuilder::new(prune_config.clone()) .max_reorg_depth(tree_config.max_reorg_depth() as usize) .prune_delete_limit(config.chain.prune_delete_limit) + .timeout(PrunerBuilder::DEFAULT_TIMEOUT) .build(provider_factory.clone()); let pruner_events = pruner.events(); diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 8e548a233f3c5..8210e1a8cf360 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -77,8 +77,9 @@ pub use net::{ }; pub use peer::{id2pk, pk2id, AnyNode, PeerId, WithPeerId}; pub use prune::{ - PruneCheckpoint, PruneMode, PruneModes, PruneProgress, PrunePurpose, PruneSegment, - PruneSegmentError, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE, + PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneModes, PruneProgress, + PrunePurpose, PruneSegment, PruneSegmentError, ReceiptsLogPruneConfig, + MINIMUM_PRUNING_DISTANCE, }; pub use receipt::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts}; pub use static_file::StaticFileSegment; diff --git a/crates/primitives/src/peer.rs b/crates/primitives/src/peer.rs index 0f6a814c3abb2..f66361f39b204 100644 --- a/crates/primitives/src/peer.rs +++ b/crates/primitives/src/peer.rs @@ -93,13 +93,13 @@ impl FromStr for AnyNode { fn from_str(s: &str) -> Result { if let Some(rem) = s.strip_prefix("enode://") { if let 
Ok(record) = NodeRecord::from_str(s) { - return Ok(AnyNode::NodeRecord(record)); + return Ok(AnyNode::NodeRecord(record)) } // incomplete enode if let Ok(peer_id) = PeerId::from_str(rem) { - return Ok(AnyNode::PeerId(peer_id)); + return Ok(AnyNode::PeerId(peer_id)) } - return Err(format!("invalid public key: {rem}")); + return Err(format!("invalid public key: {rem}")) } if s.starts_with("enr:") { return Enr::from_str(s).map(AnyNode::Enr) diff --git a/crates/primitives/src/prune/limiter.rs b/crates/primitives/src/prune/limiter.rs new file mode 100644 index 0000000000000..94adc1563fc40 --- /dev/null +++ b/crates/primitives/src/prune/limiter.rs @@ -0,0 +1,122 @@ +use std::{ + num::NonZeroUsize, + time::{Duration, Instant}, +}; + +/// Limits a pruner run by either the number of entries (rows in the database) that can be deleted +/// or the time it can run. +#[derive(Debug, Clone, Default)] +pub struct PruneLimiter { + /// Maximum entries (rows in the database) to delete from the database per block. + deleted_entries_limit: Option, + /// Maximum duration of one prune run. + time_limit: Option, +} + +#[derive(Debug, Clone)] +struct PruneDeletedEntriesLimit { + /// Maximum entries (rows in the database) to delete from the database. + limit: usize, + /// Current number of entries (rows in the database) that have been deleted. + deleted: usize, +} + +impl PruneDeletedEntriesLimit { + fn new(limit: usize) -> Self { + Self { limit, deleted: 0 } + } + + fn is_limit_reached(&self) -> bool { + self.deleted >= self.limit + } +} + +#[derive(Debug, Clone)] +struct PruneTimeLimit { + /// Maximum duration of one prune run. + limit: Duration, + /// Time when the prune run has started. 
+ start: Instant, +} + +impl PruneTimeLimit { + fn new(limit: Duration) -> Self { + Self { limit, start: Instant::now() } + } + + fn is_limit_reached(&self) -> bool { + self.start.elapsed() > self.limit + } +} + +impl PruneLimiter { + /// Sets the limit on the number of deleted entries (rows in the database). + /// If the limit was already set, it will be overwritten. + pub fn set_deleted_entries_limit(mut self, limit: usize) -> Self { + if let Some(deleted_entries_limit) = self.deleted_entries_limit.as_mut() { + deleted_entries_limit.limit = limit; + } else { + self.deleted_entries_limit = Some(PruneDeletedEntriesLimit::new(limit)); + } + + self + } + + /// Sets the limit on the number of deleted entries (rows in the database) to a biggest + /// multiple of the given denominator that is smaller than the existing limit. + /// + /// If the limit wasn't set, does nothing. + pub fn floor_deleted_entries_limit_to_multiple_of(mut self, denominator: NonZeroUsize) -> Self { + if let Some(deleted_entries_limit) = self.deleted_entries_limit.as_mut() { + deleted_entries_limit.limit = + (deleted_entries_limit.limit / denominator) * denominator.get(); + } + + self + } + + /// Returns `true` if the limit on the number of deleted entries (rows in the database) is + /// reached. + pub fn is_deleted_entries_limit_reached(&self) -> bool { + self.deleted_entries_limit.as_ref().map_or(false, |limit| limit.is_limit_reached()) + } + + /// Increments the number of deleted entries by the given number. + pub fn increment_deleted_entries_count_by(&mut self, entries: usize) { + if let Some(limit) = self.deleted_entries_limit.as_mut() { + limit.deleted += entries; + } + } + + /// Increments the number of deleted entries by one. + pub fn increment_deleted_entries_count(&mut self) { + self.increment_deleted_entries_count_by(1) + } + + /// Returns the number of deleted entries left before the limit is reached. 
+ pub fn deleted_entries_limit_left(&self) -> Option { + self.deleted_entries_limit.as_ref().map(|limit| limit.limit - limit.deleted) + } + + /// Returns the limit on the number of deleted entries (rows in the database). + pub fn deleted_entries_limit(&self) -> Option { + self.deleted_entries_limit.as_ref().map(|limit| limit.limit) + } + + /// Sets the time limit. + pub fn set_time_limit(mut self, limit: Duration) -> Self { + self.time_limit = Some(PruneTimeLimit::new(limit)); + + self + } + + /// Returns `true` if time limit is reached. + pub fn is_time_limit_reached(&self) -> bool { + self.time_limit.as_ref().map_or(false, |limit| limit.is_limit_reached()) + } + + /// Returns `true` if any limit is reached. + pub fn is_limit_reached(&self) -> bool { + self.is_deleted_entries_limit_reached() || self.is_time_limit_reached() + } +} diff --git a/crates/primitives/src/prune/mod.rs b/crates/primitives/src/prune/mod.rs index b11aef43263ad..07da6132fc40c 100644 --- a/crates/primitives/src/prune/mod.rs +++ b/crates/primitives/src/prune/mod.rs @@ -1,10 +1,12 @@ mod checkpoint; +mod limiter; mod mode; mod segment; mod target; use crate::{Address, BlockNumber}; pub use checkpoint::PruneCheckpoint; +pub use limiter::PruneLimiter; pub use mode::PruneMode; pub use segment::{PrunePurpose, PruneSegment, PruneSegmentError}; use serde::{Deserialize, Serialize}; @@ -91,21 +93,61 @@ impl ReceiptsLogPruneConfig { #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum PruneProgress { /// There is more data to prune. - HasMoreData, + HasMoreData(PruneInterruptReason), /// Pruning has been finished. Finished, } +/// Reason for interrupting a prune run. +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum PruneInterruptReason { + /// Prune run timed out. + Timeout, + /// Limit on the number of deleted entries (rows in the database) per prune run was reached. + DeletedEntriesLimitReached, + /// Unknown reason for stopping prune run. 
+ Unknown, +} + +impl PruneInterruptReason { + /// Creates new [PruneInterruptReason] based on the [PruneLimiter]. + pub fn new(limiter: &PruneLimiter) -> Self { + if limiter.is_time_limit_reached() { + Self::Timeout + } else if limiter.is_deleted_entries_limit_reached() { + Self::DeletedEntriesLimitReached + } else { + Self::Unknown + } + } + + /// Returns `true` if the reason is timeout. + pub const fn is_timeout(&self) -> bool { + matches!(self, Self::Timeout) + } + + /// Returns `true` if the reason is reaching the limit on deleted entries. + pub const fn is_entries_limit_reached(&self) -> bool { + matches!(self, Self::DeletedEntriesLimitReached) + } +} + impl PruneProgress { - /// Creates new [PruneProgress] from `done` boolean value. + /// Creates new [PruneProgress]. /// - /// If `done == true`, returns [PruneProgress::Finished], otherwise [PruneProgress::HasMoreData] - /// is returned. - pub fn from_done(done: bool) -> Self { + /// If `done == true`, returns [PruneProgress::Finished], otherwise + /// [PruneProgress::HasMoreData] is returned with [PruneInterruptReason] according to the passed + /// limiter. + pub fn new(done: bool, limiter: &PruneLimiter) -> Self { if done { Self::Finished } else { - Self::HasMoreData + Self::HasMoreData(PruneInterruptReason::new(limiter)) } } + + /// Returns `true` if prune run is finished. 
+ pub const fn is_finished(&self) -> bool { + matches!(self, Self::Finished) + } } diff --git a/crates/prune/Cargo.toml b/crates/prune/Cargo.toml index d59da8096c016..3a8971a667d32 100644 --- a/crates/prune/Cargo.toml +++ b/crates/prune/Cargo.toml @@ -35,7 +35,9 @@ tokio-stream.workspace = true # reth reth-db = { workspace = true, features = ["test-utils"] } reth-stages = { workspace = true, features = ["test-utils"] } +reth-tracing.workspace = true # misc +derive_more.workspace = true assert_matches.workspace = true diff --git a/crates/prune/src/builder.rs b/crates/prune/src/builder.rs index 5836688bf0fae..377a986647e17 100644 --- a/crates/prune/src/builder.rs +++ b/crates/prune/src/builder.rs @@ -1,3 +1,5 @@ +use std::time::Duration; + use crate::{segments::SegmentSet, Pruner}; use reth_config::PruneConfig; use reth_db::database::Database; @@ -17,9 +19,14 @@ pub struct PrunerBuilder { /// the amount of blocks between pruner runs to account for the difference in amount of new /// data coming in. pub prune_delete_limit: usize, + /// Time a pruner job can run before timing out. + pub timeout: Option, } impl PrunerBuilder { + /// Default timeout for a prune run. + pub const DEFAULT_TIMEOUT: Duration = Duration::from_millis(100); + /// Creates a new [PrunerBuilder] from the given [PruneConfig]. pub fn new(pruner_config: PruneConfig) -> Self { PrunerBuilder::default() @@ -51,6 +58,15 @@ impl PrunerBuilder { self } + /// Sets the timeout for pruner, per run. + /// + /// CAUTION: Account and Storage History prune segments treat this timeout as a soft limit, + /// meaning they can go beyond it. + pub fn timeout(mut self, timeout: Duration) -> Self { + self.timeout = Some(timeout); + self + } + /// Builds a [Pruner] from the current configuration. 
pub fn build(self, provider_factory: ProviderFactory) -> Pruner { let segments = SegmentSet::::from_prune_modes(self.segments); @@ -61,6 +77,7 @@ impl PrunerBuilder { self.block_interval, self.prune_delete_limit, self.max_reorg_depth, + self.timeout, ) } } @@ -72,6 +89,7 @@ impl Default for PrunerBuilder { segments: PruneModes::none(), max_reorg_depth: 64, prune_delete_limit: MAINNET.prune_delete_limit, + timeout: Some(Self::DEFAULT_TIMEOUT), } } } diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index b5e0cc1de088c..750284ad59609 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -7,11 +7,15 @@ use crate::{ }; use reth_db::database::Database; use reth_primitives::{ - BlockNumber, PruneMode, PruneProgress, PrunePurpose, PruneSegment, StaticFileSegment, + BlockNumber, PruneLimiter, PruneMode, PruneProgress, PrunePurpose, PruneSegment, + StaticFileSegment, }; use reth_provider::{DatabaseProviderRW, ProviderFactory, PruneCheckpointReader}; use reth_tokio_util::EventListeners; -use std::{collections::BTreeMap, time::Instant}; +use std::{ + collections::BTreeMap, + time::{Duration, Instant}, +}; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::debug; @@ -36,10 +40,13 @@ pub struct Pruner { /// conjunction with `min_block_interval` to determine when the pruning needs to be initiated. previous_tip_block_number: Option, /// Maximum total entries to prune (delete from database) per block. - delete_limit: usize, + delete_limit_per_block: usize, /// Maximum number of blocks to be pruned per run, as an additional restriction to /// `previous_tip_block_number`. prune_max_blocks_per_run: usize, + /// Maximum time for a one pruner run. 
+ timeout: Option, + #[doc(hidden)] metrics: Metrics, listeners: EventListeners, } @@ -52,14 +59,16 @@ impl Pruner { min_block_interval: usize, delete_limit: usize, prune_max_blocks_per_run: usize, + timeout: Option, ) -> Self { Self { provider_factory, segments, min_block_interval, previous_tip_block_number: None, - delete_limit, + delete_limit_per_block: delete_limit, prune_max_blocks_per_run, + timeout, metrics: Metrics::default(), listeners: Default::default(), } @@ -100,11 +109,16 @@ impl Pruner { tip_block_number.saturating_sub(previous_tip_block_number) as usize }) .min(self.prune_max_blocks_per_run); - let delete_limit = self.delete_limit * blocks_since_last_run; + + let mut limiter = PruneLimiter::default() + .set_deleted_entries_limit(self.delete_limit_per_block * blocks_since_last_run); + if let Some(timeout) = self.timeout { + limiter = limiter.set_time_limit(timeout); + }; let provider = self.provider_factory.provider_rw()?; - let (stats, delete_limit, progress) = - self.prune_segments(&provider, tip_block_number, delete_limit)?; + let (stats, deleted_entries, progress) = + self.prune_segments(&provider, tip_block_number, &mut limiter)?; provider.commit()?; self.previous_tip_block_number = Some(tip_block_number); @@ -112,14 +126,20 @@ impl Pruner { let elapsed = start.elapsed(); self.metrics.duration_seconds.record(elapsed); + let message = match progress { + PruneProgress::HasMoreData(_) => "Pruner interrupted and has more data to prune", + PruneProgress::Finished => "Pruner finished", + }; + debug!( target: "pruner", %tip_block_number, ?elapsed, - %delete_limit, + ?deleted_entries, + ?limiter, ?progress, ?stats, - "Pruner finished" + "{message}", ); self.listeners.notify(PrunerEvent::Finished { tip_block_number, elapsed, stats }); @@ -128,15 +148,15 @@ impl Pruner { } /// Prunes the segments that the [Pruner] was initialized with, and the segments that needs to - /// be pruned according to the highest static_files. 
+ /// be pruned according to the highest static_files. Segments are parts of the database that + /// represent one or more tables. /// - /// Returns [PrunerStats], `delete_limit` that remained after pruning all segments, and - /// [PruneProgress]. + /// Returns [PrunerStats], total number of entries pruned, and [PruneProgress]. fn prune_segments( &mut self, provider: &DatabaseProviderRW, tip_block_number: BlockNumber, - mut delete_limit: usize, + limiter: &mut PruneLimiter, ) -> Result<(PrunerStats, usize, PruneProgress), PrunerError> { let static_file_segments = self.static_file_segments(); let segments = static_file_segments @@ -144,11 +164,12 @@ impl Pruner { .map(|segment| (segment, PrunePurpose::StaticFile)) .chain(self.segments.iter().map(|segment| (segment, PrunePurpose::User))); - let mut done = true; let mut stats = PrunerStats::new(); + let mut pruned = 0; + let mut progress = PruneProgress::Finished; for (segment, purpose) in segments { - if delete_limit == 0 { + if limiter.is_limit_reached() { break } @@ -169,8 +190,10 @@ impl Pruner { let segment_start = Instant::now(); let previous_checkpoint = provider.get_prune_checkpoint(segment.segment())?; - let output = segment - .prune(provider, PruneInput { previous_checkpoint, to_block, delete_limit })?; + let output = segment.prune( + provider, + PruneInput { previous_checkpoint, to_block, limiter: limiter.clone() }, + )?; if let Some(checkpoint) = output.checkpoint { segment .save_checkpoint(provider, checkpoint.as_prune_checkpoint(prune_mode))?; @@ -185,8 +208,7 @@ impl Pruner { .highest_pruned_block .set(to_block as f64); - done = done && output.done; - delete_limit = delete_limit.saturating_sub(output.pruned); + progress = output.progress; debug!( target: "pruner", @@ -199,17 +221,16 @@ impl Pruner { ); if output.pruned > 0 { - stats.insert( - segment.segment(), - (PruneProgress::from_done(output.done), output.pruned), - ); + limiter.increment_deleted_entries_count_by(output.pruned); + pruned += 
output.pruned; + stats.insert(segment.segment(), (output.progress, output.pruned)); } } else { debug!(target: "pruner", segment = ?segment.segment(), ?purpose, "Nothing to prune for the segment"); } } - Ok((stats, delete_limit, PruneProgress::from_done(done))) + Ok((stats, pruned, progress)) } /// Returns pre-configured segments that needs to be pruned according to the highest @@ -266,6 +287,7 @@ impl Pruner { #[cfg(test)] mod tests { + use crate::Pruner; use reth_db::test_utils::{create_test_rw_db, create_test_static_files_dir}; use reth_primitives::MAINNET; @@ -277,7 +299,7 @@ mod tests { let (_static_dir, static_dir_path) = create_test_static_files_dir(); let provider_factory = ProviderFactory::new(db, MAINNET.clone(), static_dir_path) .expect("create provide factory with static_files"); - let mut pruner = Pruner::new(provider_factory, vec![], 5, 0, 5); + let mut pruner = Pruner::new(provider_factory, vec![], 5, 0, 5, None); // No last pruned block number was set before let first_block_number = 1; diff --git a/crates/prune/src/segments/account_history.rs b/crates/prune/src/segments/account_history.rs index a18897640baaa..c0d92929801d6 100644 --- a/crates/prune/src/segments/account_history.rs +++ b/crates/prune/src/segments/account_history.rs @@ -5,10 +5,16 @@ use crate::{ PrunerError, }; use reth_db::{database::Database, models::ShardedKey, tables}; -use reth_primitives::{PruneMode, PruneSegment}; +use reth_primitives::{PruneInterruptReason, PruneMode, PruneProgress, PruneSegment}; use reth_provider::DatabaseProviderRW; use tracing::{instrument, trace}; +/// Number of account history tables to prune in one step. +/// +/// Account History consists of two tables: [tables::AccountChangeSets] and +/// [tables::AccountsHistory]. We want to prune them to the same block number. 
+const ACCOUNT_HISTORY_TABLES_TO_PRUNE: usize = 2; + #[derive(Debug)] pub struct AccountHistory { mode: PruneMode, @@ -44,11 +50,23 @@ impl Segment for AccountHistory { }; let range_end = *range.end(); + let mut limiter = if let Some(limit) = input.limiter.deleted_entries_limit() { + input.limiter.set_deleted_entries_limit(limit / ACCOUNT_HISTORY_TABLES_TO_PRUNE) + } else { + input.limiter + }; + if limiter.is_limit_reached() { + return Ok(PruneOutput::not_done( + PruneInterruptReason::new(&limiter), + input.previous_checkpoint.map(|checkpoint| checkpoint.into()), + )) + } + let mut last_changeset_pruned_block = None; let (pruned_changesets, done) = provider .prune_table_with_range::( range, - input.delete_limit / 2, + &mut limiter, |_| false, |row| last_changeset_pruned_block = Some(row.0), )?; @@ -66,10 +84,12 @@ impl Segment for AccountHistory { |a, b| a.key == b.key, |key| ShardedKey::last(key.key), )?; - trace!(target: "pruner", %processed, pruned = %pruned_indices, %done, "Pruned account history (history)" ); + trace!(target: "pruner", %processed, pruned = %pruned_indices, %done, "Pruned account history (history)"); + + let progress = PruneProgress::new(done, &limiter); Ok(PruneOutput { - done, + progress, pruned: pruned_changesets + pruned_indices, checkpoint: Some(PruneOutputCheckpoint { block_number: Some(last_changeset_pruned_block), @@ -81,14 +101,20 @@ impl Segment for AccountHistory { #[cfg(test)] mod tests { - use crate::segments::{AccountHistory, PruneInput, PruneOutput, Segment}; + use crate::segments::{ + account_history::ACCOUNT_HISTORY_TABLES_TO_PRUNE, AccountHistory, PruneInput, PruneOutput, + Segment, + }; use assert_matches::assert_matches; use reth_db::{tables, BlockNumberList}; use reth_interfaces::test_utils::{ generators, generators::{random_block_range, random_changeset_range, random_eoa_accounts}, }; - use reth_primitives::{BlockNumber, PruneCheckpoint, PruneMode, PruneSegment, B256}; + use reth_primitives::{ + BlockNumber, 
PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, + PruneSegment, B256, + }; use reth_provider::PruneCheckpointReader; use reth_stages::test_utils::{StorageKind, TestStageDB}; use std::{collections::BTreeMap, ops::AddAssign}; @@ -129,114 +155,126 @@ mod tests { let original_shards = db.table::().unwrap(); - let test_prune = |to_block: BlockNumber, run: usize, expected_result: (bool, usize)| { - let prune_mode = PruneMode::Before(to_block); - let input = PruneInput { - previous_checkpoint: db - .factory - .provider() - .unwrap() - .get_prune_checkpoint(PruneSegment::AccountHistory) - .unwrap(), - to_block, - delete_limit: 2000, - }; - let segment = AccountHistory::new(prune_mode); - - let provider = db.factory.provider_rw().unwrap(); - let result = segment.prune(&provider, input).unwrap(); - assert_matches!( - result, - PruneOutput {done, pruned, checkpoint: Some(_)} - if (done, pruned) == expected_result - ); - segment - .save_checkpoint( - &provider, - result.checkpoint.unwrap().as_prune_checkpoint(prune_mode), - ) - .unwrap(); - provider.commit().expect("commit"); - - let changesets = changesets - .iter() - .enumerate() - .flat_map(|(block_number, changeset)| { - changeset.iter().map(move |change| (block_number, change)) - }) - .collect::>(); - - #[allow(clippy::skip_while_next)] - let pruned = changesets - .iter() - .enumerate() - .skip_while(|(i, (block_number, _))| { - *i < input.delete_limit / 2 * run && *block_number <= to_block as usize - }) - .next() - .map(|(i, _)| i) - .unwrap_or_default(); + let test_prune = + |to_block: BlockNumber, run: usize, expected_result: (PruneProgress, usize)| { + let prune_mode = PruneMode::Before(to_block); + let deleted_entries_limit = 2000; + let mut limiter = + PruneLimiter::default().set_deleted_entries_limit(deleted_entries_limit); + let input = PruneInput { + previous_checkpoint: db + .factory + .provider() + .unwrap() + .get_prune_checkpoint(PruneSegment::AccountHistory) + .unwrap(), + 
to_block, + limiter: limiter.clone(), + }; + let segment = AccountHistory::new(prune_mode); + + let provider = db.factory.provider_rw().unwrap(); + let result = segment.prune(&provider, input).unwrap(); + limiter.increment_deleted_entries_count_by(result.pruned); + + assert_matches!( + result, + PruneOutput {progress, pruned, checkpoint: Some(_)} + if (progress, pruned) == expected_result + ); + + segment + .save_checkpoint( + &provider, + result.checkpoint.unwrap().as_prune_checkpoint(prune_mode), + ) + .unwrap(); + provider.commit().expect("commit"); - let mut pruned_changesets = changesets - .iter() - // Skip what we've pruned so far, subtracting one to get last pruned block number - // further down - .skip(pruned.saturating_sub(1)); + let changesets = changesets + .iter() + .enumerate() + .flat_map(|(block_number, changeset)| { + changeset.iter().map(move |change| (block_number, change)) + }) + .collect::>(); - let last_pruned_block_number = pruned_changesets + #[allow(clippy::skip_while_next)] + let pruned = changesets + .iter() + .enumerate() + .skip_while(|(i, (block_number, _))| { + *i < deleted_entries_limit / ACCOUNT_HISTORY_TABLES_TO_PRUNE * run && + *block_number <= to_block as usize + }) + .next() + .map(|(i, _)| i) + .unwrap_or_default(); + + let mut pruned_changesets = changesets + .iter() + // Skip what we've pruned so far, subtracting one to get last pruned block + // number further down + .skip(pruned.saturating_sub(1)); + + let last_pruned_block_number = pruned_changesets .next() - .map(|(block_number, _)| if result.done { + .map(|(block_number, _)| if result.progress.is_finished() { *block_number } else { block_number.saturating_sub(1) } as BlockNumber) .unwrap_or(to_block); - let pruned_changesets = pruned_changesets.fold( - BTreeMap::<_, Vec<_>>::new(), - |mut acc, (block_number, change)| { - acc.entry(block_number).or_default().push(change); - acc - }, - ); - - assert_eq!( - db.table::().unwrap().len(), - 
pruned_changesets.values().flatten().count() - ); - - let actual_shards = db.table::().unwrap(); - - let expected_shards = original_shards - .iter() - .filter(|(key, _)| key.highest_block_number > last_pruned_block_number) - .map(|(key, blocks)| { - let new_blocks = blocks - .iter() - .skip_while(|block| *block <= last_pruned_block_number) - .collect::>(); - (key.clone(), BlockNumberList::new_pre_sorted(new_blocks)) - }) - .collect::>(); - - assert_eq!(actual_shards, expected_shards); - - assert_eq!( - db.factory - .provider() - .unwrap() - .get_prune_checkpoint(PruneSegment::AccountHistory) - .unwrap(), - Some(PruneCheckpoint { - block_number: Some(last_pruned_block_number), - tx_number: None, - prune_mode - }) - ); - }; + let pruned_changesets = pruned_changesets.fold( + BTreeMap::<_, Vec<_>>::new(), + |mut acc, (block_number, change)| { + acc.entry(block_number).or_default().push(change); + acc + }, + ); + + assert_eq!( + db.table::().unwrap().len(), + pruned_changesets.values().flatten().count() + ); + + let actual_shards = db.table::().unwrap(); - test_prune(998, 1, (false, 1000)); - test_prune(998, 2, (true, 998)); - test_prune(1400, 3, (true, 804)); + let expected_shards = original_shards + .iter() + .filter(|(key, _)| key.highest_block_number > last_pruned_block_number) + .map(|(key, blocks)| { + let new_blocks = blocks + .iter() + .skip_while(|block| *block <= last_pruned_block_number) + .collect::>(); + (key.clone(), BlockNumberList::new_pre_sorted(new_blocks)) + }) + .collect::>(); + + assert_eq!(actual_shards, expected_shards); + + assert_eq!( + db.factory + .provider() + .unwrap() + .get_prune_checkpoint(PruneSegment::AccountHistory) + .unwrap(), + Some(PruneCheckpoint { + block_number: Some(last_pruned_block_number), + tx_number: None, + prune_mode + }) + ); + }; + + test_prune( + 998, + 1, + (PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 1000), + ); + test_prune(998, 2, (PruneProgress::Finished, 998)); + 
test_prune(1400, 3, (PruneProgress::Finished, 804)); } } diff --git a/crates/prune/src/segments/headers.rs b/crates/prune/src/segments/headers.rs index 2da191375899c..64263c88156d4 100644 --- a/crates/prune/src/segments/headers.rs +++ b/crates/prune/src/segments/headers.rs @@ -1,15 +1,24 @@ +use std::num::NonZeroUsize; + use crate::{ segments::{PruneInput, PruneOutput, PruneOutputCheckpoint, Segment}, PrunerError, }; use itertools::Itertools; -use reth_db::{database::Database, table::Table, tables}; -use reth_interfaces::RethResult; -use reth_primitives::{BlockNumber, PruneMode, PruneSegment}; +use reth_db::{ + cursor::{DbCursorRO, RangeWalker}, + database::Database, + tables, + transaction::DbTxMut, +}; + +use reth_primitives::{BlockNumber, PruneLimiter, PruneMode, PruneProgress, PruneSegment}; use reth_provider::DatabaseProviderRW; -use std::ops::RangeInclusive; use tracing::{instrument, trace}; +/// Number of header tables to prune in one step +const HEADER_TABLES_TO_PRUNE: usize = 3; + #[derive(Debug)] pub struct Headers { mode: PruneMode, @@ -36,90 +45,168 @@ impl Segment for Headers { provider: &DatabaseProviderRW, input: PruneInput, ) -> Result { - let block_range = match input.get_next_block_range() { - Some(range) => range, + let (block_range_start, block_range_end) = match input.get_next_block_range() { + Some(range) => (*range.start(), *range.end()), None => { trace!(target: "pruner", "No headers to prune"); return Ok(PruneOutput::done()) } }; - let delete_limit = input.delete_limit / 3; - if delete_limit == 0 { - // Nothing to do, `input.delete_limit` is less than 3, so we can't prune all - // headers-related tables up to the same height - return Ok(PruneOutput::not_done()) - } + let last_pruned_block = + if block_range_start == 0 { None } else { Some(block_range_start - 1) }; - let results = [ - self.prune_table::(provider, block_range.clone(), delete_limit)?, - self.prune_table::( - provider, - block_range.clone(), - delete_limit, - )?, - 
self.prune_table::(provider, block_range, delete_limit)?, - ]; - - if !results.iter().map(|(_, _, last_pruned_block)| last_pruned_block).all_equal() { - return Err(PrunerError::InconsistentData( - "All headers-related tables should be pruned up to the same height", - )) - } + let range = last_pruned_block.map_or(0, |block| block + 1)..=block_range_end; - let (done, pruned, last_pruned_block) = results.into_iter().fold( - (true, 0, 0), - |(total_done, total_pruned, _), (done, pruned, last_pruned_block)| { - (total_done && done, total_pruned + pruned, last_pruned_block) - }, + let mut headers_cursor = provider.tx_ref().cursor_write::()?; + let mut header_tds_cursor = + provider.tx_ref().cursor_write::()?; + let mut canonical_headers_cursor = + provider.tx_ref().cursor_write::()?; + + let mut limiter = input.limiter.floor_deleted_entries_limit_to_multiple_of( + NonZeroUsize::new(HEADER_TABLES_TO_PRUNE).unwrap(), + ); + + let tables_iter = HeaderTablesIter::new( + provider, + &mut limiter, + headers_cursor.walk_range(range.clone())?, + header_tds_cursor.walk_range(range.clone())?, + canonical_headers_cursor.walk_range(range)?, ); + let mut last_pruned_block: Option = None; + let mut pruned = 0; + for res in tables_iter { + let HeaderTablesIterItem { pruned_block, entries_pruned } = res?; + last_pruned_block = Some(pruned_block); + pruned += entries_pruned; + } + + let done = last_pruned_block.map_or(false, |block| block == block_range_end); + let progress = PruneProgress::new(done, &limiter); + Ok(PruneOutput { - done, + progress, pruned, checkpoint: Some(PruneOutputCheckpoint { - block_number: Some(last_pruned_block), + block_number: last_pruned_block, tx_number: None, }), }) } } -impl Headers { - /// Prune one headers-related table. - /// - /// Returns `done`, number of pruned rows and last pruned block number. 
- fn prune_table>( - &self, - provider: &DatabaseProviderRW, - range: RangeInclusive, - delete_limit: usize, - ) -> RethResult<(bool, usize, BlockNumber)> { - let mut last_pruned_block = *range.end(); - let (pruned, done) = provider.prune_table_with_range::( - range, - delete_limit, - |_| false, - |row| last_pruned_block = row.0, - )?; - trace!(target: "pruner", %pruned, %done, table = %T::TABLE, "Pruned headers"); - - Ok((done, pruned, last_pruned_block)) +type Walker<'a, DB, T> = RangeWalker<'a, T, <::TXMut as DbTxMut>::CursorMut>; + +#[allow(missing_debug_implementations)] +struct HeaderTablesIter<'a, DB> +where + DB: Database, +{ + provider: &'a DatabaseProviderRW, + limiter: &'a mut PruneLimiter, + headers_walker: Walker<'a, DB, tables::Headers>, + header_tds_walker: Walker<'a, DB, tables::HeaderTerminalDifficulties>, + canonical_headers_walker: Walker<'a, DB, tables::CanonicalHeaders>, +} + +struct HeaderTablesIterItem { + pruned_block: BlockNumber, + entries_pruned: usize, +} + +impl<'a, DB> HeaderTablesIter<'a, DB> +where + DB: Database, +{ + fn new( + provider: &'a DatabaseProviderRW, + limiter: &'a mut PruneLimiter, + headers_walker: Walker<'a, DB, tables::Headers>, + header_tds_walker: Walker<'a, DB, tables::HeaderTerminalDifficulties>, + canonical_headers_walker: Walker<'a, DB, tables::CanonicalHeaders>, + ) -> Self { + Self { provider, limiter, headers_walker, header_tds_walker, canonical_headers_walker } + } +} + +impl<'a, DB> Iterator for HeaderTablesIter<'a, DB> +where + DB: Database, +{ + type Item = Result; + fn next(&mut self) -> Option { + if self.limiter.is_limit_reached() { + return None + } + + let mut pruned_block_headers = None; + let mut pruned_block_td = None; + let mut pruned_block_canonical = None; + + if let Err(err) = self.provider.prune_table_with_range_step( + &mut self.headers_walker, + self.limiter, + &mut |_| false, + &mut |row| pruned_block_headers = Some(row.0), + ) { + return Some(Err(err.into())) + } + + if let Err(err) = 
self.provider.prune_table_with_range_step( + &mut self.header_tds_walker, + self.limiter, + &mut |_| false, + &mut |row| pruned_block_td = Some(row.0), + ) { + return Some(Err(err.into())) + } + + if let Err(err) = self.provider.prune_table_with_range_step( + &mut self.canonical_headers_walker, + self.limiter, + &mut |_| false, + &mut |row| pruned_block_canonical = Some(row.0), + ) { + return Some(Err(err.into())) + } + + if ![pruned_block_headers, pruned_block_td, pruned_block_canonical].iter().all_equal() { + return Some(Err(PrunerError::InconsistentData( + "All headers-related tables should be pruned up to the same height", + ))) + } + + pruned_block_headers.map(move |block| { + Ok(HeaderTablesIterItem { pruned_block: block, entries_pruned: HEADER_TABLES_TO_PRUNE }) + }) } } #[cfg(test)] mod tests { - use crate::segments::{Headers, PruneInput, PruneOutput, Segment}; use assert_matches::assert_matches; use reth_db::{tables, transaction::DbTx}; use reth_interfaces::test_utils::{generators, generators::random_header_range}; - use reth_primitives::{BlockNumber, PruneCheckpoint, PruneMode, PruneSegment, B256, U256}; + use reth_primitives::{ + BlockNumber, PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, + PruneSegment, B256, U256, + }; use reth_provider::PruneCheckpointReader; use reth_stages::test_utils::TestStageDB; + use tracing::trace; + + use crate::segments::{ + headers::HEADER_TABLES_TO_PRUNE, Headers, PruneInput, PruneOutput, PruneOutputCheckpoint, + Segment, + }; #[test] fn prune() { + reth_tracing::init_test_tracing(); + let db = TestStageDB::default(); let mut rng = generators::rng(); @@ -134,8 +221,10 @@ mod tests { assert_eq!(db.table::().unwrap().len(), headers.len()); assert_eq!(db.table::().unwrap().len(), headers.len()); - let test_prune = |to_block: BlockNumber, expected_result: (bool, usize)| { + let test_prune = |to_block: BlockNumber, expected_result: (PruneProgress, usize)| { let prune_mode = 
PruneMode::Before(to_block); + let segment = Headers::new(prune_mode); + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10); let input = PruneInput { previous_checkpoint: db .factory @@ -144,9 +233,8 @@ mod tests { .get_prune_checkpoint(PruneSegment::Headers) .unwrap(), to_block, - delete_limit: 10, + limiter: limiter.clone(), }; - let segment = Headers::new(prune_mode); let next_block_number_to_prune = db .factory @@ -159,11 +247,19 @@ mod tests { .unwrap_or_default(); let provider = db.factory.provider_rw().unwrap(); - let result = segment.prune(&provider, input).unwrap(); + let result = segment.prune(&provider, input.clone()).unwrap(); + limiter.increment_deleted_entries_count_by(result.pruned); + trace!(target: "pruner::test", + expected_prune_progress=?expected_result.0, + expected_pruned=?expected_result.1, + result=?result, + "PruneOutput" + ); + assert_matches!( result, - PruneOutput {done, pruned, checkpoint: Some(_)} - if (done, pruned) == expected_result + PruneOutput {progress, pruned, checkpoint: Some(_)} + if (progress, pruned) == expected_result ); segment .save_checkpoint( @@ -173,8 +269,11 @@ mod tests { .unwrap(); provider.commit().expect("commit"); - let last_pruned_block_number = to_block - .min(next_block_number_to_prune + input.delete_limit as BlockNumber / 3 - 1); + let last_pruned_block_number = to_block.min( + next_block_number_to_prune + + (input.limiter.deleted_entries_limit().unwrap() / HEADER_TABLES_TO_PRUNE - 1) + as u64, + ); assert_eq!( db.table::().unwrap().len(), @@ -198,24 +297,35 @@ mod tests { ); }; - test_prune(3, (false, 9)); - test_prune(3, (true, 3)); + test_prune( + 3, + (PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 9), + ); + test_prune(3, (PruneProgress::Finished, 3)); } #[test] fn prune_cannot_be_done() { let db = TestStageDB::default(); + let segment = Headers::new(PruneMode::Full); + let limiter = PruneLimiter::default().set_deleted_entries_limit(0); + let input = 
PruneInput { previous_checkpoint: None, to_block: 1, // Less than total number of tables for `Headers` segment - delete_limit: 2, + limiter, }; - let segment = Headers::new(PruneMode::Full); let provider = db.factory.provider_rw().unwrap(); let result = segment.prune(&provider, input).unwrap(); - assert_eq!(result, PruneOutput::not_done()); + assert_eq!( + result, + PruneOutput::not_done( + PruneInterruptReason::DeletedEntriesLimitReached, + Some(PruneOutputCheckpoint::default()) + ) + ); } } diff --git a/crates/prune/src/segments/mod.rs b/crates/prune/src/segments/mod.rs index 5e644e227e128..82b95bc07d08a 100644 --- a/crates/prune/src/segments/mod.rs +++ b/crates/prune/src/segments/mod.rs @@ -1,6 +1,6 @@ mod account_history; mod headers; -mod history; +pub(super) mod history; mod receipts; mod receipts_by_logs; mod sender_recovery; @@ -23,7 +23,10 @@ pub use transactions::Transactions; use crate::PrunerError; use reth_db::database::Database; use reth_interfaces::{provider::ProviderResult, RethResult}; -use reth_primitives::{BlockNumber, PruneCheckpoint, PruneMode, PruneSegment, TxNumber}; +use reth_primitives::{ + BlockNumber, PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, + PruneSegment, TxNumber, +}; use reth_provider::{BlockReader, DatabaseProviderRW, PruneCheckpointWriter}; use std::ops::RangeInclusive; use tracing::error; @@ -60,13 +63,14 @@ pub trait Segment: Debug + Send + Sync { } /// Segment pruning input, see [Segment::prune]. -#[derive(Debug, Clone, Copy)] +#[derive(Debug)] +#[cfg_attr(test, derive(Clone))] pub struct PruneInput { pub(crate) previous_checkpoint: Option, /// Target block up to which the pruning needs to be done, inclusive. pub(crate) to_block: BlockNumber, - /// Maximum entries to delete from the database. - pub(crate) delete_limit: usize, + /// Limits pruning of a segment. 
+ pub(crate) limiter: PruneLimiter, } impl PruneInput { @@ -125,14 +129,7 @@ impl PruneInput { /// /// To get the range end: use block `to_block`. pub(crate) fn get_next_block_range(&self) -> Option> { - let from_block = self - .previous_checkpoint - .and_then(|checkpoint| checkpoint.block_number) - // Checkpoint exists, prune from the next block after the highest pruned one - .map(|block_number| block_number + 1) - // No checkpoint exists, prune from genesis - .unwrap_or(0); - + let from_block = self.get_start_next_block_range(); let range = from_block..=self.to_block; if range.is_empty() { return None @@ -140,14 +137,25 @@ impl PruneInput { Some(range) } + + /// Returns the start of the next block range. + /// + /// 1. If checkpoint exists, use next block. + /// 2. If checkpoint doesn't exist, use block 0. + pub(crate) fn get_start_next_block_range(&self) -> u64 { + self.previous_checkpoint + .and_then(|checkpoint| checkpoint.block_number) + // Checkpoint exists, prune from the next block after the highest pruned one + .map(|block_number| block_number + 1) + // No checkpoint exists, prune from genesis + .unwrap_or(0) + } } /// Segment pruning output, see [Segment::prune]. #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct PruneOutput { - /// `true` if pruning has been completed up to the target block, and `false` if there's more - /// data to prune in further runs. - pub(crate) done: bool, + pub(crate) progress: PruneProgress, /// Number of entries pruned, i.e. deleted from the database. pub(crate) pruned: usize, /// Pruning checkpoint to save to database, if any. @@ -158,17 +166,20 @@ impl PruneOutput { /// Returns a [PruneOutput] with `done = true`, `pruned = 0` and `checkpoint = None`. /// Use when no pruning is needed. 
pub(crate) const fn done() -> Self { - Self { done: true, pruned: 0, checkpoint: None } + Self { progress: PruneProgress::Finished, pruned: 0, checkpoint: None } } /// Returns a [PruneOutput] with `done = false`, `pruned = 0` and `checkpoint = None`. /// Use when pruning is needed but cannot be done. - pub(crate) const fn not_done() -> Self { - Self { done: false, pruned: 0, checkpoint: None } + pub(crate) const fn not_done( + reason: PruneInterruptReason, + checkpoint: Option, + ) -> Self { + Self { progress: PruneProgress::HasMoreData(reason), pruned: 0, checkpoint } } } -#[derive(Debug, Clone, Copy, Eq, PartialEq)] +#[derive(Debug, Clone, Copy, Default, Eq, PartialEq)] pub(crate) struct PruneOutputCheckpoint { /// Highest pruned block number. If it's [None], the pruning for block `0` is not finished yet. pub(crate) block_number: Option, @@ -182,3 +193,9 @@ impl PruneOutputCheckpoint { PruneCheckpoint { block_number: self.block_number, tx_number: self.tx_number, prune_mode } } } + +impl From for PruneOutputCheckpoint { + fn from(checkpoint: PruneCheckpoint) -> Self { + Self { block_number: checkpoint.block_number, tx_number: checkpoint.tx_number } + } +} diff --git a/crates/prune/src/segments/receipts.rs b/crates/prune/src/segments/receipts.rs index d1ce5324e6af3..4ae58db3ecdff 100644 --- a/crates/prune/src/segments/receipts.rs +++ b/crates/prune/src/segments/receipts.rs @@ -4,7 +4,7 @@ use crate::{ }; use reth_db::{database::Database, tables}; use reth_interfaces::provider::ProviderResult; -use reth_primitives::{PruneCheckpoint, PruneMode, PruneSegment}; +use reth_primitives::{PruneCheckpoint, PruneMode, PruneProgress, PruneSegment}; use reth_provider::{DatabaseProviderRW, PruneCheckpointWriter, TransactionsProvider}; use tracing::{instrument, trace}; @@ -43,10 +43,12 @@ impl Segment for Receipts { }; let tx_range_end = *tx_range.end(); + let mut limiter = input.limiter; + let mut last_pruned_transaction = tx_range_end; let (pruned, done) = 
provider.prune_table_with_range::( tx_range, - input.delete_limit, + &mut limiter, |_| false, |row| last_pruned_transaction = row.0, )?; @@ -59,8 +61,10 @@ impl Segment for Receipts { // so we could finish pruning its receipts on the next run. .checked_sub(if done { 0 } else { 1 }); + let progress = PruneProgress::new(done, &limiter); + Ok(PruneOutput { - done, + progress, pruned, checkpoint: Some(PruneOutputCheckpoint { block_number: last_pruned_block, @@ -97,7 +101,10 @@ mod tests { generators, generators::{random_block_range, random_receipt}, }; - use reth_primitives::{BlockNumber, PruneCheckpoint, PruneMode, PruneSegment, TxNumber, B256}; + use reth_primitives::{ + BlockNumber, PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, + PruneSegment, TxNumber, B256, + }; use reth_provider::PruneCheckpointReader; use reth_stages::test_utils::{StorageKind, TestStageDB}; use std::ops::Sub; @@ -128,8 +135,10 @@ mod tests { db.table::().unwrap().len() ); - let test_prune = |to_block: BlockNumber, expected_result: (bool, usize)| { + let test_prune = |to_block: BlockNumber, expected_result: (PruneProgress, usize)| { let prune_mode = PruneMode::Before(to_block); + let segment = Receipts::new(prune_mode); + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10); let input = PruneInput { previous_checkpoint: db .factory @@ -138,9 +147,8 @@ mod tests { .get_prune_checkpoint(PruneSegment::Receipts) .unwrap(), to_block, - delete_limit: 10, + limiter: limiter.clone(), }; - let segment = Receipts::new(prune_mode); let next_tx_number_to_prune = db .factory @@ -157,16 +165,22 @@ mod tests { .take(to_block as usize) .map(|block| block.body.len()) .sum::() - .min(next_tx_number_to_prune as usize + input.delete_limit) + .min( + next_tx_number_to_prune as usize + + input.limiter.deleted_entries_limit().unwrap(), + ) .sub(1); let provider = db.factory.provider_rw().unwrap(); let result = segment.prune(&provider, input).unwrap(); + 
limiter.increment_deleted_entries_count_by(result.pruned); + assert_matches!( result, - PruneOutput {done, pruned, checkpoint: Some(_)} - if (done, pruned) == expected_result + PruneOutput {progress, pruned, checkpoint: Some(_)} + if (progress, pruned) == expected_result ); + segment .save_checkpoint( &provider, @@ -188,7 +202,7 @@ mod tests { }) .into_inner() .0 - .checked_sub(if result.done { 0 } else { 1 }); + .checked_sub(if result.progress.is_finished() { 0 } else { 1 }); assert_eq!( db.table::().unwrap().len(), @@ -208,8 +222,11 @@ mod tests { ); }; - test_prune(6, (false, 10)); - test_prune(6, (true, 2)); - test_prune(10, (true, 8)); + test_prune( + 6, + (PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 10), + ); + test_prune(6, (PruneProgress::Finished, 2)); + test_prune(10, (PruneProgress::Finished, 8)); } } diff --git a/crates/prune/src/segments/receipts_by_logs.rs b/crates/prune/src/segments/receipts_by_logs.rs index efcbfe7611052..c9bf4a799c3c0 100644 --- a/crates/prune/src/segments/receipts_by_logs.rs +++ b/crates/prune/src/segments/receipts_by_logs.rs @@ -4,7 +4,7 @@ use crate::{ }; use reth_db::{database::Database, tables}; use reth_primitives::{ - PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, ReceiptsLogPruneConfig, + PruneCheckpoint, PruneMode, PruneProgress, PrunePurpose, PruneSegment, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE, }; use reth_provider::{BlockReader, DatabaseProviderRW, PruneCheckpointWriter, TransactionsProvider}; @@ -113,8 +113,10 @@ impl Segment for ReceiptsByLogs { "Calculated block ranges and filtered addresses", ); - let mut limit = input.delete_limit; + let mut limiter = input.limiter; + let mut done = true; + let mut pruned = 0; let mut last_pruned_transaction = None; for (start_block, end_block, num_addresses) in block_ranges { let block_range = start_block..=end_block; @@ -138,7 +140,7 @@ impl Segment for ReceiptsByLogs { let deleted; (deleted, done) = 
provider.prune_table_with_range::( tx_range, - limit, + &mut limiter, |(tx_num, receipt)| { let skip = num_addresses > 0 && receipt.logs.iter().any(|log| { @@ -152,9 +154,10 @@ impl Segment for ReceiptsByLogs { }, |row| last_pruned_transaction = Some(row.0), )?; + trace!(target: "pruner", %deleted, %done, ?block_range, "Pruned receipts"); - limit = limit.saturating_sub(deleted); + pruned += deleted; // For accurate checkpoints we need to know that we have checked every transaction. // Example: we reached the end of the range, and the last receipt is supposed to skip @@ -172,7 +175,7 @@ impl Segment for ReceiptsByLogs { .saturating_sub(if done { 0 } else { 1 }), ); - if limit == 0 { + if limiter.is_limit_reached() { done &= end_block == to_block; break } @@ -203,7 +206,9 @@ impl Segment for ReceiptsByLogs { }, )?; - Ok(PruneOutput { done, pruned: input.delete_limit - limit, checkpoint: None }) + let progress = PruneProgress::new(done, &limiter); + + Ok(PruneOutput { progress, pruned, checkpoint: None }) } } @@ -216,13 +221,15 @@ mod tests { generators, generators::{random_block_range, random_eoa_account, random_log, random_receipt}, }; - use reth_primitives::{PruneMode, PruneSegment, ReceiptsLogPruneConfig, B256}; + use reth_primitives::{PruneLimiter, PruneMode, PruneSegment, ReceiptsLogPruneConfig, B256}; use reth_provider::{PruneCheckpointReader, TransactionsProvider}; use reth_stages::test_utils::{StorageKind, TestStageDB}; use std::collections::BTreeMap; #[test] fn prune_receipts_by_logs() { + reth_tracing::init_test_tracing(); + let db = TestStageDB::default(); let mut rng = generators::rng(); @@ -268,6 +275,8 @@ mod tests { let receipts_log_filter = ReceiptsLogPruneConfig(BTreeMap::from([(deposit_contract_addr, prune_mode)])); + let limiter = PruneLimiter::default().set_deleted_entries_limit(10); + let result = ReceiptsByLogs::new(receipts_log_filter).prune( &provider, PruneInput { @@ -278,7 +287,7 @@ mod tests { 
.get_prune_checkpoint(PruneSegment::ContractLogs) .unwrap(), to_block: tip, - delete_limit: 10, + limiter, }, ); provider.commit().expect("commit"); @@ -304,7 +313,7 @@ mod tests { ((pruned_tx + 1) - unprunable) as usize ); - output.done + output.progress.is_finished() }; while !run_prune() {} diff --git a/crates/prune/src/segments/sender_recovery.rs b/crates/prune/src/segments/sender_recovery.rs index 0684fbd37bd44..355d82f4576c7 100644 --- a/crates/prune/src/segments/sender_recovery.rs +++ b/crates/prune/src/segments/sender_recovery.rs @@ -3,7 +3,7 @@ use crate::{ PrunerError, }; use reth_db::{database::Database, tables}; -use reth_primitives::{PruneMode, PruneSegment}; +use reth_primitives::{PruneMode, PruneProgress, PruneSegment}; use reth_provider::{DatabaseProviderRW, TransactionsProvider}; use tracing::{instrument, trace}; @@ -42,10 +42,12 @@ impl Segment for SenderRecovery { }; let tx_range_end = *tx_range.end(); + let mut limiter = input.limiter; + let mut last_pruned_transaction = tx_range_end; let (pruned, done) = provider.prune_table_with_range::( tx_range, - input.delete_limit, + &mut limiter, |_| false, |row| last_pruned_transaction = row.0, )?; @@ -58,8 +60,10 @@ impl Segment for SenderRecovery { // previous, so we could finish pruning its transaction senders on the next run. 
.checked_sub(if done { 0 } else { 1 }); + let progress = PruneProgress::new(done, &limiter); + Ok(PruneOutput { - done, + progress, pruned, checkpoint: Some(PruneOutputCheckpoint { block_number: last_pruned_block, @@ -79,7 +83,10 @@ mod tests { }; use reth_db::tables; use reth_interfaces::test_utils::{generators, generators::random_block_range}; - use reth_primitives::{BlockNumber, PruneCheckpoint, PruneMode, PruneSegment, TxNumber, B256}; + use reth_primitives::{ + BlockNumber, PruneCheckpoint, PruneLimiter, PruneMode, PruneProgress, PruneSegment, + TxNumber, B256, + }; use reth_provider::PruneCheckpointReader; use reth_stages::test_utils::{StorageKind, TestStageDB}; use std::ops::Sub; @@ -113,8 +120,10 @@ mod tests { db.table::().unwrap().len() ); - let test_prune = |to_block: BlockNumber, expected_result: (bool, usize)| { + let test_prune = |to_block: BlockNumber, expected_result: (PruneProgress, usize)| { let prune_mode = PruneMode::Before(to_block); + let segment = SenderRecovery::new(prune_mode); + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10); let input = PruneInput { previous_checkpoint: db .factory @@ -123,9 +132,8 @@ mod tests { .get_prune_checkpoint(PruneSegment::SenderRecovery) .unwrap(), to_block, - delete_limit: 10, + limiter: limiter.clone(), }; - let segment = SenderRecovery::new(prune_mode); let next_tx_number_to_prune = db .factory @@ -142,7 +150,10 @@ mod tests { .take(to_block as usize) .map(|block| block.body.len()) .sum::() - .min(next_tx_number_to_prune as usize + input.delete_limit) + .min( + next_tx_number_to_prune as usize + + input.limiter.deleted_entries_limit().unwrap(), + ) .sub(1); let last_pruned_block_number = blocks @@ -161,11 +172,14 @@ mod tests { let provider = db.factory.provider_rw().unwrap(); let result = segment.prune(&provider, input).unwrap(); + limiter.increment_deleted_entries_count_by(result.pruned); + assert_matches!( result, - PruneOutput {done, pruned, checkpoint: Some(_)} - if (done, 
pruned) == expected_result + PruneOutput {progress, pruned, checkpoint: Some(_)} + if (progress, pruned) == expected_result ); + segment .save_checkpoint( &provider, @@ -174,8 +188,8 @@ mod tests { .unwrap(); provider.commit().expect("commit"); - let last_pruned_block_number = - last_pruned_block_number.checked_sub(if result.done { 0 } else { 1 }); + let last_pruned_block_number = last_pruned_block_number + .checked_sub(if result.progress.is_finished() { 0 } else { 1 }); assert_eq!( db.table::().unwrap().len(), @@ -195,8 +209,16 @@ mod tests { ); }; - test_prune(6, (false, 10)); - test_prune(6, (true, 2)); - test_prune(10, (true, 8)); + test_prune( + 6, + ( + PruneProgress::HasMoreData( + reth_primitives::PruneInterruptReason::DeletedEntriesLimitReached, + ), + 10, + ), + ); + test_prune(6, (PruneProgress::Finished, 2)); + test_prune(10, (PruneProgress::Finished, 8)); } } diff --git a/crates/prune/src/segments/storage_history.rs b/crates/prune/src/segments/storage_history.rs index eba8d1724242e..1ee8d20f4bf82 100644 --- a/crates/prune/src/segments/storage_history.rs +++ b/crates/prune/src/segments/storage_history.rs @@ -9,10 +9,16 @@ use reth_db::{ models::{storage_sharded_key::StorageShardedKey, BlockNumberAddress}, tables, }; -use reth_primitives::{PruneMode, PruneSegment}; +use reth_primitives::{PruneInterruptReason, PruneMode, PruneProgress, PruneSegment}; use reth_provider::DatabaseProviderRW; use tracing::{instrument, trace}; +/// Number of storage history tables to prune in one step +/// +/// Storage History consists of two tables: [tables::StorageChangeSets] and +/// [tables::StoragesHistory]. We want to prune them to the same block number. 
+const STORAGE_HISTORY_TABLES_TO_PRUNE: usize = 2; + #[derive(Debug)] pub struct StorageHistory { mode: PruneMode, @@ -48,11 +54,23 @@ impl Segment for StorageHistory { }; let range_end = *range.end(); + let mut limiter = if let Some(limit) = input.limiter.deleted_entries_limit() { + input.limiter.set_deleted_entries_limit(limit / STORAGE_HISTORY_TABLES_TO_PRUNE) + } else { + input.limiter + }; + if limiter.is_limit_reached() { + return Ok(PruneOutput::not_done( + PruneInterruptReason::new(&limiter), + input.previous_checkpoint.map(|checkpoint| checkpoint.into()), + )) + } + let mut last_changeset_pruned_block = None; let (pruned_changesets, done) = provider .prune_table_with_range::( BlockNumberAddress::range(range), - input.delete_limit / 2, + &mut limiter, |_| false, |row| last_changeset_pruned_block = Some(row.0.block_number()), )?; @@ -70,10 +88,12 @@ impl Segment for StorageHistory { |a, b| a.address == b.address && a.sharded_key.key == b.sharded_key.key, |key| StorageShardedKey::last(key.address, key.sharded_key.key), )?; - trace!(target: "pruner", %processed, deleted = %pruned_indices, %done, "Pruned storage history (history)" ); + trace!(target: "pruner", %processed, deleted = %pruned_indices, %done, "Pruned storage history (history)"); + + let progress = PruneProgress::new(done, &limiter); Ok(PruneOutput { - done, + progress, pruned: pruned_changesets + pruned_indices, checkpoint: Some(PruneOutputCheckpoint { block_number: Some(last_changeset_pruned_block), @@ -85,14 +105,19 @@ impl Segment for StorageHistory { #[cfg(test)] mod tests { - use crate::segments::{PruneInput, PruneOutput, Segment, StorageHistory}; + use crate::segments::{ + storage_history::STORAGE_HISTORY_TABLES_TO_PRUNE, PruneInput, PruneOutput, Segment, + StorageHistory, + }; use assert_matches::assert_matches; use reth_db::{tables, BlockNumberList}; use reth_interfaces::test_utils::{ generators, generators::{random_block_range, random_changeset_range, random_eoa_accounts}, }; - use 
reth_primitives::{BlockNumber, PruneCheckpoint, PruneMode, PruneSegment, B256}; + use reth_primitives::{ + BlockNumber, PruneCheckpoint, PruneLimiter, PruneMode, PruneProgress, PruneSegment, B256, + }; use reth_provider::PruneCheckpointReader; use reth_stages::test_utils::{StorageKind, TestStageDB}; use std::{collections::BTreeMap, ops::AddAssign}; @@ -133,8 +158,13 @@ mod tests { let original_shards = db.table::().unwrap(); - let test_prune = |to_block: BlockNumber, run: usize, expected_result: (bool, usize)| { + let test_prune = |to_block: BlockNumber, + run: usize, + expected_result: (PruneProgress, usize)| { let prune_mode = PruneMode::Before(to_block); + let deleted_entries_limit = 1000; + let mut limiter = + PruneLimiter::default().set_deleted_entries_limit(deleted_entries_limit); let input = PruneInput { previous_checkpoint: db .factory @@ -143,17 +173,20 @@ mod tests { .get_prune_checkpoint(PruneSegment::StorageHistory) .unwrap(), to_block, - delete_limit: 1000, + limiter: limiter.clone(), }; let segment = StorageHistory::new(prune_mode); let provider = db.factory.provider_rw().unwrap(); let result = segment.prune(&provider, input).unwrap(); + limiter.increment_deleted_entries_count_by(result.pruned); + assert_matches!( result, - PruneOutput {done, pruned, checkpoint: Some(_)} - if (done, pruned) == expected_result + PruneOutput {progress, pruned, checkpoint: Some(_)} + if (progress, pruned) == expected_result ); + segment .save_checkpoint( &provider, @@ -177,7 +210,8 @@ mod tests { .iter() .enumerate() .skip_while(|(i, (block_number, _, _))| { - *i < input.delete_limit / 2 * run && *block_number <= to_block as usize + *i < deleted_entries_limit / STORAGE_HISTORY_TABLES_TO_PRUNE * run && + *block_number <= to_block as usize }) .next() .map(|(i, _)| i) @@ -191,7 +225,7 @@ mod tests { let last_pruned_block_number = pruned_changesets .next() - .map(|(block_number, _, _)| if result.done { + .map(|(block_number, _, _)| if result.progress.is_finished() { 
*block_number } else { block_number.saturating_sub(1) @@ -241,8 +275,17 @@ mod tests { ); }; - test_prune(998, 1, (false, 500)); - test_prune(998, 2, (true, 499)); - test_prune(1200, 3, (true, 202)); + test_prune( + 998, + 1, + ( + PruneProgress::HasMoreData( + reth_primitives::PruneInterruptReason::DeletedEntriesLimitReached, + ), + 500, + ), + ); + test_prune(998, 2, (PruneProgress::Finished, 499)); + test_prune(1200, 3, (PruneProgress::Finished, 202)); } } diff --git a/crates/prune/src/segments/transaction_lookup.rs b/crates/prune/src/segments/transaction_lookup.rs index f0b24ef0a721d..3203552ba8432 100644 --- a/crates/prune/src/segments/transaction_lookup.rs +++ b/crates/prune/src/segments/transaction_lookup.rs @@ -4,7 +4,7 @@ use crate::{ }; use rayon::prelude::*; use reth_db::{database::Database, tables}; -use reth_primitives::{PruneMode, PruneSegment}; +use reth_primitives::{PruneMode, PruneProgress, PruneSegment}; use reth_provider::{DatabaseProviderRW, TransactionsProvider}; use tracing::{instrument, trace}; @@ -42,7 +42,10 @@ impl Segment for TransactionLookup { } } .into_inner(); - let tx_range = start..=end.min(start + input.delete_limit as u64 - 1); + let tx_range = start..= + Some(end) + .min(input.limiter.deleted_entries_limit_left().map(|left| start + left as u64 - 1)) + .unwrap(); let tx_range_end = *tx_range.end(); // Retrieve transactions in the range and calculate their hashes in parallel @@ -60,15 +63,18 @@ impl Segment for TransactionLookup { )) } + let mut limiter = input.limiter; + let mut last_pruned_transaction = None; - let (pruned, _) = provider.prune_table_with_iterator::( + let (pruned, done) = provider.prune_table_with_iterator::( hashes, - input.delete_limit, + &mut limiter, |row| { last_pruned_transaction = Some(last_pruned_transaction.unwrap_or(row.1).max(row.1)) }, )?; - let done = tx_range_end == end; + + let done = done && tx_range_end == end; trace!(target: "pruner", %pruned, %done, "Pruned transaction lookup"); let 
last_pruned_transaction = last_pruned_transaction.unwrap_or(tx_range_end); @@ -81,8 +87,10 @@ impl Segment for TransactionLookup { // run. .checked_sub(if done { 0 } else { 1 }); + let progress = PruneProgress::new(done, &limiter); + Ok(PruneOutput { - done, + progress, pruned, checkpoint: Some(PruneOutputCheckpoint { block_number: last_pruned_block, @@ -102,7 +110,10 @@ mod tests { }; use reth_db::tables; use reth_interfaces::test_utils::{generators, generators::random_block_range}; - use reth_primitives::{BlockNumber, PruneCheckpoint, PruneMode, PruneSegment, TxNumber, B256}; + use reth_primitives::{ + BlockNumber, PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, + PruneSegment, TxNumber, B256, + }; use reth_provider::PruneCheckpointReader; use reth_stages::test_utils::{StorageKind, TestStageDB}; use std::ops::Sub; @@ -132,8 +143,10 @@ mod tests { db.table::().unwrap().len() ); - let test_prune = |to_block: BlockNumber, expected_result: (bool, usize)| { + let test_prune = |to_block: BlockNumber, expected_result: (PruneProgress, usize)| { let prune_mode = PruneMode::Before(to_block); + let segment = TransactionLookup::new(prune_mode); + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10); let input = PruneInput { previous_checkpoint: db .factory @@ -142,9 +155,8 @@ mod tests { .get_prune_checkpoint(PruneSegment::TransactionLookup) .unwrap(), to_block, - delete_limit: 10, + limiter: limiter.clone(), }; - let segment = TransactionLookup::new(prune_mode); let next_tx_number_to_prune = db .factory @@ -161,7 +173,10 @@ mod tests { .take(to_block as usize) .map(|block| block.body.len()) .sum::() - .min(next_tx_number_to_prune as usize + input.delete_limit) + .min( + next_tx_number_to_prune as usize + + input.limiter.deleted_entries_limit().unwrap(), + ) .sub(1); let last_pruned_block_number = blocks @@ -180,11 +195,14 @@ mod tests { let provider = db.factory.provider_rw().unwrap(); let result = segment.prune(&provider, 
input).unwrap(); + limiter.increment_deleted_entries_count_by(result.pruned); + assert_matches!( result, - PruneOutput {done, pruned, checkpoint: Some(_)} - if (done, pruned) == expected_result + PruneOutput {progress, pruned, checkpoint: Some(_)} + if (progress, pruned) == expected_result ); + segment .save_checkpoint( &provider, @@ -193,8 +211,8 @@ mod tests { .unwrap(); provider.commit().expect("commit"); - let last_pruned_block_number = - last_pruned_block_number.checked_sub(if result.done { 0 } else { 1 }); + let last_pruned_block_number = last_pruned_block_number + .checked_sub(if result.progress.is_finished() { 0 } else { 1 }); assert_eq!( db.table::().unwrap().len(), @@ -214,8 +232,11 @@ mod tests { ); }; - test_prune(6, (false, 10)); - test_prune(6, (true, 2)); - test_prune(10, (true, 8)); + test_prune( + 6, + (PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 10), + ); + test_prune(6, (PruneProgress::Finished, 2)); + test_prune(10, (PruneProgress::Finished, 8)); } } diff --git a/crates/prune/src/segments/transactions.rs b/crates/prune/src/segments/transactions.rs index 3c2ac425536ba..ed53c0306ff92 100644 --- a/crates/prune/src/segments/transactions.rs +++ b/crates/prune/src/segments/transactions.rs @@ -3,7 +3,7 @@ use crate::{ PrunerError, }; use reth_db::{database::Database, tables}; -use reth_primitives::{PruneMode, PruneSegment}; +use reth_primitives::{PruneMode, PruneProgress, PruneSegment}; use reth_provider::{DatabaseProviderRW, TransactionsProvider}; use tracing::{instrument, trace}; @@ -41,10 +41,12 @@ impl Segment for Transactions { } }; + let mut limiter = input.limiter; + let mut last_pruned_transaction = *tx_range.end(); let (pruned, done) = provider.prune_table_with_range::( tx_range, - input.delete_limit, + &mut limiter, |_| false, |row| last_pruned_transaction = row.0, )?; @@ -57,8 +59,10 @@ impl Segment for Transactions { // so we could finish pruning its transactions on the next run. 
.checked_sub(if done { 0 } else { 1 }); + let progress = PruneProgress::new(done, &limiter); + Ok(PruneOutput { - done, + progress, pruned, checkpoint: Some(PruneOutputCheckpoint { block_number: last_pruned_block, @@ -78,7 +82,10 @@ mod tests { }; use reth_db::tables; use reth_interfaces::test_utils::{generators, generators::random_block_range}; - use reth_primitives::{BlockNumber, PruneCheckpoint, PruneMode, PruneSegment, TxNumber, B256}; + use reth_primitives::{ + BlockNumber, PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, + PruneSegment, TxNumber, B256, + }; use reth_provider::PruneCheckpointReader; use reth_stages::test_utils::{StorageKind, TestStageDB}; use std::ops::Sub; @@ -95,8 +102,10 @@ mod tests { assert_eq!(db.table::().unwrap().len(), transactions.len()); - let test_prune = |to_block: BlockNumber, expected_result: (bool, usize)| { + let test_prune = |to_block: BlockNumber, expected_result: (PruneProgress, usize)| { let prune_mode = PruneMode::Before(to_block); + let segment = Transactions::new(prune_mode); + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10); let input = PruneInput { previous_checkpoint: db .factory @@ -105,9 +114,8 @@ mod tests { .get_prune_checkpoint(PruneSegment::Transactions) .unwrap(), to_block, - delete_limit: 10, + limiter: limiter.clone(), }; - let segment = Transactions::new(prune_mode); let next_tx_number_to_prune = db .factory @@ -120,12 +128,15 @@ mod tests { .unwrap_or_default(); let provider = db.factory.provider_rw().unwrap(); - let result = segment.prune(&provider, input).unwrap(); + let result = segment.prune(&provider, input.clone()).unwrap(); + limiter.increment_deleted_entries_count_by(result.pruned); + assert_matches!( result, - PruneOutput {done, pruned, checkpoint: Some(_)} - if (done, pruned) == expected_result + PruneOutput {progress, pruned, checkpoint: Some(_)} + if (progress, pruned) == expected_result ); + segment .save_checkpoint( &provider, @@ -139,7 
+150,10 @@ mod tests { .take(to_block as usize) .map(|block| block.body.len()) .sum::() - .min(next_tx_number_to_prune as usize + input.delete_limit) + .min( + next_tx_number_to_prune as usize + + input.limiter.deleted_entries_limit().unwrap(), + ) .sub(1); let last_pruned_block_number = blocks @@ -155,7 +169,7 @@ mod tests { }) .into_inner() .0 - .checked_sub(if result.done { 0 } else { 1 }); + .checked_sub(if result.progress.is_finished() { 0 } else { 1 }); assert_eq!( db.table::().unwrap().len(), @@ -175,7 +189,10 @@ mod tests { ); }; - test_prune(6, (false, 10)); - test_prune(6, (true, 2)); + test_prune( + 6, + (PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 10), + ); + test_prune(6, (PruneProgress::Finished, 2)); } } diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 15f0d8a1db8dd..3d2466b4e097c 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -81,7 +81,7 @@ where ) -> EthResult> { if transactions.is_empty() { // nothing to trace - return Ok(Vec::new()); + return Ok(Vec::new()) } // replay all transactions of the block diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 67e80408690b1..6f1981de0f748 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -1097,7 +1097,7 @@ where if block.body.is_empty() { // nothing to trace - return Ok(Some(Vec::new())); + return Ok(Some(Vec::new())) } // replay all transactions of the block diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index df51b19b8c6ff..9ac9785e0989a 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -15,7 +15,7 @@ use crate::{ use itertools::{izip, Itertools}; use reth_db::{ common::KeyValue, - cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, 
+ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, RangeWalker}, database::Database, models::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, @@ -38,10 +38,10 @@ use reth_primitives::{ stage::{StageCheckpoint, StageId}, trie::Nibbles, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, - ChainInfo, ChainSpec, GotExpected, Head, Header, PruneCheckpoint, PruneModes, PruneSegment, - Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, - TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, - TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, + ChainInfo, ChainSpec, GotExpected, Head, Header, PruneCheckpoint, PruneLimiter, PruneModes, + PruneSegment, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, + StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, + TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, }; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, @@ -850,30 +850,38 @@ impl DatabaseProvider { pub fn prune_table_with_iterator( &self, keys: impl IntoIterator, - limit: usize, + limiter: &mut PruneLimiter, mut delete_callback: impl FnMut(TableRow), ) -> Result<(usize, bool), DatabaseError> { let mut cursor = self.tx.cursor_write::()?; - let mut deleted = 0; - let mut keys = keys.into_iter(); - if limit != 0 { - for key in &mut keys { - let row = cursor.seek_exact(key.clone())?; - if let Some(row) = row { - cursor.delete_current()?; - deleted += 1; - delete_callback(row); - } + let mut deleted_entries = 0; - if deleted == limit { - break - } + for key in &mut keys { + if limiter.is_limit_reached() { + debug!( + target: "providers::db", + ?limiter, + deleted_entries_limit = %limiter.is_deleted_entries_limit_reached(), + time_limit = %limiter.is_time_limit_reached(), + table = %T::NAME, + "Pruning limit 
reached" + ); + break + } + + let row = cursor.seek_exact(key)?; + if let Some(row) = row { + cursor.delete_current()?; + limiter.increment_deleted_entries_count(); + deleted_entries += 1; + delete_callback(row); } } - Ok((deleted, keys.next().is_none())) + let done = keys.next().is_none(); + Ok((deleted_entries, done)) } /// Prune the table for the specified key range. @@ -882,29 +890,72 @@ impl DatabaseProvider { pub fn prune_table_with_range( &self, keys: impl RangeBounds + Clone + Debug, - limit: usize, + limiter: &mut PruneLimiter, mut skip_filter: impl FnMut(&TableRow) -> bool, mut delete_callback: impl FnMut(TableRow), ) -> Result<(usize, bool), DatabaseError> { let mut cursor = self.tx.cursor_write::()?; let mut walker = cursor.walk_range(keys)?; - let mut deleted = 0; - if limit != 0 { - while let Some(row) = walker.next().transpose()? { - if !skip_filter(&row) { - walker.delete_current()?; - deleted += 1; - delete_callback(row); - } + let mut deleted_entries = 0; - if deleted == limit { - break - } + let done = loop { + // check for time out must be done in this scope since it's not done in + // `prune_table_with_range_step` + if limiter.is_limit_reached() { + debug!( + target: "providers::db", + ?limiter, + deleted_entries_limit = %limiter.is_deleted_entries_limit_reached(), + time_limit = %limiter.is_time_limit_reached(), + table = %T::NAME, + "Pruning limit reached" + ); + break false } + + let done = self.prune_table_with_range_step( + &mut walker, + limiter, + &mut skip_filter, + &mut delete_callback, + )?; + + if done { + break true + } else { + deleted_entries += 1; + } + }; + + Ok((deleted_entries, done)) + } + + /// Steps once with the given walker and prunes the entry in the table. + /// + /// Returns `true` if the walker is finished, `false` if it may have more data to prune. + /// + /// CAUTION: Pruner limits are not checked. 
This allows for a clean exit of a prune run that's + /// pruning different tables concurrently, by letting them step to the same height before + /// timing out. + pub fn prune_table_with_range_step( + &self, + walker: &mut RangeWalker<'_, T, ::CursorMut>, + limiter: &mut PruneLimiter, + skip_filter: &mut impl FnMut(&TableRow) -> bool, + delete_callback: &mut impl FnMut(TableRow), + ) -> Result { + let Some(res) = walker.next() else { return Ok(true) }; + + let row = res?; + + if !skip_filter(&row) { + walker.delete_current()?; + limiter.increment_deleted_entries_count(); + delete_callback(row); } - Ok((deleted, walker.next().transpose()?.is_none())) + Ok(false) } /// Load shard and remove it. If list is empty, last shard was full or From 933317c62a9411e71a74664d71dea964ad72dd13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5vard=20Anda=20Estensen?= Date: Wed, 3 Apr 2024 12:58:59 +0200 Subject: [PATCH 042/700] perf: replace BytesMut with alloy_rlp::encode (#7432) --- crates/primitives/src/transaction/mod.rs | 7 ++----- docs/crates/eth-wire.md | 5 +---- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 4266075832211..817271ae33933 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1702,7 +1702,6 @@ mod tests { }; use alloy_primitives::{address, b256, bytes}; use alloy_rlp::{Decodable, Encodable, Error as RlpError}; - use bytes::BytesMut; use reth_codecs::Compact; use secp256k1::{KeyPair, Secp256k1}; use std::str::FromStr; @@ -1963,10 +1962,8 @@ mod tests { let tx = TransactionSigned::decode(&mut &input[..]).unwrap(); let recovered = tx.into_ecrecovered().unwrap(); - let mut encoded = BytesMut::new(); - recovered.encode(&mut encoded); - - let decoded = TransactionSignedEcRecovered::decode(&mut &encoded[..]).unwrap(); + let decoded = + TransactionSignedEcRecovered::decode(&mut 
&alloy_rlp::encode(&recovered)[..]).unwrap(); assert_eq!(recovered, decoded) } diff --git a/docs/crates/eth-wire.md b/docs/crates/eth-wire.md index 41bc065f90894..534090eb519f5 100644 --- a/docs/crates/eth-wire.md +++ b/docs/crates/eth-wire.md @@ -352,10 +352,7 @@ pub struct UnauthedP2PStream { impl UnauthedP2PStream { // ... pub async fn handshake(mut self, hello: HelloMessage) -> Result<(P2PStream, HelloMessage), Error> { - let mut raw_hello_bytes = BytesMut::new(); - P2PMessage::Hello(hello.clone()).encode(&mut raw_hello_bytes); - - self.inner.send(raw_hello_bytes.into()).await?; + self.inner.send(alloy_rlp::encode(P2PMessage::Hello(hello.clone())).into()).await?; let first_message_bytes = tokio::time::timeout(HANDSHAKE_TIMEOUT, self.inner.next()).await; let their_hello = match P2PMessage::decode(&mut &first_message_bytes[..]) { From 05c9f61a38978eadd8ba587d3d952fd9eedcbd1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5vard=20Anda=20Estensen?= Date: Wed, 3 Apr 2024 13:20:26 +0200 Subject: [PATCH 043/700] docs: fix broken op spec link (#7433) --- crates/primitives/src/transaction/optimism.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/primitives/src/transaction/optimism.rs b/crates/primitives/src/transaction/optimism.rs index 9c6dc9e840bd5..0001347b50517 100644 --- a/crates/primitives/src/transaction/optimism.rs +++ b/crates/primitives/src/transaction/optimism.rs @@ -89,7 +89,7 @@ impl TxDeposit { } /// Encodes only the transaction's fields into the desired buffer, without a RLP header. 
- /// + /// pub(crate) fn encode_fields(&self, out: &mut dyn bytes::BufMut) { self.source_hash.encode(out); self.from.encode(out); From 3cafd586cf2be4984067a9074befbd1d5196a91b Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 3 Apr 2024 14:27:46 +0200 Subject: [PATCH 044/700] feat(discv5): config via cli (#7394) --- Cargo.lock | 3 + crates/net/discv4/src/lib.rs | 10 +++ crates/net/discv5/src/config.rs | 2 +- crates/net/discv5/src/enr.rs | 4 +- crates/net/discv5/src/lib.rs | 3 +- crates/net/network/Cargo.toml | 2 + crates/net/network/src/config.rs | 76 ++++++++++++++++++++++- crates/node-core/Cargo.toml | 3 + crates/node-core/src/args/network_args.rs | 32 +++++++++- crates/node-core/src/node_config.rs | 26 +++++++- crates/primitives/src/lib.rs | 2 +- crates/primitives/src/net.rs | 2 +- 12 files changed, 149 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7d74ed3ae3502..7f286483a5b24 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6380,6 +6380,7 @@ dependencies = [ "auto_impl", "criterion", "derive_more", + "discv5", "enr", "ethers-core", "ethers-providers", @@ -6395,6 +6396,7 @@ dependencies = [ "pprof", "rand 0.8.5", "reth-discv4", + "reth-discv5", "reth-dns-discovery", "reth-ecies", "reth-eth-wire", @@ -6515,6 +6517,7 @@ dependencies = [ "const-str", "derive_more", "dirs-next", + "discv5", "eyre", "futures", "humantime", diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index d144cdef86eca..b919c8277b9e8 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -94,6 +94,16 @@ pub const DEFAULT_DISCOVERY_ADDR: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); /// Note: the default TCP port is the same. pub const DEFAULT_DISCOVERY_PORT: u16 = 30303; +/// The default address for discv5 via UDP. +/// +/// Note: the default TCP address is the same. +pub const DEFAULT_DISCOVERY_V5_ADDR: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); + +/// The default port for discv5 via UDP. 
+/// +/// Default is port 9000. +pub const DEFAULT_DISCOVERY_V5_PORT: u16 = 9000; + /// The default address for discv4 via UDP: "0.0.0.0:30303" /// /// Note: The default TCP address is the same. diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 1fe25c482da97..fb47276ed9762 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -100,7 +100,7 @@ impl ConfigBuilder { } /// Adds boot nodes in the form a list of [`NodeRecord`]s, parsed enodes. - pub fn add_unsigned_boot_nodes(mut self, enodes: Vec) -> Self { + pub fn add_unsigned_boot_nodes(mut self, enodes: impl Iterator) -> Self { for node in enodes { if let Ok(node) = BootNode::from_unsigned(node) { self.bootstrap_nodes.insert(node); diff --git a/crates/net/discv5/src/enr.rs b/crates/net/discv5/src/enr.rs index 51323b040a956..5022337942483 100644 --- a/crates/net/discv5/src/enr.rs +++ b/crates/net/discv5/src/enr.rs @@ -60,7 +60,7 @@ impl From for Enr { mod tests { use alloy_rlp::Encodable; use discv5::enr::{CombinedKey, EnrKey}; - use reth_primitives::{pk_to_id, Hardfork, NodeRecord, MAINNET}; + use reth_primitives::{Hardfork, NodeRecord, MAINNET}; use super::*; @@ -105,7 +105,7 @@ mod tests { address: IP.parse().unwrap(), tcp_port: TCP_PORT, udp_port: UDP_PORT, - id: pk_to_id(&enr.public_key()) + id: pk2id(&enr.public_key()) }, node_record ) diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 360bed68bb99d..5f0809cb207d9 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -376,8 +376,7 @@ impl Discv5 { // node has been inserted into kbuckets - // `replaced` covers `reth_discv4::DiscoveryUpdate::Removed(_)` .. 
but we can't get - // a `PeerId` from a `NodeId` + // `replaced` partly covers `reth_discv4::DiscoveryUpdate::Removed(_)` self.metrics.discovered_peers.increment_kbucket_insertions(1); diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 7332fb9eaf66f..1df8067649b86 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -18,6 +18,7 @@ reth-primitives.workspace = true reth-net-common.workspace = true reth-network-api.workspace = true reth-discv4.workspace = true +reth-discv5.workspace = true reth-dns-discovery.workspace = true reth-eth-wire.workspace = true reth-ecies.workspace = true @@ -30,6 +31,7 @@ reth-tokio-util.workspace = true # ethereum enr = { workspace = true, features = ["rust-secp256k1"], optional = true } alloy-rlp.workspace = true +discv5.workspace = true # async/futures futures.workspace = true diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 39f12b2981f24..85812dee588b1 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -9,10 +9,12 @@ use crate::{ NetworkHandle, NetworkManager, }; use reth_discv4::{Discv4Config, Discv4ConfigBuilder, DEFAULT_DISCOVERY_ADDRESS}; +use reth_discv5::config::OPSTACK; use reth_dns_discovery::DnsDiscoveryConfig; use reth_eth_wire::{HelloMessage, HelloMessageWithProtocols, Status}; use reth_primitives::{ - mainnet_nodes, pk2id, sepolia_nodes, ChainSpec, ForkFilter, Head, NodeRecord, PeerId, MAINNET, + mainnet_nodes, pk2id, sepolia_nodes, Chain, ChainSpec, ForkFilter, Head, NamedChain, + NodeRecord, PeerId, MAINNET, }; use reth_provider::{BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; @@ -43,6 +45,8 @@ pub struct NetworkConfig { pub dns_discovery_config: Option, /// How to set up discovery. pub discovery_v4_config: Option, + /// How to set up discovery version 5. 
+ pub discovery_v5_config: Option, /// Address to use for discovery pub discovery_addr: SocketAddr, /// Address to listen for incoming connections @@ -110,6 +114,54 @@ impl NetworkConfig { self } + /// Sets the config to use for the discovery v5 protocol, with help of the + /// [`reth_discv5::ConfigBuilder`]. + /// ``` + /// use reth_network::NetworkConfigBuilder; + /// use secp256k1::{rand::thread_rng, SecretKey}; + /// + /// let sk = SecretKey::new(&mut thread_rng()); + /// let network_config = NetworkConfigBuilder::new(sk).build(()); + /// let fork_id = network_config.status.forkid; + /// let network_config = network_config + /// .discovery_v5_with_config_builder(|builder| builder.fork(b"eth", fork_id).build()); + /// ``` + + pub fn discovery_v5_with_config_builder( + self, + f: impl FnOnce(reth_discv5::ConfigBuilder) -> reth_discv5::Config, + ) -> Self { + let rlpx_port = self.listener_addr.port(); + let chain = self.chain_spec.chain; + let fork_id = self.status.forkid; + let boot_nodes = self.boot_nodes.clone(); + + let mut builder = + reth_discv5::Config::builder(rlpx_port).add_unsigned_boot_nodes(boot_nodes.into_iter()); // todo: store discv5 peers in separate file + + if chain.is_optimism() { + builder = builder.fork(OPSTACK, fork_id) + } + + if chain == Chain::optimism_mainnet() || chain == Chain::base_mainnet() { + builder = builder.add_optimism_mainnet_boot_nodes() + } else if chain == Chain::from_named(NamedChain::OptimismSepolia) || + chain == Chain::from_named(NamedChain::BaseSepolia) + { + builder = builder.add_optimism_sepolia_boot_nodes() + } + + self.set_discovery_v5(f(builder)) + } + + /// Sets the config to use for the discovery v5 protocol. + + pub fn set_discovery_v5(mut self, discv5_config: reth_discv5::Config) -> Self { + self.discovery_v5_config = Some(discv5_config); + self.discovery_addr = self.discovery_v5_config.as_ref().unwrap().discovery_socket(); + self + } + /// Sets the address for the incoming connection listener. 
pub fn set_listener_addr(mut self, listener_addr: SocketAddr) -> Self { self.listener_addr = listener_addr; @@ -142,8 +194,10 @@ pub struct NetworkConfigBuilder { secret_key: SecretKey, /// How to configure discovery over DNS. dns_discovery_config: Option, - /// How to set up discovery. + /// How to set up discovery version 4. discovery_v4_builder: Option, + /// Whether to enable discovery version 5. Disabled by default. + enable_discovery_v5: bool, /// All boot nodes to start network discovery with. boot_nodes: HashSet, /// Address to use for discovery @@ -198,6 +252,7 @@ impl NetworkConfigBuilder { secret_key, dns_discovery_config: Some(Default::default()), discovery_v4_builder: Some(Default::default()), + enable_discovery_v5: false, boot_nodes: Default::default(), discovery_addr: None, listener_addr: None, @@ -326,11 +381,18 @@ impl NetworkConfigBuilder { } /// Sets the discv4 config to use. + // pub fn discovery(mut self, builder: Discv4ConfigBuilder) -> Self { self.discovery_v4_builder = Some(builder); self } + /// Allows discv5 discovery. + pub fn discovery_v5(mut self) -> Self { + self.enable_discovery_v5 = true; + self + } + /// Sets the dns discovery config to use. pub fn dns_discovery(mut self, config: DnsDiscoveryConfig) -> Self { self.dns_discovery_config = Some(config); @@ -379,6 +441,12 @@ impl NetworkConfigBuilder { self } + /// Enable the Discv5 discovery. + pub fn enable_discv5_discovery(mut self) -> Self { + self.enable_discovery_v5 = true; + self + } + /// Disable the DNS discovery if the given condition is true. 
pub fn disable_dns_discovery_if(self, disable: bool) -> Self { if disable { @@ -442,6 +510,7 @@ impl NetworkConfigBuilder { secret_key, mut dns_discovery_config, discovery_v4_builder, + enable_discovery_v5: _, boot_nodes, discovery_addr, listener_addr, @@ -497,6 +566,7 @@ impl NetworkConfigBuilder { boot_nodes, dns_discovery_config, discovery_v4_config: discovery_v4_builder.map(|builder| builder.build()), + discovery_v5_config: None, discovery_addr: discovery_addr.unwrap_or(DEFAULT_DISCOVERY_ADDRESS), listener_addr, peers_config: peers_config.unwrap_or_default(), @@ -546,7 +616,7 @@ mod tests { use super::*; use rand::thread_rng; use reth_dns_discovery::tree::LinkEntry; - use reth_primitives::{Chain, ForkHash}; + use reth_primitives::ForkHash; use reth_provider::test_utils::NoopProvider; use std::collections::BTreeMap; diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index 022a824f54cb5..7dfaa9c4412a0 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -41,6 +41,9 @@ reth-prune.workspace = true reth-blockchain-tree.workspace = true reth-static-file.workspace = true +# ethereum +discv5.workspace = true + # async tokio.workspace = true diff --git a/crates/node-core/src/args/network_args.rs b/crates/node-core/src/args/network_args.rs index 23670be8c3c1e..88f448ee99286 100644 --- a/crates/node-core/src/args/network_args.rs +++ b/crates/node-core/src/args/network_args.rs @@ -3,7 +3,10 @@ use crate::version::P2P_CLIENT_VERSION; use clap::Args; use reth_config::Config; -use reth_discv4::{DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT}; +use reth_discv4::{ + DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT, DEFAULT_DISCOVERY_V5_ADDR, + DEFAULT_DISCOVERY_V5_PORT, +}; use reth_net_nat::NatResolver; use reth_network::{ transactions::{ @@ -211,13 +214,27 @@ pub struct DiscoveryArgs { #[arg(long, conflicts_with = "disable_discovery")] pub disable_discv4_discovery: bool, - /// The UDP address to use for P2P discovery/networking + /// 
Enable Discv5 discovery. + #[arg(long, conflicts_with = "disable_discovery")] + pub enable_discv5_discovery: bool, + + /// The UDP address to use for devp2p peer discovery version 4. #[arg(id = "discovery.addr", long = "discovery.addr", value_name = "DISCOVERY_ADDR", default_value_t = DEFAULT_DISCOVERY_ADDR)] pub addr: IpAddr, - /// The UDP port to use for P2P discovery/networking + /// The UDP port to use for devp2p peer discovery version 4. #[arg(id = "discovery.port", long = "discovery.port", value_name = "DISCOVERY_PORT", default_value_t = DEFAULT_DISCOVERY_PORT)] pub port: u16, + + /// The UDP address to use for devp2p peer discovery version 5. + #[arg(id = "discovery.v5.addr", long = "discovery.v5.addr", value_name = "DISCOVERY_V5_ADDR", + default_value_t = DEFAULT_DISCOVERY_V5_ADDR)] + pub discv5_addr: IpAddr, + + /// The UDP port to use for devp2p peer discovery version 5. + #[arg(id = "discovery.v5.port", long = "discovery.v5.port", value_name = "DISCOVERY_V5_PORT", + default_value_t = DEFAULT_DISCOVERY_V5_PORT)] + pub discv5_port: u16, } impl DiscoveryArgs { @@ -233,6 +250,11 @@ impl DiscoveryArgs { if self.disable_discovery || self.disable_discv4_discovery { network_config_builder = network_config_builder.disable_discv4_discovery(); } + + if !self.disable_discovery && (self.enable_discv5_discovery || cfg!(feature = "optimism")) { + network_config_builder = network_config_builder.enable_discv5_discovery(); + } + network_config_builder } @@ -250,8 +272,11 @@ impl Default for DiscoveryArgs { disable_discovery: false, disable_dns_discovery: false, disable_discv4_discovery: false, + enable_discv5_discovery: cfg!(feature = "optimism"), addr: DEFAULT_DISCOVERY_ADDR, port: DEFAULT_DISCOVERY_PORT, + discv5_addr: DEFAULT_DISCOVERY_V5_ADDR, + discv5_port: DEFAULT_DISCOVERY_V5_PORT, } } } @@ -315,6 +340,7 @@ mod tests { ); } + #[cfg(not(feature = "optimism"))] #[test] fn network_args_default_sanity_test() { let default_args = NetworkArgs::default(); diff --git 
a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 4078c29d977c9..aff0ee4c32b31 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -2,14 +2,15 @@ use crate::{ args::{ - get_secret_key, DatabaseArgs, DebugArgs, DevArgs, NetworkArgs, PayloadBuilderArgs, - PruningArgs, RpcServerArgs, TxPoolArgs, + get_secret_key, DatabaseArgs, DebugArgs, DevArgs, DiscoveryArgs, NetworkArgs, + PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, }, cli::config::RethTransactionPoolConfig, dirs::{ChainPath, DataDirPath}, metrics::prometheus_exporter, utils::{get_single_header, write_peers_to_file}, }; +use discv5::ListenConfig; use metrics_exporter_prometheus::PrometheusHandle; use once_cell::sync::Lazy; use reth_auto_seal_consensus::{AutoSealConsensus, MiningMode}; @@ -162,6 +163,7 @@ pub struct NodeConfig { /// /// Changes to the following port numbers: /// - DISCOVERY_PORT: default + `instance` - 1 + /// - DISCOVERY_V5_PORT: default + `instance` - 1 /// - AUTH_PORT: default + `instance` * 100 - 100 /// - HTTP_RPC_PORT: default - `instance` + 1 /// - WS_RPC_PORT: default + `instance` * 2 - 2 @@ -768,7 +770,25 @@ impl NodeConfig { self.network.discovery.port + self.instance - 1, )); - cfg_builder.build(client) + let config = cfg_builder.build(client); + + if !self.network.discovery.enable_discv5_discovery { + return config + } + // work around since discv5 config builder can't be integrated into network config builder + // due to unsatisfied trait bounds + config.discovery_v5_with_config_builder(|builder| { + let DiscoveryArgs { discv5_addr, discv5_port, .. } = self.network.discovery; + builder + .discv5_config( + discv5::ConfigBuilder::new(ListenConfig::from(Into::::into(( + discv5_addr, + discv5_port + self.instance - 1, + )))) + .build(), + ) + .build() + }) } /// Builds the [Pipeline] with the given [ProviderFactory] and downloaders. 
diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 8210e1a8cf360..0f858c914d488 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -71,7 +71,7 @@ pub use header::{Header, HeaderValidationError, HeadersDirection, SealedHeader}; pub use integer_list::IntegerList; pub use log::{logs_bloom, Log}; pub use net::{ - goerli_nodes, holesky_nodes, mainnet_nodes, parse_nodes, pk_to_id, sepolia_nodes, NodeRecord, + goerli_nodes, holesky_nodes, mainnet_nodes, parse_nodes, sepolia_nodes, NodeRecord, NodeRecordParseError, GOERLI_BOOTNODES, HOLESKY_BOOTNODES, MAINNET_BOOTNODES, SEPOLIA_BOOTNODES, }; diff --git a/crates/primitives/src/net.rs b/crates/primitives/src/net.rs index 2e0b77d5099f9..7e582d0812e0a 100644 --- a/crates/primitives/src/net.rs +++ b/crates/primitives/src/net.rs @@ -1,4 +1,4 @@ -pub use reth_rpc_types::{pk_to_id, NodeRecord, NodeRecordParseError}; +pub use reth_rpc_types::{NodeRecord, NodeRecordParseError}; // From 7cb05d58294531bbb564554e0f381484f1790a14 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 3 Apr 2024 14:35:13 +0200 Subject: [PATCH 045/700] Convert panic to debug log (#7436) Co-authored-by: Alexey Shekhirin --- crates/net/network/src/transactions/fetcher.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 781ba0ac5d300..ab6547aa6162a 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -668,7 +668,7 @@ impl TransactionFetcher { { for hash in new_announced_hashes.iter() { if self.hashes_pending_fetch.contains(hash) { - panic!("`%new_announced_hashes` should been taken out of buffer before packing in a request, breaks invariant `@hashes_pending_fetch` and + debug!(target: "net::tx", "`%new_announced_hashes` should been taken out of buffer before packing in a request, breaks invariant `@hashes_pending_fetch` and 
`@inflight_requests`, `@hashes_fetch_inflight_and_pending_fetch` for `%new_announced_hashes`: {:?}", new_announced_hashes.iter().map(|hash| From e6d0d7e00b3a6469b6983f4f4f3f9c9e85637bcd Mon Sep 17 00:00:00 2001 From: Colerar <62297254+Colerar@users.noreply.github.com> Date: Wed, 3 Apr 2024 22:42:22 +0800 Subject: [PATCH 046/700] feat: add dev suffix to version if not on a git tag (#7337) --- crates/node-core/build.rs | 16 ++++++++++++++-- crates/node-core/src/version.rs | 10 ++++++++-- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/crates/node-core/build.rs b/crates/node-core/build.rs index f24f9b22db619..043505cdfb671 100644 --- a/crates/node-core/build.rs +++ b/crates/node-core/build.rs @@ -1,15 +1,27 @@ #![allow(missing_docs)] -use std::error::Error; +use std::{env, error::Error}; use vergen::EmitBuilder; fn main() -> Result<(), Box> { // Emit the instructions EmitBuilder::builder() + .git_describe(false, true, None) + .git_dirty(true) .git_sha(true) .build_timestamp() .cargo_features() .cargo_target_triple() - .emit()?; + .emit_and_set()?; + + let sha = env::var("VERGEN_GIT_SHA")?; + + let is_dirty = env::var("VERGEN_GIT_DIRTY")? 
== "true"; + // > git describe --always --tags + // if not on a tag: v0.2.0-beta.3-82-g1939939b + // if on a tag: v0.2.0-beta.3 + let not_on_tag = env::var("VERGEN_GIT_DESCRIBE")?.ends_with(&format!("-g{sha}")); + let is_dev = is_dirty || not_on_tag; + println!("cargo:rustc-env=RETH_VERSION_SUFFIX={}", if is_dev { "-dev" } else { "" }); Ok(()) } diff --git a/crates/node-core/src/version.rs b/crates/node-core/src/version.rs index 29bf87c99e7db..868fa933ea4c8 100644 --- a/crates/node-core/src/version.rs +++ b/crates/node-core/src/version.rs @@ -21,8 +21,13 @@ pub const VERGEN_BUILD_TIMESTAMP: &str = env!("VERGEN_BUILD_TIMESTAMP"); /// ```text /// 0.1.0 (defa64b2) /// ``` -pub const SHORT_VERSION: &str = - concat!(env!("CARGO_PKG_VERSION"), " (", env!("VERGEN_GIT_SHA"), ")"); +pub const SHORT_VERSION: &str = concat!( + env!("CARGO_PKG_VERSION"), + env!("RETH_VERSION_SUFFIX"), + " (", + env!("VERGEN_GIT_SHA"), + ")" +); /// The long version information for reth. /// @@ -44,6 +49,7 @@ pub const SHORT_VERSION: &str = pub const LONG_VERSION: &str = const_str::concat!( "Version: ", env!("CARGO_PKG_VERSION"), + env!("RETH_VERSION_SUFFIX"), "\n", "Commit SHA: ", env!("VERGEN_GIT_SHA"), From 50fc3aa24f38487d2469f50274828decc8eb81b8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 3 Apr 2024 17:57:46 +0200 Subject: [PATCH 047/700] feat: add helper functions to obtain a fully configured evm (#7439) --- crates/evm/src/lib.rs | 39 +++++++++++++++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 4 deletions(-) diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 2bf1f72a1cc84..47700260efb88 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -10,7 +10,7 @@ use reth_primitives::{revm::env::fill_block_env, Address, ChainSpec, Header, Transaction, U256}; use revm::{Database, Evm, EvmBuilder}; -use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId, TxEnv}; +use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, 
SpecId, TxEnv}; /// Trait for configuring the EVM for executing full blocks. pub trait ConfigureEvm: ConfigureEvmEnv { @@ -23,11 +23,42 @@ pub trait ConfigureEvm: ConfigureEvmEnv { EvmBuilder::default().with_db(db).build() } + /// Returns a new EVM with the given database configured with the given environment settings, + /// including the spec id. + /// + /// This will preserve any handler modifications + fn evm_with_env<'a, DB: Database + 'a>( + &self, + db: DB, + env: EnvWithHandlerCfg, + ) -> Evm<'a, (), DB> { + let mut evm = self.evm(db); + evm.modify_spec_id(env.spec_id()); + evm.context.evm.env = env.env; + evm + } + + /// Returns a new EVM with the given database configured with the given environment settings, + /// including the spec id. + /// + /// This will preserve any handler modifications + fn evm_with_env_and_inspector<'a, DB: Database + 'a, I>( + &self, + db: DB, + env: EnvWithHandlerCfg, + inspector: I, + ) -> Evm<'a, I, DB> { + let mut evm = self.evm_with_inspector(db, inspector); + evm.modify_spec_id(env.spec_id()); + evm.context.evm.env = env.env; + evm + } + /// Returns a new EVM with the given inspector. /// - /// This does not automatically configure the EVM with [ConfigureEvmEnv] methods. It is up to - /// the caller to call an appropriate method to fill the transaction and block environment - /// before executing any transactions using the provided EVM. + /// Caution: This does not automatically configure the EVM with [ConfigureEvmEnv] methods. It is + /// up to the caller to call an appropriate method to fill the transaction and block + /// environment before executing any transactions using the provided EVM. 
fn evm_with_inspector<'a, DB: Database + 'a, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> { EvmBuilder::default().with_db(db).with_external_context(inspector).build() } From 1f39b619d423a8f05eac2836963130a6f42fe37f Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 3 Apr 2024 17:58:41 +0200 Subject: [PATCH 048/700] fix: use provider for header range on fn block_range (#7429) --- .../src/providers/database/provider.rs | 70 ++++++++++--------- 1 file changed, 36 insertions(+), 34 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 9ac9785e0989a..03ecee0d85cb1 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1416,51 +1416,53 @@ impl BlockReader for DatabaseProvider { let len = range.end().saturating_sub(*range.start()) as usize; let mut blocks = Vec::with_capacity(len); - let mut headers_cursor = self.tx.cursor_read::()?; + let headers = self.headers_range(range)?; let mut ommers_cursor = self.tx.cursor_read::()?; let mut withdrawals_cursor = self.tx.cursor_read::()?; let mut block_body_cursor = self.tx.cursor_read::()?; let mut tx_cursor = self.tx.cursor_read::()?; - for num in range { - if let Some((_, header)) = headers_cursor.seek_exact(num)? { - // If the body indices are not found, this means that the transactions either do - // not exist in the database yet, or they do exit but are - // not indexed. If they exist but are not indexed, we don't - // have enough information to return the block anyways, so - // we skip the block. - if let Some((_, block_body_indices)) = block_body_cursor.seek_exact(num)? 
{ - let tx_range = block_body_indices.tx_num_range(); - let body = if tx_range.is_empty() { - Vec::new() + for header in headers { + // If the body indices are not found, this means that the transactions either do + // not exist in the database yet, or they do exit but are + // not indexed. If they exist but are not indexed, we don't + // have enough information to return the block anyways, so + // we skip the block. + if let Some((_, block_body_indices)) = block_body_cursor.seek_exact(header.number)? { + let tx_range = block_body_indices.tx_num_range(); + let body = if tx_range.is_empty() { + Vec::new() + } else { + self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)? + .into_iter() + .map(Into::into) + .collect() + }; + + // If we are past shanghai, then all blocks should have a withdrawal list, + // even if empty + let withdrawals = + if self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp) { + Some( + withdrawals_cursor + .seek_exact(header.number)? + .map(|(_, w)| w.withdrawals) + .unwrap_or_default(), + ) } else { - self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)? - .into_iter() - .map(Into::into) - .collect() + None }; - - // If we are past shanghai, then all blocks should have a withdrawal list, - // even if empty - let withdrawals = - if self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp) { - Some( - withdrawals_cursor - .seek_exact(num)? - .map(|(_, w)| w.withdrawals) - .unwrap_or_default(), - ) - } else { - None - }; - let ommers = if self.chain_spec.final_paris_total_difficulty(num).is_some() { + let ommers = + if self.chain_spec.final_paris_total_difficulty(header.number).is_some() { Vec::new() } else { - ommers_cursor.seek_exact(num)?.map(|(_, o)| o.ommers).unwrap_or_default() + ommers_cursor + .seek_exact(header.number)? 
+ .map(|(_, o)| o.ommers) + .unwrap_or_default() }; - blocks.push(Block { header, body, ommers, withdrawals }); - } + blocks.push(Block { header, body, ommers, withdrawals }); } } Ok(blocks) From 54f75cdcc82125a97ffd82952c2a8bc8ed324b48 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 3 Apr 2024 17:11:05 +0100 Subject: [PATCH 049/700] release: v0.2.0-beta.5 (#7442) --- Cargo.lock | 122 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 62 insertions(+), 62 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f286483a5b24..c142385b984a9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2307,7 +2307,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-rlp", "rayon", @@ -5747,7 +5747,7 @@ dependencies = [ [[package]] name = "reth" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-chains", "alloy-rlp", @@ -5820,7 +5820,7 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "futures-util", "reth-beacon-consensus", @@ -5838,7 +5838,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-rlp", "futures-core", @@ -5860,7 +5860,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "assert_matches", "futures", @@ -5896,7 +5896,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus-core" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "reth-consensus-common", "reth-interfaces", @@ -5905,7 +5905,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "aquamarine", "assert_matches", @@ -5929,7 +5929,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "0.2.0-beta.4" 
+version = "0.2.0-beta.5" dependencies = [ "alloy-primitives", "arbitrary", @@ -5944,7 +5944,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "convert_case 0.6.0", "proc-macro2", @@ -5955,7 +5955,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "confy", "humantime-serde", @@ -5971,7 +5971,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "mockall", "reth-interfaces", @@ -5981,7 +5981,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "arbitrary", "assert_matches", @@ -6018,7 +6018,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-rlp", "discv5", @@ -6041,7 +6041,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-rlp", "derive_more", @@ -6065,7 +6065,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-chains", "alloy-rlp", @@ -6089,7 +6089,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-rlp", "assert_matches", @@ -6117,7 +6117,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "aes 0.8.4", "alloy-rlp", @@ -6148,7 +6148,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-chains", "alloy-rlp", @@ -6183,7 +6183,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-chains", "alloy-rlp", @@ -6207,7 
+6207,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-chains", "alloy-primitives", @@ -6222,7 +6222,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "reth-basic-payload-builder", "reth-payload-builder", @@ -6236,7 +6236,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "rayon", "reth-db", @@ -6246,7 +6246,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "reth-primitives", "revm", @@ -6255,7 +6255,7 @@ dependencies = [ [[package]] name = "reth-interfaces" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "auto_impl", "clap", @@ -6274,7 +6274,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "async-trait", "bytes", @@ -6293,7 +6293,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "bitflags 2.5.0", "byteorder", @@ -6315,7 +6315,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "bindgen", "cc", @@ -6324,7 +6324,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "futures", "metrics", @@ -6335,7 +6335,7 @@ dependencies = [ [[package]] name = "reth-metrics-derive" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "metrics", "once_cell", @@ -6349,7 +6349,7 @@ dependencies = [ [[package]] name = "reth-net-common" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "pin-project", "reth-primitives", @@ -6358,7 +6358,7 @@ dependencies = [ [[package]] name = "reth-net-nat" -version = "0.2.0-beta.4" +version = 
"0.2.0-beta.5" dependencies = [ "igd-next", "pin-project-lite", @@ -6372,7 +6372,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-node-bindings", "alloy-rlp", @@ -6428,7 +6428,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-chains", "reth-discv4", @@ -6442,7 +6442,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "anyhow", "bincode", @@ -6463,7 +6463,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "reth-db", "reth-evm", @@ -6476,7 +6476,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "aquamarine", "confy", @@ -6510,7 +6510,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "assert_matches", "clap", @@ -6575,7 +6575,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "eyre", "futures", @@ -6596,7 +6596,7 @@ dependencies = [ [[package]] name = "reth-node-optimism" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "clap", "eyre", @@ -6622,7 +6622,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-rlp", "reth-basic-payload-builder", @@ -6642,7 +6642,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-rlp", "futures-util", @@ -6667,7 +6667,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "reth-primitives", "reth-rpc-types", @@ 
-6676,7 +6676,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-chains", "alloy-eips", @@ -6729,7 +6729,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-rlp", "assert_matches", @@ -6759,7 +6759,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "assert_matches", "derive_more", @@ -6782,7 +6782,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "reth-consensus-common", "reth-interfaces", @@ -6797,7 +6797,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-dyn-abi", "alloy-primitives", @@ -6854,7 +6854,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "jsonrpsee", "reth-node-api", @@ -6865,7 +6865,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "futures", "jsonrpsee", @@ -6879,7 +6879,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "hyper", "jsonrpsee", @@ -6914,7 +6914,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-rlp", "assert_matches", @@ -6943,7 +6943,7 @@ dependencies = [ [[package]] name = "reth-rpc-types" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -6971,7 +6971,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-rlp", "alloy-rpc-types", @@ -6982,7 +6982,7 @@ dependencies = [ [[package]] name = 
"reth-stages" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-rlp", "aquamarine", @@ -7024,7 +7024,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "assert_matches", "clap", @@ -7044,7 +7044,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "dyn-clone", "futures-util", @@ -7060,7 +7060,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "tokio", "tokio-stream", @@ -7068,7 +7068,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "clap", "eyre", @@ -7082,7 +7082,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-rlp", "aquamarine", @@ -7120,7 +7120,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-chains", "alloy-rlp", @@ -7147,7 +7147,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" dependencies = [ "alloy-rlp", "criterion", diff --git a/Cargo.toml b/Cargo.toml index 52251dff289da..5393b2a5f8df9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -150,7 +150,7 @@ unnecessary_struct_initialization = "allow" use_self = "allow" [workspace.package] -version = "0.2.0-beta.4" +version = "0.2.0-beta.5" edition = "2021" rust-version = "1.76" license = "MIT OR Apache-2.0" From 0ac29bf214ca37cac94495ddac307f2b3d172e18 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 3 Apr 2024 18:32:06 +0100 Subject: [PATCH 050/700] chore: change README.md and release images to beta (#7447) --- .github/workflows/release.yml | 2 +- README.md | 2 +- assets/reth-beta.png | Bin 0 -> 938648 bytes 3 files changed, 2 insertions(+), 2 
deletions(-) create mode 100644 assets/reth-beta.png diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 1ee5d9706f346..e16e2c1c3e9fe 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -126,7 +126,7 @@ jobs: # https://github.com/openethereum/openethereum/blob/6c2d392d867b058ff867c4373e40850ca3f96969/.github/workflows/build.yml run: | body=$(cat <<- "ENDBODY" - ![image](https://github.com/paradigmxyz/reth/assets/17802178/b906d475-a594-4eb9-80cc-222eb441fd08) + ![image](https://github.com/paradigmxyz/reth/assets/17802178/d02595cf-7130-418f-81a3-ec91f614abf5) ## Testing Checklist (DELETE ME) diff --git a/README.md b/README.md index e3d7621d6119e..3f5e434eee88f 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ **Modular, contributor-friendly and blazing-fast implementation of the Ethereum protocol** -![](./assets/reth-alpha.png) +![](./assets/reth-beta.png) **[Install](https://paradigmxyz.github.io/reth/installation/installation.html)** | [User Book](https://paradigmxyz.github.io/reth) diff --git a/assets/reth-beta.png b/assets/reth-beta.png new file mode 100644 index 0000000000000000000000000000000000000000..61b6c24460a0cb99cb34725403e018eb485bd43a GIT binary patch literal 938648 zcmV)DK*7I>P)Px#L}ge>W=%~1DgXcg2mk?xX#fNO00031000^Q000001E2u_0{{R30RRC20H6W@ z1ONa40RR9A7@z|H1ONa40RR94YybcN0Fdxm;Q#nUFt5yHfFa2-*ueYnKYTeAr7&!Lc=ZGp`=FSL4aB^o--3h45aO8-n0x-bLoR9yn z|L6aHwA$mxkDuN={rc;#Z*FcLJ%04dFTcEe@$%)XmycfOCuWaPd>+kA{CV>DNfUbA zMy|i!eoO!B-XB|=>FTUKe)9O{33Ge%)mLA=c)`@ZWJ}UeVuqQ0`;1zb2tPL4_Y&{* zBXVgHq+Cen_Ej=XzDJzTWjQ~2@`Qu;yYIgH)1UtI?D?~wX+Qn^>eZ{x?~(6A?qziz z33OFEW)gE$x{MusChd*+Ccmxy^_1Vz?EaL@Nqo$K%Zh%&nd-^S6V~waXU|`}oRigW zo}u3*mph9orAM9~J+G|d$1&f+h%KXdDF{NXr?2*jeNM|ypFaKS=~p*j z-FW5o?ELcet5-Pdg_CfA*i*upjT2f=+K3dZ>*;Otj7t<%>S0goFiU@PE34TyT&>*3 zl#SNaP0T37n~MmuR~|z**7oB`-Aa%7)iaL!Om%1d<2r{nz65ermxlmDtpqggOqC`0 zIrG=Cp*x{QPaFFIRWxSAIpLQo;dG~tC7{VPqz(+W8KZoo>-E$XBhq&eg( 
zhI!D3B;98HBQdYCIfABalM+&rO^caXoN~kz32PY;&R5(i(Ba5^@tjwa&l@Ef zTb4i#7$Bi5DI>2|A)_bShso3eM^lh+#X4@!Xs2)qw0JN40N1LH2|H|JJpE{seFZy& z6pbz7qd$NDmw)-|zx?_8KmYv8&;R+K|H*5sXV0FU_^f=`FP=M{RdWT4rgo8c#t!+K z2XkI+EK``$i-hyP1+g#Oa>z*9Qy$B`bU3U9ve33l9Wx6^2 z$5w+aC0DC45+Em+ct+OpQ(mPHF9AA7td6waK&^t(0dJ*WzcqEfcAt zFej{{&UU;|E)3)EKHXe-)z?Mctb}v}D~4%vkrvrB=ll>qi7Ya}WMP?9Gm38lOaV9K z@K}WQg{;0coBC(a{vfZ(9pM^|SEBC<)O%Ml2uYLPrXJeK(;IwVS24vnGP$o_a>jDb zUcG*mJZs9lyrO!;M^(yBNd|itWh#lBwklH{O9xuepTGusQYB*E-vhPGWRv1D^P)NsZ5#(_h95+_6>A%RR4_CCbdQm~Wom zFv^wJ+oDB5WBg3#t`-7OvC;5oP@b9X{I0Cd@>*}n;rs!5QhoCH>#x4%fyTf6+rRzm zzy9lg|M!3Y$AA3CfBpA=nd2q-4-o&;K$0?-qb%n~oT)f?UC$4^?b+&Cg>1TnSjAs| z^Yy>~`@ett?YFGt|NZ~|@4x@;@7(zI{9K!J>Ko@Qd=UMe_;0eko#Soa+cv)m`+*E@ zbGr(}X^I~%NuI1;yztV?GfC(F%J9qZp;h4VBc4{#zW?+0Y=8Ormmh!pktbKZ06ny1 zew!~#ou$n6h*LDnmm9#olI&8R_mF-Y=AXvmH%|=@x)NVbU=MowzL5V?fmYz3=s^C6 zFS+)~|EjzrKi!<#?X7q23UXK4J9+5R@mAK`y^`m}g`EygD4H$tSvg)#HJe!G_xufX&WEnJKS` zsz$GyEw*+M?2@*_w!a5pT=r({QxJqaDhelqm`k7~kWf`MDtDsWcY}}DLYpYs;c62z zjXu(qfd^NhIurA#;=xjwzVvK!*!XMgzj8UMX(m2)C z08}+GPS@9e1u(yoR$JRVOdYR*iMu#SF41CKIm{Cxd=j=6CeAQEVGJwwdZtZ2G8@6{ z9k0i85$O`o7J-L?lBO}9>|#+EoTJqz?dY7WN(TM#F{aiSH;O~vyfz7i)v=w3C7@IT zS$M3+Y{p7;j9#i5PfK6pIBd<)(R>>nxQD&LXh2 zyvk6**HM^_hxv;s8I>7Mkh5#=7)O~5r&Er(3&87BzDswvwIGW~5F7N&Ly znwS$NWE#hKm9m{o7t~>PQeoq&nW@uVzO)ogf|&!wc0lD_=UQl`YlPV7%Mq z*5>%evE~ynfluqdh#@UW{MLdLA7Vz*S6u?cOx@wCTY;gp*3Gy@qD&jNV|#R*i?o7F zAk1RvHot?6pG?+F#ilN7Ym?`1bp|pAS0I{=G>+0W!RBUsnxYgTBf6eKCyu^!C7iDP znx!CmA2k~wcdJ^lZDCWwFb_7=FO0JO6QuYWHB>CkbVaT~^og|mm<+Wbn=zD4)6#2kRoNYEC;sP=)g6E?_51fkG zO{ZG7@^GD0mHwD7iHQw_%!Keao?%LWOdfEj#hwAkgQkyz*)Y=Boz0H_3}iSEt4fpw z5&t@9HoY3};h>4vGqatdnVpPvYRT51jiRV9x)Pl$qkrGBf_!t>-Tn;D5?Bour-n%>M^o z)m>w}L^!`nK-eMM-S zPoR=uA`C6@?dD_hJf!ydLQ&bhdi3h%kslJL!H{A5W!OI9DsN3H>=c-gSecQeKkjRH z%v7>>YG5)mxr`F}7@T^Im2s8Q*gUAAn;^L zj>wQbi2GJ{EESTsep}=UFdlj z*wb|KP0vH43%6VnIvbaW)~gjUAcKNtmDISZHaK0ZvN)$`!MWmast>k--QmcwKc+B? 
z8Psr!f{6Lr=)|SiTE?F)y$~2&C9ZFpc22Zk^jSmwV(t;4UVsT&qmf2k$LWqeR@jfL z__}v^-fGq{APwPk%*>4CF+m6xvvl=szI#YVsDUlmFh`|Mj5ua7)m^b}I!a?9L~F=! zZ|qIEv&@?05vrwZ)qp@D2u_`V$+B5Fu$PYrh#NKu&?veWq=x0dvQI~)iDWQ&@~%Z7 z74Nb)Xf=yS{X$HJM2eri_G4gQLA2?pw<<@|H^MPBa?uhOjsO8AB;u-jNJ zz)8exJKc6>Iq81U_A*1-{#jDf37r9{@zD&{!HisI7lG<>Y^fDLX&}3I*QsaV617|r zs?4wp!wfo{ytQlw6F>Iw(Xj8EsW2Qk$P;m*BaAAO&_NH7tuRVFB5_XRDq`XiLSmAV zMY@G#w9F6c!ikUyppHgp(FVc9cYRdpaEByfiWNdwvvJYo3%NNHkW+;99LcsHbD-F@kD~t=avtHP+)2~C8gzLXysg)6h zwQibOy&W_7F`Z4PEzz?clS*j_B&*1YH6ZCM=0&NEGo{fcW@kgya%*dO%XfZmPAW z!};as{Q5_Coi#VG4AO_J(P?`IM*xkO=g&oS0qLo|dkdhOlFa_}16{KvG3HuUu@Ao^ zhY(N}yF|#QL91QsKZ(I$O_gn_#|% zHPMd+FQ}H-lol%Q*b_+cJPI&i%@+U8a-4Giy)ZE}t}u#-ZHozL%7QO)ejKWp)?T)5 z`cPv(+AxKcuG%Q>^dD2u4q69Q|ELvFO3E0K45VS>bNM6@mOeeG&Gfmszn=|^$<#8 zn9oenYn6(yLc}1d6pp|0@G>~DV-mO=mVasj8|PgVoW1zHvnkK*DQ~;N2!2 z0Q!~Sqwy`Ek`NvavCQ7#9 z-JtaAQhbvlC9_CT(3zb;5m_LHnsq!@G0$RBP+e%pSS_GJ`;WGq?X`bMmc^DNoB(!0 zd#h?22-8o#zU&)zp9rPKFD!utrKD zd~Y~ir^dt@ihUBXG%I(yp&)yUpvr#R>fl*vwFSh)L|3c9wp?Q2w*3~qx6Tni6{kdL z?+yQf19eg!N)lJ=WL8e5m_DzKX%`C=l_!j`n3dc${7`bfi|6fw2&<-fRif=ZMc88t z(*ye?x;Nc3x7*C`4b3k^{%8eGr{xX7M9=T(sNH_ zFobHob@sLFN!tep1dnTOw!P|V=}TTejL$Z=-0X4%P6T@bvOnLr8X~PBVk$PR&zqD4 ztz<@3FR?VP*V;#@zX;vOOB8Knk``ZjgFMouQZe{>$4lPPDYZDj=Mlj6I}J?}yQ}O1 zd8Q=(dfFA3xJ^+-u=1Mt#G%nI%*sKK^3m!rzHmwGYUviU9ZLeZbOS!p%zgz;Mjx}5 zOerOBhZyco0cyDb7AvGls@o&tVRuTi!oY=`JP3^-3rt1utWg(UQig~U!Bx1+V8N*q zBGq8u?*LhWDL_}3gZdu+Lw@50DU86;q3@!lpKo)^c7KRw5-K=7)6|FSo6fC8!o=SyQatrGp%W=`nlr__JFYm(R zx;rt=CU}XRA@00qOf`@LV~|wJlEz(<`mk4GE_su(1I5ai7t!`|6kPwccPptxwo5aNf$U~exvpBrLwqBv z&-zdYfvy!%y83p$!E3hGu^?Ct{ls@o49HszSOiI*C|pj_k1m1G8sWWVzstW-CEPPDp)4=8B_b7V$I zd^>Wei}>j$dxbG8aw-+b(W29xe}w8K(jZv{`l&K5+a?|gu}u6CrjJT|IR-ANflxjQ zEw?qnjQ3?lG?!?q2-k#-TP!PUgs2*zUR!C$=xM`BDn|x0d+3M~UsdT!C$%EW7OG-M-Stl9ucGBMQZVp_J!1f#FJ6*d|i3S_!$Q)yz%{6xYf%D@DiQ)6_DoY9c1ZvK`=+ zUil-8rN!v^-+&gu?UW$X*9h1*a^2}~N&6kHt<^m&7U>jz;zGHtA2cCwwOeW@OBDGr zxjS&Iz6@)!hNxX2gzBe8hJDf%&~cI$NRX^C%(bYF6i_v9 
zgj|G`*N$EIM8GB}k;PZZYPH<`562yI(O@DPww4zU!-3u@DMAIbMn<9f7>_G9%8D4O5N^97nLGABxsb06NxgVb=+pk&JP0W zo`XZOCx(lRK;ksiS_cI&BAN&xz`l|~p;}3#2ZBiuB}0?VqY$xeL$!m`P_!jvZ$%q5 z{r>&N;WbFhI>Wqgr%N5}tTZJoozbu}Yh@$4q9m@7V&On2$I0bToovz-O~^J_rK{@1 zYAJ`n7-uJq#=#}NplxnOPw_X}@acbzHqsS+a63%CCu$)nzqLlnFg|9Q33X#jT=(l= z<=COpV5y81ft?0cnJ)53vouUId3FxO89zIk#(P7gqXo(e*&aSZ$1lsWGa<&5Zf0a6 zWc8+ul?QT1ngkju)Kj|2#;0c~PYKGvg%N8#v(7hG2uMsD2}@aF?Ze0D=|ncYC9o3a z>6U(;Pa9>4B3BiYKHAf;|H>=jbyys9>I`>vZ!^R#E|Ric>whYp_dBnO*6OhpR=>b? zK+t?TUTrYZ(w}r>WPcoXM8}h1SzELKrg}}*iVE9$<{dgrL{?+Rz9Bl@dQo~|aDvBC zL(RiaXaAw9P{R;^v3cub#S$_zEVS4*?Cp0UNHPrk3Qshu zYUVCQ3p3SK)l;PUYLzBvX=}OGKgu8u&7&-SX~;6-9rA)JpL(^8k=6U;Ayn@)mN<*$ZTheydj+bBo&hg zEkXhj7P3+@V1G@bB5PKGRPVYpBO?ZZB_$YQVlAf( z`va4qZmnbO_xfLoIO~L<{W){ZXbOQvnkJA5b9-i&5-Dv*E(oXvLVGpR64 zXM<0qhlKzZ`WIeCGI_adkXaBQEyKB2IH#xYjkFV1jc$;*u-QULN zs=w5IH;6^8OBL8~ zhuNEY0i}LG^^?DqC!_{vGtf#>1bVVsl8Gvv@rnc+nn`bBSeU?$T!Z7e&(2|c7&Hg( z{Jpz4Pe$@jn{pO+w)N^3BmCsI>~-yEVJwZ|nwkgwesR>{A-r=Vitfo#ww zFE^iAyH9H;JcAEkKF}S1Ey>`Fm9#70j%PFbYbmbmBT4BRka`3L+;C_y=%#~eQB$&u zcZ#Bya;u_G|HYD6d^@Udp~ZRWHA`yxq!glvBR$~^tPY9$V5`wcP3fga*8(=#YU!i8 zlI+seJ{VK^t6(9sAsPa`m<)t8AqUpeI%Hw`26sODsLf&`NhQHkE%B*qRgVGfh27nTrx>u|%|(+Ck9WvW=fBDl#0)EuEALNmXnumq1$($ttXt39MO!wF@)jVvPl5 z<^lV{3F$;Lq#QnZ6;B!{9WcI**N1}pJptsC!sbJVi+&O{^7qpUF>J|?b)tr_}H4np+tMJnZeoMduO>t=y~S; zX7tum)kHL3=kB+59m50R_j-PG`#Y&_;Rl_~0cWLoFwKIa)9BvFtyO0~&0XY~nK;?= zzY^gIYK_KJ%eh;~hi&WcT`|2$Fs7qju}~4N_qvM|5#kto4I5+KCo|k#;jhe=(xBrw znGKoRhd^1xE~n7kns2uOFGv!S?K;!S+1q?uO{Q)>?c!ZPEu&oNcVL<8)Un$`LToYD zkI3`HO;PX)YcBbjptY2-2;DZ{GlCv5XotST#go4BNhY(I{ub`gL-I5c2IZ%r7vOdr zwyD;DlzsSwbz{$BkW`w1<|&DO=?_1=T$`DN8aFrk!y@2XXPWAk(q~g2wgsALO*sOT z{0pU#m`rKHJ-P8+mP|}$IneM!sC>VB53-U1cSjxd6yM?TAunMMQ z{rfaFfMdNacPcp6*qu=|@u9jQM-bdT4}O6zg3y2HI6= zKge-S-;KV;O=CAt8>><&mleo3iBU#wmq2;QjLCUeGq&5Hodzvn`)s6>0PuJ$I8C{DOvx1si-IIK+wK#EeNhBNqdRr*GwdJP($FhkeL9(eM#{48$ zq6H>AkW~zjHo={~qr{S{ovsv}{fxw_KvwKvFU-tXC~j@+1u~F_M?)Lyq&krql0p$Y 
zUfQk@1Jz;#L%w2XH;HH52yx9E2o^-MvWc}q;E15GY-&;UUt&4?Pbs*TS>a6 z-P)`{5V8^!PBv#>VRjMhH3Qt&Bn2wc>e4MTkb_FK)w)aMWEX9tdLk20^YDYFf^!y( z211M>i*-gyXo&;Kcrwr|PRX>k;>5xS{jn)aVl`@$993X1gEWvJ=kvd?iFP<*f}~?J zn_@~bR8!A7nS>Vsjb(Pmi+KsdoGou2Xf zE}Hfr3L@2MxSWQKc&L+fhi(~~oUA%DRC&-t6jeFZCS(oeP%7ypuU+=g-h^TjGI`@E5M zAf2YTbgT`nS-H2S*zl@`&aCZb)=aHqj1VfBk02Cvko6R?-;K6|Y0{3u2nn^`VMQPF ztRmhD%WP_sNRDCTj3ltLNiRO)%|NS2vl!zHPNail7>xk(XSMdv+f+aUguz^=8itsi`twn9NY!Zbu%4@v@JaM@nyOCQF7J37U95+L+*Z; zV|}|_;V|eTnFAc#rLwbK6yJ5P>taeoVgl8S@ZuNEayMUZ@P%G$vSB*Tr&%XM*|t9R zL?25Xg;i>A0kk8LwED6=tb12~f<07t4JJJiTDJ@wmb0CtxrepfLppVwvWKC7t$^12 zyQ6Kk)!{DYEo0@Ohgnl=pxcPG6xPWYuic0HZj?Y&p7Il5SQ;Dx8cQ_T3$`&4-;u=NWbWcwV#iQ{*ScfiUIMI!es)=(gxx zKQP(Uia4?52~KN81S!xIr>V%mr5R&@GI1G->gu!gNNjHzZzMC9k~^GU%;b9Jpa?t!17Atsmhnp(G!7<3PjoQL8lz@k7gKAR_)i5Mg~ zvn4Yolt+Y-DW2zmQmt(5oOlh^KROA@Zpygo6iul#H%T`&oKs9j^>Sog+Wg*$O~CZn zO#E?NL$xbz9Nd(f80+MuSdz}_ru%aWSZ*M8E8-9qSeB@B7Qd7#VEnGHx&F|QUNRz|~D zPwCl^@Sp10DYSX_Uo10n6MN&WhQnkV$sPiwToiHoQ%gfv5jM4~qaUDYBFQ^%r=~W6 zk!xsAuK5<3@Qxd)(P}+D6MIs2M@uT{B===nNh)8_+DdPT$k4>HnY>UT%G3WOjq$e5 ze~X2_S2yJ5pOv<4OsmNyUQ%g^HcmUCG02*xz(F)686E6WWNg#a!f3q&7-b@lS#gYn;f+N?W<3Y3sbMtVd$l>dcobuD083b$&yf|ZqgW8 zlGC`pJr=rZTt}Gay5TNHLvK<**m!YYCUsQvg^x`*2yo~f%nKj2Du_iB zCzENY1Qjh>x8rlnX_PU@8rZ`R(U}Dn=*}^c{(gvX_Xo3y&$7NhwBCWq-MM6WgW9D? 
z))!0q9l||O*So89O6E5WPPX^DOCnw5_nPGwa&J)q4qh)0z0`3mug+VzJ~WeczJmAd zBk}r;Gz!tuoG!z;BrlZTRjJ$Y?sf6{swQ>1*E~yZ&B$+1Jzh>}xMzgU?*9hD=cWE# z3;5xs&UE{C#b$21>yxt2;S_hdn6GE2$M3J(T>IRQ@#$D*%0BZY`W7Xp0<(fKrZ`sQ zbQ9HyehZx6Ycj`6q9>-eod+41I6YZm*{E7~y4s^>*gMjUbJIG4k~y4nsqM`}&GNpQ z(xE-35}*x}#Ya)1=aE#1p*s1_LduC%`&MNi@LfEuu$TppHGP9z zwjHt2_i&<7x1Gbezd+CybDKHKE9(j|5Ii|na(HE#HMH#FxNL~QIPl6XmSs96)wO_9 z{aP60+{7cBf}DZUFZkY`>SCPyOQ~jKR^=k&;gPUb7y%#r&lBn-yYkS-H{LQo_R;!O zN24i-(zM9d0$3E$AQ!)oW_lD{VJ+n=%GA&z#Whq%oR65x+(^o-#LfQfnEtuNdtxrdSMiAyB>(13s=-1fzBTVJUUL&L6RzdB-$<%CMmgqscC6yeqDmrO{=yAzYhp_15P z9Q>hzNoY>Qh3FzrOqEktJ(%l!Z6b3W?Bx;qgIu!FKp>3qwH;}SCU9t08MZMjPCc4O zzpf-y)523%R~S3rTvDEI52t{MWl|cZVQ_?VpmYlll`LddRekps;VwwPF(WANb(H+P z=~3T?E;9@=)PFU(6$D8-Y*lt62aBINjk6kOlYQm=D}Z{Znk!k$5|~cCpml6-ZEu@x z`-B#*{FV2|(BU0G;%X@+x$6+BN>-Efvk<#Q zCA;VtreKi_Fg6Kh5RW?pxjmo%2goHs!)|l9NI44K%QobjwVH(4#jhil8wwkn4u+tK zWAZa>Xldt%&h)LqIIvUOl+ul65R`|%p0+BhZQ(HKu%ITKbg-DGEW2k7%gHD*Xu(53f9CL<|VC~~Hn~)510h)APhdAj@ zUZGVQR-tFYPV3E+8-n^GfgREP_VO$kZ8Xac5ZN*~7%*r(DmG5yCMLnf32F>Wo>*A} z-6lj>3H4DLj6%zNZrAdoQ23xhc2*vAT|OTD>CHTR(D)x7{5)&TsA*2NuHWAzv8(i( zmiNsdN9l>wow=S%bh3Rg-8++d1A7W`F`K`~_YIKuL;TPu@z<+(IZd9Ryg~35!Q*)4 ze!bd%9rK=RNx6A7ceYys^VPq8BhAYdmsKv=bxV?z>Ajcc;wbvKrQE0NK7wR>WS6=e z!;ees_eIN#n_qKT%lcv+EC=fU38!GO_}Kf-EQ#+a{+x{sihAtedd#2HvbH5 zXSqA;9QWX|-;48_`93F~QhTHEwe!OsU#GcC@d47>vAlQVO1RG!Dt!G{xvJwb86%Xm z-0*tQ9FjHfy6nu^?-(oCPAK{+_N*JY|Mj}P zXluZz2v^2CQ#z>l<^P$WpFau|Wy9gEg*E|>DmfV_ZHx9;JQsj-gqzz6_5>?&?d8Xu zNHk?h9L7yIHjY}dl9(`o9e70(%Zha!7)&5FX^ssTK*iuW%g78KGv*v0sq5-K@1?2L z-ADH64WvTEA-wFnIHOorNp>W4ucSv)Lhmf4AE@PQ@@5`_>BD!g{{#<0?2~Y42Zc=} zgEbUJCTNQXrM=^^vrjnc3ta$*9;3Z9-KTFhb`rawE5LCL z)=G0HC3^U}K^uF7YZ)X8u*i8S=N`knoyn?pa5NXgNc`{2N`o|iZIprRqCrUWz z+LXHuBrsf_9zFIst;l_=d-9?dyM+;B{*W<%4E9~YI|uKqy}0p#2vXoXr6W(QBL$av&iXTth;)d04SNbVb)bw1Nt#SJ7~Kz zW&uN3d4<#O#0lY#VZdlWxAvFUpod9Rv6EFrtwN|oAYAQCFVQ8+AT(nRCUfX%s32ny zv&2h@%D}Pu?!-tKgOqB69m7;fJDV8o*8ov-&OXsKOOaz&D#~Kmj#JQ8v2aIyD!zoo 
zA)H%6lOp>?l1mc*N2zq`6qA_e=tw?NOZilWe7%wDx3D&eYBjW&Y$K3D+}-7{EOyPWJo8=)PH|xyqy}XWL2;r=YLme+7Yb(0i;njZ zXNoMbAgDJ@+d`na`Oo|!AK~iz|D=lMzw&Vy?TRyK39KfQg@bl+9B#x7xQc2^DZ_&` z!?b#7JdESRXc^coLKz^ZvqReT=!-*iVI2S+x=`uZkE+1iVFJn7C8|8<_PQied{-G$ z&!qq4^^=#cU$TYrJj66(qsni+rs9(y3XJWufT0J1ls6eHh@h9ky8EBcWsOC+C$z7D zk5|G6j_&WVt+I!WIYD8y^oQ|#4Ed>{n)1vGc_(#pc2+t)Q;bhd^pC|~3(SmYd-Y{% zx&NiTeEibKRwRqoO^r0)#c4;;18r*(&xxEf{T*p7sNdo8DNK({uLyT|O!B87_aS0_ zu6g}xt(awh(@qEOulhV7<<-(XGhYk%>D5A?ili>v{;=7ickX-$%(o%MX^Y>9?#l~T z0FLfk6kn{Y^1Kb=r?dHu8j{w^$OPA%-tM#@gO)`@|ESWZOYo0HhWQ&EO>7Q}$@h(> z4^Tl$J`Xt(bP^GKzHKMFlDp!ukCtdT++J750mbT?GtFLS z?YJGY+F;fPI3V|kSRKaqS>xlB; zZ`j6-)tUt&u3yj3{h~t%k;4V@W>%6##=8^9Fa?y<$xkiz8dth>0jcW%pG+gcay_b6 zCB#HtGZoz9sZip?^kPn@$P*>IsRO$L*HhwIH!G=zGwvd~9gDW*Gb}+%FjK%yxJx1U z+N_8;(xUQ+N=vjl?!a-$b0|T>qPl&I38}jU^5OEp1~Iy%l!6puob4gP^p_azA;542TPLk>7n3SpE8#WBWLw?G#*6pS5nu&2i z72C7aDMGBoQxdOeiWm?r^x|;#NFJLpE*V|hVmaL$FABn_SPVV68r%@>8&$H?$FhLd zSeXrN-g#oYSm|p&P%+w@n2?mM(Op)FCi=Fs)3DBB8bjk=Td5vC@U~nIk?FoLk<(vD zeSs%_92Ivl6sL|BK?55(Zp+%ta3fC8U{Xh#D73gUTJd$Pcon%cV{9RCNdLSoR88E* zX2-S@r!Mcxi+b++y-Y}D%k`{*FgU?$<@zlfel`%y3#oOPEBmSDc0Bi~7KatSi__t2^>^5$Wfebmt~6 zgOvedelXA@HY}c3FLObvKwR}f^&GWSKnbRJ@zlp|Jal&2I;UTP0g1h`>0y8WOk68{4DWW=lT^ZLLH($F~onbe-`t( z1$u&uvP;rlffFIM+C&e#^mD9uXL&asXq8huG z8 zT(V;m?iy$oJ~@IV)=p5@s`emUl#D{Zkrr{v`0Ayr0ylYJ6KE{4jm*x`0I)t=umJUp zxLP3(qpds(&;1Se?j2yAiO^{Xwns~UYW?oP?hV4MTIb4K<;C=6b>AC~f1JoVCMT~f zeHWOGGk97CPPjj(#_+=Zq&=X*o? 
z-HBu>$?@Hh?h(Ri?R646vmCyBLwbI=Z28}cdkMQo%-@kp65lJ?7o2NmTL<8uQi~nM z*ITO{r?VF1u-fMD*7-zrCUdIL!+2ML((Gh%lkyg4wC!-8JIorteMywQ(@n0t67OsA zOlxaWOS3r(?{jun-1dq}rdLXb$t(NG@aU_4*v*;cubA|Fc5~ycqs#g@=9}JL^HJ8wGS)1%LM>7;1w@i5yCg-8M*l zYnSNl%5&ZQmKeh$=<*UA2P|{zZnRNJ*p}3cX4j`w^w$SblW3w88P~XZ_Nk;U_%g(C zgNo)8L0KLbT*F9!^Hskz2H-ktX{+|s1ZW^W$jkGBG#_YWM!bwqE{6{uGeMXyx`HIH zfzS1S%xYsY=4mU*bPZH#RE--=>}y+w^sScpvhSIhG;eO6-tgf|zviJq;RiNU8*U@R z%7CehjFyi{O|iOCE@t`WR?lCV2%((14SV`}HRWol4jL zo(q|*#PMRH$&{>-vxIbHk_cD=jXz`>dA0^k=|pP{b+e&Tl%Gmk5(M>9IJT>E3Ut8U zUjH$3PLc#5F2&ROH)z%v5ldqTxU~I=YYsltP8Ml^)tuRjDG#nJA6Zo&6cx6L7*i(m4)g+rh>#JHYjdRn#&OamtHgd?!voqsiUta6lS5PP;9VQy&&sfvCUCRn^^?*yP;fvYAf&P?>(px#gVG5Youk@U(t;B}fT}(&fTW@|6Bg z)OkYQ`M{{A;gX7ltpM_}S~-WrD*X^9W?dcG3ys-0NNBb;f$B9V6)vBs4MS54fWf|ts zPNbC~1CMJ-iV)WhYX(A30^oRr_Jq(#U5PFc#Lh1%gTW8q^WfpuqP%+xr09iADIFs~Vx_XC<#P2_GQtCxqm*Y;-br1#={`l=J75Iki0W=9-?RJE=6+ zOs?15r#IaF`QnP7*EgK9V*b$6elvg195by}CfzN0vs*NCnEr;FI zZe0&U^5HLV5;>-?={&4y`aR2K=FXkx=civi{q|4a(&4au{^GgUP1>m*zX$u?*}i=B zvR7LY^kF3DC_eVhhtE38o~NDZr<}RiSrIOo*4E=g$E zBk4ECXPsXrJN6GS(@mKao<4ob6#w;K{`&0MGy0^sQ%1fm@9x}n_TObWE)sV;eT25M`-h(shA4~4at9f}f@;c;$%dfxw+P`3uKl_08 zoa|noS0I0Qcm+tQD{05t)7CBh?u-rGm<3F(whGqWGA4Ph)I@SizUI!yD;}h(UgsV0 z$4_3odO_|KqxPP=LckDt5U(Vd7~p;|)WXZRe3pT}X%Z$BaxH?1VQU+&>EoM6GdRs{ z-tuREeh$MBmz&4FmvgM)49YGcS)#TyHmcFJg=YPoAJ$8H(YkV|I_!J zmVf^FXBKwdUgx94(%`4`%=R?h@LGS&ZR)cO$VC?ld$~f2qj6$r7W{CY!q(R`(lkgD z`>uw;usUN}w$?|liCZ$*r!l!KOJ0m}{F3$NcKsAje`k7HRwjM6GYXQXUXOhvO!8qaJl9LB&^lT?;jV2UmXx&|CHC3)|yUd`IAhX$J6 zPBhJjQzZs=&f(Tjza~-y-+1trh+i{y&rQ>F*>J{_6F-4vhvU^&5{4sIwD3x3#p?E(JDNfVRIN?62o{~((ka5oQFq`I5&U&^;h5j`Okm;{?E^! 
zKmY0HpZJCcKm8O-ZP-$ZI1j7T^D<38C=g&^Lwd(G%*$lk^d-1e7HL1Ma%4IawoSQj~E z(Yr0gG`*C}uwbaKI>K*IftG#?8+gp7+TpMDyc?h5Fl*fNe*4|GeF4dx(Jv5+)|vnS zKmbWZK~%r|@{%vWpYt=GDKGvVBk1Yt*iPaxKX07Mbyg9mc8m2=9db2Rq!N4M(!5hr z)vDSru6$3q;RBTES=tGD_m)5p(=6s*r54Evd{w81ib zcr%snm)EC3OQz1_US}lel-j=e=9|C%^{+3Vzw{{1OXyES^20~vO7zRKUye9EUiX39 z<98i~Jq8cstKM2XSK|9K{Q9e}x$|YqYi4GacObaa_kD<-URVq1oe6)Sv&uQdd+)QP zz3xDrvuJ+OUHx2)-h0qb1xT{~@)p|Dr#zke{`>EL`123XzV?~3xBI6vv-v;F)-$l& z$$)cmPE$OrLf+$-XBylC(r6_nLpH< zL!kwl?Y%!?U9e@4>1R)xOd{LPESYk}`tG~$nC0hBpC1#Qo3WX0OQV+{53?nMdF@5w zCh__a)0>x@NonVJTKo?)*J16)dU9U-_M30%FtbeYip2NY!ay?M{QGyia>?Hf>H~lKti9UwBah|83cfr6LWHX{fLkD@bmRIUy~Q}%cBAA zB$;FKr2=OC$}74OYVb6V4gt8CFU@GC(M5g6cs*w%3gw;CU~QKwK?LxO(Q~F)hSomD z5nLOX3)*b=f&_^5DHH&C7ftBZz@+gw!1vK|n$q4d+E0{I|19ya%@HB^iD!pmgtIOs zmQIn~rqpT|opHICAIYzrgjwci`B}fPmYoheI-gfc5&fZU!TTKdCs+!XFRQ7SGVYVKXFhxo`Xa3(E{&T~^G_ORQwI z<#r03j!>0!S=Rj`Oa>>H3n;S)!s@^wOh!noBLqmnAD0tEXd+K#n+icua3;Or;G zPDB~ui*exe=#ePyqt|@6;k)nu#48ON_e9TrdB(~nUuL{?S=hR)6YNSW-nEd~YK^@{ z?oX0U=Fp$CV_aQQ1inR6c1Sw8GsQJ;nAT-wdJ!5RZV6bu+!uZS=kI;t^!hc){_XF7 z(luql9j;&ax1RAY1&p>eb@Zbs^X_?Pr`JAEbOIv3n~_Vdf}5%C5c$`1KTQv zc0vFoCep?9}2TL^JIW{kaGy z^AFIypmX3~@jMi_dv8FvZTLYX=jim;d|t$|tW#m+oOPL8w#>3mBY7T~H-;}%SV~&e$K(1wB6(-V zRW{G5SlHyq=XLnV?4oL6cO1pcDarCi3%NWL{%>D1lH#!K zG%V@Omz1qgB z7u^UoUW=1Hn9M9EXU@!=pZ$6k52#Cb8302Moq|0EVPuo6n(6tdK0o?wtks^R@FEJ!i>JdD=p)Nuo5%mFfQ<& z!@B@=D~I;Zj;mN2*w1IMo;`ccjWJOW@@k6vCjTznBGi&loxl#K;GgNuQ5F#S);P&( zmf;j?sWjUSGDOfcK9z6Fw~=q-`p(uPH>VokK_MlV3+JM2IEZ}-ZN!P5#gt$attzx@ z;)Epesuado>E_pt0<|k4@fEpF{3+nou;_)_?ggpWInYo#=13Ag!*QRVoUHoc!f*L> z;kVzBEEeB<{UaZo_GW?C9?}w`tikjgNDIS=LtHig?6(mo1BIySzLd2chwzM3s?l(P z^N@-E`0bR@HWI$=3t|Bs#>E1XENH#tkP8Clm3In%_~8fsDhchUAAkJElOJ*6<+GO^ z=Nxx0E?oekrO`Pz><}vnwS#kLR8@v&uix8W35}nZJc1^ykrP_U7?{){U63}?&0F~H*+8qFSK4g;gKcT{;z-iUqAft7d|%22eSk~mxM5*xlUTX1ztf+ zqDet;(bzSx%v-2SuQhUC>?;YwBG;h9#bPa_RA5Jp4bjIjES&i%cYs*m{P=e}^VgRC z^S}PT|M-u`%&&)S-bgcLS4Yadr%z~qDnEBbjd6;UN$i}WxN4xm=G1U$g%4S6M4Z?f 
zETLvas51RC2z@`36EpfuWL^aNJpit}KmEiPXY;53im9FL6zyVomTx{0IEP4BoNpWxCZsf23s#ak00|^i*GOanZnZzU6=1#02^mI zC&O#i`x>8?-){uH(=tM?UtEYdKQp<1|DV5eruymCPxGe^$$TAF?~C#$#PRh*(jn77 zegCH){^bWxUymOB{KS{gHcz{AaoNYU^4rIV(tF?d8$z_FH@u2uRr7g{zy0lRyqn-3 zHlRJwq~28Z9EKdBG_FD17I87+RW#?U?;d~W+yB1QT$B4IkcTDD`G;eahs+Rff@Z+X zrW#L4duQbN?$smSG_L8krYN7=Qb^7{IeYTvsHY&WUUw1ff;`-gr>V|aW}1G_*Sw+1 z<=JmL3&w0J+^vA1p$#5hOst|H^MtL3jn5R7HlA*W7Y-tZFn_nZ_DR!bB zdDyamdGWy&noOBxPH?`}l!KSM!Op6c=|wK%3V$ueebtfmi)TLUV3|9!{G^#^8WQ(2 zs;r9DzpYxDPrvUXEknL_#M1yiz4h(4eCUHLIh*q$tZ%n;_IoyeN7@7RjwtW$o?Bnm zDSTMQEMQhCD|r@kUV{VAlUffrmc4%(wU&x<^SQXI-!zcV&3XJw zxI8_SHrk-Ovxb1X8})qDH?sW>+4eX}7yEjhU6AzQ6o0tVx6q*U(({#ty0fk5Sjs0e zVie!IJ6tA5vhUTrW^4Mcz-x1OCq#pbd|CikjDB6)LS4A{BO*2OOIvsnofXs6`nL7gn5{uN@+#4i<(K9vc;~Jt@#f!&&{yp|2t11t{ zG~vh43oZR}&+JXGZK?54Fy=T~JZqw4)@KIGJGz`r5{`OjHY~Y#VXsEJ^LZrd)WIrg z!A7G*36>_QL-vnf`xjgb>w5}bfw8w)-t|uZ7Eb!H3G}Sz8VFUQNpQS!)U>~Gz#sZY z1@JU;KZGSVmf_C0PpC@ROT@HpQ8lE~)7bC9XQA>Hll^guffc18R?Bj$~g$^I0W`yO8*G zme+-$kv1_akb<)ID|DNB+No;Mkbu?$;@ni0Z{M3%?*-wj-rmwZgKSjeDqwUk^R&mMc z?-#M#5}`%93@1n^qiQje?=7RIwJc>Jt}CthNXBtlR%^IQ*v~00#1o+(@+L{%8KrqQ zl%*RB9Szizd&9MqQSWeP8sq5k6F#|c3$j|-Hq*+Q!jZ@w1zOJUum`{1EyBLFhl#JO z(%u37{<%7lxh}9mUOnc%9*V5zdFAY@wTIdEQest~n-lC0Cnc`9T!1)o`QaMG-&OAU zih_7fZp(T;13b{aC#am6ANyZ+dLH(?uukujR7&G!w|1{;xV87P!{<55%Tsh3mtE$U zn_n77F*0)O8Akm&?^Ukkp*1EqA7SzA%)(~dS5R5$nL-jion0SVeh1$kj!Ys`$))z$ zi)XxI=EW63$}vlJtYqKp=LRqjZF;2o4RIGS&0}}XcL$ku zh0EX){K1aaLF-9uy|N@33dFML8%qy1w>J{!eErht?Tq zH$vWHg$elM#W+2(aou|+Ym)bv{2!wB@`eXJFI>aV{nHlo?IdR}*lD|SL>SE>crd-J z&(1Ql+Ewk`uA}=+6Rk_yn+)zr1ti0x;E)dg zaWpsr7pf}NMq_??ZOw*B=8s+Q*(QY8*JbkUvQ6$FH&BBLLEB}K;vQ6!Vc*F}Jvm+o zHzCSARAhd>=WlUNU`4JO(DRY2<|T#oQ?_AMsw`JakwUf4rS`?^7v$c_l@i4%Kmik1 zK{&To8JScqrw$P)TMsEFi$vV8?SP{hyCh!NN{AI{uvgHn0>9cuD=~$OHrkD#%?bnp zax#7Xgl`FZW~P-tavs&GKZgS_xs;nww9djRY-4Sq!_>C0){VXR-0aKLZo0~=dK8;T<^AbC0Vz8Hu#?okRYv*_*Drsvpdk!BT zWqSBUh1ifbW`J^*Rs?L+B>%n_Wm!0aAneoBwj8kWBZ2iisy_N+ltkHYFda8fkbC!- zRN=Rf<>;yimM-y`aGF`LqMl;)zN>UI^I@BmDtxg(9B@1k2 
zxA5Gf+S|%2*|FxtOSUeO{M61=j9&XCyVwu7rSOMgud_Ur=D1~L_26|)^EX-{BNr^E zVXt3PvzWkx&36LLkFh_6y`Z6~f)rBEjJjJZB#|mNT-R9F zk6-aEOb;zaGZS0>onW zjp2Nx5+=zeCt)di{5DIYhgxaVjlvOV5iV1`WLTHLtcn#dF|?rJRQr{teivqmQ?qd? zqvc!SX)j|oNNBTD>XlArynLQE)!Epy!$RvgVu zS!0>a3>A4)3(c2j`~07*%V%e|I}N>u?OY9O^ZvK4viX6YS7A0*&er_if6z~aXkHwE z`QQRG%dGK-&^agbJPS2-2Piy<0=k@FKSgTAiHYT2_wF-{iJmpG!9LA}S;(9-#Jz8>CBd;pl& zAw1{d)yTZ=WHyguJSd;Tp`E)UrkK=bX32^px7l8v%6`3c_g~xpQ`*CJ{Y`vx%_-cJ z+dh{omKW)~&gN>x{UOWP1DUp7e%(D^l%cm?OC6Zpm$sM7d{+zGX=krhNL9-~SFV)} zot1WI>)UxKcmq{l7bNRthnNiae8o)oe2SJ3q{*e)H`ThVW3E9h`*h#%HgN#l!i3fj z7^M;kjeCcV=<#sJZ^tYMS$NrH?EL1-^1bGY zu#3|Ki-)aL7W@R6IU1KOtRvf9_9Y?W*;qI#5b%%-v)Y{5w!eIbM)Cp;{@~b2Ne4eJ z_ID?7_*$Cl*-yjg>kfH^EvrUm2N*Baf>=uQuiKL7wVSErs{?~IiXJntU@=Nrmk^F` zIjh}JC&)VZhEu*~1frczP+;u#=}pSmW8_=`Tu62%7Y3Kyq$q-WTl;KjVcJDfC+pk; zDtb2|b_fmZbgUyH!%5j0YWmHzOT(hXAiF$U5*AtX6UbwJo6!-f2U)4sEN2EXWdx;_ z>R1fPW*S-YyI74A?Tnd{jJ!K>Z#C(y>Qz}uW7K2Z@TZ4@qLizchQbQXY>CwVVx8&BU{o5~w&mIWO4V>7pk^%1 zM%ZqZhl6HX$jxnSjH7|Wk~F1CJZMSW?L551%F=YUW4>kqahXpcBTwjKB~&HT7++eB z*x>$#!%WxNKI~$<(TGd`9KBdcEAPz{~Hc}T4i-qfXtwJyBFczh@y~bBmfwHVlg_#Apve0O^lrWN`m{Gcn ziF)L#7sR*#vFT?h`Jc4759DIhraF_|gF^eh4VjZ-+&7+7JA24YB*Sw?mFt!CVv`xK zB$uQx}hLLa9Em&EWS zQqKkwq-EOmx?&S;!^JLzT%{TW6jQm#r72B$;xJ(~~N}%$*h}3~~oEKF2JEgpJ_!C((xec^KJ3M7xIo>;ZD{?gi+|YN; z)Tq``)*+-0`C2DeDv5{5l)}JssBX)l+9Ja#(^N-hmh)9D`{K&^;6(@37OB&e5SebH ztlU9p;zHTWN=_fg3K#*-K*?SnI@pHl@;2OuYvmALZb*|^?E$~J?&w$N`7M7SotcZw zLascSTP`6q7W2bwCUt6Aoij$|klpLod~U_beeD}*oSRK~uDq_d6Tk!LUZo71)ZI*lqW!-X!0S_psB%nNS`zJH?M)`*)R}{ojE-x%)YySSw{96a@Ir=?q0Dh0leXu>B*Wlyqr+M|_hnCHKp!)6{n4_I94>ag= zcyk{2Mj{6~DZ2o6>q6Hj7r8&SGB(;L?T~^gcYc{@Pc!TxY}gokKzEOeM!HDAw*x7#YF zUi!yo3)eG(U4D+ZoN5w0cLmsT?$JBF(t?52s)+FI5=AE zeht_nRNM1O`r2f9(9x%=RNlGEOXmV|?ydWFVJI>_i3L2@T|SfOYi#Dpc>ZG0(3laV zOTqpm>OKIC9=OPG&XlbRbl&x2D)aq=WTzn)(_!DlQdK=&BGG1rnlmaYoIk|n zk!!Jh?wqV+N{^}5(JTM0dZHk zR?}~kGL~p_vo{9rXq_@^V3(A6O~sbx5e&y#pzZlzON)lsUdg0qEX$ixrX!~Hp~WRd zbF}0siZ&6sDbFtQC|0tySqPE|;kp!|b#EQVJdkpL)$ljO9H;rR=0p@t8cxM&js&>X 
ze%;_sE#NJ^QC4vZTnT+ip0q=|E9x#v7;ns(*JN?sf9YR4eD%`*n&s)Uul%pk{2QMA zMvHUiAAw9Y?K@GKjx6IqNmr_oOOj3{2z}%&clG>u#MrZqa|B3&fx)C1hk;>tJ7{2> zorIgJdI>TRuOe8YU^1n=k9jt(*W9KYb;;ZN^)o zRitx(Ga|*Yn+&qjWOURl)aqI&gH1ZAly!>>0aCM?%{GK}v90|U(ji}de6Yp;m)1}1 zkTcnPMe^O^)_lrk;(krpSs-y|GN5*?gWB_AoAerfwpiZgxh=MKLTm+bnr00KQtSc{P^*|aBv~N_{Wd`;Mt`KrfqJUNjq#8w)Jm++tioPSE_9=?y1{Fh!!yZ zE!{>a+EpFS!zuo$6h$Z*BKm6i$zz_<_D?&SanH}|UD9Diq!QRUrk?#;{AOF5GXW$! z!D{c`o)})ZFZmYk&M>0PhTn(G>PJ4YoQi3@(+?l+{+Qq%+LI7VPM4lxvP37EPHsT_;t=g z&tCmQ|Ng0e&dHvsS>3y(TDOa~^$(@vi7r!5f}EDU?d@LzlAMlPV z5noavS!XsEASO4WAOtW;hziyjv>~YTI_p%#aL$_%J8DOH?)Ecaw4OStFNjxb?6j#I{=J^YKW|4L(D-O6KdS2 zA2bbacUCODwVtdEW2%B9S>STavx*IJSdM>(YtDnhNkL0teCiWR*bhiyq1P1q?CIJFsr>s`skU&@PX@B^HHAm$yZ0FF&- z3%4YD!rh!tvgx8SyM&W?>Xt=i?xyKaF^y$zwuv&a6dFAwjlQgqh-mRuIxKTBjyjEb zY=W|t^&9=D2xK^f6Q@RHm+0F21ORxSVeO@m^mRJh>Dc4$Z5> zEo`T;%%W^llEc13XCl2MGi4nAP`}l`u)(C2*LIN1D}${n2HTzf?jWoi5-+~+vzAVQ zgW%Jdo2wPeYA)uoA44aIw`DpRJ39D>WH9?ic@2MsOrQQZF3@# z5XsvIDj50~QrB&v5N7302=Qr7&}zAUIj06)21{8;?F&+aYfHaHElplFv%b<89mIzjZ zXWWRpTqQIOzHvw(119&{27R5R|98xTkQ0M8zeSHXrT19pRlL`w?#o(wN1e?L86MFF`bFh zS4w;{tncsEz+l4ejDpP@Ve|P65;yzAEp1DYAL{1!4SmTG1Po`&ElZ=5Vv>@{|GQG#bj@%OI_!+t%lPu>(T3OPLiWIGq3{|tq-!z!-Rjev80 zEr;y+_euHKl5YqzBgcpoJ9%y)OPh>{-t8=cXNA2ls`xbyV7Ft(=hCax;B%qQ4zg4e z0HpnwbIpw13r~kT|A^W?=lt+%3;yUYSLTXFotOPk?dS3&Hf)_y;5XKr z2Q|M6TLz~5Q)5`v-UmHnK<)NzeRN$+KRq5M7AziYpjjFM9> zXU+;9+W+s0{VUm9;E$Y(V5a0f|K2_x8rm(6-J>T?;baU^Hm;o_?H(Y~NG<2a7(^DLsiZ>Zla`DXk) zC4EOJ6?+3PxSTysA%7U>i}$#HgTrUOeqWrP^PZ9$r+g}~x0;;Xp6I+kbM!KJ5&ns` zZ|Ltu{KIFn46k{Pg{O(nieqVedEayHzVY!{VZJW(OJRG?*5+?)WMx0Ms>P?<+??jj zc(=?M>R00IH40!UJ_}FiezoSVn@DcIdoNA+dO@T9Y6m1I1@1?1EoUAjv!{yIC*P?Q z0U3h9*arP>yyrzUrAE zBI71~a=Y+aAheo{KrYBiV~qi^tSfEez|A-(Wq}f~DJdVykYv7@o9z4|%IVt_f+YS# zWc~%7>DXtRKrYuVJUbQw;WZ{xDGW5VjRr+{BKngVlbXutqNU+olG1uMDY^c*V}V8_ zcxahj76>OM1{LYgnwVB9p9T61X-;z)RA{SBOg`h@-=7|~m*tebGZ`9e?om6Z1~t|( z7=Y-cleQG8w&>V>0#}k4PmQ%rng$L7h0YmNhZ^ivI~K6W*jZ+7;Dn>ZxTtjGbNKWB 
z%z>ekL6nwLNp@%^%4)_tR}0zI+b~j0u`#S300&mUab>%q9P7Q4)O#!hD)Lye2qyN7 zf)CHxwiIBRt};Vq&oRfR7OtC;jsa&4N9+0z@LFVb;cwMl3^yZ4=ot@-Vjhu#=vh8!yL3$sp_ah4V8mqM;LC2g_fk%DlhUMnBky@Qr6@`qoa)BqAPvpG9)1fo6=7G@ZkF@>AJh9B{ zImX7xbO;Eu))7`B-IHZ4A}L>{GjS$mxQkLg>HI1{M6=gkj#}%|VxqBTohF1K)W_bo zv^Q6aNY2S2WQy3YV)Ts8WkBVsO=udVYorLqqS0f+F(P2khoDj&YT{HYn1Dg69nz+m zBquJ5f`g|}cL=S+pvdcsbGV?J7*OIT$V~Q%Xk2Yt=RuOCDUX4q7`MD+%QkF9uxQi* z)`OpXMFGO}jZ`@{dRHF>kCT z5foJ_1+e^;Dy7!6&J%(1EhNwS(o-O`y@rvNhGhOpqlqqZ)z3C-@;i4ij?a2%!ynY& zTI&g)<`3peRGnIKmanQ)Vs^JjyZ{P0z*}d=by0l66-B;2w>H_Q4i z;LWHHYwS}zMfL2rI@Vu!q`3PR;4fUM?@rEDeHK*g8SdHtnhp5gS^gTew~E~1c#fJKFcEddkuH~##`q%Pww=}C;uJ5zv5i3J0@)z*zM)8cZ__5o^iXz z4j~KhW8lzb?_as+B-Fu*ZSUYOJ2ws24sj|F*%#-|A!bnqd5h&fzc1+|@X-=FH+k00 zQDfJ2;R(mw{9`l4o`#P4APol1D zMhE99o?=&$yz>ciT$K+7UgZ$DYx|Pj{ImBX7q+$q1)Qb?i_#Qp!um zSm7DW5Qm1C81thxxC(XAAc+7$9g=(w=PCj7t)eNmuvjcaJQXpTtLyS)O^X;cJKKy(%AOXrAAqc5b=Kt!Q^8Wqw5}zQt;qBz zSn(h-$8Pn-*7TrXsgg zA$we14`wrAe?-=EU;ys=?x~lV5U-^<83wzaNUHYqwQ18Lo}|4_Hay$cFbhO{M&%-T zPiS-13u5iN7)iPkeq~I~d|Qhbu`FPgn`g1#OY`YGy=kxlM7m$)`9dQJM3V3$L{ry> zlv_#V>a2EK;$8-A;LlEy?Em{8CBs6uvzb0p2+7ARmjF4ej;`3XAq%?Kp{4IrV^m27 zrxK<;Hw>Q(c)Ek9z5SB};1Q*sWdFqF$XR=?H+!dILT3WGI_3H(*g#K+)?zyG4QSSn2jL5AZbC%#Eh%UoA@$Cb7&e?zi8mgNWn zW^~2bfyqBHjgh0Ry&yMrhI4kzBP80cjVZDX>G=u5L2)KL4cFU-mKuK~O*ts*Mm^Os z#>$!}tgLN*k_nEI446csAq%^E+9I=k&KG#iTq~TJI^Ht)==;9GpYZVC;PAwM)wR>UPq}D0J=d;zb<09aJ8-tw!cv1f zSIOtAed=7{mV8yzwyg8>9p}mWj_{t}%$Di-zFpY2C;zzKqWSiz|3=^;`!+*h-zE7r z!*|0?@Y&DwX(#VFGV(1nzWtCl+4sus>nq~ZuW|vLI$vK5$92D!UWD8+ci4;8yokDx ziN7T9{)o=!<+BUh?mAcV+$nRn3x>B+d{-bxZh-~(Ceo)O#@aYgTMXB;nxG6F&Db+u zBSnb?lFd!hMPgI&#+SvszZ0=kDm)jIb8$ z3g%0pVlb`XK|-0&ht^||f#47oQvU7y1QL~XtfUv?`O2vTydOwy?4e7+=WCp@6u<{AFg0lO zZ|HF?fpX?#mZX+;I%`|XK|7g6??N%jMYgU-MKdZQ-i+F5luX$Y)p9sj&}N*=DF6sF z6X_z`9JvaVy5yJ@Wo)Q6rfiaLQj#nkFobb0&ksJtKCwCEp-Vt-3oGZI^2KySm;bBa|pzOGSc(pZbk0^4n{ zxmj9I{UEtGC$?el@l1$^au(TiEhF}m-JH!Z3ear2fQVf)@@zOl!Vo*qDU7WaCKt5; zsQ5+3Sr>EDcVXTvJDPfAFGF1flG#L>Vqp(oxsisk`r{A}VgpajQL|^qKF^bD)_2ac 
zhE^DaoAjRgZtpsCq5z+T{VdOkeE~L6gT`}5Lb%@%NeO_ZZ&x2aL9R&PdGKBT0iy=y z?La#gYsPeR*EKTNkp&eH+op2d4KPaU>;FX&k(ef3F0`(UD^uC!>PuqJsAsdUr6RUi z&v+Q~$k#IeDGGDnNTMd5eab@0wT`jWElj|0E`C?Ad9` z6ie50{hL2y1an|g1B*ZX{%4g+#S~HDv-i|x-pjciXt#mex-6=USVvXOX1#!O zCFd7+{vUpI=kaENtHER(71))Nsja(09`|jdrv(L8Fod*=tHZ{TC2n&RMI=>W9~o@A zC4aFY(G=+Sum3EN)-KdY}xUSo3^GePOH5c}&vFG;-on@=!D{scfJ10*?f4c*Ft>(*7zfQrZ z@pt@2T3@P8FLL$garNbIcl%=zJiabd^*3I}KbG{rp{7;ub$^I{=*{0_$4=}=TjW&w zMaegrD-NUMd_C@k%`IoftA=dc=R0yGZwHS07nat3yq|Z`7bxy2{==u1{9E|jRr>le z_`H?Ar~G<}X7%=U{Ni-CEg|exalbZ-Bliw-rRnkcY8K@0=;QvJ@>y5=Mn_$ za-Ut<5{B8=S^=!U-9I`p+@~aRqL$J3nlMV^t@^Bq=0gxPl!pra%9Vr%StH6SM5<*& z8h6)vm#_$^&X+-B$wKp*e-6@L%;ScY`f_E$vro*dyG=r3<`rwe%^Pb;K*AxB6HS|r z;pm51@yx&HjxVIt!pHd?*tW(H-i^%?FZMQJf>1Wd?Pqw_ebzO}Ab(op3y0`%^=vJ@jNAuMwYSutKk)w&7zIZOSf9kQ--A~*ql-~vPr_*<`a z)x2blBZ$-)1BNAQ&n%MpBg>p1+Jb4r$V*`fs67C@4T6?GDkrK7vN8tQjoY3GIOQQJ z5|1P@FiLLg^sl-nR$nr4Q=?AY!f(pLrY4|Fe3*pIexwn z7R2no*$?J)PizA;wyV=ynLz8xTy(dCxy9bO_MBO2&(sND7LHXCSD(DdoKsOtAXCrL zEO_i?HVAQECp4U5&DWMD7H_60!DA!PKvKHoLS&1b)JKD`v^$W}v*^$W&#hPGoYVqJ z*cm+=6^+H~sj7DQO(bc0n7THB=d?j(lwCXH)2@vG%jRkZ}-Ho{$^k#EI3DGn#dujCp|$IOv|WqU*{! 
zUyp3_ngct}FS#`qtIfu^QX5A5F&}GyfDt?Ind|g!*<|0pAQnx+3d-^-U&URzWOFzm`2#9 z`_=z7OMmlg)jOBS*X+Qzt4b@*$KuL9>TGXtCtgP>|5J< zY-1VhFwd3*l|KvL3i(fTdrO)xBHoYU@naM@+07%-KSPXvQF{*2A!c3W5e(o{M z=*+nFp*tB72Z?`JyNv}e2d0Lqa4nkaEl<6ym!Y2`@m}AXXCl_cvi#+7j)p7vNFwM) znp*t1o=Jz3;8xd->j!oPoGM6-+xM)p?=8ok$8wf7ob8@5Bzlo6+Yzxr9n_?HWcF2#iBVjBybiZY`);u$dc zl7|7xS(8dKjqF|+*;dn47L2r4mT21DKL#Y9H#O1c}`u(3c%d3$u4c&+@opy;VaKiraZwGTXiu zvZ?Sfmo%{r?|e%-Fv5_;$eO=In6Wjuy(~8|7%TMPZ>5GXEb};kO?&LgNb03(wozp` zt_kc8f~BHeG> zB(MnBr_ae|F7tXOjAjd6j+wXlTH-IudLSDUv2|+1!G0gQJgd4%hl}6TY9>FCKbjLp ziWjvb7{xl}%FjOW128bP7XMWj%B|N7dKHquylssUY0xu}(cuDG{&|=TSgbFL!l3mJ z{(7^p1W!UX*c;vjGiSNQb+;DJenaBeOM>tq62QSc5N^gIyUjdNt$%7G#&gR&9s=Zo zdfxxEtY%2a2D#j+F>z%9+t@=F11dA+Plm2qCTJ_7_Pm))s4#%I4mG=dotI?;ahT{z zTo*Y-LUmT7jYEwUR)xnn@YYjFA+=3J83Tp#D9nN73}a))-~{L5i|X%hM`tew4)G|u zAsDj45~{5xrg4{t@6ydB7Pi@?IVLQrL;B1!to#xG3u%W8U&=$H2M zMebj-y`RLhcJcLVf8F7E_W9&}?fpWQp4{K+6jN-k@cV1-7dofUQhI{(Hu$vSN57HQ zud*lXYb?@^)HVM!VBW@mjmqAsB2Vu=-tqsY=IiP3n>OH+mc3uEKB@daEqh*s{UUvT zLB6m5H0ehTxK3{0l)SIAj zSBf};>>$pO?JKdcaI?B)eBQ7C06+jqL_t)5-oYA($GcZEbH}mR@o9ikq`3a;H;n8sC6EZ@N(dY6+#c1}%f^?ikcI9l(vJEZZ+V#Z+zE2Yt zpz^oFHnZnJ)A<9$Q8t#x3bd#pw=8FN?x+>VlZpz zF=*QkGk|WL+x*NK0q^l4zG`yS=LsH2e9Ac7wuN|I^}ZsAHw?wZc@_oMt37N~cMW!` zQ{ng&8)3GHh&(rGOZNVPpu&i_$T&O8wa-U?7Cc5GcRs04sBws>1@Yu0Tlp{jU}E-l z5|NDbjsI!P7G+}FA8W(OHbAjJx(=a>%z5z7@M(hUT^MZGU?D338F9OJ;Z*fg`rUjp zZ5(dPPSA>J(K(SVzjOdm4TxZRQVWOtHLA2auW)S8UZfd#xQR`zJL7_og%6seHP5LD&eWSY9 zbsb9{PZ=gLph&X>JYJW=7#%LP8}E2p=)k>oX` zuOo{n?gWD=08{a~i;Xesxes3O+y-MGll0!nHcQ@X#?*gAT;?Td3Ep&be|vw={j#E? 
z6edes0US4V35QB!6np=Dp^yIlqvm^fTOAU9$&d9wX#Hi*T>LR++fFI4OB#M8HL>{p zaDnpqE(+S5;Pbl*w!&BX@ojqjy zy};R_AF@(WiecJ>$3m>kSb@{U911ci6vB;`tm$LTHDPZGnIJc}i2`Mw#x?_TGc1Nl zbwN@L=&WW@px_m@CcxOpeAa|&UleY7!R|chFmnqxPz0Ixn!}2Tq6sOIZPhCvo)lEB4>^XE8GZh&bsC^za|{Z(jH_N zdT%7>yjEL&P1%3>jh8V~W{x{ZjXgrl{OXRvKVuUA?NtkNsYhWYpBq}Kr%yqdR zsfJKsy{n{M&5LapG%K-|kZ5zxi=i3fEd?xN*#mpL{mVrQR}=cn0X#RM_98Ia(*jIW zqOr~z!HCUpMN1gMt86?m>4MF#eV{n~^w3feV#mD@#E9%!D6izQ@5=1Nvrt1;=7OC+ z*;XwXdnOGk192^>s!=sk0r1w;e}aI>c{!cH99k{-ZQ)!Cu*S%|UWzl9GI2Lk?IoBZ zX#i^ahZ5+<7}@S;cxEw)d}h<8Fru=Ml)}rf!~wVV%xY-6uyu@)I#4r2Sy6@tYGbr` zfEQ)cZt|kU`%-CXIH%QRB=@MzBHwaa&H15!dzUI0=Ops4ccN~d3zEWAOJRY(RBlY| zyw&gv*?|?D*drzusGKbpiKu8Ia5FHfze*~jfzk=2#g_vpxuEoev}uneAqOKc7;RAC|Y@qU=!ZI0T!M4TEor>JDf6GicIst58-ww2c)W7Dnt zf0p}GvGx4rba;|JmGmDLyQOhue|xkW-^P44FIB%&_q&eIR^!q8-HN^oI8+~YkrMj2 z6yDD77bx(m>#S}C9^02)+=@LX`h2PS-gk<(D{ovS>-zM^^D=!FpT4BW0&Z8pqvLno z9PdG{!DM90 zV}WDIVJ!%BQ#{%o*t3?SmH>*$++Xl`NbcP(!=;V9whF|iski91YM4`yYx*=nRC)8$#m|9C zOH#)GifPPH8I|zH1yFe<)Y%|N%f&JD;dg~GTvWMP3eqLX z57DCQomagWOC}?(;9~5nqNk&|OFS7?2iq$1Kq^BvC#3PAWk(@kTr@;IgEz1Y7wN7bQwStx$<}3dI-s(7HV_%cW-ezJ%7B1v zG>UK}T%psO~f~DG+C=E^-u-I%48fzQgiESBbzHQw!?j* zqKE`BH_x((6ArIF(=Gw6acz#)oc)mH5VTR5Ic{FVFvNU}LJ776rXjDajpZ0E_;|pZ zM@JHF{j9ta;+6|of;7)!!=NiZ7NTH8*G(JPe&1;9M8F9K4I@G(z!~J~f)_n1ZU2{^ zZT?K8MAk)M;?PTf+*pK^<$Es4blY6?xJ5FBM2!4l@nj;*LFw*um{WKb4(9pv|Ez{)sD#J_cs8+AU|y z;4}2jx}exoRh^2gyv{JhYgB|m;HbA-V(m)-O_I3-xL3ZAY2d6m&{dK1>?pX5$46R~ zn2NCcRYQ+-z(iP&7|ZPQZj6M}z{WBsDqBVZnOy{Kv1^{E6&WWv2Ssqqz?E3!ki!2F zucDXOP-%lqUcJ3aRW(W^Mi|d!B*|+IV8nF;fD7`n4Cs0IUI$JOzX!ber|UP%7hd{Q z_`>zQSK4)9{8p#?y>>k9&klUT4*cjh(mr8nPsw)Rcm4YG?&<&Szg+cw@HggnTX%DO zBm8=2aKpb|ew*P3NnX?b4wm1cIaiI}dj$)U+eGdubHm5eyIbtu9KOCJ3eFr%Lx`G0 z+&My*Ex!GnLY8d9DU$okx#KKF<72$tsoqjc=e;+^>lW8FTr--JjW*Ln4B|T1Zn+N_ z>=zBR*H%0K`opUzO%V!pm)RqR2MZIUyxvnWn}N ztGwW}wyDAajR_E2^fI#-2gC-TiiGN90@|p|T^x?{QDXw=tM`4cLYlRL0EKLXF7^+!l#s{bFDt=JIH0J*u97tZPK% zriicfag~7@;sVRbwX!rVPLb|r2 zt1D#fIfen{5IN}5^(2bRlCcOZTH~n8)oc-Kv&lk+0JzN0 
zK)o6!3cN4oSp)`MJg$$W8BpOPoRvcYiFd2+o+Mm2nZ5qoF)WU8*$QNq0%gWERB}nX zLag*|nZPB~n8+LN*^09f(ag03!ZE^wBxgnq<5y3hW2EqT{kMCZO`>AU&%Wqo6HBy^ zAr5C;j;2j;7TWSPYuL##MoF^n761}PMEJ+0XlQv>3aC=-7;&-kxU+i37~4g=P<*vg zBU$?>p=Avq8R(i#;xGewEKz;aATlJ3D+4tq$slhXShC>(MkWXlM12hN43=zCAGZtj z8w*#))QlkFwb-xjpb#?7A}$Lmh{->1ihyb&TtCLHjBPu#5hq+ zx_|@?XWb-Xblyc9xFt>AQC=)QvJZy5n@H2}rPyjLuGTw%+|lNfg|*ia3WcszJX>YE zPs#Aee)=HHZp3$~6E}ntagd?eW*Qj3Vi7uD(PEp5I8joyOn!%hjz7pL_Z3 zllY~-DxVYge4Be(@nsjelE3c7{Koss_n+7Iw`KRWH^ed-o7H)otHNi>ZO4)uM{iI0 zd;vo4e{)-2BX^;$y%XgQlRIBS{A7gHoEVK{JqwZY-_Kpv4N_44z_ZVTLm;lKTO^%eq~kLr)dL|gV| z$L_}>4ZWv`N4b=o5rFb!4OgQ29RVW&^pA<)u>&RrQRcJFB9BRzH0r6>WTtDqS&9}c zX}f5~MCOQLzMJK`V#<+qY^H$dHtLA4dB-!?Qsv9msUn7f5YGUas2v6R@(Yh{DM+oI zTn^63o%I$z6sVD~)abX{BO?dSth8ijy(tO2Mk76IE+-1Ps97MgFy$0X9WLC)Qux7J zEebD!C2CzN$mDTc6V%F+?*1*<_$=Y*xzkxGs(jwX6BI?RPNs{df&go`l*b*0ogDXcmy>teo<%2-8_WmBn6H zRAezBO??uz=Qc_Gws#m4)zHN9hpPIE1QQj8{9UAd>A5gZ>@ZuNYZ5{?{P8GC4vm6| zou;*`oyk%chdjaqQm@J&XKPN<`Wi*Lehp4?OTf@@>~a;V4r-ElV1mKl!P{gd zAl6_@sOP#)6wJ@M5!@`IiO8aX1hEu6>uhlvH3{%&XdA`T3fr964oCyoEvkC?#e&B? zYJjY@U0_MgJmS{nQC4Iz4q!+&W$cG`^7O?M?gb@WAsR5sTIy_lWM`$coFe#LewI*{ zV8A!VITL|Y>EjN#r$)Ww7@=<)du4`(V0f~qm)0o+i+=!h7(5pb7K?*WBAB#tz+zr7 z898tocrIgg;%S_pe9|7M~rqjO4}Z0Q&?+w>-xXa?kR3>q6#Y6D4eEAU)W}ELk>yhG*tcV zGw6-T64GwTz>X_(`A;VCak{-@{y4&aR7u2tYW+W2=l5_u(a&zQPw!n4y_;m-x8v>GbT7BR4)0>O<8TF? 
zcYnsvV*Y09&F9?5a`RbN;)Wc{jf%MPcu&jQNfsOp2nE9p#@^@_ASA{BQPX>Gphth% zWg|8tF6bK3*hHxQ1g<7Nf(bJJsF}#eLBf(8Ns^c85j(qv9M{<*X+J9&PD=+Fg%ksT zogYPM)}V5$!Kb3e6hg=ulwGLg$({!+&Be+TtEAh8cU{3pb{PWb2MjhNTu8MEIjX*w z1z8j@yh^dX=K@&$M>6N7n#)xOIht&xY@JhH&l(VF$&tyymBFYkm+Xwo92_xgQGYDq zvsWi73$07ATjLL#d0u_^NWYIB2vp0HqJfQ4h+44n_X3RC#zrl%c_c)S?3-bafIBRL z0t^`=1s;NsP_>8n5VB;`c~W}aMMvm7xiqS-n-s;iF+e#r zdXtTWwk1*cTKielOU@a-IPjkng@K1IVz0vG5ssS#g3r2aH+g}z=W_~5mB1SYK5(im zW2G}Epx~~vr5m?F7_f@DIV?>4DW+ah^GHg_$(%(7N7S;xE(FJHE4#`^gK-m?Sbj_w z52R)oHny@CZ*T>373neAp_`1PhKCC)E5MF9=#4E|&_37uTXA{vPfNW*9a0r`0g=)#(BaFNBn`nHOjuVoy^KHc}G9zIK z0q0qFizRF6?bB3b74#!q9x5Q=U6eC!(mUEnzDHbE;?rf=s}$(ZDwO z*jkc=G%AtQHh9_lZ(?x9%z-%%uYs~L>(ONtQ;?&yJ6Q9#O~$1AmMbkGpn!%Jlelcj zEC(o0qa?0%7+o8C%QZ|}qP--cf!s<)OnIH->8WWk_c>r(Tf4=ChX6~-Qhm!aaN_<8E`L2U>lyY;W$k*m$Hx0mhL>Owuaj|9wQrS@XTI3h><)9-B!J94-+A9}dUnNBP+9j=9UjJL1H0;=C!;FWu zjBt_{!*e`4@C!Te)+NW)`B5K#p*8z5@E1ZqNyu9#@smh@%6xXWTZj z9q1d-&#V8b{MmswI?(&5HymHf^u4!Yd3NW$b!tAdH-d(B^3JfZ^HRWFqJum4-auyN zn!10n+==7kT@xUqxh@M*_0T-$>vHxBNQBxZYDv z7vLUGeTyQyO5ewwSZR(kzEv)xsReH(?Em*0=Kc7rv2{F+W_ z@fI+b)0I4{iaBz|aN719t>YD1k7Q?OaHE<~YX7 zA%6$UAtQF9vlKocTqB+(Bfyq_6{T^Cf(kPJxM(v2?oi$Ra)J|#>HX~n3gC%#X=`G$ zvz(D2o_&MY2z~a=vbb>j`hNosbr98B4cRu>IjV4qc}TKg>XVb|_Tr=#jg)T*-47LX z=GrFts8x=c}Iz$XMT?Op=I)$6|@=~(*a$tk8l)~pDa0#KGM4;z>$ z9vxjIrrUrrmN3R}j7<+4Wn|{x3Is6>oI(;Ul?0U|2OdA&xMUaa0Xv{vF2xn%%nIkuhY?!?Od=4t&1@?>?QsANq+A zzh~8-_=G*Bf3pKW`i-=H;e1~G-)!d7`4KyCd+VHck6-`I-@V?Ccs`z*{!Ryarxb?0 z`T09!pE35$r!dz%cabzDGo$O?K5EkK#|MwC1KV3lZ?ki=>g^>PyIgedI|U|{ceLf^ z6$NKtq~)HsnT*4<{M_eORv>$K942-Fdn_9@s#!4*K_cND*1$N_<7WgRA^6rLKUpwC z<1gPbLeufuawA%h8AzHELUtRLI$3HS0H`U{#ls$C=Bxd$o)P9~= zOSQo&fHp3&*)r2KKRD<*fT}%UbH{1HFcPo_sK>Wz^)?Nc5QOTl?0U;tyTouv6;Bxb z5W|Eezav3xR&@x`!cW!g^>5Mk<^gX5{+It-jy!#k+dn-zC!^5oIx)>PwXOrZ90~8o zVKvd8=o-bixc)(S)mWKbMLEwY%$;+L1%f@TMpkrCIGmY?bzi)`a_MXSAhuFsjHVD* zt!0%0L|0v&5kees;cbfmT{2cuE%j|R@nQCFV!D}7RA-FLjlOS5IgbH1q;bx0Uixi> z>9TS|#(OO1f|!lULICM^{ZJiH#{fKK+abksej#l>5__tHpK~6smLmu%-L+@wt~&9c 
zz;Ebf1GVm{15<3EvTD0pBYj}cg_iZWR&W>>45~VBoaG~5|8gML*}+MrS5iYlL<(J2 z05?J=Tb7ZF0XWL5UMhfD#L@>;Ek;fvg1{C;m9^1$FjwBb)DJfh^Am9~u-4L~?v5~0 zU?-1>Bsu&Ls)4eAC=^DXrkrjNCA!wrC6izu3G9@k?2hX7A||5k+t)SZOl#D zS#{U~>vvoX%tW(CWc7n`Bw3GSBVqQ0caI~eLbY3kMNv{}C65?kRTS@r|J)i*DkbaB_u7T`w4WH`$N{p_I0nNL5@H#bu5ucrjY zjLn3KI}aFiYqzgl$%K!MP8Q?=biwc#6?^doj8?OB-By4WX%Ooy;tEe)%rSG~*1PB7 zaMm@6*1H7Afb;hLht!UQ^z8?&#ex=I|K-9q<{GeGs~ZMoI|^7y7aaDv{-p&UZ_M+r z2dEm;3E6_}1krY)bNUnug3oD1zp*zpu#tqenh*^cOfDp8RQ8Prz%UdM23<%!(W0Et zJUq=6bzT2udd|P11D?iTk?7;(=(YVt3-42`&$5rx@?WFm_pJKY^!AzMU+KV)ej}}4 zLH*+UujqUx{U#S|MwY8b<15t*`{Ab) zdX=(PQ-w>9{?-Ss!mR#LAlaz_24Me>k3Z}b8gRv#pY$LNn9N9h$O&?C=0Mh4wYTM6 zXGnl4oaOe-^>po8w}haw@Z*`}&khwMsuIel5d^f#Eqgvf(XV*5&SY)AVU>ndNX66P8vek@kvR z$yZdRlsW5SGw*t}4he9V0{$3<)=kDX;H0mq__z{zCD5AFs#I%=2zwPwc>)2SjAmsG zrjI(O37nf&9P^?y!6|$37cBS?Phv6h8&CJzqT$be>n?y9JR?VRH&6EAjNXMWIwH@-RTK{{b63zF;hCfwTnyJPVRESY))xN=jFt1h~t(#zW(|FC{9z11&g?tD-pzP)F>rO zqPGKV9q|$q8behMMHY#f@2+5QAbM~)%5P3=(hthwyzm;=nzBv^#f~-R{Ge?K+q9XB zte7gP_}B@Tu3nfz+Or9t3|;X4$L2UUFLJjPu;tH=0Em|T#oX-xV4ZdmiVs1|sGNZ% zu${BWn?rmGVB1<0KpY0}(;k+d+H&&ae33V!qUA?(wn0rgA+b~yBzR{g|7~l+1^_{? 
zIow=K*veZ=_v)=okLaJ71MNSI-VSJMhUn z@SA;|=ALXzo_8vr`~<#~|5x8S=dLeQowGRifydHBw$B{7cg@Ga-Y1J8Jn`x9xp&>$ zTCBZQ2I7jl)7*?Yb58h7fxFKb8`ZNkx3!a-v&S8*Rz)`M_PHfb8u+zFGC~r9Z|1UK@j_Mc!0jT&~JfZ(UR-xGu^`xZ=&uimK^4 z9iyl`b`)s@R!OL~+2bwkeQt_^A?;vw)2z8(sE~lFSo)Z#>kt6imhLJcVk`j9C{s<0 z+43NAijoCVl9nupl7i%d$>`j;hR1@jM7=1;Y7AB*QlJ0en(eUH?i@6^kY%AHlCbvS z2qvM-@XtWlW2^Dr`jHT#wx|=^s1v(&-{e_39DOXu5@k)JxBhTRH@N|8V!tcnATtVra_@EoYOl9|tfvXOn3^}lyWXNO zbTkAR%>sKX(VuOjCo8+K`cz6fe!Z$vv!=gb#Rml8)T^t7j|Cb|Ggo83Fex5pGRebb zQwEvkjfFs;0I?Eopkm3H*kjp@Dpn_&3?WvDS!7VO+kPTn0d?sW1Pn=#RHxhFiP?&Z zrwZ!`RYsD+HZ*}?W!4Lyk1nndC?Rcg33E^;E6uVSsPO^PKD z0uYw?zBvgv-zA{u{E-a?VD6o$QCA_m#fZmD9LO*MVGBCpNdEZ4SV@fEmbM2X{ zhlQseu+8kYYy}_Xab=V&PgcmI$YNU2u+kw72o%E%bZITP?36cRLfr?NdKT)F4U6FO zpN+)=M{|>ke8W~_=RhE44!{+01vf567W=@hs)HfgOtZi-Rn*Qd>GJ4e89b7FsCxaF zoj@A1DjU)~aJOllOT!!<@&u4?qvmaP{_vZDOBO$#t5W0G?o|f@m5eSrs~UT@X~aT2 zCZ3dZ7{JDBi=mgd4ytPt5gE8jm|^G^YoZv#6joh=8cvoB^2v`PO()dTy$G5hwD%n> z8Fc3amhr?N5h9<{-|RqJ{WnRT;m;0yv<~>v;)}~iQ}*-p0%r{?^-E0B6hn_P#w;1DP^5cAzzmCqAO4mQ%^i z8-4e{BJY;+U8%cACSo$@F;T6^jG#>J;!05@fr2-JJ@AZ1sS0l`IqFXMT`<{~;^-k{ z$4SSc&9nFM*R_DwQpgCgHa_NXG?=rGWt8pdGj42jW9T7Yvbhd)h0DrdDqqaBvD(@S z5cEv~vsoKqu6AZ#H?G~w*A=oC+`(-}ALEn8e!SAgb+38GNV z0I48Mb&!j3gH^ySV^cbA^}y5{7(5P39+=gSD)5pIE@jqZ zpvhd_%O~Fr6#i@npdGIK-P9G0484_-wJ=X{R#2di7d-<}GKsS6@E_RI3PoF_oJ3+| zWX{TO9=X?2`yIo2#S%gkv|SJK&M8^EP+?cCyP0C_Ic7m6vF8vp3n#W6u4W4zKO<7k zgvO35W*e~U)(8>YfhAy0gj*$%**R&4K1}B-SyGB&z|fJ1p^1%ix@Jq`X|bFxi1XAe z1pou%7}BacLRsLEs)dhWfK6wkx&}Ix^-30jb%|Q+2-xeS-pSN_mmbq+n+zo?Rn~dL z=p(TXf2^tChf~$7h12STN(gZ`d9_{b<)RkgX>3uNjJt`60)QfHID)X`P~&ujCfrB! zUKaLdY=NEke8Jqc%;it)B*Mp_pmtgpj0z3UDWlRbY29s;v0B#*Vj9 zXFCgPF+ToL9v1@@)xkQN3#c=5yeID%?{H!c_FBYxS0eHC%sQ?FOq-f|&(x609Pn&! 
zc_V3z6kzN6Dxn0MQ!z=(0utKW;UaMyb;ot@LyU3hHA?>CMS7-2`XI&At9)0OKp=f& zYj!HlUz&+UC5xmOzu`yLF>)Bq3(gh`X?Ps|Y_e{kb}7iv$ku+f!rYG&X(x4g;?90e zkKpI-pYFi3&;N8spM1Zz1Lq}Y(|*}Y`ck_JMe%1bbbB$`=`U60nZLRJMa&5 zfOp$FUVK4%v0c<(Io`AUm3Z&6{#xLP`i6$OGW*+ExP2fhU&gjK--tc@pKI~5qEw1$ z3;tZi{IJ>-c&_SG>{*m1fMWri%b+)?C-m|C&AD?XYA&26xtiH~(UnPjpxmhvz-@n) zdtaK}-qvn_$DtnEXn_zi1}i<7m>@g3P0_7%PkdE%J6`*=A&=PPA#61>a({)f+ps zHY!M-`wpT^)aqc{0=qbp;v#On{KI!^IR%zbo^cT{*Vml%c;SHx_<~E+%i^@1iU5!R zG-L#w!EX8TErbrKqO5gk0~4%J3}_k!LGcm2mj<7?*cMeyHX2n*LMqt2Yem!n1QV{) zA|eKyMc2}_E?mTVWZhNbYd?|=767JAOAxTInlV#QT*Wg|ExsF>c)jgJ>g~6pYlG`V zFH4VcBx&MJ(L~oA%qh7G&l=_SGUJ%@NV?>BC^(@I!#sM~Hy!Or77l0VB%xZN0$&z! zBqE&Rb)Dem1uS6f`SwA<{@7WKaFjF14C<3S>vG;3B_Qw_Cz!nflxKVng!3pLM}~kZ zb|-!c5gG~nc!_DfcmtcfJk!I|kob_^8Rm?nq#j%~aAF8zPnUcGWIu$pWD|s#%nx8a z6yoq%v(z=kWw1xGg=YBV%sQ`x{Cp1_J(Z1|xzr@B&Ps{Xa0|%_zlPA%*2=_DbHX^e zK}wiuh&1{`)u#-oPG?8PsVYoK&~l6*HMC8FPPX||P`wyN;6a{{uppVE3{n_wKbz*33|guXoy7_N-#>B|IA8f`Gt*6B;cnWHve*PQk~th2N8HGP zjIjs2tT(%-3aqE%uF$ZVuy28u+6d>h4}dF#vjgQU*iy^LJH(?2SmCO;UjGR}mR+(* ztN|;Q49X{^i$RiklEIt5i@&NEwRn4_jjP*{cxbvm+blBR?ZQ|XFOqm~XLZm^&@dR5 zpkOacX{|AQ^gW|$38x?pnY@G~`QN;c$+eC6lxjB`GSw9slXM(KLf-_Ec8aTu7Ys4R zf^p?@EEY#N@nrUVZTWMyXouBoc+s`!Ifq{$RT)@8aFkHYF?3lux4D?;Ce z40T3adMR5ym5Rqp1ciI8fZP!t8nw)>vIEh176Qs#O9Pqh9Nk&kNljzcR2fQbA&}!N zfva1e6DLW-TVcTbaAr4aB1aq<9#RUg_S-7FP0-AzBBBJ9Zg*s8tUqko(+V?}l8IUx zOY(VL>XA9E;1XFgwGBS+_x@9r{proBJ*pc8XO$$bt?oAFRuKS;=5`VO_P10*>p)uip6dtgJtIE|e!~9U4)ky+hk_`6rczgyy~?7Ud*_;zWJwfFU8Bd-^q zWBII$dqX~J4Nu*FrUPE~zwu9`onC$W`F~)~#j^t+wgY~Z`up$OK5TtYVb2bHy90S= zcD;r5C7qj5ZX~bQ_dG$mb>_a28&n3*A8u6lfy1@;wL}#vpoB!qQwlJL>ag{;*tvJ= z10%1wWic*>{(JhRUk&V)+C;m~E(%d@8Ai3_Ad+T&hmYKJM)2UX-4RQTUHq3%yBVjM zvB$XO&&_r**UU#TsfXM08neJcjS^$8%hW0axk3)VL0CaU>zE*K`)iY|>8A5)Fz6C_ zBxO-o=2=hO^hyTEx{es==^2(REzW~Ap_bA_h+F{RM2u`UG^H5}#7nF`uxChUcvI&4 zBBGEr5hjgwR2f~0N@5+F#!-*RX7dtgAP#_`)3pNs~$#g)G&cA%%751z181tAQNck$t0m0MkFFHh6R0aeg^ z*w)tpeIs-EK~#IwDb>&CvO>!O?d`l82xXe{JB#yLf#+achXu>BLE7simN{$qCQ3^a 
zH;VNQ26u|eC}Y=wHYPJJV>%}mA`dc}DXSJBI03jyLg(g@L*i6tsL3W1nA_E8zx(Ym zmB}K82@|Sv4ABZCPuHF6DUy5)QldsHr6@Z#>6(hUFU3J^SzaMVIh75b1(Y)LJmO=u zLjl(|Qdg2MHF$FQ7v+Y1h3+Wqp25C`yEP{i_lxr=BG&5(IF?fI+Zr zl64l76~r5Bt$~0#da5w_jCGf3j!DAk_KJaM#uRo$Amg^V!P3Ruff-n{rV0K27&%Tw zgW1559d=_Y_=rLXzw+~zobNSm@T9{V9>6@@)MX)YV?LIR1~-*j!`Ky%t7P4pVZdXV z>$WrvMJUg%d379I_&Cj~twUyy+nJ^FK zlwr&-*3H;iSFiYhS?st|-q-Z(jE0{YZp`h>U_DK3T|<+k9h&}YC9_nI&9l+#!k?t% zrNdnR*RWNbZH!QPK2-w#JkDE@B@52))_XQ-gPaL;!DW;+6e?SJwGGrmGc*bVL@La2 zuYWO=%FEzVJzf;X?7(+A@cc&FcN+BDx_Jln93Qmx})zw6` z@PlZNqiu+dCoHpGNy$4amt+}1Qn}B_>gFa zfrYNuKZG;X8=!`okBV)FM~Ib?W7dO!I%*WPR(KBW*< z4RRAvVq-_9(M6SqcDXNjlbSFSl3~H_8i%y8U_5fBfE!HO;?bAw3M(hB8b|&~0EuOn zJ)p$qJ8+ZCGnM31uYF!kp(0EEmhCLaM!H1dDr^PhI?@03ln4Sz2x()xsGTqgyxw?W zsaJC%FLLd(jf`QPSK5?(yJmvo!7!)G$8&1B$t8-yltkGH7tjN9^+ZEA71?bpY;8|r zXHA%Ow{L7iMH`Y!S9d0l5uRPQ*Qf%Bb!Ov1GMt?vUR30a+$)z2usU2>Oi5C7n!3o7 zI2qQVrnN(lH%53e1m8ojb@bKm(uftLVT0kkP|e1O383F@oHAn^#1uZ9t=RrAT>)tD zG-eUdh^#O(DZ!OgL??1b$=Iy`c;*an2q2tUl(~53mQhZEuM4JBkjRO`Q*))dFFEXK z(}kF%_42yS^`4ohOr*mmEw+v7f5I1lL?;<4#q@$Qlj^i=-E;I9+*yrRC&x%|SFTrZzrxX5Q|R|lToNV|IRT%8W| zZPVF47aytvZ@ihv3-|B&+Wo5P-|f37e z2Q%@(^1(#D!$HaY$bchxLf$@Hi`wIH!OyN44tQN>jQ}cWrtBDj9b^|n((*10P*qmD zj^m}#50-4MZ`f`5k)3hJRZlg%5LQ@{#U4PBWAyW#p^^`jSz5xR$fn;lX)P+Odf*8( z_Dowu41t0sub=FzO(36T^PR|(z4BrK(0;@UYXA*&qD||=J^fKL}8EOQt4H zg?%5pI7qG``3Rtxu=VMzl&lM_FkEyG;L$NPuJQtSE(-#?F6l{u+=QtcE*V9wxF+!x zp$bC65Pd_&ISNa%%ovw{1PF)`*D}?nB6#v>1~4y10y7fWMZLAiHt<-osvT3c2I1n) zJ4jO+a?tHapBP&PdHK#3QBuL^j}1w^r)9Em;w>i$v))f2GKpv}QCvs0tCc-c=sAs+eO7U7OQcFP8_H=^u-RXm@8^Rz}r~0tb|IiK#{z zuw;9bgKDCM#VeqZUgXWa0GUY|7V3g;2&*|jOwL`1)`Yg>K!3YbGd@<>XDqhJA-=&t zMt>g$h5?ec!+A6)s=7yDsy&cez6#rr>3L|eW-=AM#b>7b4rtT?DI9wv`mB)&RWCFSi>a1 zHM@wKHBbXE0V6><5^`5qJ)%Gyd0A%w-dV#KPgDUzPtu+gsQNO@D?rnuLroF|(X&u0 z83ua%`|4p*Rfix_C(D2R>Jw)z9K%W}aGFYsc1)A%^{J_2YayL1=*gpe!5r8i$p$zm z@NQ0p-J+<2>T%kZwL-dCtd?dkeir_w!pbN^?KDA|*-r9=hS(N?K>@1pW|Y_&p8Pqo zHD2IV*ANtnJj{o)A&@s4Nt%Ymtiv|>_xd+=CX_`RXVyi4ZViTq?-gy|Y+lK!u(g@u 
zsA!#PK@nhZJXmfpX|k`p@nK;ZyfCsEOJu1U-u*`TyEkv|e>^GD?^$RRdRxer6Z%wl z8Vl`l$!@)xtCmi5^hJ`lSBE?beHmkA=8d za97FK00Ei&KxFPV`#*UYPThJ#w#92*o<7OtzqpeBdaK@!zffOgW1 zkSdLDz{XvA0%W7d&%uFJ@x430=Iflp(Gz z;ysH}p+WC0Fr4gicoYCoSoN2HIV9RCzZG?x00j~#8pu_?&B zgbm#J63D7zO0{}Ka_(a0G#ovWU-89-L6VkJZ}ku_MxyCM0KM>vhrTegS5NTend|>l zC)>D!_rY1qSPgR$p^!0iyxra{4e?CEDk<73*$9`G;^ej}gA)O7NB6EL<7R*1Z{Eza zXg&m>vwB9A>{(ZKcvw$ojsQSa+eBwB(vaFxUDY#FYMVyIkkofyw8T*b-NcEVXKrIN z=V7j39tSV~g4Z+KBrh$tpABaz19K=d`@@brwaUW~#{7z_K*+J_`Tye60-OSe7OXEv=FE||G|c2>`3 z(H@qSiZ_{3pn4}MMoveg@LN;oQy74r;RJ`cGfT5nlwyln> zj%}-B+qRuNv2EM7ZQJZ59q)Yahx)7b=~_o?Ro!*Xd5@MxisE(m=SGV3OQ;i;;}ZC9 z9ocZ4iI;iEJSPsoBQ{w<8$xXgknfMTMq_?0ZDJDNEhuK%Na|;-i!!T zOzkgD@w0#rd^9#eCGh)gTDaEXxKIsYsF-DNA)L5&SRAtm?3_kEip()=mWs}=tNaJG54bs=fx6*bp1Up$X`O414vKRi_^;Gt zw7Oqse^;DXw&vZstVW==OqXA<&EI^R2@VqN2|Eg_==^0M8{)?WU`Jx-!goL!nPk{0 zzqb(nAaGG%Fk|+-i7#+7LGrRQ<0?C^YMQz7gSDvL;F}}KqhD#qB_7*l1Z4?YR<_9? z%>IXi6ZTBX9HZzY+RpI$fv@v={cD8^4$r=$iHf$VPTfxgJ&uBle37-KEzK5>l*b(t z2V)vd#4$=dr*Ke!aVwip`fk9*CaC9_yi=rrQ)fL;k34#|ZKDKOQ+l)6{2f+8lRq@H z#|}7|ZdaErmzjYzXgZ)?cYqFXZ>n0MdN=GfK{SF>yp?wK6he{z%Wo(A~@Tou0NTJ%J32q zh{kB*F|)2_X8Yb$qpm>dh&U;2v6C>TNh}U(&hUO0DXiDb?u{V#bAtsJCLjK8thZ+_ zzPsz*dccnlK?~CN)~@KYsl^xJz|I@~EA-N7>DffJ$NSfl3GAnw!7RUb>F-`ZCnl&{ z#OckD3_rU@bqc0*f^Dell>*_$9pF|GqFH;OgXQCp_nUSUQzLj~NSkADisSzFs&pb^hH zZutbV)mEpf5r~m;o>ly{Eg^LJ>BM%*3o_2VvREUY|7d^%k0>y{d`Uqm2Oh6ujzF zn+<)@A@%{uWVFO@_li(-T3rF_rZQ~qADg$`YcepG=uI zJ~EE+p|`nU6j3sdC&oRahCu*DzIlVlUfP6<*XNn3kD4<29Tees9{Rvbm;ebHVLrys zn8S?7<=y>-%Mz=BZf#%%OW5S_YS9p)5Eb$8*89sxOYSFaW*sJv3!N|u6tmD~vY@uW zn{>!hy`>E}8vf2=_tRzzA%y08^ug_9&s#gP)Tkx6hq zozy^6um2D?sNpV(;wpY5u{k1B`vz8$LR{#eW0c+ebC+ApN!}de%x#KVt>B10=tC;T zh=CD|!6%4=eTfPC@-Xf;Rp=v{P03`SE{3;_m zB7dS}t!R=OE9S3=?34QH1Pw@!<5|NsxsG8xswm?vp;*%g+t(t0f{A%SO3zf zD%lU4(+JT#sv5DyO+zuMGn|{ zMyiLyW7W0GL79{t3Jpfoc~&cHc|v)y;bYItBGt0pgCTwUGVbe zx_8~T8PE!92ic?O67q_=HZH<~fa6p@{gCv#l>%|!Ip&nQ5K=Lq=*UgJI=$7gHvPRm+yRQW z*0Uu95si4Tj0Yz7@D~!=)?=fJ4jDDP6r4w&^U$M-wedlUA5K9PA+rLSnSR_@F0s5} 
z0_IM|wP0&iUEw49n&SRViFDb=M;S9SAvtuj0^ebrv_N82NYFxH`Dtud{s*CiAm)G> zd=UeGvCefET++-LCEo0#!`)p|EFDm#K5HZL1z?T_Ca7@=HmvjztB{sFw({h-3<(u| zGQw*=}vj>TzRG$3>iyP(yi=j zTosRa6^-*BfaV4X#BoVHLyEEW}~Ub!$$;Jp=x(1^LNXFSlURv z4ptU41<7@+@Ak8%m4sw^zEcr~C@=^E()XRo)~IqEM*>n62~v*x^We&x`fQTGyw zSy;8%&;PxB$ux_H+Fv&FQlD^{q1Q2&A*OK_)*ZfDPWieFim%)+d-96QX}aDViqyPb zrr2U`a^sNG%s2M0NytKB0uhYqm_=1dX3)A~+{LN&7_-anf$c?m91Ty69sY?!e|d#m zC6TS?7U&#$h$ecVm}K6$UQ|O{icE$ra8r_U0C$U3r+)YCavpfvf=Hi5^NY_u%?Oa3eT{y;uj0h zZ|>31x-KqyP)>x=*M74}gIKY4sL?SY%IzMf>-&zD$WR{Y< zT<4pUg}9hd>$H+V>toBl<4}Y#8$OW9Yn4=a`(MzI^UXqtJ`AZ>Qi33r)45gK?qv`5 z!~*qgMUd)VBoKT`Z|fq*X%K;gX9^2?B?DNbva#TG zA?xEoSsRR*DnVlc)ul@U`$|S>f~_7aD8P*DevFtFHl2j$iho4E^U^?QO5A5Nn1%dy zGg*XyHM68(^W2+nSPn2yZ}aFvc`fjhM5DhcU~-I2Op&ntGe0W?oJ$o{Gzp+qj?Gp1!jCq*eW-2{_h&(2-O=`J@0=ny3re*WUxixq1=%tGh-OJhsVAQ(@h z8KnYC6o`#zgFzi5RP&W6y~d)->ku|92fXkKq2`u7@EX#6Ml3VGqAIs5oRq_!(uNb& z*BB{z{djAWU;!g39mX_dHQBnT{^>JIkTRgM%I6wnK^kFuN=5yely?$tGpj8j89b)+ zx|bW%-(gaXnpJ!tW9|;ou<2e=P3b=@6CtuCVV0$PV4Et_Z$q@Wf*-fFjiJ4(m(VjT zlxcGBre?w>1^kjpn^TF~M>n$@5Y^ueTg1aPD*(34yt=)9!`YwF7t7vGo~*D@d5alw zWj#kTWs5;w&gluK21$XjiQOvZy{;?M;vUIvR<>=*SMd9{f%e!lf`xLe>a-P7YXQ!c0#@EkzKdx* za&qyPjxKA!fve`C4K#&t zn0sk|P%hP@yb^#*TlJyuX$Zc41ua;f-!KD?Q=B{|T!b-y3jzOi;q0Y56we$Zt==mg zr^Lp$fi)oshRBWF!t8Wf5}F^$EMKYaBl+pGt5IKn{Za3FurNnblwaet&QjxG98vxsJ_X*!f7-rDP`Y~NX zH-P~!C7Z#DV$wuf0%Y4ju{%#!G#>P~UOp=+7^1{eco;n#l3d3peertybs0TbLOU& z=Wapus1s=Gsso0W_vp!r(Zy*G<*i;ni4hf*0@tmXaEv^cbMdX%3m0%8bH6DjJu}z6 zVKP9Ssl+!u3&4iPqY|EcOHtjZPU@d8{aTSQT864EDoniD*}-V5p$DUxo<{je4X#jh z=f%75AQ4?V1m!w*6f@saf)x&w=)Hzp4Ee^;~QcyLzcbMb-; zM2}kh*ibtWFfD^}H(;~GL+rRwGxmKehCT%iT@Oskeo#9qys2a z!4c06LKprIjQ&`#yeT*bzg~R zK)=xu=A^{CNr;j~+vfTbayMt%Ew4Nu;sNGGzMS0~o zBZhzh_b+Aj0X4>xCjD-xGeEO*PZ`=uYx%lPT;xW7T<*3y8;b+g`W*qgox-ZUFQZ^x zUp;vXV!&D|TeAgG&p(&UBTy?A?4b>!?t3*RxMZ1JM3e4*LG|KX0^~|vQPp+%;1CcE zGH_`5S6W)d?TN8fN9n^MSS@E|Jjl#ZSp7?9%jsiV3mBAC*!NBz+9lo&Zh4slA*a>S ziX+)n#ybVuNUJr+jKPLc2~%e{j0o2eR361V58X}R+iFwxg}$Uqa&LLap1t>4vs1Ov 
zaTc~GMfD!se;`Fq^1g}rf>K5CWy?~ThH)%21%xSugzM3=^rALt3anIgOXT(uta|Qj zYj-+OH%Be9a}vNQy#SFYErqUWG@e6S@k$bXIyN*|Xei)A&{V8spBFB}+oSDAjIwhM ztX;etAiXgd#|Uc>KXVJBaHRbc=53AzcC5Ijn1c~BYW{F7=!g;iClbqp+zJXB=mO7( zz3h}zc+B6uFCgRyh)e{wR=LxwmqUS)JJ7V z!iZX}?lzo}Re9WGXqn!>cKbA!`*T7_iSZbeVm=JYyAkm3)Wj<84;#xm(G>*~B9Gh= zxpSw^!SAO$0U){*5fhI3aoV2`Hao#J3p#egZ}&7`Gjd<^TrwC96h zLbV?s@3!5~T$!K;>{5oF=WpSNCIuDxARD#c8>nqgIu!&Q?wqXa(I)3|1LE(x5%n+0 zToDx%YlvEx{7JqcZOtbH17fL22UaQCGsXWee(y>^t7z{^`s(+8m+k_;|Eop+gti^X z6&ZLZyb>6X`DbHt)Y9`4vt)BV`D}I}C~xFsdT8KB@RD`UIdHABOz^>Z?UeY)TIz>b zrAtO`HY>s>elQ7AN~yl1tYXpi^fw{FfC0jB#$t7eB84KBOATW(5%m@gyT^^ z+~z(`w~>)!trM5pv1fNB@B%l~C^w0;FsCjiO!ipD+AaY$y(4KK1sXmEh3UDNY>y|S z-Nv)P*-#3MIol?G{K&D%z$-|uPE(b{x(LQ9YHrVFpZ#YG>T=^gm?f^pbjf{wL?;jW z`M{3{42AICF;`sd&haduUB972ea*jJ>#uWI7QibL~gSdFbaW8~YRhPVaHP>+q{ZJdv9NSrGSfKfu1k}gC z?L3N3zeHbH9+K1EV`;VcH|ZHM*R&KKb)a>QGAB&dWd_qRF} zjOQXlmH}L8rXhFzM#T9WYsU=!*QU!v$}WT~Nhd-xk1Yls2jz*8&<)ak6M)cili7xT z7@lXk-;cD1Wr5QZ93`F;AEM)+wCN$@iM;hu`EwQYd7|>?jfgO^?~fsJES=x^0UxkU zH>tD~vh5JZmx7>`qlIKovIF6)hxUBV_2vLFz~y7;hihGjHd({O?B}KJ1(&@^OC*J3eC*MH&5Y(dLh9#L_5L9l`0lZK{p#Kz1&5N zJSL}aNGRiLVz0wbR%rgdN2D;Y%0jynZ-e8T0BKVC$$5`@OY76H0@76%=DJ;R0gdQL zC@h+M$Z=$JiNoOB11c$k7C^^1#nh-X+4FmPVmfik-Om>-Q)h#(B(<_l=(jDybJ*LO zcek(JPn?$IK<^TjGgTx;doJ1yh3kP!ykmvHn)TM(eK6>99;#`{N6D=+EU7zw{+fa$ zmk%dVRNO~rbj|v=4)F^t4w|mm`7)8DXGJtwid#8vTw@J;Tg2lcz`O-xXoL8=(u48c zVrCg^ed!7fh@dI2chaySx}HE^yiqj6y$Lxsx2VTi5D{7BRW>$swbnIz7>*`A_n;b? 
zsBA5UMz1B%hME|RDW$P8{cuX!L$WqmnABRSg$rPP@R<$O7Cj!%7`4_3S=BbNOGQ(~ zJ1Gl^tqw8!?Z4rL(P5vttplS;hqr)+@#k{&g=|tc6l5&Xa!uMQVGR3T_hEOG1xFY< zq9w|Fp}p=H7&X%F3=(nLleIC;EG|BXZsg9~E#sGRD&*4es*G3s)jlwRIyeW1YehIq zMno|+P`6VR7Cn%#F9@A5#p_+V6geFpi%ii(cT6J;6b%dWs$Z7-manPti@H@B2k#T6 zWV$lUAk3f2ElUt6#7DA@AZLp4qnAWD{PYEAGTR?5o>(&B*-JK za|s@Iq7-mA$8LRM7y5vW+7MeaBuS7AuUR8A)%nJ~3OOlAuH|0OFQM2ZO`aG+BIo@@qpip`+ z)_^TAD1DR4`OSo$)XX7MkShtfR`5;6r9n&Ba9uXK5@>q)V;K5Vk*IUtfR~3~ZhO+w zeu<+-cc?Fy|8tmc7XspbJ@!h62vDB?oOgj!9}m^;|1J8zcLK7gPS#pVn-96ZyyI7u zkFqGZemTyJVRIYJzVJnBCCq|z>Ls33iYp;dm>dkjmiU3U?Y;p`%r|g7*qcsqC6ma? z*44K0wZX6;D(LR<*^I0;B6-c*fhh7vBmJ?V>-2G6%L10<`BxQ+OpK_;ZrdM^ z4M)}7G09oi2;~d~muZi8H<6(EY?v+{1XP%^n5(e3B=(!8xem63n@!=SY~BX16vhUj zXo;RPi-XyBL@aC$O`l3ComzyImEVfSff}6iM~J(TV16~%kfb68m!P0G?8Hk&0wT7W zAMac4hKG-E$kVZop7&59H|G-i^3*q&BI;dH9H6H_G31##N^$ZjI#1|FEG61c@SH$4 z+RH*=GNXHx(0iFj5@(BUsiIt`jF0K`8P`6rUn}!rAGu`KSz6{-swk>!aDhFvFw}W2 za=}D`&ttv}=+T^_i6*6XTuR$Pkl{Sn6rTT;dDjF+Q05+x;iz{f%LPZMV!3y-U0}~u zTCF_u&c?w3)UypvN~0}^vqJW7xif?!14=W(Cw6ajJ?7)?=^KrRX>DO-q%OwYNXF)O zIBvjzGsdh~DcMUP{!}>IrR&0+1=PBVp^Q$YP|GW^IV1)IwdO0nQ`G+X^|*;+JEC?T zG@!v27@1Dq%t2KdknX8GMn4qB5|Mq)W#|3OB52&lE8nj|u?T7Hf#4Z4geon{LG2sa zC<<95zJ!v{{{nJ)8Ha_kX_&kuPHZxm430yO<#y@%2N1DpKx`edj`|_aL9QQE!*0NL zt#k!QmKs(;4e|KOykAj#k@Kqt-W3= zt;Z_+K*XNFXNsLj)LPO8xENX@-@ZF%NPbbwgdT&k@M@CA6-(BDYkK>m3(|HW#{8m5 zi<{qe@dXLTHJ)y_dV`n(b>jPe7c<{hs$aF`zr5-1)Bnr3-84;S&4%}9C=U|6>vnIP zs?oE@9qz4|2>RL!P`ToE*`2*d?eILj>sTOiiq*YRvDiNHjVocn-xn$fQ z15{{Z|LqOQsRlG*-;?5F>s`v32gJE=s?1N0m%8NGHMH+8wH9rQC%_guuD zvLuHFsUUmF($%}XPtYxzPKAN+RKg0{2LVDvL3s>g+07&~)}IYl(`I_j$8s0uGwE*M zmf!2;;;NBVq0aq{HQ({1N?VeZK)B>_$fc2;r2d6(SXG2MU+?jyc+d#Qu6?+Qr{0iG zt#h|LYM=vE9%D8UF%{o$-ekv-HzTtQO2=zq3^Vby0_6bOHe~_Y{HYz~%_M|U-2{I<*c&{@qBdQMp&zDo$z1=C)I$&7B4c{n4m{@0%#~m$N8w&~27r*xODlIipp}K@(8>3h7|Ihn$3Qyqerv z`H6MQAz58j=Zze?KOP2H$Xo*tEXIZh&k4Xp0n1P+;P13}D|NF&JAshM(=@HmF02Z% z9F^_cU~8MRa^)XaiLx4hkUWg7>L|r4FnLcpAvm2dEN#NKGrJ z=4h2jtv6njK)S-7tf~zYbDVI3(+n&O#$JcW9^}1R?AQ-{HWH`I0d#E>OHTO#i 
zhfWR9VU~a#Yo&Ih3?A9RJ}nULsbAK}9Z>c)sabFES~M5;8dMNzN3U6k*2abH!8&*j{__IuxC3@iVweI4 z(92CL_@a@Y1~;|A#F++LkbT|FNI)iNBDT%t`!}lEteU{<72KMls@YvZq4#H!e>sC9 z|9tR%e|#^vh8N9)pY=TVaqH{Rm~in33ML$LYPaI^%K_+IdcL&Kb(Qt8L8HvFOwgm8 zdJG7msMtA`kX?GR_-ESaowVkgrhXD!4Ax6Z-0>R4!sqHp_MIQtVZtGD{gNc_WFpXV zU$=&gvf@Rn-S-JoR{w|W|0S>QmV(}LEcDEFdnWh%FVxK-9aSbT`MahQZUdi|ILt(6 z_l+IUyiI2^2@vv~j-0V$(&TV1X}SG-563;YxsA_2(0+?w;&M?7>}gzOY4z>tVh(CeiTfdnI@%#rcnV3Mhr+Nm^wMS)0!GPqsRuws(JU)J3V zl3wvpqW$`i6l(L=D5Lz1tXhoJ;Og-u)F9ToX3T}}f~&FgCnT$K&C=^&4yOsy!h&y= zsz3X;+q!{$qN`m*6!3{z==^of{vugJ-KJn+=*P_AKO>D)w zejuIRE`9i=fu3=R3f7c}j<&RB;3Jb#!eZNs{MUZa4&5(RS#4{R8ApLlU z)7T@_3r2-+dN120a+~i6qn7y<86eKHBSE6sqA!=io3QW_j&fcNWf*^hbMMrLIR3X}4vGv8Nn0@=}O@OMH#M@n8 zuir>}QlA9IU*dYKOEt zi501Xdvjv+PN$fpuvVc?>b_$hA?sj}GExa3>LP_w&g9@EJ@)B1U|{GAcBdCwt8u`| zEn5aXhFSL)cQ4*(1hL$n87up zefq06%0F2N?>>mpmET9}bgQJXM6x;AO%sybb!(rbd@>=P~sDUaAv3eY-DfJA-2N{^Z<1~*w| z>X3l%&i0yYEP%D5mk8c57151S8^smX@IJ|4$WC%Qd9ordd;fURTS?IB-Qwsp9v@Z) zq*0!*L@fE#rFr4U5tQME;GD!tIuwiGutjkq zJDU-zCV#sVvuf9I!z4>NOXw0(cINB}Q#!g&NmJELbtN9ea_7>eV8G{*b}CpfC$x!%SealS{*ZLuXa8@u+gh#sGe#33|F*#MnL#qnx)@0SVS#kq8t)z#R3qYdr`Lq+HtHUc7P&C)z@ zF88n^T~p+{5`oB)v^WqHQ)&)!YqHqkUuh^rvSDD_LQ#6pXw(18u?44sNO!L}<%P!| zs9b_`0@2?d6j@Q4^-nCatjcl|4*lcW?$ts?rRvRyg&P}oaM1W>z;0FL(-yQjh)alOK*GCSWPkB!$SGZKy}|VB3+|>oo_(ma|+~L z!bXYrPlEpjoHiM>*A;CN?~l{|6cwtaF}i%?yKbjhei@5?l_WWwbeWq$r>5>fz6%`x{B*c&!?>-c>jS znfz%ff2J3KGG|6VfN1?DLm_l>-ZdqxYqMNSBiW?hp7;Srxb1)(&`mDKr4kK=b*0pI z{MLc50V9VJTGwRq(I2Oal^7j41(K~F1hQ7I1f4z6$iV=IG9gLO_kmGc{!oR9eTps? 
z3Z6-14b&J{KppG?gcgE&M~)SoufNWWxXjr{D3>YO0e0nsS)VUb7$*X>85c_6fBTuw z2u{G(;aU3sRPCSKcR#DT-?tmp@BiC0w}Vgw$`?KS&ynm>{DKR6rhNu=kLwU!CTZ~` zi+KNxQ{8@b9_bs{iGUr)Y$|$m5aeWGIA<3Bg2VXAfC>eoU>Gv6nJ$8y+oO+pZD(aT zGR$uJPQC*OLosv;e8}x*xH}2_yMM@UbC`*+YqQ048x_Zq;mT8Mh!VFY3coGP77Q$z zyTZd%csV}F=7}h5NcDQA(dz!>V9d@8y_H@PuQb<_sFvCBH#Ri3ip@FJ>mW+Ld-#vk zEjJA^f}6zpo;3z;bKt`n3aJ5m_=|%mUTmlndAp3nv`NLjmgQ?>8vP#=`W7DrzTpLr zE98DCG9P?3&RZzuWB5<#S8g<~(O=e1C;i`dAW$(C;({p-lE7Tey3RaHt*Q)-tBafFROxnuxABv0COtAb| zs0FXVW`Gq|Gm((>6Dw`vF2`yK7TVKsgGIfV@rO=3e$wM4Aa|CYF(0A2PV�&UiY* zGH?~KJ7^E}$2Zp4-aZTD3HZ#{(9}dVrqmFeQLiJG)P=w@o9BIP%h1*qXNu{75yI(CJ{+g3mF%59G!tZl@(p!{89f>Q zmtht&M$LSY4j(OI`#dro5)?cd$n<>+D2yXYA%=|I8Hj1EOBWvD5CzIx_@6E^COK~Q zZ_J%@h?K)QQ`Lm(%ohE$M1>lgL-ODcSGc~ik#Yajl^j*K`MLViH`lPU^0KYS9y6D% z3YU2C5|j2mR@{+wb6%!5Pau6rogLuK=MADFSLs&@-tSzdtd(q=XAc>Dw`}ukVH6|Y zufsJUe?b?chN(0Zb*IRF!pZFfUusDV@_*t2#WpoaFATML&0_vBzZr&HQ3Myg#EX`; zYFn2Ivb_1b0Uo>xX&{;qiJhw(+zv->y#k9>DFYrh zZ!FD*31rOg0)T(D+ZFGW*b4UdH(oAi5#``-q5niQ%Gjp@b2tNKjy#pl;NETgAD&(J zG9SbxHv}X<6`yvv|NHendLH}SXq)$B0dv3O0PON*}pB zCJUQ_&umT9^C@EdiZg99tHd%Hekjq<8HeV*r8^B$4A3bL8m>SzNJlRv;(#&VJoLS= zQ)rq}FG-*<+}%eGQ-j-i@>Pl{sHOR_$frJ`oO8d`BE4evfk%yqyOFWB5Y3QSBZ+2i zzW9pAOZii{Nh=v*vV==D9QCMyBl2q&N?!)7mIsdmIGe{U zmg5p;_Kj2LB-UnF&N{Nk}}Br~yI$(tTLHe^MJ?Wyww{lX{$u zkI%!BsA!m3cp%B+D}91?^SV)vvVX%cIvS0w(ReV&jdF!Qw-{WWVI6BGJ{Iow#Dz8r zTK_O@G_-Mf`2$RZP3v(Ar3Ei876i(-DOh&+)Q;i}d({?uR(^C{E9%a4abVysObgi3Cc@X*l)Qzwrk#XFWA$A7Zxl?L%1M9<QV^&rO6Vs#IMHCIeay}G zzvZ(3^x?(->BA?pJzf6OhyQZkdn5hq_4g(IhQ~iMs2V*XSa!7LF=LPO&pMqcuO)7T zy17>x1mv4@vrzk{0MErGU$Rb?+Qb3w!4l2Q^hX+_C+Xs1O)*w(K=ZJAi8krbD#EBv zG~4J3os@6>tvM>`iKzYjezR>_G#I8tI8^tD+2 zc<01ZWuNgwj-$me5Db0^H7G>b*-es|q?>8fedbCv-Lh}Y^Fn&zHv<0b1gH{){zyt6 zKp~NjZcUT2{%2EIoi|T|i)jsF+5^N5Qd5#Gzvx)#8ig)!1U{MA=3P&)h=T_)eKw+X zqKYs{2P?7g4CRHkKdH5j@s*yM$@ktcMRd@|_A~ZxcasXC++_ z+=pKJqY&kw@W*5F(rmY}`?sbecThB4@{F_H|Ba(SVCOIU$15hoD;I=R*mmh{d9Vye zcTJX@fNZexE3F#Mm%1b+%WJMv8Eb~@8ZX`lX5$+al+-b?Bj0*_eU|WOQn13($7^?A 
zv`CbPPv)BNA#q=nVG;F+h04!RknHHR542wT+BCUcV1jWJd@(UbDB{1I>reJ)^xH5& zos8k)0tsd41-9t=pI@0fu);CffWi{FuWRj=E)Ia{*R%}j0>zDD&mV}^nzUJdo7qV@R zipxbJv=NNw&g>mAg_jz`C*(9$QkuRKdRr>al$}eA-wtvs(Wbo#z>&3QPc={O=>o#; zgvVxKX-aUfx}mJB4yGm*{SWR_{u~XCEW8ZQ;~6iax?iEFSsuUw3mTdI#Ivdiz_l{D zGMqW8jV$RprfdZDGKEU;DN%TUC3#?Z>rf{Vy&tK10H5u{Z~}brRCrIU({-@NLyGVr zfyba$!biec#t=v|QCF+rb3Sm2kma&V-h&IcC_%=`T!%)U1P32mrVPG~)` z!b9~sh`nSbh7uX8>d!nMXJ=)eklnI!+goGS-+g_02o8ZQ|J(u|{(YN) z^;nYrlb518d@rQY4?{>?g59!Al9(0?;5qF-l?LP55qs8yEk!uj8K(j@>&R$uD;kox zF{C90(8Pj_69SM@c<8RF1a*OvyGwnuzL~K?%;aX=4oSk_Xz;FCsc1=%XF6x}XeUY< zCMtqpq6I$7{O|P?NS#KY8FL?dC0h-w8LybVzr}rpD)}aZxjTqq1u;(TZ^&~_uyINT zkczH`k8tdNhO&F4|0ZWl>!DNf&cM_Qalo;k?cn!(2P{LnJ1h8bwiEq=ua3vaK+!>o zSt?oz)q(TsyW)YZB#x0YI6~JMJ2qQHiL_l%<=z1i>je)bt9LR}yLe`_SFLy8)StcE zK>F%XRN~@=yIj5yCb07ovo90lD1(DqQ+e5;amdt=Yt{BefuX3Br@#w%gsU=y2f%kE zd5{r}+t{*HTA!{X7a{aZrsIaSU|d(%aoCG?`l#9mlF*fbnyLB|F z7kFk(A2Rq2Cr_HlOcyF7!Cb)zytiY~ngu0c;(s0mHazhObT>=Y>&-Z%0Iv8m2-ML6 zRhTZOjJS;NT)Zn@`bM?2=bnepnYt0i1cHUAKiz-bkcmG5B`vfF;!Y*3QX|~16hvK> zoY&3HqbifTv@8;iyX4F*kVC!^Uq_P9xf~0e@Rq{85!#}Y4t^<13hup;E z7e5zu3_sG0{|Kgxpk|Kdn2d|7qKtJG62hRME+uPl#2-T{!xMi+LKGk9pTx?U9DO^r z6VjlDRrkBg*vN>ma$|=x;a($&3JHTdFhJDg8W2k}OAod%?r!{<9kCDD#bb)=z%nz_W3*9~4e{dE%cX(#ehewKBk)O6`&IxG=c z-)l9nZSC}!StwPDGoV)yd)L;Vd!zB-iCrikcIiZe82F$Il;g-KxDP#5X7~P;xM3lp zx;%iY*-{+p{e$@y^ zSSzb>n=`~Qsi(v5lH-z$dva{N_#OX^(DiJ$U@yn4KIjmgvD3okYDg9;&a)M58vya3B_!wJs=>*P;Qp*bc^ugV^ zou_y+ngS<5jk`i@K`Y=dAz(5{Z8!ey;e2f!G=Y9+V>M*|`yTOu2-T4+E|XgkG(J z?jF11yb1y}o|{k*iUxe@#eDw3)z;ZWTg+6V!O)&di}BNc_!`A1tVQS!kRk%sD$Ufe zYS!71<`4I&L&WE85}5b^Y8q;NkM+6WX9V$^sdKOCI^WupGH=R z@T&xocDI_=H7VuzNc(TlPI)+1EMxuWW)1)w$NYRQd0^?%upVF9)I7UnH!=o?xArDIIa{S zqeII!(z)OtX`agekE(YHuLSCnKx5msZ9D1MwoYu@wr$&XPHdYUJL$NSPCB_g^WT{} z_jT{*{q3q+tE$!`Exl@8%jGZ4pa@4lS9P)a!TIJWO6h2(V8nQ#&P1OE>@IdC5F}nc zR7dpNH9d&Slbh!+^EDR75vSdylGEFSsw*k$d0?e3)OPd!fGQkvFgLC?=Nl)O$bn%Z zMr9?=hbq2O1d;V->VmHD5CwUcEksl;_uTe_0yBOgM9&=Re7pfROV$Hq>AQ#2&(gIU 
zW{(>zwO***#-~LrZo`6(t<)G5LQY#BV?xuWN(32mc}tB@P*zOV|29CVos;yX-7XLN z+LQYS{*avNSA)S+abEMvo8o|>f+Exv6^HlydPMakspEIyW$TNH z4ql@=C#@?1{zwkAMJ+@7#@2?}^rAX1)sIBTRXvewdEYH}sNYkSf2?}L7ICggsx3N= zDh-!g`~Lp5RW;u9w9N(GvxW?tGHtDV7rhjcvEmPzA?@=iyF?#<$f=u#0>1Qo@ubYWJA{EHnT_V0so+gI6as!UK_I&#BGdA(Vh< zBx9xNK}dOO*3Uw0W%qxV?Fj|XGWfr>jP{jXA_qEQH9%t<#Wj>)INS9ke+Nvw@qUH5X1y_o+G1NU`55W)&%K5g9okKX3x^hUgW|4#k>4f+-_ z;0x_zF~4;qRA6$BkvC#GLcs8wgqvDQF(pBOJ`eUhE8Z3e4;(vuXaIt`BNh5WKw3<7 zGZklN7ZjN~^Sp}awQEdJH^neR)YP~$wff>m=>OrJ%=h3f6Udh_Td-M9uWOZeONX9d z7VGXHv~AK4IkFVmhpVuEQjxChT%^@HUlQ-_v7K{sRb_XPg zFg-wJBanzHN+rrgwfCw`VoaSg+~r@Z-{V8d(IcolaU=q#?yZ?DLJ%dVTz7Wm4!M@!rRRgK9rMf&6&5NO2gq1u;HZ0#OxsfOKEz-2^dqc_3gRkiWJ5}DJj~Vorb*QnVU%^Bx(n^RR>u9 z{8idUk$#-f0eUx|B5$I%k8Fx1wTueD6dUu1=n}M*I)0&7@;j_lIKGl}dA!gwlNULB2Z^B7p*+i>s=!GZYgoUxq!16w4n|?h+ zTmeD%g@ep!MdL$Ug^(En5hGUcCPLb`dtxa&hO+m0zSnvq|Hfq~_WU-` z$gEETy;w=MyE1Q+#ozQ0g5+}YmBDg1{%yoQzPUL84$fgW_AMoOiR)M}61Vj1xB-6? zv+igqG$q)%IlupTk8nUFZ8YqJPx3}VBzdsamN-q9mqF8L9IqaAk=RdhR^bIDGY^jF zwct#@okAC#gui(Sp^K6k=9Lf;`hhpmU|e+o*>>o_1q zy3SrFCyE~wfdBTIY;MYXIDutP2z|22gg>yT zf?%-ZWTGQAs*lvN!0-zQOOA^3tAV_s9le_P=R!B*Q7RD_>$FPt@=$w#{`>*LF(t@< z)7|@bQPBgIzg9W@it#&amE|XG^z~C|s>(q48%Y@XpIsPrBCOLkbf8@OX+{xl+twk| zxa6_81vnZlDI~QhY}d;sF8Py}zFF255F3p*BRukfY3R8=LHs<${MMV~2l=rPA|N68qY4=aKN=6B+&ChGB|Tsh(mbUq@v z3H^|KwBv(pm?EV#QtqQx=%r!5@iVE+BiqYa$hs6%3KJIfh1z{|J;jS$FK^)QV~QI#TkSkrqXUz5LKN#4FSFe5oM1E zZOqicnvY)d@L1dk(P&Ca7{fa-x=x{C8Q)NpWa|=kloRQhZ_z>>1jv{?F&^4hs$d8V ztn6a(!tiVNiiu0OA|YyhXwS_PCXUZ#4E1rwmD)_y5WI|Y+>Y#3K^UjX@h((3sgP*S zXA>ytIaMH-+MvkvBTkG%&Czi?S+=P#X3y0#?K3N1vgOpUH29-oQ}hD^-urk42vKl8 zW|!nEZf%9m1$=_AWlsAbbynDuX^d&RNF%k}Sk9;wuZ`jZ~BRQDuBZIB|eZ(^-M)~!0@^9U)GB@zrynr439@3VQMg}7Q={olHLw9e*n zV6sc`48eaOJVGJBBtK{A#FdQRYFW7NE-0o*U1Lc{Y7Q!!Ps49v;7CE$DlT4*A`BgD+bs&1ShCU8vYVYKJ*3zgyd_f zc5+#8MHYHkN_$GnB(70G4Nd%)mj^Y(AkY6x$JhRwj$a4#+x&Bvukky5Nq=iOj`n`n zf(r{_3W9FlMea=wlsR}~fnIwfrpK)*pUjO_y!b2dU^PQy#|0xJM&zt0) zQ2Hfolw!|8YPBG(TE+Ie&-;`0`H2=hbOARem$FCj6@UYUnL&$$u;<>AGcBt8D-u;W 
ztlsp68Knxj8)Cz!8NRARt*X0K%Y!~Zqller=J1xY38H|G3H0SvVR%Fp)sIYc|2eY2 ztes|Mfhu<>MYk+Y5cF5J+l0v)Y7PaThbet~&O`4*})0*;j0gN zHB&{K)D&v~%KA9PdE|MZ7fon%5~>;+*7&0}{kwr;AX#jgMyICFF}W;nN4^oVM995} zrih?rLm{O%MTEvFCx{8v(8=cP*vV&kZ7iV$r6&X6hsR+owO#Ie^ht20>2G5M^;w6Fa2N=UZy<_{< z;kVwt{wdJNlUmGLtpVI=;Xxk zK7e2jdv7#}Ev%TS<{Fr<;XR87!VzJWRSH~`9*}zM z`49Tw%Y0rN^LhnuZP_GBv9wwieqHw%q+d3PupM+eZ6*`!GFyufbd7P59$k9u!xwi7 zqnBo&7( zG6L@X>Yu_A2Gbs}7qBYuvj67Kc)+Kk?(X(Ge5>v?RmpJE;HH-9w8CV5;8Cw$HFAki zD}9%}q$oeFt{`$zUdr4IfVE6Z?E!&a)+#J@g)#dF9{r$dtK#*IexdL0r-kpKPd}C5 zFjEu8w}=XI$`zs<*YEJl$`HF7d#C>rBm`<7Dy400|Ck?F``+K(+Bicy?`Q8WkD&*6 z8SjVecVU6vA3q_pIKkwc>aWmIa&G&Anj31$tltdGXK?a+Q)^(>(N!X}WFzKgrvS5m zURHWJ2FC@B2Nl!SYkRx44MB>dP%{T@yW_5-|AOSR`_4QMl-a-jpPBaO7C+0k^;_Dz z+Bx;fpv%j@nfKsh#}|L@pU=`Yv#-_nUrhs>o#27`(2rdMN;kwCcf#)F1SzGzc#T~J zP7Z-THw744vs#ZnVT-!$76Fz>-{%~-exjky?)Uy*6*r#-DpqV%X(ROjq+*%hW_(Ff zW@vZ^(gbORYGnx=u!l2~5F>H4E;UnpO@0&ij(->Hkyen6<5!w_RLN$%>!_-6$jB16v;*+Wefn~-!eaX) zJFAYRxJdLW=2oE!6R~mj)oc%Qd5t)_O&9%k%Y8qO6T*SNE17m9$%}Rhi)A=t9lnV< zE0HF%l&V#TE=z06>V;I`RY#pFzc-aKV>Wl|HRcWMc^le-mwKR8&WocTAn)D1iKu%| z{EtxKlz6gT=eC!6R#PbHKtOBHcXQr+TM@9?D?S<~+E6-0j_dI)5%YweR- z@Fdt|qLQ7ZZSu-VL7@G@Wv7n&Ss1nS>I=x~hjZJ!PR)Gqf+EEv@4kL=&9WrHE;((4 z0)9c{Pf8QW+OMkvH|%;(f0sLzXMgr3voy%gCGfuq=i!<hb zC=%9ef7Q!}r7oc*Of=x3@rxhLOIX7((A4Y`Z_S8&cX4{+WZpmcQ9GA_WlUq4C zq%FNCmE7~REHc8!cp;G4qj4#Rwv|=qN$93#VKfBa&c^Aa`Est=S1_G8v+L7#~DLHQtU4d9pI_kdi-AOQlJ-d+&K)V3$-_FdW#xi|M5w>GS^Hi zBdw3)JN=C;lM}$Hsc{EW;^7p^=2o^&3w@)~Wu-B3+u10)tf2Ix#u?q~EAqMQZrjHT zkj3SR%lDx$J0`Dh;bx1O@lFi}s_G-&I(WxM+KTkC2vzs3!xSLER13aVDe_G5z|j{k zcpD!$jjWJH>G=nlF#Gx7nONr<9eYJFBMRYxg@duTa2fryC!9|I|Cm{~F{= zD)@fmliyEn^@;zV1(!E_{{LEU>AXGn1mp?}_;YVH?*He`4w^^pj)(e|=zKk&-Q+7d zU6AoEyLDpFI7e7&H1v@p>w;(4OYEA=RjDtJN-JuTh{4@4sIlu^pxMX^6j00yvWw?@ zdD0S=80;y@{iPoNaexheetP@(iov7)!{vs!qz2jIwZISg$)LmStSi=gRKPcl(J!;3 z)CNsqFdbO&I4^}e?W!c@8K2h}X0zcHXO&P|Fr z>}wo$vz#`=M_gZKi24CEv&ZLF64^{iXB~$+whjkJ8GGh1$;GOyDL7#eE({dg}L*eH!izH367Ot(XZ+O;AaH7RuUH7E8+64_Ep|7(2x2;@H1_N 
z#3@oOp%kz@b2|0PkCkdTJu2gY-8L0d4;Wiw7Hqd@r69C_kcGwkdDBI*h85JQkj7A4 zL(SHJ>B(1|Tn`u5RAGy9R+;pH$Ejl*ma_grmD_ScW*5DJegCQZAr*)YL7XAJ!q6@&(4)=MPPs{>uOQ!4iy4Ws1I>!09vIWcvBGzshROCI*VN1U$h#|HS;Pv*J=@&>#?7D zNy>RZv_Ci`YzTs&vUl9Azp#kikQb#@o0ixYff`nUWBw8 zyLFl%x_Ehl4EB)JX0p;mL=Of>mC5V}PFw4Q-(kdM$(GSkiOfE!xIS4Ohpyt&Skfx_ z#NNet8#i1v152dzDS3hBRl+t|a%#FPDt_^@5LRIAg(M_BCC^x06mPnn!l z|2Q%f9#N3wV5Q zA)5!BmqUqU(jaH`XJ4+~+FdGx&bzQE$AlW_b@alDzCsqwHhc8GV37Rw0|PB9oi<1j zj=ZX^%99$$9!f-`Yp=D2KBcW(x_WRM^%lq#y9)^=+lwK%eA+&QCAM~5be#MPjkFb? zVevSV+*Ndn-mjMq8sr@uM_y#+ijUGAf@b$Uy;e@soM>^ZS8(;W9N+cmX?8+9unG&e ztXW^XRDtxlQRusu@)cFjLuWOemU4A%;PFx>wT(bFRpv%eXv$w5Nd zlga@I9LG=0w)Z%uCjF;>P>V4&vcU`=%U{$ctN+Ruo7vj&bW1=oVI<7h3rlOTEPZdr ztmW)?m3s@q_yOrF_&81FB|y4`_Ryw;u@N1-6<&LXGXbTM?9UG8n8BXrtx&_H7sam_ zomp$-ZYAg$D0M2>TZQBL`u|aw^pO@7}#df<{CZ@7Q;3zbE zL@9qvX%#T?ds{`#S}vbr9oq$x6Z-tDmAb5{TR%xO=7wgsXDSVh7c0qzGgmx3AlptQ z{{verjGkIEwsV{Vli4@aZ@HWET?2UaV+RE7;V-;dxaJ_oM}6LX`K`>ga1(CRNNsLB zCb9YZT<1M}c6=XGX#H9R=AP{Cw3oXH&rnj+O|zRJwowaCUhJt z!fvy#@Qp!a(dO#ik7J#qjx=AHCg)wF$XpTvKX*;K@MQ71S)FzCzDEa19=KFNZp(`dsc3@(!M0O2qt;X531psg-3`hZ1DMVE^D-GyzoeNo3nfkWry?lX(qDX=? z4o_|AYFC&!LVypqx)TjBM1lCUEfQ8d^XdeS`ZeO9*%50Os%e zY@2+zn-NLLfaD`e8Cc}u#==7t_Gxug29ATdZ0a&|3ev2fa9(DcgKxsn-#;r7Fbr5e z*HB95? 
zK@ttuTxuaj%q>dKmfV0-|Gf)-!Fmo_%Yys0*MFI|+p{DmP`VB`eCmpyU_*lRD~>B>*Oh)M zi$;2-e>}QO;ypjPjEk;YDIq1&7?g>43*TO)6UU*S)ZyjfD9>eFZ)vU|1=+YnQaLA> z&B)zmh9gxSP-y`rQ^oR8qd5EbjF_J?WBw*o^LwnTPQUTWpQXav&}#3_|FK&_hL%DE z&ag7VAm6>c#f~@pQNAs9lHaptO>8qC$-VD}M&gcgturUCPKYZyhDQU$Yr+aAW&Go_ zwe*UBVQ9Mi!i6?_+e7jcNVQg}<)Y6^P4GT6LFM+|R1ak|a6K-Zp6s8n)8OZQ!@w`Y z7T(%U+Ee(Av7PZur3*t%z@F9<`}@x?HxkdwpTUFw*!BOreroXFE9aip*K+@x)fy-Q z?NY*H+4|w8Vwq@@oydC5qvT3NQRuo?a|+yAF>N*7&<672xn!|UBj7`87*Y`I*bxxT zvr|uDZWS;PBT69mb6|ug(T^AWu-uR~mi}N(6dsUi7zqI+AQt_3!Cr{Yb$zsoRj~)S z#m*>9GNJAz@``C{^fNb=gWn9<^ylqlw$;BJBt{l2AEHI@bjVMKtbeJgLaGE$yzs$0 z3$M)Si+KrH%Iu$4Q5QZFzA3AR=}D#l#Oj&;lGCx`L$VaB!4fLDDyFYA04+x{NOt1K z-^(52GY@AcHWN|;A(+cC`!26!Q{w$)o7g=?B!8bPG`DXFu}a3(Tzwry%pe|d2bgvG zl%&A(=dbFR4#U|Um1%{2meJ$D$>akTYD{;7^^G@4T(c-GDfj*J&Ji~bKTVT@=;h-@ zmLy5SJd`Q4PUhGN7I>62HK6yDg}Q}E?QuIGhS(e7|!>;BfOcX{m+%%5g5S+<=K>|ZZ9|Y!w5-GgyNS7>*^m|A;;TOLgZoZ*e zA&dCjD&-UH*>9voO@zZE|1X`hwO_2$BJC|>cshQk5|0lgnAGjNVrJ&)MytTo%r_7x zQdo39&=@A9qJvNHa?m_J@=yT`vP~^s)x5_8TQ^N{IV?+#zVG2D;j1(pYh2B;C=1VX zs4%#DYOp;uKQ+~etqD7~jwszbs-cSWam0mm2EiY{NvGQVJVan`$Khb52FA$VJN~G5 ztYeh@fznN4kYybnC|`&N{>;}nw_6TVZ2biXK}3dq!%@2MO&co*A&LVxJf*5=Rx}D3 zVK6MJBodL(^D*n(-t0W?5D88GE2<3dT?wYG;Ej>Ggi|#^#qSKk?SXxf7{Fq~LqXz? 
z>9a#T)hn{>U?PoXx9m(F0E`r z#_lKqpx#EXQJtmkG!n0~fN~U7aJdkD^)*$7vd}XnB{(Y;-{6dNd()q*iz{^!qhy*m zo!xA|;?+fxNO$C0?MlI9;tGuo@kkdVuac@_5c8~A!3c>4&9IhHuubtW4YH2f>gi6H zQMP@-6Oh`2hi%2*wtxQ=4+d}F zo^K6j8J7}F-BXn{t7CSyBF%QZN7|CAy*niL*)luxM3kgmYRNS=~8_DFCr9q&F6k9G?=R^fE8ZeS)M9~W27ZH`o zsJ7ITEnuPbC4E?Lt3{cu%8C71BhZ{c$G=S2MjS&WXOC8@e#4=@GW8*ge8YEgAw|f{ zo#KQlCS|5e`h5K0$AKiqen;!|ZU{XMxRVn&+zYc#^c_CPUW~ zN~_YG4NxSkj^O@9f-Fx?x%(X^^U#cY)=@Ho$AA`z*bueO zBku!8Wh}ZJ(|bi-rQYytALye&feOaA_uVA}mWMKz>h8dewhcqF`^cmatT2*`k=3kB zAw>qO=U!A(5~L~}U`MLg80+9Jn}O~xjSCg`dG~t{v+)y=oP3Z`EUmW?=d6_(*i|pd zGPiXOcXKfj9JbC6JplJJk|K|2x<7~j6b&v8dMX_X{svfs(Sz%3ZU%}hXnAMla}y@?nf_<$1^volVj1}rEoj&@ zO*grA#ugZCCbWFrEkbFHigUrzW@kx+vQu?^Vy?ESc+wGL>gI@H5;rTwXqsclcLTA) zZDpDhcWL}jGSWZr#|?D&StczOjT0_gys4(m5I2f5q5Qb0U646n0cM%_Ev*QEQ1_h}MPb6ebU9N0?9;L1L!hI3`pDh+W z_k+KZ+nqfIgUOp;!PnemzSjS*L$vTxWH2qib?rn>@GK|RsFCS z1zz@q+rn>ZNVv*yejA4eaq%fLWQIbVs^o9$BB~mhx(%YBocyYl5;NesnoiRao90G@ zN_AFpIut8~a{q}I0mIHcK;}pi3Z1_gAtjHpPTh40Zyv%t;YZWG;lVvNO$e5H!iqri z$mLmSWwA?({@kMB)>@Y~6Ff-Rzt+wboOAH!XI zkrWL^U%>*R-g1kYe_4rO!61`0SaOWk&j2kv9kiAZ`Tm_l4p9*Aus3P6p6SYl+|HIf znLz-KA3VYiRu(1G`IzD`wt~84c}Rx0ggtHQe4Dw!wBB(157&T3J$_#Qj`WPYXy=!8 zdsF-B$lS9__B|p*_ks#*s>6DR%xjX3-eRt_I!m+h6p5i$;SJ$z!ESHt+(8-61$cl= z9%LcXZi2SUC|4uGh(5dp^uFJXd-%pK!Ovk|(cwsDmgahsm7Qk~x|t9*&tUmdB{&*B zG<3tT6XS>q0Iw2tY1ZQ(BSo~m+En46lS{L+$_h_ZHV_BeK@)U#Q$uD;86HppU^koV zIIWW%tXVA?>=b6S9{So2Lx0q`Kt*7u5*V?#pA*BK>n89;vgR9_ITc6@18%L)UqrlEr z^G4Yed|o1JaoCr^u)OcIhbJY06h-F{nj8V+s@9q(5Flp4p$&`RwS>ve_1m$PJlWa1 z9?97kO%`{X?jNiF3Mnm63VEI9acU3#{>f|zr>csr$l8jluXIi4c0LbPY6u3%`glmS zuy{i1D4s^gXb4?{AjG-)tE=Ex99z2go-BF>A6qhDRoV`Jh{m!MI@43bEP`AJbRoHL zoMk-|r7$2VC7bDOJtkCa!^Vl?PW;0i4kdKF#yZh=($biU9wDuLs?boWi6#z)YHFjn z=rRl*hI$7?fm*%hmA;R7YGz=QgIc#Xv`Vg_1xM8raS)-XTKxhhtz);FFGqsoQPT)m zh=UE^%f!hmpj#-CvnqJ>j6pw^*u;oXWc|iK)J5~lr!f|B(MAQ?I>5kW!A{e{o3|9- zfE^By5U!`O)3nb-Pj!Vz0)$?C_ihK5K+6l@Q+(TkQwCAI5_G|T&Q+j&gn2#FeM4Vw zZ*f6HyJKe#&g$tMuk?ot#$-|`^&G=kef8kRcgFfh>{h*eOKHpoRJ1l9efedf?u7aw 
zDf2MiS)Qs^+3YGB&`P0ry}mkKgCKw|$zpb6fLSBW-1^_k2TKRV(zc9HTGnF(fDdur z3P+wDLT$3Fsht;s3mykY0fn?C$@Gn~CWu{>6dvH6wT(W3#W0=E zV}z3y;*mYlTly{84(!-Skq(z#PXwC1J9Qcy-n!Z?mZ}tx10dy5pDB>B80I-^dM-T` z5`(LY-#j_RMZqS;IsBHFp#zjyi5U>oIi<$zelz#?2<_4Bqv=Hm-kzA^wSqORZd+qw z(F4`h*+AqBTpkfW=Q$dyw=(M`H+ALW6+Ll3=|v$D9~M6;6TOc5Fb_<+fRiY$6u}{& zb6S$CGIetLaq1kh*HATjICJu%w2Yw0(L~g=Dnwu(Rt-4!0}&kt?y`-!{-MH6W8JCa z8lu<11Ebr=q(ov*@H6B`w~yw{nEsRnhA|cr!4jcZ`XBiIMORe|O}!|33nh1V zi9e-c4l^SlTEK`vsAFJbN0@uHs(FBdh}o!$81uzC5FaC|B`?dVLbube7Kw6lYTIjJ zX>F$kdQhmlle?^IFf@1L?YFhdb~YDt2D&3`>H~Aqo&}`)Bnys8mROX2+XE;LT7ghcQaRM=sSG$+mrgW9{qY zj1x;tB%Vnvh;q8mwFr|d718}w>2|@L1aV|DMZoE6e=s9Od~nb981Jje#cm{hucfPF ziRxSOzI-&grtQ?P3q>&F+#NyMESKWGC0oVuV}%{A&)^%zybb$zcpkH1?2y>bA=T!( z<$ML#f)eBSYl&Q4r7g!>9g^O{Sa2REL`=a^R^jnp)KjXw!77Fc2*3hPUle=~^s}3i z@-a_z(o=6)k7g@bfaWNRmEzrv*b6s_tLkhyhoP5tJ5lrQLQgBfmEiw*|oE!2vIRMWB>O2E>tDQGI`wY&{7&DL@Pb}vCn*D-doh?Juq`U?U*%iC_}L&l(%0yTJi{CL&-U54*Q?p`g2FQHmOnsSH3UD8)w1{T-v~ z#F7qrILe900CTn)RKqQ&{7&I;1&ajVN{Pk3WP*NXA~u->({|HoiG`tkP^LDXEh4s5 z6_HslQB_?K5jI5CWJ?-39qxD1WlYKqQ9wnfN_#Sg^D(WarvE%;ynf*+w@>n}s+>$2 z)f!Dl--`qpNr_r9JsR3x4r`Wv$UV;Rn{FjF%mr+it(^m>Tj(&sxyDFom#U!Wm$i9l`v1lu)68EuY|;bOtyV)24KwJ=~DMP3u&4(8O* z{36EeN1V(SgigCTL<|V@E3zUKqs4{7wKdSoby?nq2>`U_l`7NO?dPZN7zO zSLNsdNrBL|4pq+88hO|j1KEbgFS+s7)-}SNR?%Ljb|;#Q#elBy95sd`Xl~x=kNt4; z53`Cy9B4|*UrM@9nw8QPRcS09mM^Qr9v*!<95v11nxlxf8U@Lt>Y3Kptc`YBy^#V-Yhg)<`AO17F@#AB(%~gqP~c^a>L5}yC!@{+Yu>QHOn`T&UqrEK_@MC&Win4R$jaWnRQK7u6zU9<{ zF3w019|1E(rq1a?U=GW7eMaV;H!{owK##Oh9)}F*DB3V8^%euwnx)Bz z()yt_W#fOWH~wfZNRLuc9}49&{UX7~1C6Gf83Vy+6bbtok(PT+AT@p&1UEhF4azOM zc@x>T=}f#t!W$|{$Uw=Nzl4r%Rx`&u2R~q#-n#l@@^xfIwwQDQ=wOK4+oq=qJAr_K zBxbM9<8Kvqn$aUP%y#cRcY{Y!O}WB5v*~^E%NG~Ij+F{U?m&AdZ#H;I1e~dK?|OZ8 zN8TzVUK-lDf!>WTg;K=&B{F3Gcgjn;!>`H zOhJwp{d8z}E8I>R{~lxTnLR}^YaaY~4_SfwwHq=nrIE{)~{ zgk5@*lP9V*{;)4=l^@qcx34Z@d{gR8{s-j-Umi|Qc}Oz{x;dY$Z!$NziO`U%yb@W3 zU8$^Y^f4bT3c!mYtCD53KTc@E7Oo}{|0sNMSdXd<^Pco=z4ncCs}DUKR>RkgG~#Y7 
z)%~siE7#;{&OB&CiL9IC-Jo~Q1(TH;&QK9qg0zy=Vv$P~(1l!JWy)gV?@N(&9S9Gq zGhVEm_#JKu8#)P$m(Nh=a_(ZY*;sQirI%Ow3+V!7BQcWENW?E`NySC)NueMu*ld1| zHyl?z_Od47^sie?$^F23t2ZDJIvo50-xs* z@%(&uF5RGY2HF|B{Nvr8^meY6S@@&cEyJg`&knv6i^|fz#4_>`dg3gFAF4crEyK9R z6BNJxS?FdU?NogGaPNK2{ic3vQ81?6e|G-%nHp?^cEUPlvoF~^8|lPnBi6>k(bB-G zyB2O9v($qpwB}~U+oarDB%(eZGH{O<*5`;Sqe)(e4A5J$lWGwGN?CCEXF-yf;+Ptk zFLQ8I@Tc{W#>R1SFl4oy{7tLFZ4<5@@6*YF)!MN&9g-dF5zMT0XPl}@g+P?psLDG# zW+Yv9{zD)gGgV(P$JFgH=TxVtm}Fa?QtgP5o-!)>+Z4uBgM&C79fiM$r}!#VWPxNK zd_gQh(wIt|R-b1rBD)?pX-G_XT(+8)w&kU@iQk1>z|cOkc1JT4M{-RUWeNjM7#>s; z54yQlsmRIgQG2GTKw|UB=y-vcF9r0G53aI9`}I%buQqqx;Pz;kaG+B9aVhx|OcL^B zXtykAMP>Qe5}}8)F0hOEm}>7@m}FV0yw{2iW0M9243b<`AVp<+#F}=8Fa`WdgcMZT zyrAZ!s9>CiymRMRMo_=aQL>dutB}2`mz7x~fc>gOo|qJ)HmUM6a{ToSJQ$=4v>}6lZ#MxF@sw z)h-g^Puj}&du z$#kIq5TKKAGB&C)6cDksf-$z~z81Ev7U5w+pew50TW!5DX`w&%Qpp#!E`rIV##+z4 zt_u;oKKPL1NrbG>3XnF*OpAxiSocn;#0TX~DrzT-UbB8yQ2X8ZM8VhMTdzCbHR}RH zPG4=+K0P&Qy4v!>F6s^%Vc$VTolX7p!<=`TnL#dT8U}ZD+m^LIIyV4(iW4ZyFe7P5 zL8@F=CE@#5p3&Y)VAH+c6M@LXo>3vSqYX5)kZnc`u4jYrhCxr=LYGfHmf6(P*!m`n z@!#O+=$a#Bf7WEfr zgF6ykJALWiHh^~yV&T^zY(>*1v`4CrY|Adx%RQ0_{g|jba}qq`N82AFG=7!%f7-@% zbw9?)_2IOM+RwEJdnL7(=kF5>g@Wo7C_bB57k%5j4&CX4nEAXL$JbW{K<&3yz*FKR z;`A#e)dr>*UP{^CnHQz+(15mN{>NfyCNRK53;qfp)A41(1`+PO7Zn@X~2IitD4kGBNMl& z`9+mfRwd@xQ>QeXDf=sTONbq0Y`Xc|AV+?iAu3Wfkf%NSVMs!RH)U_KNk6spm zohTo{37XtH7@D~5U}ZgL6-PVrMWgPD4(0d(e8wujeH=f?BoA~0ykP+J6~gFC1}jSE@00aN@!UaqE{C|(#;ldg z;v1%($N9emBezKVTr4?eOz>7%{Ga=;irJK3pB4O9XED-wd%zT6?% zCNAtClB8wYAORHBSi(t%sn!dUPKJC+u|*b_peSs*#z5C!e3=}ERu3_I*Mrm!b`QXJ z=79!j1j-d-%W|?_k6iDaSMD>)w{ie{0@L=}pDw-0^x9c?#A?f=JvzavL0`DDXGFFM zxSR4ku|8A~d0z=;>J;`HIH6SbN~ zkxoYu7)U!6(7GOV#2f`%h%kenyGfWdQ=iW2Yn=($Fa+7-dBNK)*Oo8N1nQxHnN~f+ zuvvGiDPsIEac`&39DkfQ*Xf}1uH{zO;FMkFLN{o!rsyM|l@rVwP3O{Db+C4nr12qZ znWqH#xtLyCUj0IqmHJQCabqLzwafjfP(cYEN5}Y8h94#FjnxdPyNpLYgqRHU+d?J` zIVOR<7Ko%;<^yom*t8PW&`i|`QBrg?f^3V71qn%FSt1gy@{tgL-T)jHM^p0CCiNpW 
zuj(H*Pkk%?huHs<^{n~s{m5@u@@k|m{)h1U?F1wLn(*d=4wJf4Uz2$HZX?y>=+mb&2o@;4#befS3_4XKV_EmmprNG4DSjPV#ewvBE;8nNW0SHp|4? zT(QN)=>@`G?GUFJ@#TTkE!A&=OB5A*^)d3)pmFaMQs*}#pDXr2uF5vs|#&(U}LZxr8I;W0ZxcLiJlnY zx{?tf2JDC~WA~b|TmHQ<4Qul>|H7Pq1O{b+EhRL`>GdcAd;m34)CzOaD7n4F3oR{lEl~fU!wYO<5MJgG``zG6mXkR0$(OA*`Cf|Hw52htrd5$vc?Q zl~pXC#0>{6;VFvKw$D^MD%dg<1E~(ad6-Uwtv{@#Ya)WGLo@o{h{Jb^PoP%4Kvl&Z z4E=?Se%|63gPbeTd|YMbzQ_d1!#1`_zG=`1G>-{tR| z*iBf?<@CP~ba_N?YcN&XOqo{_Ht^k%Q9;zpyy-=b*(Ex@ zWVs1|=_G}ikFVs$~d?v`CKC3tD7gBW=;2kv{r>uy2i7emaPJcd;Ho+ER7;ho2v89mbV28B*kv@ zS@l6cMui9^!ES0%D_S-t+-?qRPb}k7S{v#1@k4rABg-6@BEo@aOc5VkQ%?L-Y%5gZU^c2W@`80yHBWVo5;I0GQwvBDuww=bd?d;e#cWgGc zZF9$NY+G&IBu$#zbH_XHz2ESSvA#92<}5uKe8{O0$~@d=M1xi7Aj|a_tPWwF&uF{P zv6$Vkve~lDG1*ufklvml#CK)nfwfn;qK?k@Vn8$ygfz??-n@9FkZ2$}?`M;UPNZ~} zz9vC3%#@6Nr*)Q-Q@}hDelT2knWh$H(Ppy2io>TGC75dR8yKf1Nu#zTN1X(lPhzD? zw=v%?af-5-0nId_YM&?*o(1ai5keqerE%~7(Z8Xh$Q2$j_VX~3(Aq>vLK!#*fafJf zdc)9e`zB$xghDAsVO)gymfG!&V$VL?YNOt7iZGTvzWCzA60~_juZp?#dGyxSWtiEC z)|$Oe3?MZZObkT9a|8%Y&!#Mr0Dew!^K12J!O|#4>>se3Z_LOHYa(P)$~wenH?DuV zGgRu57sWdG!0DUvQBpeK&qqrU;M>(GV*IStRrq zUB;lXCuawFws?WGz=OpaOmQ4#BI=1r_iuzWk!rJBd|YUKfBTt~%6MPOnO!Z||IiT< zM#4_t8B8ulM=w?+<>;iO+7#}OQ5wI^Fj+gLo>>0J>;3EgElw!=q$5uk0s znGF*Ku+!oGz)Hqj;4ytA>W6V9hb=(7d}r~uD?}U^`k{9dYj0c)mbd7k<=+#9fw=q= zQCvqXZ$0>#1qnXGjQq!pG#KHj3i`$wgwo_KdH8vz4P0H}YnKlnUyi-64XT-+tq(~7 zFp`VQ-5xndgUu#HI0CKhYn0RUNHvF@yZRZ=PR&dem?hm(2N&(LNXEYAAU8xSmecf8 z8?m_59xT0_ZL~G44ySZ?ugq#{qN%ujdzLnK85fARbiw!q59Me)c|jOMGaiJl6Y@g1 z3o5c2&A3Z$+DPuPgl0Qr7|x0!2&FHg`l$*MubIU}WUH`eguDODUWAMjzIIZv&EW!p z`#=1yZ7-V~l0~3|Gz354;!gB3C_Lq`osBnrY?)7X#t};~kDtJVyvKJ11(j^D(XfpE zCNs5}&*}-I;#EnrFw0-)(5aDw&XE0W6g911r|!Qj10J86I=^`AR^?+AqL^$KRbJ$(-ET$(9kq5b;P<^N>GoftWM2_`4%J?qz{$84Fr z(PWy$y^CFgG~j0QkooP^+hzH^=}t3od`^l-wNjzPKKXn_ELU#yU&*XsHA0ZH+p#= z4?PJX3|s=3BLX(Kxr@gA`ILkzU~lTlfX;IzErC3&@nZxjl;$JV8Cf$6+LZOssEJ9f zzg>JoQ>_;3N*oB0;g85tQxOF*e!Ro%?-q2%Y_E}@4^|N_X*cbbHQy0mHC+stbi!C2 z3Cl6{Vf*ppxof_Fu;3?Grcd2xIprDa;bh8E}jt;2%G(B+gZ5q`?DR_Dx 
zas3=wv?xkKw6gOM&EM_p*3N7M=f4|CCd=-AG_kxb(uH%}cET9wo>i1QUmZ9z; zb#`)s=I(Z_Eq7@DiS##9`sG@O^*G6VI^T|4i`f$P~bAa&U z<2dV<`hdz{h$!9@q1lwF&>Z#E8RbGc2+-`sfISPUSi85e1S^#|UJE)mmp(4V zqzEkBKbD|%YKMB_Eu5bv20Kc$tD0)HoQjjC6{A<4gq%2Q1q1zm#U6G6MXot3HtTxi zj(Am19;c3K>|nB-vf6NBP9%|3f>ZB9x1}=S{&Jk*=@7~9GynR!ULC*dtrb`R=<=%V z>}P5ASCUf1NOQvRJWw{$Z#D$)>!vs~G0d6Zp_mt!^cmDb2^hKPg`p?JA11TXja27x zxO4pRxe(H73E}08Z#Ttu)LyT^5$a`ho&Otf35 z3cesp)K;k&TO0txV`R;+8urszmeStBmg^x7yrPR|vvv~avW!JU7EwJ`EJd1YEdUhAZY62TXI=t&Q8P1TsiS$AGu^{|^hi{Vx`H z%x{&s|1TCOgY3q<9_iI`+UweXO#B&i&i~u98~(=N`xA?IKtJ_~ld}^v;efNisE-qY zKckO%7x&zFuJo^{{@|TlMbHuH7Sm)7Kh96ax%|!#+Aq5T%X`dz0Gc@ks#~6cw%tUh zMW3YUpuYmjJ$^ZLuB_I>}<$LF%Kx25Zq8k-W7mD z@iknVy-e@fD)$iFwTkY0Y_P13!`BMM-|CELQwYi&8Ihy1*sPDR!(fbMVLMr&%>RUJ z!q#CQQ-aGvbTOp#mW33r5Uz#*%K1w=(h;Dos*cXNgSRf^jN%kQ&E6hrYgljofsjgl zurBE1$B;ANfoun>LU&$okmblxICF5Yh;%{q3hE)Y+004vL~f{26s?#%eo`}UZ=R8v z)AJO)d^W*{TEh*=nu8a@&q?b1h;=s+d_!V~X6(%I%74^D^1zF8^nbL5C{D8`Nc`ie z`{To34RzYls6@D=&XpZVPJS|XNd`X6X^7Nt=*~V=6k<{rWTM zL>V147+cViuoriU%G+iSf}k?Ht7Yi66dL|$4?>yZX1A;jn1pE`=PS0N}9yTMLAsBFA{zxJny%cPvm%#NXMkrDN(3CnQW zjn~GS3osIGdH7cpMD&IEgHxh710|37G7S}GAfLsJP3(yFo`vN$`XYc+Ep2Ut^mOsxb)L2_um8hR9+{$U}13;3mhL{V1Nh;6TDP$uy@htl{?v zmt-qK;G2_UxNCUhZ!r|I2AT+hzdnhW0UecuRujn!C)NF&Lr`wTim00=I?yf7vOlbe zz}uUM>FJt_vUIH@CuKH-I$NPPe}^N9mWa<}1Nxhz4E)|AN!@R8L!u$-YunsoEsbzu zY-H8PaUzjA^}J*lt*|w;-7M19g#M7^9V+X{LNHYl-BLY5$q!tZDl=(Ym*pg@;j>96>N5$j>y{g*aB=F<+D+G% zNMd*&&mK(Jsof&hl$&Kk@LL@&pIqpRGXUw%Iy{o?)f>~LzU9;+$Sdi#0o{#9Tm3{m zOIDc-`#NlAV0@r~xa|7(k{f_g3UwZbIA&%D^K?7IGNq?hI%iRI-41#}(Z*>~0fw(p zaicHz-JLtG1%E@F3b}D-;2MTpcA%wExr5G*?uvHF)u=J@R4LigQd?HzwaKTeY6J^T z%qbWogsv5m9fi5`S1!uUi}gX}1rTM*H-k)zr1{!hy{>nha5d0Sk7~%#YumA0$}!jF zR7*Ilz7|^Z7R@fnRB{pp)}?_lfBYzzFf`ObohNW^WviWi9|l5+>}mqv2ZmtLW%JZd zpW@EL+eBt)js=WcbxK|ZC6Y@$8+8s%Ky9kdD#a0;fdU?98GG}Mg}!LSR^zSyIDv`R zo^$>vGanGy_#41-b~63aVtNX{^v^=hPuk2|c6|VY>7J^IGr5-AD7;L{_ZcE{{Z{X(%axv zk^k!R!8_nT_q!;cjgnei-FZ5P_cnZ4X@3s+8B7`he#+y=EH@o>#; 
zA&lfxO%k#ZwBF7Q?2TaRtPw+5T`(Swr$OMtcF&_Q4s69aJ4j^xMh^4YuAgN=KsT27 zjv;Q#9Q5=qoFb)iI=-WP;GIhKNcS~Li}T`6>oZkIF3%@@R2z!;Z@ zG-BY?5%aAA$QnA#cFfBt54dP??6+c-u_NBJEo85CSEDSz0qF5@~k(Sq0I75Uq;=hLgjF z`x@UM*_fN&OB}p-su=*aRVjCwD>_Ricd;a{#5#q5FQtS`lN32$C+_k7sZe!{haF+D z_2l1uz9(Dla{-e-a;kq<)M);&W{m)LW!qHOM=S#NjMx2D5_s}=1Fg6sfx{zb-1lQsL zoDh;xr`XyI)Y1C}jWwsR$D{n8)s0cc+mH<@#!y6O{x|vcHrcE>| ziNurik?v%B9Mj+RW^6Q3BQifgk#zKjPMLAGMRT=!c*B>>#y0UIgc?4&>=kgCs!0mo z2%50Vuhf)k8FX7`M^WpstMQGQ7O+q8|AL?0{|+E-OFaCSy&GKqk6J&toK-mdaroU? zMB-58e_go&F(|XlhIX6&9l9Lz{hOZWXT)Zmd&p22t0Ru{Z>131#!2whr)BEmbwdZuwQ*Hx$;FkX){^MGNGg}Q zLMCozIoxvcFhj@yfNq>X3lOq}P7#uZLBy(&W$vB3`7zwY^kuA15qhQJjGgYTI}c+y zGv}XFBxoVd12L+9y^5_vosET2I^*DqJnWm0KyA!mI(7r*i%@g5krZHO6@oq9pA;tu z-x0u4f8j7gMEbQJ1a_*gmA9@*m)e~)ZcV)JBCom{r>8fA#;{S-jtHslmAheORZu_K zVibB(Cjx8dqCRt!c64Q;J)MttsE1j^h6Edu(JBfsBQ!IVC;|e^>S*HoQ|~>6g;Ql4 zilel#Cfz>^Us4?T2^S?N$-dM9No~_EV{uulo>ulyS(F4<$nJ7Tc`{%&(nN!Ae}2rr z6l{H;E)&5&FDx*nxwNt8;=ZeHrnfh5pwT*XeX+#GM2@{pFipA(5=iNFvxZ{ zJ_pTv&LqxpVZ4^h_;=Yly%FpM@YhT_nx(|jrR|u$&-@Ks?;oivwo7-&ZX;3Q4HsQ zKr>MX26%j4^FRaiWzEuNuJ>o7;Fof7LRV7juEkL)%neFqekfD$i z)mGY@u3BozyT1qC+xnb?4HenzH9`E(RpXh`JesEvpsjic4O_7T-g3PkM8J z&oVv8kWg_?a_}31|Ji@G^`H)vlkx*=#<%LF2 zR-mEp+#~Mn$>b@>`UXN^gop^o8M60fx1b-DH~>xoY+|a@yP&5FK1!Z$c9$V$C0=c5 z4iD~3I5EiAS01la)#EK0g5El$v*o&WxxP7TZ(n*Nzq^5*qVKsDcv^8l;=4`@i0}Ku z{Zc%a_ztVG(tug{T8?m|d?^5iFC2K~yhjx?iK7C-?BYJsi1k6_7my-cY{HB$vE2gC zg{R@;NWN*gq=g3~aau>tK(UmYAZ4eNK$V+>#(Eh`> zmih$_=T?e82!Bfz+-;WX?_EL)uq9bsm`z4JKJZb&xdRVdjk?2JUZGCyVyYymX;O_Z z{*={!-^RW>ZELNXhEZw*ZjIQAvQd~D`+IE7B_PR=zwge;1c@Xf59`L!bn}7-*T5ws)04?J52+5C zbHRMI2e}6+`?I@tc8nUsghArlc$#EQxOkJ^Vr1qcG4$lA;4lItmtOP_^HtlDtD)P)og#4oKdM#*peHHO-&2Q8I2N}=LK4RQf@aBLLV~= z?KyFJvmcJ`=p)OBM{&p@X4=)uCnrHUM> zwuwD!+m=mBXL<;$ISLb z%CG|NF|tjohMUnUo8(6?x5O z26v2QD(VIu?^YG|AP2_t-)9kZiVb-KXMS?(UtVfU*6(}78=Q~*zJrkazD_}~4Z-Op z5q)#PMHfQE80s5(A7U%V|LggK{WY}knb4M(=PUMSSY_EH-P`!+%lxAd1f~@RNq~g*FV+Gw^Zu_Jzmg}ks&+c76VOap*~kH} 
zVi>grg<>8c(X`e4P66O$glNEAO*3kmXTpH(*Eg{WGu}7-=VxT=*4*jGD zzbl{b9~es`eo8&I1Av;K$DA!X)1yXAHYKC?m zBFCDBs{E|GA@7CzQ4BDUn=No$s26;%3SZOgV5;5eLfoWppmbq`WO8x6@= z0_RYR`kcypqqm+udEUfy+Jpo(bdSRHKZT6@^iAg>n2_dA!uCSy$j_;)Fm6hM zZg7XZRK@%lw(ZmqaG+cg4kj095+cG;!9Jm8LlFA>Upt|ai!-WKjedLwZ$wg(rjqB+ zzDhT=%F*Ttk4lgTb&Evofv4u(vcZDqq{{Z}=<#4f*B`U2k$$loS$73J#-B%ZtuW;E z!K9{`oeZyc*lI>iRZ-T_UbR7s5Q@$QIE$PFh?BUZp3M2oa2v39Jv!Sf+tg-)4{au1 zY(6T|{+QwJ$eB6}NgS@;>ho6`m_hSVa{t`Uo9UsdzsLY}nAb=O@-R~{;T9;R-e3`y zP%7O6BPe|NIV#NU^6FQq)V(YIu3dbbN6f{S?%B!`8ADDfxNM8<$B&Ptg*PQKtu7=I z$P7l2!N(i)u~pGYv#-n@wt>@RJ?5-6qhqzR=5!h~exuxhY6m%`5jlk(ORqswqhX@UuT2_gXP+EcjDaP;4525Dg ztN)g#Kjp&T%OTtg(LuyVXoR{(geE;` zx_Go-3@II*-ysvSx+Ao3u->A*n$cXNM3P(iJQ*k9qP6~|4dC2v9L*nT&x7^H6-Xjp z2%<}_QHZhqc+CX&?Sy0RI4ames1WwO8|;G}RauHdr599XnNH3Mvd96XtzXQPVrigzL%Yy6H(DmzPhHjQq(*CoC{r!7ULZDFRZTBa(^HhtKqIWJ9?>O0*;g3MQ3yx+xtc zhE25810}QQG-RWianjd{o1^-dPkZQGrBYlm55#KjSTj<+Ee6E+gl47*EiGzO58V?5 zR|1Ut=@^ud>akaSN}dwBJGAA(M1gDgbI~_zmpzDi6bZg+OZebYMSQAG=dQO=o>YVBIkH>1 z&-=Yju~x^sQCRx-X!i<>oZ@aw`~k_gMiGYntjgQc(wC9=V+9;_da*sH&h4v z>_xhfJ4${mRmw{ATQ|?1Rt673!2A1-)R9R=DCjs?l2jb)1v11L!6tU@hd}@ZF*7qK zdo?j81_-@_cfo;ivjU1Iy4s?;Jom0n&58`(M6zXBSG504 zfiRYYCsp9AKZ7HA@$4>9q7?R*)4w1_zI;9&e0jT|4_dj!-UxRnT}mqEv5IEKHjloy z%W%GkCq}0JTZXe|X=AN=o>D4cX2(QW$0)(iQLTI-1aXp>7UI3Gt+WdA-Fpw}%W0|v zsv-RqsLTl)EQcXKi^Fp@7AK;VVM=Zav5!o<9YU@e$x5ca<0#Dc~TEMdh?P^veny4of%RPh%hI7phQa;_`MD7}ki49z8&qGCH5^O_$7YA({>$kZeBF zV?`4VTb`vGOF`QWWVD=6gML5kSol>BypzS!O)Hn56?QX z>t+KvK0q~MyEK{@w5TYy2faz>T-d|u~6EZ@4{+coz0vvE<*k!}dji7g) zvW#in5mx$qpXjj!)>&K#jjA8XJ4&0`=#t~5YZ@ty63oZ0w({GFqZMZVTK_spU}SgA z7|GLHo1sAYHqMSLlkl6%0PZ|-R_uid#!R`9 zGfhi58=aEQhs%|OWJ|sqp9(+sVM;L-&nDc4@fRknFU*~BQw?@}2B|x13Ql+rKIX*| zn3X@TDa$Tz<~eclBI13diM+&=cR41iZx7Q{Bi1#HNc+SW8~#Nz5qbcZ)%-@Jm&`7> zXCo_rW1aGBq_WnOQgT@7s>P8okh0Lu zhj?v)fxaPo)nR+X!3UOh(L1!1fq1CBJ$_}g;oAjA5uC2)C{*A6yRGo$m2Br*tx)mr zSqbBm+wgKTRYYrk@-mOvsV&B)cC0cANWu^ZH^!t+&CmTCFm=|3M6APK`h84>@X6fd z0#F)v@=SQBpJl`nXJ6kAZw)0F2Q_|Di> 
zN7XtJ1y2sr%sLiA)&(A;ma`F1*XCTy3x9RNd~ZdE#X!j21ygfFTWe zx~|G#IaG@mCQhj_`y`L)JIAN#ZboH?s0>#hLZ^zGprqS90WDH+lF^HQT3#mE!Pq34 zw)mJ)@5(Tmv*4zS3PAeyEx*}-((J(6X}H;Alth7)EF1z);EyQ6Z)DU%LvgP;W>aq2 zEbN$kZX$>qYp|KgopgY1!EHE2YVe8Js=z_aq68wZW*NU|}OF(4Ida2MpV(3PJgHVqDWE9*}DF29Ki`cnS`kdi}GK>%v%>0R!iQJkrrNvWTkXNfsF1Dd_)Qsh~Gfs z1!}tC5V9afTYm`2z$Z^?B@w=K%Z!pJNskdetEEXL5{Y7peA$3w)m^AW*VL^{w0j9w zFrKp1-dfkHg9X_uw$U~GbE>(Az^7fP8jn0Tb(Xvi?Sy*()UDfYCgdqc z^rR1Ehvp3|jxzjE$wjM8ZK%#UI^r~sH^p-n0&>YKDh9qVcp9M?Ly?;{>2H}C8^$NZ zP|__&e=eb^ug)%DT5<|H#B~2U2K9z4i7lm+W>9OzEAid|$xq0X!YB$^tvLM1vGI6< zx9V))I%Y!PvdScpnDO=It?e6*<`QN~oEBNsDj5$cUpkn3ya+Du0?(V?o(jHcO@V#L zwOGv?S}5cDdTo22Auviyx_U(KFIi=#8lYX`jd*V1LEaVx)?%tsW+Qj&|46+`^adoD z4RgK36NJvU>!^%ENt)hf^Rk9;_@jR;2D>+4XD2dfmFNiJWga`l-ZX)sxVd3;%l zTsTZyS<%_jOS!jFyE{hq|xHSkdR4GmrJpiXV z8Y13mIwm(CW@hx;;*S9GCRUGIo58>LU*uV+80K3PR_<)acf|=y_~kLe22dWrid+n& zBz+w5A$+4oDe(735Uc>b4PFE}Z@4EDNjs_l5m+X#y(vZf?doc*5$vVJrvwVF+G*k4 z%#wJnIC8j#cx#?>wX(X=!qcPI!Haqt%@f5}cMto*abfL6;Nm~cu3>u>&y7^K)U|82GC{|fU``?043aaj>tVc2C-M#jsyoPRW#yLxQ*>L{u_K9>yjh^lXcfa#nC(SqmHvRH$P z5*t=6Gdn*^s|x3;L?GFRlVRcDCGe(4QdHmSc{>+iH)yctn2w%y%)?z{{ATe$N=erkJyS4F2GkBXU z0XCxs3H#VLkpobSUC(@J5i}cSiOOo6L*D#~ zITpA&Eq-k~*`x+m6q*5rjG@*_x|M>Ea%AMu1D{)f?^JePA-7%2u~R~iX4~nmHRIws zRw8U5dZRi7&f<{@Wgl8c*7jt#no=lsy5Gl)Lc=F9ZL3uuRrgSeF$b4HIw#mRCtW~{ zrZyHjXZ|lh&0$E*hB&$$)pDlNU&fr4V|1FdJdKA0(_uLPtS^jxjiR8upa85%i(l2}G3{e}zb=qZufQerFK+TqO@S zOP4sW4B!;tWhmsM;e(sq+Yzyaj0(S)aay7gam64uY86GkPoVU`+_Zit$-_WsvmDzo z-=)vMPsDH8fGS_!VQwN?8dq371C*#h%EqS)LfS}f2iedN?v)VLD5pFtC&O)^+@Z&^em*5P=p+7N{GygKbBKu}rY8e#g zwtGU2zs6wa>m$-#tj_vc+v#qhjyLg3DRE%bjcCfwS2loVQUSYRg_oD24N>v#YE3x` zQwp>H&O-uMU1R%kUyJWiUW?b%de6_j9+F=>`ZZbq`igb=J=A?O`?oz5%KibK-=zFI zc}fyY4$j;gID6dQW0o#O*d}j8Yc#GxtD;c?;kp4|a|xj*X!E@QG4IW# zDk4P4u21s^Z3dr&{R`eBTQx`spkX2g_4YHo=q8mZDYCcVX6SK>E#<=r9ygtgJg!#9 zr`2AxPdFhlZz*iyMh-;D9&(NK+WOlAFz&T9rvA#1=gjIq@o8oSYwtejERw!XqM#J7Sri;fYpA`%G_l^Ne6I^a{5ME?gqzBx!wxL 
zsM;NCHndD44N`jVgE-Vs!AM~gr$Sq3Jw_)EA=mlB99$ql2~NoJ=+9>hIhudt0L7IH z4>!IPWq8U5(`TElerR+P9-$n|mQ{<(N#S0@DM=T1K9N=e(pdI8AhByQe$LeJZ zQ|NG`tg1E;4olAEI87`(b8btNW2#7$WiUWf)~Zw0Rve0@9v);C)#M3wtIGYN#aD8a z@6PZm3?S^A3vGh^1M#pEkLds{`;CdGYSt~Dk3g9;ekSKaO~;sPyVb(E%c{tFQ6ZlW zfy+P7B~28ia&DH?c2#8gXb48uSMsbe)>n&-{ciWX#Kg+1z+95GK$HY3d@{>1kyHEM z7yz@HA51LZU}k%Lkm7?%){7;gxE*Ryeyg`)CyRap;Lu<|ZDKt*1@PqQlljg;Gn8_i70Cg zo!>)H-ulx-!hmZI5#Kd7{-Lol*#I1_Q^b7bzRb&2al_;_*$X8fVcoK6s4fXzLz`@u zhwozWhyyZ;yI&e-@s8 zkPJ1N@BIlHBK6x}EChUI4<4O3ooEHWoD;GYo+5puC>IuEbpzB*f6WG787JywTCfhx z6J!H=f1U=QUwAn`Jxn)={)DH_T94ff-PcKCyxQsInP5}tB4=mb-DyoSt3UpB(wO0! zd$oJ1HBX^zT>+|iR_M`S@_zpNgz=%2eN113eQPM_P<-|+7!v6cH%{keWe3Dif7gFY^kvrs`i<@94ukh{f7X%?UK1n3oEhsQ~ z71Df=WrxUlJ*u<@6gB8-x{Hz_jQuNu4<5*vu=|F27|~W$2stb?JpRzCgR;3&KY@h4 zMj}v{x3G+nN{k*@h)cf()VTX}w^`_#);4BeX%h9Xc~W}}?`Y`Q$6`{rJBThXeAvK7 zf1=-%x6q|d?aVuOmyc}@$fliA!!zeySj8C?O~GkIn)BJ)PhlSc7FaFIMvP6n3^3h@ z3C&~qxm`6C+7m!jqgT)VcAZ;iT6=O(>3{2JGVf^iD;mH+5Vw(84J^m(aKLm@hf;FF zcJ$6;bAH4!o^w1EQ~I?|a>NOL4YVkCidquHe=6)?u01z5iq27t_Z-ar$rNVwgxqJ4rA9J%ZD2TC`nkPp( zkmQStq-G0S(l^+l?vOpw4|irRNUkkDhw-^kS#EqY(#uNz3+%OFrubf8E@WymLNVfa z0do1ng%F4;aF^-ukSB&kYGap^K~d(`Bm5U*H-^{3y~b&mrOiFL^7S@x%bKn8nem>J zNEGwkY<&!>4u~~BX7;EQpJF7VrOdSfnCLtaYU?N9W~O%;7s@^~fv&oJm<8NO3?f&A zGap=K3t=Y?6xU*qCuN;oO8PakX7kvDtt6)@Q2zZSaiL z(f{g_kI0J0#rFIJu^j~;Z+ea^C#av=|Z>@+(y1T}soOMWl+-1Y;PB}^Tq zMy0l&E@91jDFS65f6#Qq+@gb(ojl5W%;$VGKXOGWPEi(75n!kF`>i|4Lc)0}HtJHk zt7B#|lUh~x`kANWY8QZUq8Xx`F`Tzy2SFlxyEA`2jdIX_B&SP7LJ9_a^441a&`>2l z4o0bj|aeun9ecV4#c-Af$Xmi_Vc}{ zBQW;E_sjKNMCGRp@gK>K#$c;YnK+K$n#xmKB0?JAPw38tH<%p$19Xk2`YtnBGRv>& zQv=>P(yLO+CPF&|P2QzweuMuaSZ7!^OligAvN!G;nQ@Tx?sz#LCIxcZSCNPDSsRV? 
zlOlHenuz)0`mT8$?)?pB=C~6#3+PPcX9L#2!zYfTc&BuULOlb|LY5mhKmVHr(31Gs zh2dQu(#1cCBGV3>nDZQ%m1E>XT@S>ffJo@{KAVB1r@h&0)2gu;z0r>+T{j|trZ z7y(0_p#8`^$eOFh=Z>H1m43KWegbZMpF1k5W(z0thV4C2yU=EHWU?-;oUikW>!*Mc zT^<}NCYU`0YucOY^m4b>tlv{s)n{`~Y{nDmT>yva8a&}kXA7yrD+E(9(vtKI{= z=IIn}H$$ycx89Xk5M!c18eD-h1NFCVT1oI;V0MS4h`?;e)p<@WSRu_ohlNu02>bTz z7JBBsuWy1`Ew`{GZ!~imP2D;796Y>>NQUIXNd$G#kzS|X$2uE`Zj8<-lt}7bYJ=D1 z%ewodgk?%P%8s^?X^u6n!He3CgVR#Mha?Zju`W_`IMNQ%vwdjd23dDuSx{g7z#^pA zjT^nynq&V6H1wM?rq)0)Gle8pbKulJk{>Br)QnUt!oR~H8{Z6xyhz!hx~Ceza^)VC zVn?31DS`!$i8EV0%l9KwV6?Fa8y~rpbr)i!-Iqg75qaQV@iN8l;AdYl4lYsPPVJbV z9lG4+?jmWp-nS29J_({bCt)iFBbsI|%7nB6k4FT-H{25_Q9R8(S1Tln*|HH-$XSa~ z$HT+OWXZIy=+P!`x|*6|*3o7Cw#$b`3)cPy|S**;W>ziFz}fL2U$R-zenA^t@7G~k`C9CU|5W{tzbyX0-u53S z0k3_2QE=ap<~RFA^{oE>4tOilQ^R%4jmvee|9(0D?TnOof4=6@y*;B(_F$$b zdxzQUmalAB`A&z|J#2jiD37;@V}Y)Ivbq$MRsX#J!06XYI~Oy?;}sYq-Xh72P%T*b zcc{ww?MeMtNn>L>w~$tcxEtWq2oHsLi(Xo@#B!*JG{>fGlJ0Pp}z4i)vpoa*)d5F{Sk@E6Ue}1|JXxlgU<>%HCEu z)Ea>#ig!hZ12I9KOWN->T$)e#IScq zXu+B7jta~*6#6Fu((I!dYoNqti6GOkhT1`KimA6-HBaq>Q#RS-GlbbfSX4C$k;nu- zM=e-g;t8BxQ2azx1bLYqL3JZ3f~w6cBwNK3J5nhzFvcWn%67MDOhp8#8G({ZZ4l;T zTEsb}K#fF1nDDD>;f!CFERvTiMG#S28i2R2`*{7J7xZLooTo6{CS6%2w%RG5N(o1c5IP{Y!w z-}qUpgHKeSZn4B#Ng-ja?5v~C^X_%gR;vlsaPeTW3N%#0|7Y)Q)GIlTD_fCGiIT0c z$M64$zIVQFG`1y@&F1X2c0^VkpqrvN5)CwwKS^zYl=+43>YghkRBS>CCc!1eF@H@p+%O#NG$4L z0LCfT<&9BSpmf}?0I-R&-th;BUR03dPapa0WK4NZPCR0a(YRorZ+>lv*D(`T=8=4v z;{^2;E)#%&qVlJYOfxtW-{ux&?xvmakQxIZ z&^SgI1b+F72u`g{I*-uUjy<{C%qO$?1G}c^WrH+$J2lr{lOf61pn5mhXIvyMv(%eKZJH4>76Wy2qLSjr%vXHVZt_X3IOD7VR9VfsTvVII*mtoCO{k)kMkZL< zWo^toJ(8{rEUQi=222%Zf?LN81rp9VPf`I5jIJM+uHZ3iqU$S~saMjZ0u{e<&Wl1B zMtl)As8Qkz(QZ-dldmvF^mPAk;OiBFHwKsD>8QC^nz2Ei+PvcVR+8HNt&L@T>FvIJ zpW>P=^zPleYO~Y2#7`Vw=DI&2`KbgWPl8|B#^z@%_m7~zOx36NCw1%B%VhrF&g#>p z`=zO!jrwvZ{bD8fX(@ljW$P!}%RFs;g6vNgb9q8-a^mK)la3c8%D4*N>yMud`Z^5B zHjV3?b7Z^@=0BTd7B}nKP0rtDNlWT`Y;-4#@Mp((8IIk|d8d}&y(8cyQ!OIdQMi}qh|2^7vD$nq$XRNSNVFs zdM4%O+xvDNFmAGE8@oC0UT4g}Q+;0I$QdtQ+~19J&w(kXr+b8RR`mQZE>@9o9@E@{ 
zJ}oXs3n9kRdr=Oty2RBdM0c;!D9Ok({oU_=_uH?2n?BsvXg+W7rDNhD$YC!^GHP;ua)mU!uvfa>R_J?I0cDSuj}OUvEpVp@0~j1Imqr ztq$p~affOYlJ8{9!%P10n3J89ui;r*NWmn~c_f{gysU-F6Rh4?QrNi0(2b#m^FDxP z=Tnw3v9>~uOl(3k*D4uBcyF$qOvVi5+SK>B=eYnW$NH<^eibA3-qkih7XjDgOD~PV zELBYJYn6<9E?sNP>IN(S2x?U9jq`EAsci-|k_^_W(%VXy;Yuq1kv_}xJ5J2BAHLxv zZv+lD*qpY3&_4ku>hdAiE*)kyPCr1iB{U!p#3vAwMIQTTy@S)P-Drw@*bP?aCz{J= zO=Tj!L1&KwUSm!&Y1uM^@65bBWmQVwI$(N@@w_|Y{1J)b=_Yq^`}t@kkA*BwtAeSP z$)zlGo?3PX1tSYFNHAgt<JU$zF%~LY7;SP zWd?SJ1I|qHPr#a4L=EX`+;}I=$RlYR^lK&}H5%|^Ff{tIp=TENW5>8~S@B_s8B4!* zOyj|v)%-H&E572u2{Sdx1y%+uw2oQ^p~qkxG4h;MJwS%7q2pGQp@DesIM+WVh7avd z-Fc$Q936rwm*@D1*bGRq>brb;1ZF!fPD3(ToKM|CB(c)Svu{delItthjSYWRreL5c zihcJOz~apAV!k}r7_W}Yt2XHa6}bR42$^Nwyi{vIny8V-r8%1WKmc@WWr)C_*zl#p5k8j@!9BPs`qJls;1ZlAhLQ? z_iV;FqBHJEIFIgro9LZDgVD?$2LE7+zZiV0kQ6zB-S_;6z`L)q*YhDnc7L8uv+vJU z$S=n19}wuQAlQDM#C2yLTz>yOs?Yww;O6N|mzUcoe?HH>{UmYU!&LkO*5L0A$p-#d z<0G^r`;L#szx(#PZ@&HJ`w!pu9lhHiyS=XWxA~7D#cD(BMaw1msKYNWlwdY9=QaNB zJb$q(*PYFD=xh?dJiEVp#MIv>09=veA}lS5XT^Wu4tlol-#zFr3HbczBjqo@%p=-- zW&HgQZ01kx!^nexmKO?AOI)(;Z;f&(z$Mz;neemBd;fW^)bYmvV+$M{vAIP@MI^#h zdyAkj^J=RQp~Jx`njr#|o%}EVgRejS^BSJ!Yzf3(FH%We-XGtSq=*09lx#4VBW zg?dlt6a+38)ysH8lTT9HI1aB#2I3A9x))v1Q~T#QOc501LJ0>$lwKhi<}?u+lfdmkU9D_tOlttCAbW>?*cRmmPKdJfO6P{ptUF`^`5WasmClKiMG^*cp@-C$|6w>)yIB1i-Sh z^jX0n5n}*wV+p4SLb%I1GKvT{oF?VRX+;LJXng42aRDbH5%g*xCf*nLT6(efrqRcF z)_c|tFviRm<9>O3|KOl)vEjV(5{m`g^GvHhGj&F$|E=^ z6{_CS$r6$ir5TS`!WtV#qBMLbvLio>g$3Zy0CLFmBYu3@r~KoT6(`>7ZP(=!B#6t> z@oCJ4iTeyh8i&PcO-h2&@z(p*yFA!Qj!9ILKFE#WM7Q!m3Bor)QWcVZfT~Qnr|3o&A`& zY3c5kR0GW0M#T!VZKq;x5EN6r9poqK@_|a0@~bcMjFz?h@MV4^3rVgxiWU5Z0o9NU zWJ2^nXZN}^id#=i=ISYe8gwTYfwbm8%PR(KD%x4-jH!mI^h}A0LHediT=?~DOz6vC z?Ga(Ft`qxs74VtnX7s9tP3Ouq8Bb9;6>|jtHkTXkDB#m#ysA78SrT?jBp=7=aw`7% z>#zUkzx~gD{nvlZ9{ykd>wo?E&wu{@eNJ0^{J=KzD+Qqn)&aqbX&+x3evU4G-h$6g zxGBg}8Z*ghGs*tm_S@fbwH)zx@4w3r%SMICXW72s4fpHcf1MojOtL9HEaGR7w>vYt zG@0g^MppRUckkZ6f8V9gLg&0UXTtjuKRd3UZl`Akckj3HFIIuGE;*dLj@iID!0!aU 
z`r{w}*g1#t9?UVyIM*dT!Z|7Q9r5n^s>Y`lux7$0O;XwGImo~M-PgJ3%*PK&IZtf4 zc6`n4;XGv`%{Tj?jcLwbpTQ~Y6U)>Jy!(P@wSD)>kAZyq&9~fInD+A$eFEu4iq9eJ zZDiMx3$NTheZ@O;uCuekd6V{-HrK3mHg)$kf}d7^7keB?jAyhy?)mbEFLRsGv)1JJ zBG>I_lgeS6Ggb1 ztnWYn^FQZIm6gqd+89I(k(q|th@mNm?89U3UAE5x%kPh&b7BdIqgBPMpi(){x)Z-9KyMz`sSOzeDlq>@4x>pn`Eh6K_=+3 zNlch-EU2nlF1{+OWr3>YIM_6s^ngEm8DH3NOyznv*U0Ioq#WA!)`LBgjVVPffCxg* z&{5|jmaOb#10qARm@=_y_?#A#Bs)`Z9nKC~Zsi3V|3TC4$H?$lTuR zCTvG1T%6>Zmr@$U9(0$=H=}p&va(&8-{!jG4`2CYAOL`}XmdI(XNf*`5}d`wZ^>7@#!BEL$9S^jSsju!K<43t`8i=Z+HGdQYiI1#?=Sq2P!vQOpLyw_} zXE+fNol?XLn{;im#ht)RSTHZKa`pV>hwpO<9brC%$d4$YRnPf9f5XF4MTi){_B)Jt z!DyD^*?uk-T(fvNu=!3wa@yeZoJn`@Ml+my=}D}Z zJt202lG+lhO*tQX3L))CxLR*(xtg64(qV-y$QXU>#qAxyTJ~>I#IDuI4^U>zUEo(= z=2KEWZ^&J5%Ff|F)t|a$RsnV0aT93>)eVGxSF_o<8l1@)h*JZ`&18k5H$vm_DQ4Xb zySw@vR&i&|X~&nK9={nJ;TaGxiZoX_uSfM+4w7rOaa-<>NSt%c|8uUVZ2#|2e>)cz zF>N|t%<6}nn*Q)lfB2Vw{g*%fk3VKh_mYvM2W+#`Adp*Z_EfQCpXP4Q%-c*!$Yw%i< z_$bdOE7fUk$#U@i;SYbvCrr7*&gFF)?@G^F?7Q>34UT?yi-Qr&X2DQ~J4nreE+|!ug4< z{K1F+`mg`=bBAv@MdunUT9I+?;cG;@c@lAKYDtI_sDmTPK)|Kql;;n;F$@m=kL`d| zET4v$e2+l~HPHAW5D`fJR-&15tsMgS@?D8ha}Tjxv9QFG6iX#?3o}7@*{J7LsAJY` zkg9h1dTv}klpvu=2M2%LXu}YlPat@3RkZP%pn2i13&oKV(ra&NnCqUM%R`X-C~;bT zK-|kJl1-HC5UdO!ScJW^!XWa;)+8 zeOe`tm(U~M8u|hkB_YBAl{C`vZ@#EehG{74GnF5LeKcg0WdOi$T9Em^}bJ@>QXA#QUnK(o>(sz#}MO0&QKr4h-dXJB#rB1@lDZ&9$d@NT!~^OOayH~tqM4TZ!`hbDNjmtZfU*NOUv5cbXvyPLTZ`9F zT9ZISDu#B7fVm;zBsZK9>i*CvSF(kTSuOF^4vJsOo48UjFtNQ}ga|CIc8*$81f#GCgDRKs^ZoAW~$p!nbehMAuF^ z4w+A9Qft<5ngUgDoPcI_ycnmEi{FmJ(Cak*XjsPIea{ce=3*oNKo)@AmkY4qjh|Am zW*~3o>%kHPU{gC~aQ|O5!9AdXB^y_eo`W|U@^ABUqq)V>&wArP!!2i3s+kF#<2T`` z`7*?kMzE+O=S{>TD6{yn?bD)QA9koU!&%v4P*u&;*u_5HBBw{2FZj}@as5n&V}^*6 z#2A$4reeMm?P`5_*o+t5YMd0FWy++?gZk)p!AeoKskr*D*myVsb_=Za%_-nT+S7?>>u9# z+)mL}ek?ORW6VXKV64_iBbX=PwH^Je2gm;@GJg{~7SRbB;Ob18|+#aVJU}klRHZ8fBFIUwc z@~|g=nJ0ISJe}@?pSbFr&Kvvs&eAOGso@zMrge{3C7IsGC394S+Iph8Egx~?yNiWt zcn#nNDwxJw2YwctsBj4LTx8|9T9R)-ve3P+NY4?>$7&qE#Rrq9hLBsr_U_|kLA-1R 
z_-r}Yf&TfXUd~|wqV*2cN7{N`*+-JA#N4>Yo_EDbcb5|B4JT6^6hfmLfI*G;eQ16V z?~D9_VV_)*?fR1=4G<&oS`c7;+8V@xQq=Mez&bfSVAi9&ZncT}$Qq?VH#}kG5hCKT zM%krWMYMrYx`H}-*{~N-Jhe=E`PeIe6F)JsuSqA!)AjdY=DDUp=ui?g5n72UdRD*c zgk&*T4k{T;5=Vengfpx>3iN4oXSYPg)}r?1q-yNshs!j`pi)h!49@;DG93NO=lKsU z-oIykfB50s{LPlobA6LKe&f5iyoxr!T`K$4jA>)e#FyJz9tc#-{b?9TuH|Eo4gIZ@3VN#BHCRC;;fF#K-op|f%_a#rca6) z90P?$C$K6UEu(uxOXQIilkprL%9+bVqZwDi%ETspY_3BPn@Z%FIN4)v#cbw4<(=2R z=J54j{`?ou&m6=pi<`t$13)r?+IIx$CD7D;r?~8RYKzgwnp~q0*cu2{?@3Q>Y&JeP zcX;MDYXUthev_7;a6Jaz=gv6SRX*6GUK`0FlR%`e7`QbUP($46Af4Jp6+g;7u(LCO z%P!LL43??E=x&(6oWRZLO!<8xT&Yrkww{n{0g=j2DdO+Jh03-Z)_*;stf+mF?&C4wjGSKr$Q$na zd<<^m(xXlLdoDev6$}O}-!_ZGW}Y{dIFmVrUM{s(F4{m6R+(KeUf zAaHGn^XVlx{(nw)1J`c%Su6Y6-&l&Ec`o!+4Iy{KD#k>g9tLU;lhVZG{7`+{Vg zL0ueYu^`#P*7DQ+RO4olOi8xkZt~Ay$YNcUVpsH_AYpcMX7}CyYBS!=&}3ap4q<*h z>)UUB_ZR+;wEz5%|NQfR{h2r69MOJCjhPz>uOOV=_3Fyd^yQyS`E|5hMdsG1XUkWY z&fh{f8`#S#?snbLT~&`~&b5@L`{8dv{qMHj{e0(4e4&SG=Dk#|4-=R5vu1MiZqP4LzK{?q?1^;#cw z&)}TRZAE!;N&c;b?)xpTIsCfXo>V@O z(@D1Hffygje|rrMrHhGYupFlx#NH=loKu#hKXTT7Ik{XZ{Djd-Jcn%(%$nw@R@S*^ zs!V$nAG4nT`EpIVMw3gPzqob(<3Il+PcW0_zyE*#{-^)_Cxnkj<;nfXE6Mayd4HbF zvFSCdTV7PhXD2T?mZ0jQKjMv!1GcCuj|Jg%?9K8NC zQ!dMT+YAALK*(S*8?F*r45o}9Gl>y**a+iQCWSqYz%FxaGVzZ3is!||6L&&dBq9*l zDpjfido>wU9HZ{r+)i-T%KuFB-TVA7VuU1@6#I0=WMd3Mj`8=nqwn(t*D;5@Atzor zq(6b~K_@$PwH&vPy%5xc?t(J(1K`cEK@S;zmER_R{^EcBPqe%h^~2yKo?mSV6eleb zv2ccq1V2~W^*E~%DdT&A<4-j-fq;3J0|{2uqHXfyBDd{u>*9cZns`dG1k_o07--yC ztTE{%;EhAN| zKxaQ^GPgxJRpn%50Ic}5AmIpF^eKP~Xc$)VB`0oAQc#P#*KX2|ct#0Ws3H2&~Iena{{|IdHqffv_TSg8;VOC>zz zHygYS&TN_|BF!o&hm6g1mgxxBt5KL8!pHvDRMlw%Hl<3QjE`3aF^;w|JiXAls&q*w zz4;TphLfNbGFAaV#pEhZCF}nT~UGwGd}Ih4INNO%_;n!&;0Ui9LTKPlB4}cL2Pwde3cO z&YR!!Sf7Z#WGEeGd8;!r<&lH{2CZ3!byETQSuZB{_H5OvJh^8u6)+N7Vu&cQ2$EiG z^0RY9p}?`AMF`H!^vZ)C?*5W!-f+L;L*e{&$NU}X^Q#Xs=1qz$k7x0=SKL6Znz8y` zImKzS8NF1p>3FViL)gK)42Tnfr3qT_qF=jzd4XUV!}ke0>e>z3f% zamM%5TNC2#Zft;$pib_j_fYyT543+10X4I{8mmb8FAjxx9SKZyb_zPF(x0*K~e-(O+Oe{_0A7Dx>D| 
zB>fk?rq9UTbI<#4o<`26Xnb(w4)FZUpYp74a!$(;uAZwFPIIT_U9oP@BE2TTp2wRe z&+Pi(@=Kob^bL%Rxk3Fr8;jWEKd)TseCiN*wduQb?B27EkvQiho}U3>GT9+^vPF5( zSzo+wSKDPwBDVKmaJR@+_DUj-&E>nfKS}j6SjBV6YJR!buFI#GG&MdL<&foU)s_8} z{V&J+?-c2tNU|eIKVG`dR7gqtRczoNuS%1hTyl5msVOPviX?kFA3XGf@Ow=TASbgY zMaVwi3!PM`Z(fut&3C>?>QW)oXsP3M8(DlJ*3WmqnZ$PAF2UE6L2>kKi24?4`X!L{ zgiIL4&o}0}cYUJ;mZwqu7{l{*Sf`m?wao%38`wU5kPIidhTt$+Dr6cnp%@F9M2CIv6CF3R#03yqNKq-+BxdBnv)4 zIhrLf(A^kDG6qZJ5vrrmuDmG`t6!!FmxedBD{sy$`I@@L?6&56beC8iYn?MsxOF*{ zi%kfG0c$)NB)hCHiR5gZ|6?eBs3X5|=d_|0O!)<87J#FP>{TKQZpynKWP-IV zinH{{T<&b!xTLANnaA3%Ruyx@_`XG6`~YIjr4|I)ON!hEDle$oOwZFKRkI+`YFl@> z#}qz9)GN4U4j+X6*JYBUI0md-2YdsLX(Q)Ujf5%Q>f@E@ehd*h8Pgj(b}`4Fd>{L$G{!e3Y^yOC5Q;>@yM!4d#Xy%MugomexrMX^A=U*exiFfOLqOQq ze8x|T;zHP<5W>@5mmv6@RDF&~$O*LBJ&7#?vv7#yDs$cJG6WL_h z!?x@Yt}*g~e_n0RQ%p_H&UvnVb&}*=&a>99E`c8prQ&w1^z?JA$?=Qrpc zp1j<--N5v>jhxAAEX6|(Cdu_H_kL{UdxAf*vLChksPIQX{(Dqe!JfkUcJ&hz{_o-X z_l(NweZm>aK$JQKSH({ZCXv4jqo0BYf^u$qlpg1-?8zc0F^O|x8nkR4G$O18Uq#2|M z(%z>Yy+kv9^$>{@QN3&TB#Fv6yE1aIsP7x(Vh(Epj0*;{s}JeZ@#PW}Wzez^5zx`7#*iT!~pY zWK*$BBL!5IHeMuPcNP)o<_hxQ_r>X1@ve3Hu$Zj9p@_ssJ$w9023IMI35VFku~%mq zmlETK)9g~mS!1OVZrq6;%Wk{zVy~El5v_SW=kl3=6hD!>u9$04In3*zrPZ3eBF}@dFszG3 z#(6l0khx4o1sMt9rdptM8r`H11fWPT)XTbHQosy|4on$FpBZT>9J4|+BTUnhQoeag zMiX2;FrIOu^s6l3DFGuEpUqQ8H;iPPpj8S5)>H}1+;oi`-DZA z2}C%=%W;-+PXbkA<%$^+0^M)BXUqc*EmDu}hMvt_#ctGNU~IgS!O$U*q|(_%Fe8GS zjRYqB*qS@4dfk1S-;rZ;D^p_33IL*QgSb7S8{=7)#NmY1o>z)PO(QlxNcT8OM6@)t zOw%-~JhNq<&**@ zEh}ObI&Y4KYxo)=mcE6L3zU|jHDVSU;Hf1JY1pL)I~;V@Rn)p%OvYLr_I-ekJ;=>Q z+WVPJqUXvg;B03baZFIJrpIvd<&~Md=`-)) zZ|PQ@R}E`@T!2|o^&5*KWU<pmF}E)bw5Aql%H^YF&MgJ1QQ|EmrjyfN0miT|Uvg zggb?u)zK8f%aQdHbU^ar*{$}miW2ySeD|Ao;l=OaeUU$keq1ORfY89U;E1eE`YARB zU}@0Fk!b7;1F^8XkH=S1@FEXh9Ct85>yAak>4L=~J8n0zZjz@%gZy=~r*5xco}}Es z7gg5M>P^0Rvwh{Q$J8WoZCV9M@|=^ibvSx&$L}USrP%4``FcAXuLRGLb%|c^=%jF? 
zowaMbcpo!GU0jZW*WQv>zLUq3RLJl#=TB$)(=9)Z@sH9>9?ui}^f_wM$(u;dYOl1F z_~V*B*{)w*sad3C+sxBX31X4a>#f3-A)_bxS46UD_HOxCyX23_c6OzF-wGLjR#|>5 zb7fgperzpIlymj}=>z_g{;y5JM&5nSmj1k#Il10<^~Vin2*WOG z;X>q*1PHq*gIVpGHAGpR^*WYF_mOk8pBSsJ~rVl`1EXLEX9-vDOV;P@~V1U08X9Rc4?JLQBz~*GcI7 z#=~S56H}xLM#u{G|8+T3qpN@GQ#w932WTEK)Wphw)T}3WpAs{U zIUNY+R%p-Ckxg(^wTVQ+XwY8< zaiullGOSh2LWOIN(yOsc!w0#1rUqk{Ag$rKT2#CkO1O!ca> zeuj2&;~^thG)ip2k)oxDrhJDE^YQI54q6VU!LJIBAal0zNXwiwU{y|{>PMAwM&)$i zm1OpOt||?ttGU)U_Gxi#F9z;lvgxtc0-LaE5yrkrNY-E8fbg_e|qfj8|HFM}aU2CS^6s$*Hy_;yDm+3OC$5);Kp7;Jid| zMNv0Y@r(+1GQbKbj1d;-f*V!jmEm&S>WrY^+t=30)H?kTHXu{9V5K$e=}T}h$AIAZ zXK?A)w<%8alGlp$GRM*unng4iJ*}x>-}I%H6rvKh;Bo2(NEAQ4VEqHp@5aOfn8-*f zp$Wz;Qt*JlB_by?VCtuy5}4{Cx&58bBvNEO8H+yyR8nrFi3LT6@ge95dlxRO3sH-L z>qL-(I|_fB;nG27LQtlaRNURg*dsKCU5kxg&(LaNHC(hJc*K2F9%0wz%4*-6V82Zp zQfNyNqucpZ8fOahpg6JME&3qy7)@KkSN8*zC?ikUJvh;sTP z5r6;z)E=-)8?^NWpfL8`|7f=YXB^21v{{h`Yn6Z0S+5{c(X@Gq*Q_Uk*wF>H>Pd$J z_fRIcUV51v#o$X%V@X*NSmG6Iax{R^$*t4?Xa%4tLJ-^JL76ZyxPegu)k>LEum58j z1p+ zkk`2EX8m#Ew=_R)@Ys9SUEKg=_{nO$ zu=LH5H#kp7*DCK$hVHEnw-3@@SR(&WI@#7&Y5t*+UI6$xhI^Sf*E2bx{i>P=Kr-e% z``)OImx_1c3m=~hZfB*lrMYG5g?CdODe39g4 zb+^G;(9h(YmVB~-a|4!S-NPrrk1pLk>m1$Q-Eo-rT`*ekxas4lC<5bs7SJ+ce|v&c z_u7x+lDRk(?AABXM1l0PzsN^U^3@9y8&3q%obkJcJfNlxd1*^y94e1Fe^JikI~(GL z9owM&@WngtgK_dXad47&CSD&H8D|3BC2{o;h3}ELG-{b2W2O-6t5o9!(AGIh8x#Q@sU#%Bn=LKn(WFt zl1>3MYB5=vIS|B0MI5xo%lX9pe=T=#z>iz-4P@qXG)X7HBO46HV3%iH)a2EVTO=KW zDHn#=l6ZxrHWML)!=iRIs{O9rkkSZWm{@O;#nh#t-d!UG1t3%6tWwz$Fta(gtWZ zGZ0I)Xt8&z^jEv^Efi&-WMyAgjt$=cn6?3$QS+No&OK~r4N5qynTJf`cn(2HjN+3* zElEK{^g~|L0_YYnP1>XA5=@TnN-K`F&D=Q}*t)Gcl-!9s2$!gIjs|MYV2KqdF6TeW z8tw+KEmho+Z^VL_Je^eyj*F{ZX%dO7>*_?t6*dgO$3+ZlkP)7F+Y+{a%8$*USDh>} zf~MZ+1R#38I*b9!5wp14&Hp-0TBH>aYSHmBu&Px=pG(*Ph67Io*uw(3oPX~2UOs6FSK(0(1p}^Hl zOj%9Enh?%|6H98P99aisEYhef#E9zKb;P6pVx6^Zi7-^mlod9$*kRN%V3DR7!L~(< z+)w^%>vpm-kdO#Qc%IUADhBYUl5QT|kQQVk@oHg@06qx$+=T&)RJaJ?-YKulqrSLd zH#V^X=p@x36TKPZ*f-u(Xf0JC)}ciKCumHfKBkcZADSbBPRF1iY;VeNi?j`J$MU3tb 
zWZZCzKj5tARFYVk4FIJm>>(NC+Ry`Ms1k^Zw~rDgR<$>KeZ7^aP-oV$rVmlTG&+1T zNn;<|>Jqegnv69po21Ndq+M-yY-}NieY)9NTq4HwvKb`Zhk_zLo-<{b!-L59%Bdz` zum5g?Sw5$lH;H(`KGvqj*qLae#G$%G^aSV0>}@aWeOeJRrvWF8^_a*Qrf??hE@mllUa-?#Z(U zGUf4&tv(EUM1Kej;{wT!faz?i07*naRI|z)@OeA;>j+=(;L7w#4qf6d=;yMmPf~n7eK6dyBqlfC2$bop|0Uk7-W`|RYGO#NqbyYJ`wSU^tW zx7)B>w`9ttTP@MGW^)%u>3p`%V4k5k>T~vBau+(Pyvzeft$|G|a{9iB_;g9Xhh-pg z+-(`u7qnQ;dEE57t65VE>G#?@RdZj3k@u5=DZ!BuQmgT(T;nu0<`6=HHnA+%~4 z7oa3@XA9xWizF#8U(hWO=Z;KyPDe8%*v3L)J$?YLHoZ{5Wn>7(MI{vb-!lmj0-M50 z-ZTzQ2C5~35gg0m;5Vj*MsJYVwfN=%0V&DZ(wNFBvk74VKBs8FXmV@J=uYBQRXLBC zOCn+73&ohnVSt)7BI>Eep6DUz64WMY3qU~xIaIWaSJ(j3t!TwS6nTk~12|YuLUfc$n-O|)ak4*6n;09$g@E+m5qAF=w2;MIp z+fgjMajHqga!|e8E{BRcZ&FJZ8RUz|nu+LuaR*SE_!Cn9LXAra?Z<2fu+W(z5xf$R zEl5G&AVSO<6p1NPq2Ys%NE91DoVrU}2WB;BG=pGvn|7@^PKki|Rl3Y)+eg9K_@IG0 zyBFe8l1sV*o!HebXZ&XU9RUP$of|&5SkE83Ys_-Sla!LD`=rj!W3P+}zk-&88mZiB zDK+Jd2-B8gd+dlrs6RsUTF`n4AZQHhOn;qM>(XnmY>DW#>Hah9-e*gV{`)rLGW1Y=fb+4+L_mxb` zwlYi$+H*(*p&K-iX^#NRCOd^SRT6*~6JWrWvtnQdER@I&JhDmo&U!@~#IugQ=*XuHOt$nN*$*xfMog(K$sJ2( zQsFK2i1f$iiJN+gr0b+~evh*6^GE9Ul9jEiQT2VhD^@ROv;c;p1WUKMVC-$g1IjVM zzJd~elW$k!&}SKdTFPa+w`^0YriSEDkl{z`cz zU+}@6gaLSeTcrsZb{QUC?VY3z|D2;v{s^D`*wp-Od4H>{x8_Q>7bXbz49U$uZ?0fI zKPOB1)gWl_5Bhz0Y_<#MMlkInFhVhfM|9CT)G-ccBkqqUFIA4Yc`t4Q;h^IW4;p@^ z3pNuH1%oLX)^XQO{0N>&BDt^@6?QOovb!RNBO*)Qd&|K#=N^Z17Bn5`d&yA=+tx&$ z5K?mvKCttU<`|l?bBKI7pz+rDR}H?5Sccvo`E4xtR_YTTo-mwX~^6_8a zh4UqHX!0x>0*;J0%#)rSME1{s*a_0_?4P&K0^O1fs!Xv68ZfWPMq}qK2jaM&SPRpv z;19|?T60Tb9$PkHBGZDx+v0)5R8SJ&W|>*mOVL?UiRi*5A;TM24`=5bq{An~$}=EaB!w6$wINZlM-kv^lDf%`FNG1#=!R(5X?LtViMK#dv@1MD_)ltCl$ ztvF13un~{)h_sx{U+oH^ch=qHJ{HD}h;+tAXc*RAY$K6k+&8i`?nE7mj?N|ZYa-JEbQ%`-aw)MXj{#Giww$M$^?JXA7<=l$V>|unmYJ#KF zF2fg|$y%@yU|`5&T4&np&-utlRVc9)nO##%%Dp2?`K5=$k}ug*Rv+~cH0p_M?r9?h zrL#5iiK3Lf%urUs2|uK^UdVx>_!K(RT;t>}=)$&~*de)Wj^H6;4gAPT#F2~66-IEP z3!+9Mm#4k!7&mLKIO?T*n8fU;PW!3D+%C{ag+QWd)i=-rPymIc9V9N5al86ts;<@+ zjb440U5aYY8m~1C`%7WcAT{gsu74m$y8-PlUtoj9ly@qQ_!Grm$W<7_lx;CicXrBl 
zf$io41DhLO+MCXsS=S`LM_6zId+c22^2E^q^2aJ>rGK-x#e)dsPe4G}o_38b3`0Tz ztc!QgTT%`^Z)py(Tfkw{kMJ_Dd)w!4F(zDITZ_jLqRi^=O7w5|bT8JIiW`jpjgj*A zN|KRJNwxWbU5M`r*w3e(3DMp^uizziV5^U)z6XO(D+&&`#wnMUc{kcWdo6f^4VU-Z z&J~wmeg)(`6;v6%Y=d)T{O11as=RgdVe)yozCQbr+d8*jHEfI5*)$)e=#hIHV5TVe z>I<1Xd0tulWp)~uQ`39yqOVX~a~6o0_xozd9NtVK-E zg*x1xjtD5u+T>LFc-)ChV>P%NDtEMBR zMn4}Lc*g?cEX*u;rhUMSmrC1+%)t%7G4$boJlfUf(7~MWTEiQ|%0A8ACy%nDQz@A3 zUZn+^u$#=i4&UxdK^Q0+Q}ME;r5FNGvl|qkb`vVq>WS*XAieQQv>fdLQ-7`Vt!4$g*B~mcCnQ)y>j>0I6_lo!>B!0ykY^VoOLx#VO5iqG{ zOctsNB?M>k;E%;_;6|%Mcp#9O|B9Sq>h&qTvQ6PWm}V7w-$d7x`q|JHoIaFV<^=`w zJw~;Y!x&UeKV`>R5XI!T$_`8Tf)NN4;w9mNiza&Vwkq zl=#Mgi;T$z?xmKD$t8!ipj%_A$}#TsB1|n(pdJID7mP6T{z!lXY&H0x~&~Pz%0U= zlH&RbK@jhEtZRnhULkuh zF#z|d&1No;U1P~(%U}o%;q`i@KKQM$+Bi_*u)-!bfT&j!)uYz%NXaNMf(A`hA0!k6 zs>sl_(aqS|sIy0yO>~k1W}+d_D{KL6l<(5n2lNtW;`*m%ZXdDr+w z5E((a#(i9&QlbJ|^G?^<+VZ5-Ny523xGTk>U*UruwYK-Gj`9j=D#Zm~Um{c5ey zer!=r)Z^c~$>#Fly=wAxl)bMtTSXbje)APj%GLzIeQ!S<d!0wa`xXU+nj98|+d+wbB1Pb@lk$^ZF|NctSrWn7inoCWv$ONE@Hg znPS_N!&$rYLP$SUjYd(zPwft*T-MB*!M!NZ4s!kUc&^WDNPzd@^lFnCemWE3jW2nP ze(`k{wAvPRIbN`Az7|%Kv!iLLhs)fy2-f&Riqt-gF(Uv3c}zYNgnzpmjrb&@C$z%m zW(cSXmg9$MQI`Dje(3Tp0kS7S^sLXU|?wV09J#fk%28**ihRl47ujayg zjkzF_sI078=)jCpW0~i{M{5kJ@(Xu%x@%QzzO zc_h>QEPVHXI-XW7LQkl&!@cU`dW|viYa)1kYkYv@I$m@q{}xxwc#CxOstdt-N>k%+ zp8-DP@{tOV+-ne_pH! 
zyb>c|dU(H6dtjMNC`RVpe(*;YTGWdl{SMJLOEVhfQ~fy=FfJy$rp2inLt8L~;FdbF4L z5k)|p^}KMjr@_3~kzRZ#r(Sou#hZTu*UbZ;3~2&_eQWyU1B9)O8q_-dTDn&Gp#YhX zN=s>op02r4lr1eG&MI41sPre8!qrsmA}yrSL@kd)+%T=E%P<6m>7}hjnwwDFAfc8- z{NqsZ-K%5mSY{-w5ND*pI_lHv9VCa#R2cvx3o#=GJy;7w3r#&<`5a8BNJeq#0~x#) zh>ahDxX{~bXbZXfGEnb+Qr}~EOK>;B8+Dv9>Ix>;jpB>fyURxS=#`_7o~hd@d&l8Z zJz%LB^vC+9S@tG(kZ6`UBSRXVn+ z%+A>gwSu|MO*0IRhlnyhB~pec{M?R{Qv1Nl6&aNxd6sI#n3_Ls-$$fI?D8Yll^hg# z9vhE>brtaFhgwG*C2kfrM&3kh!cr0_2F8rILSt-YitDyccc}KKahM$;1-?kgb%;V) zJxO`2C6PRWK1h~Q2pd@(-sp=xsc3i`*~YU%RJx^&IVQ*kfwKOOV-Oua9j0ENk3&;9 zDQl_CsZ!YE_)CCwj>yc$H;7s8C;9YGIZd zN*M#g#4K!wEPE<~@{G?ou~7#M?;;FU3iMZBvq-xEQao}VzaZb?>QILbB)1awI>_Tz zq-v1N64Fv5^BYWzaPX?WhBz+O3HyMeEWjGSKtPU0@XK5;ogT8)WJmVv2KAs<96gkj z5A9N5p$7^V#(?XsU-HnWGpS9vZWA&n^gMNFqJqA91 z_G8^oI<4|=(pt}zjd|b}J5`xZGhtmJmYDrm5~ggbOug{JHtB#`9&v7Au3aD?ciC&6 zpu@W%6EElxa=x)gwI{KFCzJ+o&~uQmDySlTMEw{WFT@IwqI!(M_XDjZ@+ZGoJ8oje zh)Jb@n%6&JTM%K#@<;-!Z;~YKvmdiRM}McuXEqzfR_uyHMLo4n0TmFru=8xHpxVUw z1-Z722kYss+yr~g!{M>5xWKy)niC{Fzs|Rh8k#omT}fS@tA{HN;}u{;s+Pa+nca7c zguq&$llFMvq(Zf6rTDu!R_(D={SFFZ8(`QuB7wqFJ@hD)Gus;QKuY#`hrYTM7_HvK z{_Fz)+lWTQhO&xH1m#EumP09YLV&@4VB2_2(2J+yOB%L>%QC8fb$q*LB$HN^6v6p>AA%20UXi2h|$j({I{wZ&A5Ls;}rZQy56X~ z#yon$IM1jYsDHKkj(vTpzjJw7OL?GQHoP7D?Um)rdb8}s#Wnt^w_uw% z{i=|_1WxC8235}Pus!34!pX4>W)+~DjTJwB=#(>aoZST=WBB=IhGgECze{6 zAe_C3W71z+c;FpMi!04)^^XC~M7z#=!iAUrFz+eC#LJv@!Oc!7G6V60;7n$tn^xH? 
zSi?(k(twCLSm)RVXV7RPa7fGXj6~pwb>mHa$x0InyJ_OR5LK-v!V=O2{x6hHM=DYC z8dT#==RMHafA-z{B712hS+G{+s1Y7IOQ@)$ex#bR_#g`I@z_&!T55?iaybzVM-hyo za5P$cHuSpUE*0j!@W&XEWC$73P(?9(dpUs)SzHN#`jLCgN)mBANWi_XF(}A`_Zx=d8AqhR>Ktwep2NR>tEPP+U*U;~E>n-|FRA2Vm z52rb+Ox(WB7uY%>dJ9^oalvHxV|QlJeYzKjSvu`F<=G^)^*#&vQswA`wG@l!MX+Ct z2%)%$q^mAmlFOS)u6<)@KJRS;(-`{xm%(4LPb@7INe{h5{=HYtRT zc^77Q#KvQ$5}AaPo3e3!zaq~qXydH3!7pN>^uw~FmyQqF*9670{~5VU*t89zD`ew( zO!oBt;6OLp>HaB@le&xIT|Hn5NxRY@Y81`|k_zIQNIX``@!`)|)rwd-_g& z9oTMkZ?CeT{%h)qUX9_a`*z?>RN$Y<&U#ZNXV}gUqL=viZdJ;kgzvD7emjpbpJ(A1 z8OzW^J06=SD)yqWCyCq~SqV~_wN8(^3_Zj|8Mv8j zNPh_T0mvYA@GzOPtjn#8BHYq<89S9DPM=NOH>mFX8~J)tS!&l#(v3ZE-cz#>XaUi7T1?Pj4|OQkil#=h6sra zgjfTPuDRUB-Fh(4Z9Juf(#v*)IM}6WiU3p{#rCOUtN}C=*36>c%p{jy6!PptA5Yq| z-YEQ94eUz{uo_?2!{wj_2w+sa7)XX+dQg#3?0*8l93#Qp0a_D~#7gB(=)=Ii@gSi# zn1xm$mW?3uoe})yo_#FM^D63+Szy3xfNBc75F36$l2W1WRTcWfp$`zb9A4xQl;$*? zAf?ZaaK-55@pkqLOA#9D!Cw-f(H+eSxCRD!Yd+2ylT*4GbumC%KcQv|)e8Ckq6#ul zJnXhg@yb5ecgM&N%z*=h`fw!Un-TGGWO+wa{IO>a$>qJ(&GL_9+;H zTPfe%6( zqDpmq)vLk*+9M%yVlTO5JzE_Y=1{VMNpb)=ut=y&218HFCo`j5JE^JBHR3mKMn7s} z+H(wpMr(*wEHcEx>o~rb+-s*0+<=gpo%#A9iH!WX6HO0jaQ%CG`-oko!xV*$RQfuI zmAi7))9W!#1RZ)u24a**OQeG6Dy_>z`(7pZ;U9;D1;-2-ZLcW0A z2gj7|Wt`hT{r5`G0_<@*-CnPO^&W5gf_c}g-FBz*-fupP&e>@$dQ9)%ztu@UmOIX7 z{r_pO3>MW^buf<`7I((YFbGx4QEwize4+PA)vI(c!E*ihHU6&r3kp5gD0DGv@8=A( z+;w^!RD^C{_n0i$XNxcW5pxD1k2i}OC`yw>St;`bgEYa#Qt z6;Yb2Pp`$9QAl98@+QhvtXRYP#()550hADqxO)vT{eu~8_Tah*8~wf!q9of29I{JE z<#5c9Lg*xzal6LVEYsM?sJ90uu~Hr~B5ZVh4NIK-tll=Qa~p~cpTnO3G6YFOeycc0 z&eEffnG$iTYYnaWNDD(&{!sD=vTuv;f6b+(CyZZ7)pAqmLX1o^vxS)+zReNjy5knq zFj(cSMC5ijbmgrbuh}N>c&K+V1CecIna*%k66^Ol_xidyFJUw+*wW^qwV7Wdxf5Me z9Hc!svi5$Y(m7O?Ophsvk6ma%_g*^$$b_85rI6gvh5GmmpZ;@_IT)k!A`Up~b6Q*E3k znDS7}&D1ZoVz{FI3FxJ<$s>F?DQrO4uvEh`XD_hIZ^=|y$jRleXBROh|2v5X;+ zBAeqL#D(Xv?23}7ju=TfHzr%jQ?`l`Fo+tz6sZ+cGAv8>?P}g5MA%ekz|UKvvYh?i``#eNEllSdyFjbiiQ2kJMI$t-jyxsDc*TqE!!eF)LY0|@cQk4Af(qG zz%PsTejls+N_)8bsu7&7tHc*k`k5$mG}1+U%#jvh_P%I(Bcx0R!Z6EtV{?k(MQ78; 
zS=+Y+<6%LmLRO0f1O#b@Kc$Xo@c)?I5dcULJkFv?ULWdE)e`XRgR8Ky9(uxSj$+N%v!`IX1CDi1OD6DMS5S{jNi_yey<(SS~9IX1lbK3CY1 zWt_O*g%c0Mk6QbHJ%QcDuNHo8lseHR5(hkG>+hOt9&?vY@Ne>G4TqI798PRU=As6S zP}-7Ny zMhROW&9a3yt412GwPgK4Z--mXvG3SBv9+ZB<7on!1j_ceHk z!Me<5b{P{xW~flSdvfXrB2IxVj11LErNa`B##`Ks~k%|UkQodJ=+B%pRka(9ca{`vkLm;TQ>1>3(`Bg7~X{^t{RFb zFwWU7z9IP@Z_bo(C7i%1QW|%Ksa7W2l>a7g11Y84RBS_w93ffQE?&D8cvD#al$3N^ z!E~-P+4^0}pjuMf38ykm5)CNi3YR@Qlw1c?!&u&&TMxhF&uI{_^{Yn1D;R*%>3p=v ziQ|M=CgVVmW)oNXD_>^!Jv)?Vd2~A86UN80n#-|<+-ND8fhx*{zjKu+=2AI1Vggi21^uKOkKeE=`7WX^zb)JmQgb3vCw!i(&#v*D0i3R*0|x z8lPCj^psNWFCOqV!B2q!t=<)+Xl2ja@PR+@5;DN}bbGy2J}une8HZ#5oU7!WYNSvH zeI?KX+}i`|&$!dN=2a3uO?={=KTV{Gh?SDgCg!#7-4;3WX71UQqE<}C2n>fc?uHKM z{&3CeG<=g@y$X-nVXdVSNEbF`BASH!sw0}wA*728jss|IveZ`qXir;{h4hEsELJ0f zCDC6Gt~LeM$N65=#AD%yEcdl+0U*9NO^v&_N(vzm-SxN z1G0;L`Ai_tfE3LJ24bgP?&BDl*%bQYc)dpy?8(RV0FYlqIM$qViKYS6$C5K_zm^G0(DdSt6`->x{}~+~PVq1#tG%1{ty9?re%y zRQ}-_*{5OCsKFZSv}!tL4zdF~_}?sGXZQ)q@(k`Eu@Pj%PK2^cyw%TGl;6T=Nd8W# z9CO2j*z|q+6g4$bpO(ZUV=aw^$gTa*kqy=5KkYEkCi%W+g%IJ4Fn|OC*p@8GH{Xr_ z7=^5=Pw-X1m}@WkHMGFEgynXgERI14#C6NJ4V=lyzgeu|Pli z`C^O5Iz-|!&L+`DwVw4xM;wkU-id&MmM-Sv zzzG#CzVCiYJtupSlW@VZBe(?n6^s#WNJn6`1g4xYKU?0JW3RQyq>yAj@~A?6*%Jh3 zmEf`pr8A3Xv^_upqnQGX6KQFVTL+*xZDfW!Z`{&;EFFeYA8d?duUQVj8P-b1 z5_>nW`3;{eZINby-r1HlIs+%G>#40NEGX{L@fMVQN%*84R=5PV80kGJgs6JZY(qtq z7G468#Tm#1!@5-yqHWa+T{hqF4?%d0Ue1YUXH}DG`ettjVOzM81&?^;NTI$Ldq+7_ zms&L}EQP|oH-;M!dbsA#VpGYw0hT9x1aE<74_w{%UO11*!t~4I#2ChafNMjp&$J3( z+?Vug#mD6M!aj$YfN;r2X6N$4j+H+!ckg6V2}RA#$WuN@H+vC;eQ96C9#jhkrw=1R zl$$hLiiWA6D}9QioSPe|vw!`jii8OE7ht!{hBp#vOS+A=%Vk*DeCJ6|-S|SnLWm@B z#)~tJ1?y3rjRXgGm3&y;rMdh6RQ-RsU*7`eNLbpt6y{s<^R`9;)g z&ko(QZ?0)V#!}a`;_sUN7BXFKM2>Tdh0?KxN=kW9X-e&KZ*V+YvMumf27dFU^>r%J!bupnH z32XyYl?6(M#QFeN#&|a|dO1Q=CI{Hj=z(%GlLP`UV5!xTG_M&F@Jo-z2pHgEM&@{c{!4)Fpz1n3&tyCxQC>MWxfi>a(_Xka}L#I5eX^ zXKP2UNSbDJFvye-YHwxdG`dA?Im_6ot`&qmWnjs24*4)2B}^f9nOP7D7ee5ief1??M$uyV5N$(lG?p}3Zq(vGF?%1G>)MXKOT zr%;9wkM!A`d&n?zlQ)C)GW@4>#sh+`GRT%>&$bx^nOQLhU&|{B7l|p^h_7>Q3JyjU 
zJ)pi`S~J)q#I7zJYz|_~rcAIPfQ)(O4_UgS)`WE%@I?umN_e;kmV=Re!IvNxNQB(Z z61hSyt>XRD?LCe|FD%S*6pj;P_I;M1c6)CogWFZ&au&WrQ7!qlqHOoys*-*`q96Vn z&z<}JV)Ka6|EnJ}e~CYS20cW5P-A$|qJ z*C?)#zUlen>>~`~vHv<{pfvbs9+IL{|#~U-$Pm^sO z*6~R)achaR^g5_d)$8+iO|&-~oUHf*%v5QZIy1^xOWU1Zt_Dea}3UO?3(}CO9bV{ZJyh~MdmeH=F!a^5@y#k>5C?ta&VglYgd82<9vd%%;sXK)U`a zfWS7MW!$H&nL%uYx(Ji|UW-{^t#qbMsRjeB7eH`XlL|%BJBrjI6(SWOA6mdnoj9Ys zYrrc|CG|oWtU1KIVh{{}Xv>flyucvnYcw)lULS?-hZ=E|e07JKaZid3J4AtVoU0VJ z7@X}YzK;SgS!)7e^RRy>;;x>|LS(TiFu|*7Uu)Z}!FYjar#_HXNpEqWOW#eMw{}gd z^V-CUSPQBLH-zd# z{Ite@6Oj6|e6#-Z^?%VeaPEX>EDxw=0>oP@%>g67ltWN2#ABf~ajuXX;YS5;g8Q*` z(S6!yJm_J>iym{QWTtxNm^k0LM0pj=tXDg4ThmN{XrQob#dR5*uUi3I5}%~c2%NKW zrg)PjJj*v4u8*bxTzfAie1jgsEDva0rO10`*J$@`De_Qdjxv8pjt+dZ>0ch1Z*8KH z!+3?is~sab3NBBrbb?udHP#1v#lNG?|5`MO4Uk`-gvB>K4jsV(nXm_q~Iwm zMR2s$s6TGoE&Z{srB&>>nw_?ADdCP8iO;Xrgai1$Jz^K$bJk@X74}4nY_BA)X|@eo zJOyDXi%~RrF=9_`I7PC6TBkly#PfZmDLX!t($XxrChY|d>*&(YF9qw&)H)MOGOy0^ zrQ!z=ry+nTAl#Vsv!Um9MsDR>@^~Gncb_DfX@#qWwVCLaVxKn zeqY5h;-W4;!*gY_kufFoCCiQmIW-+*W$(X)M^irH^x73fb>oo_eXa7Fvr-c~)yU?835L;RfP!*DoOew*fO1Ltc1wdZQE z1>;yioj~6t4v1qQLhfw*eyt?m#diF*3OAz>ZNEQLf9No{n`K=(#mHr+E277Iq38wxpFag!ghMs77QdFn0<5};^n`m6 zb<-xkMpoM)Eh0+Ma=a|s(2ljjfO1`awP9RYnX`$Lw5)JDa`Jft>EO~y4jy=ut5^U&~ zkzrh?B=eD0Wjq;y5pm2rW>7Cbh!bi}lRJD_Wj1ZfCwym`CvY`$MFZl>-^;<&*6t0J zTr>{Mf@K6lwj2I>*jEDNAX2IK7GH@%BhK|#KL=HP6Y7zW;?NPZKDa5}JAG+xl$|*a zDDntdf-{H&Ain2R9#&=OqE|~{K^4^b`Zrw%<8jkWuk=dW!F>saX_B)Vf$QNk0w1pB z&5v7sqBX%i8DS6O5PH`m4}~{earKy&s=)Q~ZE@t{#JTiYV(ktXt*unhTt+Ll=wpWh9tFu_<6g zhfb-Pjj#ODD1%H8bkv#WpcrIe+jWB;KJ4VeV`hro^8yjDoHy!Z(*J;^CB|$%m^eUU z^+>AO^tL`WF3<#()DiU`R|ea_Y+xEd7s`7dc8Ny7&_{~{?QZlDgO?#F{rsFc8n zdmbY5->P(qBR0-6F0*U>rNwB~I{;m$P|KRu#PEd0wfGRGaXws-NIsl0M_R%xKTJ2W z_xFu$n%M72p_JS#f4CLzd|0=mP@B#0!xJ=3J{3O29W0vdPadJEHii9I>=^K%lb z8yG4Zyu}EbQZp+g@i!tnfPKwv;JiOCxw1C2<8l@*q<%e(1hhDAqK3nbgY2(Mr;|0F zit|^dE@s<9=#co8`;N)OM)ZI+%aGA!qQ~8!oi-VrJ+(uI#CxzFK&f!rkJvb}WOxgxRn_?iy z6Uv5tT#0u=tUo)vT0?Vk%E2*9q`_PFJf=~P6Gjuco*yBLxcw61^y8`Of%zZ_2ahVL 
z!fAMwMQO}%EYnthKLFkj#bU{rIswP->q2{kqQ<9QYTVz2wwZwP*OcxhW3srQQXm{~Z_z z-+?&(<7De?Q{wT{wF;02iVU#de0hmaiIqopNOGqRS;9|xAL~@I4;>6&3@=kz?Y0Ty zc!4l6jbQs219*}VKy~rx9f5R63sTU7l@T(huL=%IRV3Ve#;hyIX}d< zL&WZ1^DYnH^i!qLhr?iP1~q)YfBiz5HhKqFx1Yp0)p6qVAdS`BMueGc$*Zha(fCrH-8sWpE39F*v1>$^0Bb^oc(Exd-F&oPo%uk9 zlM`pBW`UtIu&rMKN#Fa;6=HT<`p4S*o(dVz4bLoR92m^qr;A|wXP~hi-Ejr{7vhQdBW7hQ?5UAMHN(?9VkQ6 zao9Yodt^J}a9RtcUju46X+++cQ+K$$;|E5ugshUn9RFk^2%JwJr+`Nz5mVRyQmUx@ zH_wzgW(mr9d4FZgah95tS9kSnD#J4X`6$dbE^j;!mC8mwo}O1TmbG)8;QMq$iIjGe z1}{DPw`*kFs@G)4EbyCYhA2b(2w&ZZIc`||lBW0)WUM0C?lI}7@vNZ1(nP^Dq=_F= zHxLUpDkr&%p6T*@vO1<ierm7q!umX-WXoKy7bt0 ziHy^+C(uwjN!WS%!h%NqJPF+9n5MD^ER#6=L)mIvsw#hJ2q*PfA+K({`bTvk@N4;i z%`1zP6PL`cYpTQBX50VolR>I9LHYqUC3bnamei7o$!vI=(Um=RATYf(8d{F5fjR7< zah>kdYqYQoL}Zq_A~Oq({o_=Ekw|L1-;qVWbi zggsWu87;nN_Q=-pp0gPbOU`+}0N2!I#6H7ZM8@z(!ce`;vlcs+xypK=qcJ1yG){of z>p;v}JAk2p^0a62*DEB>FYHK3EfYRGL*)EVlh37J$oxNN(M(Bh#8EY$Q_G37tRLKo zn4i2HAT0i%!aYp9*%Ws9sLFAx;=(U|rWtK{XApO`{E~|tnOk4&qXNSfVzrFDGFh-{ zf`H2hab@49+W^CmmR?QKzm#b)*~Z9j$E7#O_b}Q_TQ4eWR1G(3HP{n+Iny)y|xaw1ZycB+F)k zZokt`1J&pj79QSpSeoZ*ucb3Kh@HF7rwv>+QA2f)j}T-I!DAjOm2|q&w;b#u3u%pr zSxax3@*1?c&ApR-ZWFXEe#OR;uO(*bamTUDYlys5Du_HsL#y&@%(C|!MMjGdWMjcx zzD!(Ct~tt1T(Ya)^^T~GLUso#{yrA9^Ua?1zs%~3vk%z4Y#~K^83`3=`GQ7;Z7(U! z=y&VG4>mKbNKBvJ6$ou>(G2q1iYTN1fkjtm4bF?1md&2N#8@UcxtwFpijl5WV}VsU zoh06~(=^h%nm}fqno~unM#_<)97Z1XWU!MOg6O*xr%O(nnBKY$U3B8*YnB;G! 
zuWLv#(AzArvOo8qf*(U=g2GtZnBQavz|+PU!Xoh4{yq%a|9JuQ&vPgT9=*WQ4DWp} zKwIUV?T%*nCu==-^nftv#CK)lCa;iHSQj;VrZVjpaP|sk*&?O8GsJLa|IL)v(SxIX zPw{<$T!7HcwL)Qc$DBd1-J0S%P-n+=vhVgD-q@vSQ5mtMrgKd}Av_YmNl{oe-#?RE zn@ko5w=L3aT{^O+g40!Y-iU9zS`!Er?>TKmLj9_7#cE0kve$1!HjTE9x~ygcjsH$} ze*1FXd=|rcEs_1SYTDhO(sAGNp51|0Wh{nQV%_?i+Wh>_;$@0U^|}8$<@@01;#1a- z%F1y7bpm$k0)0BZrfY)%XT{EbH^i?Ju`3`lj_C%UEvxxb|D30h=sL1H*70KQJPYSuQSyoFs#FRhTj{(5Bi1{1slxWq8M9Qqd2ha2l z$1(57QJuGQw-l!Mewu1x-)uDGKLmlGQepQ|xVe?!#=pMWKHU-0=*VnrkXBwbKfH$W zuTG9fc0xYN2jbb)$Y!UTqC{#OW`~4=V={MH!jdV|mDjy8-)wq2vJ3SZ&i6`jO7#aO z65C-@0>z?X4HS^PS+jb;$&KK% z<*JaBB#e9VJ1||oHBpo<{nVPCd?AJgeQ=d&uu)e>^eAHWn>Q6q@o=IA}gz?sR#DenQ(mSqoVmIVgpp zfC&MG)5ev(c1pjX;RxbbD2S)ZA~v}qd!>{iCQ^$9dPc~)G3)(xg+*7*Z+wmY+e#x= zj5)Eh@fv1KXQ*J*-1{Lb25(w`$HZ}Y$e8E_D1k$2V!|f=&N*6{y<3Oxw+fnL>}K$s zav=CUx#z4Et#!klBwHrKy+N`KbcFD>kn3>MRc(ec>ORZ5cqB9}(vup)#E=!T_5`QJ zA_|Q4VjVWg#s0a@WioD#HgR)_#kkm7P0HtbL^fY{Y5{XM$z_(TIjO^eMA8_ycH1(s z2+iF84b#i`f9U$A@XWSt+qh!ewr$(CZ6_7mwr$(yAKSK*ik(W`I_uoM?tZzi<6*wc zZ;aj>dT%EtFjY1nspOKs7J?)XMet#Yz>wiU(yJ!-)9xNBj7`yE^zsN3p`#f#hU#%P z>BH1z*D||-0?_e06?5@$7BQ2-yVm?kF8`*2boo+{`9y~G?+wZ57yHvE@7vOpb&B#0 z8H2UNBaYKWX>NVADTWX==oL$YATbSJWF3$8`w{+_NBW);FJHG1RgRvBgYOc;ls^Oc z^Bn(W)W(R&S*#$sF4MLPUDJw(wCeXI;3DUj~3P!EvcLhPaeG9vGRB@af;ahyKn9zwZfVCRR);C`6W$ zptvR21h2f|8rdcvuWO@o-=E`q=ZIA5xG=&4?4r85cvp=fMJqNmfGpGsdT8*scEFxVrP;V2jc4N^TiA}hoxc)=ZD#HM<@``bqwn_cSCRs59HWv3-Xch*b!mkKE!D= zMN)+>dzf3%=A+0K(D7y1rx#j^(}oF7X-na9N@9FNG-d3SG0{|nm$xA`Ml;$c9#Bggg_ z&)|IYSqB^U$mK(dPn9sLCTT3wHQBHnmd>3!(D)S9gcu$M@VfA0P_*qW1MGj8qJG}? zME=2Ew*7X$9;ryKfZboy%3dQ-`5sVE9PWhV1BQa(8tglg z0^OkD$pIs5*~8PS%o-UM2{526az;U-XmUJd%FyHhv_-|#MNRO2#$r$PzadyAUM8n~ z_1FVZW|sv3jUaCZJ>e^0$mt1OXH>oRGvD`-iA($WRu$)Rp-8zPM?~ZK#}p#>jpviPmPBy)Me<_t##N=nn)? 
zg2S*G*pC~#cBhtOBy7~;zj>ehGQGAaKO0SnqPD_&6Cp;wU?ThJ`vwu@Ve6`@AG}cy z(ppr41X-MOzd0KA0>Dvg*pWvqccBJjf?*e`&DU*t7%wHplT6xSC; zNBxpO_Bq5(-i6vcbNS(nvr2~=VdcGlM4F^HX zHBH?mA7#a#^Vr52`oX4{rH-5h2mO&>kmvrmkxc}uu{sNk(?_QE*BYa$XiFS@|0RVj z1rppseRDMll>QIn{=X7g0KB8>wnp1eUYF6p*rgl^p4GU#Z3rv|Z@Zzh2oFPrupHTV zQzNtSy>5^+xU*4%;tZa3ct=x?D^b)9xn4gwM)2u&^_H-75jl|*WNy$PtpB@xMI?gzEu3v1DP~tB*Dtbogz1scZ0k$KR10cVe7{)8q}j6TVbH04FlD|!E| zwzuzL&02F%WM(*ugOnUhx9w=Z8!nL6Ct69A<|8hRv##xR)XrYJ!~lcq#m&>InRfvE znC4AX7{Zo=*JutrvY=*sUxfrz7iqC^$wA$Xy)WjB0!7-C!mVF75YZ&gz#uiG$Sd39 zv>MA*4Y?;xr~z~+_wKyB83he$_e)l{(fDpOE_-F0+-UC8aR8*t*op<}|w@?%|qo3vlS_^2=!&9<8~`O2*=u$Ke~FC2dL zD`h$6Xb~8TzyNpx6&d}z006$snjKA7Wjc8*(LeNr`n4E9Q}RgKKq^x+&ScU(`6$4O zK0m@AT2h^qxdE*~EqkV!gn?FxoM_-Xxc#KPPWni8%bruJE!>pgBCuJ*C*FYR?wj!AnL;;(8iKU<76QbjEN^=T$`>r57lWyye}m58m+U@^~85d~bXo`LSGny_ha zq(?*T=;k!I&_PI$ZjaW(_3_g+(`&N~#fYEGG9}TjdW1+nVXko*fNDoN#2tlWHc3y= zaXs|C%3H)7MFNAkJOl2uWJI|3*)uZm(5zblfD}me*tR@GA0H9t)za7Stf*E;i_)dE++z#$e zHPQuLo50MLWH-Hz%gzhh_d*Me@5DSWLRUc~n~}C>Y$TFHe*mj++=h!H3Ud$FbBZR^ zh3oeC`(c26Ix?4|Q!#$H`6V#W4wwxDhAMcUyLFV{?b{t3W0s7t)C>`y=f3gXLIUV- zTuzI!r(9^>MZ>3HmYtP-HI767a-+PaX;G%RISz5`WJSilG=tQ@Z8pe+x-okjF!Sw} z=e~yFDu(?22-rev^S+ma!gHYoal!=Ned0iy;A#S;pj|pnI%?CrMTszTF2^ElDw60f z5c9YIoVe~QB?jjwkv@qh&mpA0_^lxN2}?jF#Gqb!npWoM+De+UP1=tz9MxmO9WWgJ zQ{XJ7Z|!h@ALcP7iY#9k+4|cGGqWwJ!6uO;?fu*`IldtAhQ`N2qaa&o4khx_ggek1 zJwkNJ&vt15*G5_D20Jd*skI#)tjMd%tzlI+%jVPU5FHhq*(rN!eD_9$^`zHJYg7!) 
zTQmqFJCUQDuh!CBrT?HqpNC^dW;2fiGV+Pk$6s)-geUMdZ&y&Dj71dD){}5*gilwQ|4goJJqQ zP8imkot#2DP^JIV{>SVzQ`as9J|Uw%pwd$B)$UuMZf1z_x~^Q2l?B_4gF z6i}XQI~zzP|;U2($}#%>Af|Hy7%#3EE`fl<$+HWs^As{^+)wsKq2qvnLw83i1a3g(5rA)y5sdFOB-Mow8lJ7qlCH=( zqAy+623O&kXS5E^bp<$l|3?d}2mSMjxBj#2`!&@M`d`-VYM>g_rR{i|V@waeE=~ux z2Xof>H}V??MK71&i|vKevI>5P6Ws3xV-QaD2rd==u_2wkqQo$u?sb?bvi-x8le--7 zwDD2a3+a_%C%FzGQsLjUIzdDGH$f!!Dl9m-BFXr^gUo4u$lnRIa81V-vOZ}zO=};+ zk=1e{(QZ;8!*;JXP;}@_Aw2|4Gg#5IeZ#A&C?pSNRiVO?xL72zaReAl&eHeQdPX0>8JGHKdy+f z`AbfG`-zb+v(41u7BqSkWYiuCxLt9ac0ie&O)(M4WZjnOan0bKg?DOVvMGyIU{69x zQK|rj94Yj7$E`Lr9H zpt&$~H&3XP2}Ul;9`hccMyZdAS_YJ=jH~8cL0_dzI|G)MmQtK)^Q!jU9EM@ui?-CU zxcSy5piDY!;gEeKjIPqZe%}_dZBfKk=1YtU8QEc#p^Y3J2VVI#S=%$~ggEE~7@@+p zcdA6b2bCLV)FfX+IHSEtchBSRFg+FY=>%!tCP)Kbtm^RhT6j(h!^}fgkplCwnn)hu zd+|>1jT{?O@4=vCt0?mt$O5AnsURd%5mxXPd!T6a>SO$uMNz`&>h^mrL1LphzA|Ac zLkG*QsV1WEYHlm++GGC0HY8mq71V`qVw^VVebiQWn^qLT5d^>LiK;6nOO(NEau;(i z#FB>D2s&Aaj=Z_#d^a0qgEfU4dss~3p#d!O3NyiX?X2^Q1HXelM~FE7OPKXUF+6 z;}wT!UrT>({?l;(e>`bBfqBl}!c$)I+4ar3z$impZ(~|<#ib4ej66|b+PLU^c9=`)mYdpth@^~AsxwhY)r^q`gPLe#XbCCrNEVm)z z!fKh0?Ut=YFu6a(MNpje)x(+)qyP`B?PjAg7Wkzb&I#e|hRgS>5K@TaYYT?A1OcR)wy`!g+l#)m9li=j18I{g3qwDVt_FdD;@0x^!@q zPYIpF+xLF|FVJ;qxZrC63b1>+iD82iO5Wy4E)n*5$ebd73jz?SSvnEWX3Ef$7)e%T>o0aM_7mrOPQ zp&#e$pUhW0Xllccx6!0$fi*0_=jVl!whJ=mi?UZ*G2j8@ezyaKyttblbamDtfd-UE zWL8d?{V@}zCRMV7BK`%C_J$R*AH#Ou+AdD!jm?~LPO=(f0w4~(i(1oQUj`VzYka>f zTkS`vtm;_aJNBXeKzvvt#S&ahMD>8kz+z&nNTX9tku=7b<}N;f;*_8Q8Fs!l$I_n( z7Z4T(U>gf}W=Ld-v#fn?Z=?;-0vc-20wSb@ygBUZ2FI2pke2zQ%2pNii{nXKQ2#v)ulPzM>)&sLQ#_oADgCn38+mL{^bnVR3 z2AbS$)Tvp}#l>AZuR$Ln6XG42$WSbI>xSzPNDGZn>c`iDpOfiJyF~o ztMbFv4u48kfI_ShH6B+!13^t)1=e1RB9I$P<4doJm=#$8(+(qem}EPFWt3Lsk)~RG z4hJNiw{|8lMx66@N(c5Lyo}=}wpq%G^<{}qyPH^%D!C>zRZHx*M0cFgtFnU8Tudsa zVTFq+ER}SBI8in`wx}we>3nY$^L^+!5vrF zpxA;7!PPqo#umXFn18GX;KZFQ`+ihdh4k)`IGUm2 z@>j71*$k(jFOC`?A1Al1-z>EJ-poz6FHzQ%edOTS3>NahVlORFXUv?2A)=}Ze#i5^ zg3hM%JzXVSo*joXa_20i|0B82tREybCu+}Wx&SQVklfa>56sj+&C55VTnt9u!dh-i 
z#H^%u?6*>>p+M1fDYPHuan)yO5(QDHp2txOF<_!lAt|GDq~ z0tbpK?wYC&J(V=%6?0gi$Y@Oxi)n zOT~n&2t{tcB{Q1o{@UGUFOzf*Rq*mWz&62!ePDVsq|@=JfzfEC7KaTRa==H$cbwmO z%svCKi#O`j->(-L7nObD~n-KS1e5I_~?iYLlxW}@+g zP?!!#I6X&?qoPgAunAgS6})@t% zodH6twoEq`&{C=dZ)#0?k$iN$q6;&A|H{nJ&XPD{MRhQ2REPvr- zQhOfs{*d+ix!&UO+Y0&6bJQ5=>)78aXGF0t{FIQ$WqG3y;is(YLcJV@4huTMaNVq$ z$?4LP8_<*hX**rh7F4@g;;OcJW0O!&@e2o?_MKg)leQr(RzUd$kb&E4oi<%wOl>s} zoJ6+RBLKFUVHs15xX-S)99r+OtdAMeAZ_o8}rkJN-On*LI!aEY(B!@*;mbi^heAUW3tW0@u;Jb>wZL6we( zaL)Hf{Y4yZv)N`1;Ca{zP+2$~7jOXXS>A{b-pun#oPpZ@Mb(!^+uQ+(Fy|=IWO2Rp zLrz&jr;iy3Y70>nV+J+_t0OqWY46=<&x9D`3RnTG>7mQ@E?1H3UR_Ei8!SM&qq5Vu zM!2PN)~>nHfga2|o0f4^0uytJHMd%pAE=rufGmGg31ry<8AKgCGledi4H{oz;X`*3 zQJho>EV!L@VSlyLl;r%{UE|1o!V!nasLcx-6Pc}@`!o)-S7ms_EM*^fiRZ9tMu~Oq zs|o|9mo-fozgx;^A=`7&UFkq;smNzg5wP`O{EZ@7FoD3O=p64)5u(K`fsQi#pXtU{wk(}wR z40?=lmws>KV7BfWI#9Bk)hBCOVcqm8p}G&G(L9e$gn3HOCUS-?S_88x0-A7yl^*nu zYZ8eSr`Y_HH@%)D^7t=lzzf3RH$~=vb6G4;h#tg@#oj*aL6`>cAi-YJVm|K^O_K-nE_J z7Ed?BIj zWqWC&k_w5fj>o~+;o6|c&<5YeYjH$vBr+dmnblx45>EvWx$JO=xv-6L z{CDN9`loU)-+YYxTe+)2`4-Im&~c6Uj!qKTC8oF-0JDh>@sm0DKRr6Om-svIg`8e3 zJ-%@6yMDdjnQd-oeUAzp-QC61q?`Brl*KDR%IlIrFhq_e7GXBRt4BN8LnR7*IVC1+ zg~;fwjUubBnPRia2300Z7q3~iZ;V4m$1UW;lmLwmQ3Wiy9YTTd+7zuDa!?z0jW|Cg zK~-&7AGLN&10mH16hd&(AG3vPJq64eCskQquNag^@`zb1>1jhj9wIJq3vC z&v2-4_Ko2%l~dH&N9XI1V3KLEnz7z6hi$@&p0=$v$V>;7!2jj!^y;llo8{_aXgg{J z#tID5ITE3m`PzP#GChI|Q0X$hs=F7SLDe7sKukDb4bY%5winVy)`yWoUmB3YF!6B< z4QoX}Cagd$*sQ5S?cRN)kVycS80UqL)oUXnW`v9iwwSs_m(i#N&^h))l4Uq|ziiSW zSgdm&vF+_U#vXSD1dzLP8*T<1m7=i{;WT_3helH(4fa5!T3Bw-G=!mAqfZc>?zH^a zDA_at+gR>QL3`WIB8}Y4#~V=oq=jSwXIn=159y%Kw$4sATfq=rq-y|)06ahvhyA|Q zdXy{P-YdtXC^$SvREPAVZrB`Sd*;+FvVZ7UL zCM$q2RolFRW_dM(>@^rP`wYO|#UTo&mL9crYL6farnAFH%>+AqVwSr}!%81;!goHD zfJXX$Kw>j(nr$C;gb*j(z#4Oh0sfZB0fES$iU~vA<;^*jVlq<;X8o*jfh#r#y|aAd@3k00Hnwfo5}Noq^KhMpg)oJlR(^dd41Vb z$TqRpK5Y{yS$i;}YOPxU@*Ne^z#v$M8ivRM0>z7*;=)8WP1O7XEp0s$%7_={iP_rU zlTmR4N&I~P8lK=t7<|vYVXAs)2eB0F6w~Zbr>4528Vx|(DfD!c`a8i!R0=>HoVd9& 
zJRiQTo18WNWo(}gd4DQ$475+vO$&gw;jvAdjFv?lPjzlw5R4{=(Wu04!+6Bf*-M*I!g(|j7y`?FNRU`A5L&-wVv&8+^p8b@JPO>i#m++WzqG?@~DU>nJz* z$7LRW^Pcms+||Ds$P@frvPnlVoJY>Z=Hh&nMtoG7DBOz&%qJ4_wtN)};XVI~v$2{t z;+vhrt}iB%%D_|Vo4>ka#_`Gsd!Ti_`$VJNM4yX54 zwr-*4H?k%$x#pg=em$F}3OPv7?sZIN}@MFd@Km`*PfV9v*#7%UnFt7#G@#@YhD zq;#!LykdVO)OeW8L<|or=^9`c10mlC{*?u#!@ZnAn3hP!z-!&RPLA`}qL;L@@tD2@ zIBoMBZ&<`O=S$m$u1j#D=tv^1uWC>gOf>*_G0|(7Ol`Efe1FS?WERDW0brxeb{uq) zN<)gD!o(aG=3?VUzsTg5&((XA#8*>MCf7ZQ>nv&SKe2mHD^<^CBw@q8HzZcc_^r@9 zSe9E#f5&f7BsT;%P?lAf7c2`(AAL1o8(*A-w_$6!#Tg6RI9c3ez1)=SMWmHY940oBInr!x5OC<( zlk@1f=@hp*vgqYYq8rRGKgRSauE;goL}iQp9may5WWpJjTDPSHi64@e)@cNU(9*Er z`oy^$@7a*8iaHylQdCQ5F^h{~+`p025}e`HOM&^1et-V)f9#&b+ooKyvD}~jo*fW| zmKXaJ@CI)bY8#zlwTz%ml-3++eQPYnB#oMp0FJw+t(t`krlMtEk~DpVp&Q^9`S@=c zOu}t{EL+__4zZ&_SK-?|k%bd~AoYg;&f?}ec?xSNo7%by^QMKG+f-l%9Cmxth zK}Nfuf&+CTqib<>3XW|K^{v%-Vs?b1tlZJ2QucnMr)D`D#`kWUP3!yI;BW~ZE?}em z%OE;`sE7WlFnXI<%xWvU)DX#FadP8jK$><%Z(kg$e-?*P0KhrT%%rd8_0`o=e=^}u1jkIAJiaRHyF`K?KWHPn#% zfbL5pPPLz;8+BmoM1F21TIPBfOY{RfBxEnt;UT;8nnjEMhHl|8|fTH-C!X=Z@FG{eUN)%ZiKO}DX9?ZmL*BPa-`zRqj!zExPTeVR$ z8Dp%YV-ODin`4uMaPcK(4!hT~R6B50ou3PZs9c{p&9tCcBFgxpUzQ%pDP;%Xq?e=I z#zVqpNey9EezK`XY7HzWiw0azB>3h!f^C4=Qk>}zHF8ic!fzac zMe?XC=5Nh$@fy4-*a^jkiUuMBiWcZNJigf)mNGqBDX$METTKsaH^9JG4-1j+0aDl? 
zy0u{ZM`DHcQ(t%->eP*NA601FOaAguMH579KSrm_TX7jMQdq?RcR^NG!3-yvUjLZ} z^pL9^f$(L9=)uOPa}0u~V4Uc2*1J<@8_NmF$H|O4GS(-2{+WL$BL2e&dydIbDt>l8f#9&w(#}W zhnHFCH89?I;teJUGs472p7CU}>uzi!=F6?&!>0JoYNL!QAPmSr*G1@qA(>s#nF~Y} zp8$JbOU!Nrz18`j=V1AvHdE%U))-Q&ot=!wHWui&`C?Nl-g-Vyd60UI;W>*bgIQZ| zM`wSC^2LVqk4EM#JTsceL$kEbbtIDA(OCrsjitbv^`=ic>$6Cahka-4f5|*TljrfO z5t|zEliicFj8wy}!+MqrJ;)*e^ocn4t88C{acTlVzbO|^iy|NG-r$VQ6R!ZAm;QkP-U#&#Q&T_^=}l@)X_ihM)G0dMo1#6eFa#pmq(3m=?+P?-LN*7Y~Vm3TPZpGphT2sm1iL$eYay z3Be_HZ-+R3a=#T(zZsgc-QyTgRhpklxtcf!bcWeP*bQ}*6WV;kj90qnAU`|bGqbI% z=uI4hWC+?~Fs;qoce_p}fYGES5IfPO^I(g6piN{bq|Kmd4B0*gt-K}3NVi1!0Dhp( zE`~fjqNtkoi+oK6l`CklAY>Fa#eNN$?S1Z;!3@YevcI(z02x@Jrn3*VC5>S>uLWo; z{4Uw8q)_pNC*Xqk23o~4IKHLm~OICbd0)2z`a@6lG4}s$L}u!=^Gs8 zHFcvvg~dyZK+N{hxlPoSOiGYy=Rr0QdpoVtwJleBi7*vA&9B3_kf|*;|0sJyv!n>k zzoK>}_qD5(3^U8-hj7^y+(A?mx8LljU{=Nk;vVo3z#Z>%0D||>xpD;5HIb`w&ZDt0 zC(*i{A7i7ZS@k^xpY=dXBq>zBR4{<4?zc!jO8?}%CmgIH*raJY!up&)LlcKfz<9@C+=N+OEhHjIsWi(pbIeB?~1!KTgi6i}4L zT)8vT`x z*szvfRgw*T^D;AzZ;NU*9CZB4Ur5nX1~-aEdba=TTR@4hj`X#-ye>P@fIC_d6|H%| z<#5c{9@r4F)?{_7!q(GlnV8m5R@-*Koy2{U7@ri=D>c!LX9K*?;otoWe<||Xe}MKT z-sjW5Kzq+^f7aY(^Qq+pbi8vVSa&vw>-|LVERi{5qluZgnL_Xyt8g~4b?oTg#(?pA z?Yy%qtmeqhgErju99)k0fh0z^E?z@0h!_( z=04E`Ue2e(y~+c2(0f9b{>BrH8#OEpcBz)8S`IF1bxqh%Hm47-BjL&0$+!R(96W^sZGl?O|4NAn{-=nd~^tJ)4{CzRi3Hw_XP7ILaEA!n6XP= z?S=vynt);cE^%UZQ#+_ZCQNbQ`S=|)cel%yc8}i|O~E4%00v559q^j+R7AIa-@eF_PC|X^BseX=sw+D|X;b8g6zlKR(hS$&qSt zoLG3Dhq~5a)j`tUB zNk3)LLI6mJs=}EXmj+P~2;LV%1TH#iSb6w%*ltWFl+5mCAc(#9p1WZHWIowD*65m8 zL4pO3w#qM`#K0UC!+X}T?A9ARbNZm}@om?3xCMCi!d!&&R*v+5K_&gqMyyCLD~M@1 z%rClzF?Kl3fZFKORqu;DQ^+7DCktANtt`>EKe%AvjFwyzO;C*;&rn1uc4a1g*I^lK zj1$dNyCu8Dzv;KTkmYbo-r2OyiueE{K;!jF74n!hi0P4%fF`YMmEk!+n-#d<WF{BvCd_ zmJwTK9o-LIk_kQey_RLG@8k6jwHaxJGcv2-W?O5kpo~#l(P-krc!@6Cnw7eGBFJrw z2mTYDgw8UI+eyjLxIX`Hj2A{5j&LcCrJfLAfN^Eg1t;BU-i(OO0hM}xH*Gv0?OcK| zKw+2x*g;B*Lyz&s1q1SK;WtusQsGUvhl)TaDm(Tk6R8>FyiWL*6lRl}$A{P$^*a;{ z{Z#`WdSsr}EeCpl>GtBZ>YH}mBIU-4;fKh-^&XzWvh%#8VwIOVk~y97+bbZV(!xge~~#M{BCMmP++ 
z?--UVf$1n(_(UT3a~F?^US$39w-piLE1}&Gi?Gfu*cNL^2{g2Y2M;yj6m3%1p`UAq zrk>%Zk0X^O>(B@SL30am=yM#97`C7PL~$Trdm#&K6lkw74f<14#<*Zt=kP#4Ys|(p z)X@jPep)}m>m=$D73)?C=3-}kq?NPxmoFCItNMS9(j0MXncxI z_v+cJkunf^G?}eLv*`-YDj*Hhbpjt=Z|H^Ih6sH;i6NNJ*EPK(*1V6I zHsH^jwNuS~6i%*k;P@SU@hwZD8^OYrDSeZNm0V9}nj-O@3(Q(@z5t>l<$)1(d@TwB z-(R_{Rb#){4@&v^4lceys*^2DO2U_5T?`7~$h%S3DlpYW#EO=_`&}ZjE7zwqLTvTI zptYOsqTSr@6NOi!amB9Hl$}8Xn34BWp@*x8jP!xmc&$wA(b z5)(J@uCPt?{oJ6JsLYiSh_XJw(P+Y4*gZFbsH$v6;a7gX|38nJzw|%+SPIYc#lOup z0p7X_HUJmkC3)0xnrS}^z%?ENryUcUZOgmk#QQi&jQ0ZbQ>@%Lh32aMIp28=B7Rt-IxRgE7D%b*}vCr3!5?-EY0emk5 zJ$6P9yY*8RWZ8yTxfLCH(#BZ^1_b!RH2zSk2pH`rCk6^Opte*>-1ecV3oPFEPi2ic zbrE)5tkP%~iz|tfX*g@uDlWblda!LqtdkOhu(9_fnzH#;u{9bAl#0!T7cND>IUdnP z^nexei8)?|>JuZ(nwaBNbLiL;R0V92;py(lXc89#tvw4F{PF{W8e8TM(>Bm4fG)Uc zdZL?Wx7bz{E(anOtO{>v((?Mqu)$)o`*MdtfbA|O>D7*@uGySz-HwhGtw{-k>kwaj zpj{z0o#@~;NV6BN772ccvMT`0PnNi;41h<7=>l_QcD4Rt0qVjlAsGw)f@Qen|1(^W z{hTH`W4>IY?7)zuJIMl0i0sDg~E;MEVdXJWxn zIN%17D^a#l-KsV%QU_I%AX!HlJ^1R*nLRpcE8eN?o07EVv;zS(xpD@>JLU#Rax>~m z9a;gPHsNFO1z4Or(Sh6=1iI(eaq_9{v_N7|KAH&4p66zx(3S_YIfx;qPv6|~J%I|^ zi2#jt5wSaz9kvUo0D)-Wa0;EK^HHOqj_v^))Ny)u8J@|`X|Fd`1mQ8V8T&QIHL9wu z#t32sH8#>dTFqM(4JHcjI~;J0d$}b+D8N4%-NsB4K%<|IjvjzE)Yb#H9DgM=bmGQY;=!@B zhati;$T3dn2*`_ELvRFbZKEzPI&@`t3a0@NEU@E_@(lU!hAp!kERuis#>W1R+i%Xk zhB!L#E3Ap0e*c_fIhs4RtB5X>IsqL({^(V3N}AOofv4q+NoM&h%l1Sxor*|=6&f;y z-jk1(^TP~eL&3hNae_chS#ZPtjLGO?>m_=?Eh1}Ijv+tnV$`#zAPY$^@>S-=yzcB+ zhet}oc)kaUc|8!Qi!?KEoSMOfE6dOk?>5e~Rfn))&fa?F@coY;*3`x%bJrqXqqKnqilcd zx=1$HUZeO&R~lEL;i#y*qr>LC`clv_X9ZP9&^VK7Px3Mr@5-=j(@yWK+K4I6N~&5$ z+>lKLk}lOD=&B{(85usOd>QWzFv*e2R8^m}cD1LYrbYG9{<(39li%|_A}+eY{9QjX zqJYgxQb&G4S~&Bb6mFUpYlp5YU6a0*M0ERSDn-}wbm2rhuPpwEffN{ZcNv_Ys!0QY zUI1pFsk=xLt(#O1(raXS8GcuB2pl|4ZalW>`^n60ho9pop|d>1oMg~K=+Y1}JMsCr;u|=xwt6*T?9|Rvm5Ra@c|mqyr9KlHr(!0eZ@CiQ2s=$>3#?{8 zk=7CQ#A7lPKjqswrW!Rx1mTBh-A0tDc&r#R_gEF`rFg}!;7Mp`=Y)v`TD?CRD0M*4 zi5L!a(ZPSeu55SZUpKQjcd8ryh-4lH`pI(OE`TwDP4L{=R+s5dTy8q3BK&DZsEHjA 
z=G9K10%dgvP9%=Vy@I`(H2Ue9l;Hm`fl3Y*2XXs-0cGK61PyXo#)Q@>y_WNVENR=9 zl{?zeN+A=G5KP{19gZWCP8u*`qb`?i>pAK}UDhhYA zp=(^l3gbWPNI+B+X7{6seKw7-i4?6SZUo8G%#IE*SAGY@4oaXWsec~mc;YCZ1B zfeu0LecllII;~Ue8V_Wof_;$3cQ$+biW^J4B&WheJR9*R?y7)ZL?(j)5f)bDh$y|q zlj(BxX$QH25sJ~chjuWmVC=Z@;+Ua5Xy)A`Z^yAfq84iPT(;oWEbh7c0~s5WA+;MB ztL4VJ?|&~z!}@1Q+TZ-4@`wKzQ@-0kf09l?-K>W93~zOuc5s{x<^AA_csW0ZVr+Ez zif)wXnf5e#=Dg^J)r%WFa!r=E-5GhVMa%(3Uvsgd_3SlUQ?8osqSi(V99#G2zdpEl z-^)ayds7wdTHpa7uV%gHYBVWrsoYOD(oKLB1s<^)usKD=HA=FO08M)I{3-$v`XlUW zW0yEF2po4YKg(Cl>a zx7@OVr4 z8Y>sd0mptpI$Brmlz5d<%mLN^_GC0HF8qhOX*{PK9amh(p^cMK;LU$_0eDfTq90{| zXxiJpuN3A672J6@=Q1Vl#Ve+IBDYJc!1`B=^eld^w-0#WEP`cMK^^4$3N^Lab{-`y z$Q=9FUM6_n5F4+B@T{o`ek!H|)R>L9leG@fpgrv_VV8?h2K~Gc?>{YVEpUz!r9X5l#?AtH((4t>jpeAhKw&JDY^@jYqA?WQ2`T^9LMjtkjqe#j-k%Cav#I zmgc-_K)$ll2?(I-nOX(4@tdhNu|*g=>24ouYvMWiBt}GmKpRzT6Jh` z1Bj3^H+bS!ikvmw6HqX@-t+r?FI!b-$*DUQ@9D5P7BK00{}qCS_3G6ARB==N4ISlI zv`3C&OpvE5o6L9FJ?j%60-$1l=-Jt_Yw97j$i1btWsuf90nq0OAZLZzl%-=HxmxA- zZE{E+6Ay;&U$#$(W`c1TJVyy|M86$YLFJUmnk=Ju>tX(&u^P^S8P^p}<+a3;@wOc_ zH7v<&NC`Rp2r%bRYvuOd;U*|(Iiq+-qn zCb6BHQ^t&m?G=HMG1SjsG(=8Y-O&Wcu8AO|-m&kQ3{j~w{3Ui=;o?N9oP@&6)oAL8 zo`-HdHoa}3i7IBKM&Pa_2cL%owDd%3!-_Ltyb}yOh3U<)_w`>Z84~_knnc8Lnf(5r z(?6x4_=;Rp&ys&PAnJG|k#>xAZZf^>pGnnkbMfWnmIln?(~AA3AV{13n?itjRj-?H z?NPr5NHw7qcFw6D7u)cRu#F$UEO1(hfChIY_f}EESakOQK7&-RsPdLuT zNQjC_ALwIH46_#s@#(>ziFAue&_F#mq%d@+82z2s4*uaDY%Ggp3S-n(lP27DIilGI zGBIlXv~>_DoM?d657uxZ5a}z*t1*j+s=P;z^UbW=i=wif zXLMUHH#AdIg^6XfMc=ex)^cqBSSMq_)s1ac5$f(F{r+%ECzHJgNC`FRCJp<6*nvJ( zGc)o8LlPxT$g>EHkQv5$zd10*oU;EQ=%7%Zy;`VV%sf;E;8%e%bdaqxFKQ_nLdVOl zi&x@U8U>u7@X)2o{AP?aXMJBYg*CPHWNLQ)){J8miS;AYtbkp6*{bwNV`mvKAX>{{wyCAF|D4~ zL^*{2mjgs#@VVLgaDqS@X?)}{22(%?I}EkEt#=c_s>%PM>KxcJ>#}Yg+s+f)HY&Dl z+qUggY_sA#F)Oxhn^iF?I@R4*zo)T24~YPVqzSRJEgwmvVsP*Dhobea|9+B??qUlzig)C~{vbp=?67-^3! 
zlJkn<-FxX+v^sD9{z@`d8)woSr%X5|nMXtXdMr#XC zqazKqzaS*VR{8CTiqNP=C$*i1xs{sa|8$~dB5mck1^)dPpZ!-x^COUg{sqDrDXOrW zJ8_qdzTN$pq>4u+G8?bI(5s7@zetdT9@PbM?FGv@;7U9?EXI@Vko+r}i%?ovk92=_ zDe+2sY(|Yl>#k@l*9tyLRtR0u1F6AzRKt5#8MpYmAsASsS9`RBijfz2jQjyB{UXZA zZWdl;U*AVq$MbbGH>#zG@fI4Qkqm2pFB7ED?=uQiW;qXb0do*nE{e7B459%%+0B*V znhMuR3|3!xk#FW>e@3woMMkd$+!7FldwLVcWCAG;PCnT;>%IHV7#*e6qz5Q_;BILA zAsZ%R>D8i011Ki4#*Ovcu&`)omk*WQq@2=54ASy8etif?9}fQv56#I>r`A?*6+PH3UsaF2f3;T z3Uh%!;m|MZA{fIfW@;na%k%yKTY&^lSjqa=l_PKBx7wHW%@05_N;MKzM=8Kof4Ij& zcUK6c98S|b=q2GW6@ko&zxWwCD!c=7*U|L;)UE$(`_!*#NOOW=N1ZXhO!W=~F zwNV`XwT5#$Z)2>G!Km9^*BfVu{b0S0gyUfCckFXF^oIRNf@Epx9)wZ2BI1nvGYzr9L zCl$_9X8!CCo!6T?_Q=Sso3#s{A1BGtAVA2(v!4MhP!D?sPuhUxonm$#QFAP?xAEgo zuV3H8NF-W!aAC&7DC!3lNgW-~l58c-H_Kq-o-@`CVk=vRQc@G7h7t_4(yHy*-rG6( z*bjRoI!;9RHaM0|C@9Sqp!C`R69zrNl_KVN^?`?)D( zm2PO_?IGoNvWiYxs8wYBe9-_6AV1`x!HYHQ0@D8&DKgM%)jGOD%!Jk=6JE5*hkubk z`}M!8u-#JdU9N?I6uig(zt1ipcsT}n5`dajo<7tIxr-+{A$;gN-zyx$<>uFNIVWMA zf%>(M;mBajLaX_y+AO}3$5a1lX`OiedV^_~+@iSSbiC{HXEC9@$Okz~#d||cB9jjD zTFcj78&qu{g6t3?w=`XqZ)AUGrqC)Ih3MC}`y6In8O(Vu8m)w2DNZFH1Inr8+RoS&waI_GV=}kqU20GWKjSYrMwjdT`K$S)dZJqv{e?)CVx8j?>B;BTeYa zfBwfOM0ih3u>@c1W}&lU5B<&WL(kv_sZFeJ2vn-drA)8o?Y^k;CoHvlSn>_iA{wiB zbeO4ew)3UTEE1r`efeJ)_Ri$4+xCQ~D$n*n+*&0_X;59{x8wnP-`j9z}BJF-z~2tdK=`P5Gwy~}yU z0SzD4QWOy;e3yABDLYxO9Rt509ZW(|1Dk0wci~b7e?G0%>BXDD9k7P=8|Rp97kFM; zaA|wI1wD1|dktjX$3J4@KhefS;qGyM0X9V*@E8U6bI_8^wUYIUc)N5$$6Nf`Ksp*! 
zQFUO%Yi7qMzej5Px`amNxkWoY3;B-veE;gWbB(5)GBTM1iq-6dm{5|7mXc%#Z)zKW zaC;Wl3lszYh0|jv=EGp*l4fO^@$xj7i~uZ8NJqEj>)b^x$hFNiL8!x#5veqlYS5Cw z5-j+Nnvk3v&(*Xgm5-gKYNMagkGTiMFRA4uguXDyT7AKrldqjR*H^g zHlL3~#cT}p&d=@>UhR@iK7V)|&1zeC*- z_p_oO5g;l-MVRp)5o4@R{nkDMb`Rp2S9!fnO<#-dyr<-AaKL6aau_IJkyxWcDaVpW zYtk+*<;tAC?g~(2+fVrEgc_$($*w>+NXp!SAgw_rJ3WEpE>laJ;F29yLcVRip?Z^R z%rEK?jTGiR%J1y z3tClzmF3HoL*7Ka$@+^cPb8f5Zn&HrzKEB5V_z@XO(#PvH40CmhDwEaB71C-rb|4cpDu z8-~-ZIgyK7^FkFT&~IDF_#8Fru(!}{{yl$V0EYz+JDngr%f3dJA*@jq%q?mkZbgmA zh+qEI|7u!d7liAqN9SwF8wqzVy`LGfvam*N<8=y4W0@BA&e{djKjZ?sR3Qn;$^6PO zy?^knm=S7?NcK|U3 zvx15Q)cgwySfUQob)kw`%YLkXF-u_mK?5&)%k2EU255c6Mz&+D-tr-+^`ClJb}D6P z{4V{R-ynwGjV+^+p=!YGQ`SBl5L7#`0^ceSI~$L{cyf}H?^-*$LGt+mt?;#DT@4wIg-0Xr=hcT?7 zPgLAVCYNsH3~A`%;X?^y8VDEaYsRAivd`eUAw8X=i58%#-B0+grh|q(zAzA>2m%%!)F#E;Pkej=~FG^p?=bnmP zcp@A70J|v5OuZXV(`vE8=|@bUf0ud2QB3XYFmO*CWrQ?wWs2aRW4NbcL)EYVJ_fYv ze6$cpHjMXf*M?*--G7K3Q4Cc{`H_ zN2ItVqb#EFQstpGo2#JrEP0f98xQP{`hQ?ofS_IHc;5)u!Ps>47!_F(&H_GLdgfKBJ1`Y?Y2i49G{x z-TkKntscDB9=gY|^rN-y7y9RapWq&alB_fJ1inItUKxuU_(-c=^1e3>H7TYB$jgG2 z9&JmqcNweCs4SP16N0m7%Vs;@B=6uuvLm&JtvwfIOxo1cyucs^;3kE`KN)51RbV@dJK5t1!( z4}6hD?`N&ODcNT{-GLPuVCTB7Kw?B;FO(}@^n6UQeH#(rJyvAta?HGd5M0CQkP)Wr9{a0-;wC%Lo;4Z33eoJ>QHEVUL-v?| z#PLD=NX@wIFGml0z8Fn9E@vqpCM;?Sa>Onpa#=2He9?OItq%4(NKxguMQ_}c3-Hf>oq%inYEou2MIY*p6j(sNIn%T zXMB2=yl3Xv&4vcyi{J~@677>@ea(gtyP7lygrg|2qtvP0-ux8*`C5h{W_+?1^e}o$ zz1=aagB3cV*{D-vbc2V?<%{>?>O`6k*Hh$+{l}p_A4NPWVv0fd_La(lf3d-iA9;lNH>ra68WwCC<{Tslqg(70T{+b` zGz~rQ}{dtPI z%3L~Q^=sSMv2N8631{1*q*Es1=UYU^g?;}o`%Zc)GDNS+M4=uP^O?o@nH8?*BryNZ zDzD^3@*d-fp|`GbWI^(|0vl|isdgSx5Rg^?r#yb4 z&b3`?}2Y4L*grnW`he-{)MqQptsrqu&+wZ32xnQ6pw>;OGSo1f;hjP@!0jxmTNV5xA5OHcWBOo z^gnCvuU(`geoiK&w7A>@{%7Z<-{bLYc$9z1ZLeY8TG#38Wd2IoJo$}VhldPN@72OF z1*U3`)E6|*@xhyNjwf1XxK*d|1&LZQkV^ZdcT@Q-GXAzD;pwj&R6V$gqHV|H05s%L z;>g`PJ5^r=D2qFj0|aWQIG0qXhtA4Va^yXC7QX&6FRZ2Hq>1qml%=Mt3g01L8Lp?S z3);9n%A|(UisFXEe9uGY#Ox0Vi!Mh+(}QVBOHgU-e9CaA1HG$=!V-)Wg$;btJDZZI zai&806mk!h@z5XdcEiSf=V}w))t3~6z; 
z_6zeHwQetxAVGL()7MTzgtF;JKeR@bJtLu{7T|7DD7(TMl1yA|Y2Z`sNFdksnB%`t zBZO^P4)Dm`eg$2;yU}G5?>^V_w`3^@PY-N`N$shszpV(tN=@NoGic{Ur%aYs!Gy$P zv@pKHtD4z*I3#V-N#Vvs8I4=fceKCqf@LO$Q#l4)XrYS`w81nBPhn!D`>8uQph8ly zw?@y3(^~P(lz@_B>;ak^SnS7Q=Oko~uoFe4`p{KmPj1}e8;nnMaJRyzIXI1Q)WfpL zK!8Tf&XpeQmw6R^Q0t1+d;G}#sSZp(d6?TA(P-aj7zkZhTz|BO;uLQ1jn*MdY@Al% zy3@u3 zGPXy?S9&1)GKXb!Xu|0?j+;yt7H5Aq8>t$I=j{~>2=2@HY`SiDCgzMn>I!Fl^8c3 zVHY1>TOItO%?-#YbB4CNfUzdBy_lU_j>0jSUoc%M#(Ad;r{HdKwP)WVZzCEw3zLuqx%OL{?m!7Qx{q((R2CCVH0S0kNReo%g`{FCQ$-xKWq*YW-wbeyC5{m)8Fli*DH zYR1$T=jbRMIFtX2c)ySfT5wnKfgT6psXO}A8#QS3gy&#*t%;C$W<2AP$^1Bwk8H zYEb};vEVc&iafBPJg*6dW1R%d7n^K0)_$xC0gL~&wiNYaD%mpR1s8S4csD6+y&PrD zjp6!04x%zPs||1afI|J1IzsbY=m@*^u=r=Vs(~Pc(w!0PG`whZ84ku1ycr#o_Fx za(p)}tOYQ*UP{DP+|zr@RD5j+OBK2N{f^N5q~M!cw?~1x6a_O#@;U6vcpKdX?bNQy zv?NFW`ZbV>yEi$>y#oLiN5$WDZhSoj$_qJ&$i<2!Vv!lcYmt;%>aLUa$qXQ|BHr39 z7uC$4k$t&=;?M0zGDEMK$+PaZw;-?%3)|> z1%P7HD^1pHP&la(=c*RvRc5Hicz1_^!NTrTaIVK92WKwU1QTard8Ga-KgFyii2Kl=#EMzFWA3 zaKq}~L#rjCC<99DdzL_`e-xOtQs6Y~B_#JL9h-=YHH6(}#;$n0leA~-ttS9ngb7;r zICaT_X8UaOwy~dE=@WA_%rih%Arh^U1naK%F&+o+?f<7a$#E|DH=cEGzx*Haf3Eo6 zOCd!Qu2Zj(jd}Nzp^>x7`4r;CJaK1dtfM#FZ`y!)*Fwv_#9^Z3nO62$p93f_TI3;6I_zuaIfM$k5mZsgQ_*ajy4k@pSSxbE#OA)(Md4Ph+5JMFsq|u?W9a%X7js4p;>8BS;gRA^}1RNvSR75RAbgw#DB5+Uh6tUYap1uv0y+ z`zSNnrd3w(x#Z#dM!7$rJV{vv!CbxVP6Uy|mYYG1E30ijb$qYnNdP0PV_4XRvxqyb zoUUU%Sk;}9C@`veiB?Z0qukTp_h7ZF39l%f%pjp|c<#Y($ry1oPyr+u00yq%HBAO^ zoJ@0#YrZRSRzxzK&+eD9LbQdZp%eeEt4N5YxTkuL1I#R0dTkqAe~5jCUh}?hSa~(L z&iR)u73jTS*xMi-raIL_$g}IRDvl@RIwz-_l^Nz8T47pPDoR*T0*nkSVSPQFTXKh1Sk(cmxGN z`LJRQl>=MOSP8OU|01*tIPBR1vYfXIX+fo7GEoDtp6=BlS6DT-t4K@fm#4b#WXCw` z;Xr2Q%t(9dkXW%h#c0E&vf}+eIqJEK(DhIHB7L?{kCj7W~uj$trELy0dKl|4KMzz z7qC5H5gg&3*?OL$`g90ns1PiS`ematQKo^>b5{0na$tf@P_G97tV%^bwH0+)6_a&P zX*QpXRX@#qIA$b{o9EyymNw(8-z{47+VC)})|*|dsM$juV~oUHQ9HQG><%}YU0)Jh zIJI)Wm9<1Hlnd7AP5m~ReB-lh*9&1~>!_~^0ym&B1r8*GZonKE?{lzlU-x)OiKTun zLc1uloU}6#Y}3OMlXvw{GS%^0L2R@e-G~~57=uDKjZ6&-!FaW@O5p=d8~b7&6B>JoFr;ilw*rQyIRyigF=N`{p+ 
zeXxWRX+5@z(q~dd=l^y@h(-STzb3Wazl98c#qsLR$LNp$x>N&qg9#UWdZs)3@>fib z>RV2Z%CkyHphb|6V&iNR;sjk%nal6fPp^K9VZT1@{82l``6D2IJ%mHv><*g2 zCnbAWU8_y(Im1{Zhlb*Q4$7%ZQW8qOG^822-laMcLV3m!h62yvl`q$HCYMBQeST7s zo0;d{9uKajbM+4ezR);kZO}h(Y7@1QN6L?VV+WFloaC)Ev1^}TJ_NvwpV-cVQvrez z_LC(t%ifV}!@;p=(^4)_z|4t)pYYjYL8f4Ow)I)-TA|{D-Bt&YAV&A-t{x9=eB6xJ zeySzeZz@~}8W>B}G1j1!@dPS@(E-ZU5!3CEZA43jE-u-QZ>GXJQc21jHBnX_Oo5@g zctO^v7ciBjQ|o}g5+W^Wc??Okx+Pv9KA~@Qn(*;~}m^QxJEzks?riS2ZmD~0W^BskrPgmbfh!bnGzxp;WYAzWM4bp0x zFj+81?gP14hK?j{2ZYOAnIM&dV`1pMFdT({EF_CMsitu`4VS5i^q~X0USH-HB&7pX zEsiHBM5blO#?u-grNisa)&@d3RNEE&Nmlc9%$lEl+WrS(1`Vr(Qgtm$`HzSGN;cM3 zy0C`pzg^;soST?t-Gl0H5$^-89r2;cUL{R_g7AX`r3}guUITTo@A)F1w23fglE3fl zS_A4vo;ho|R;ic+F8mXmTaVW1%L+|~@ue7XD^!PE)-wB3A%$XQd=_z@7xsA)x0 zO&Y-@O$twfl<{Bbtaodb;TzXpVr^-GU*09nwMR_+9Nf+$p-Y~4@tewR2s;AE7A_z7(abMu+0Fv6 zA^ft;Cqqe$2c2`myJks!M>hI(-NW7EHS-GT2Qm$*V9F3Hyim+#jr{1u@QCTJgYM)q z(k7-MLpO{Tc3|IZ**PMP(yPpuU1;jo#{pX=BlQ_Z0RrF0#@D)Qzs;Q^V?wszJ9J`e zhd6R>eY%IZ=!F?Ac#@YT1oH_TUhNbv%MJLkhhqVzB}W*`$9yW1pbK$( zUjI9X|HTJ+{96>?{6G1Tv%b7h?i}ar!OO~5BwydE1ms9dFU87lB`!T~a_yP7qdlWP z2_dr}QFKci>^#JV?7M!QYc|Rty!3O`k<=EO>sb`Z;j6xeS5V__^biD{8;64y(&jbI zUWTF4^>`FQpVZi~jY=_|CEwm@qF08xn+A}oX)V6rAm+S6BBKnWiU=asQrV)K&rlw@ zB<+|@<)H5RG|`I7Pe~VBG?6v$g&nVBOW8GhqWb>k4++yDvh;&mN;)Lmu5+h zUo?}45R-Z^Ef7#S-2-7F&komsa0XRnbrB&=GsFdc+JhVH+VYAwNEJ&#TH=8vi^Tm; z@yUS8r)cp@i%>S^aM9mBH|}``g<`B7QDf&vFv8QUwF{jGLTeV{07}YWhGm^SN?EzP zrs=bkEJzBLW(gp`X7n#qq`@`{-(^`; z49}>DLE-@D64=C;Q)SY0B27yMgRNH-+eGr7_MUk2yo4E2ZHH$x1aVqSTOB&2_Rg;~ ztZ>qigJ_fA!6W%+P?Pk2Lhm#IcV2A+je%@nt0-`Ck~dUJZf{mSCq3FW1ca4ZfVGuh z|2cF2z*0K`aC*ianx{k$^9wr*O_a<5ja_S*kq z*XJ5!@BR}c&BU~l>&Zof=yykUnTITCbJZBJgIT-7JXVW7pnD1>q@+M^X=I|PiIXA; zM65?9^jOlA#r`|}K^IaB^o)rb?eC`K>(|s9LJ_VICABy;vsi8=Cmd0wVu8MhkSo43Aq7@aQ@#Z57{-Vk}6II z(9<$tof5ZZdu&S~MO9Im<%eL%jVIGVsJ5@zf~|41%;4ic5+oar{N|fnE6mJ*j6FSI z&2cZFCfQ}ykS?oubwFh^gAZyOFu??^Iv71Aq|-60)RD=?#GUqO>ZY3psrOKO@MFTP z0C5+8E|fNrH1I*Fg>D9)>QiY&F(RX=J0fRDGo+)T$>XF|Ue5?9rz*PSVW~(!Jpy!4 
zRXVqJOY(_VYcrid)+3L+pbYA%7`9W6yv%lYd4QRDb*J$pKqlZiq|`%9ui`GI?WE3f zk_{Lkv=wL0S{qO~fI?9M2}dKJmuLS%!ku3Yd?p43b&wffhEm5yGgsyBA}SegCxe{I zCyHenMAylKoI8yppZRtku`#)Tv~Z+M;(^94W3PA|oy$pK8w6Eoge6EGnDZCo`2*?Y z<|_~GkIlbQCB*+e>EEFKd7F266j}mL!_Jc)d30QkHWzgiduh}L@3CDBe{d^w;l555 z=HB-m0|wows76=+#3{yZr;_62#9Y}NO($Nxs2jq0x-jmOjG`<=~gUu{=M>P3zaW}b-Ak$~-RzupkQabVx z|7Itga(1v~>>)#O6B$gmOJpO{;-BP4(77WB z0TWL0hgg>NDT#r@c&r*0KSQ|l$dbCVAj{EB6hm5T?c#tT@o9{L#|;pLjFDL4o;P6B z*PpZt@(%Th!z!Px8h~5x=r`Nv~-WYL%AR zS+dxkUE&wf{3t@qFyQ4ak#y~pW z%8VHn|6_f1uJE@(i}^3`5(sd2>bzyy2)>2nNO*Ka8y>wxWm{%U6gIO@ytw%L0^*X* z5yVZW6?T7WA(5t`EO|U56BXx)eg%b%thD~B}T zx9)Z$=!k7*RF=DgOTj^AON}_%DU)t?+0yBOGuZ&4p%(~B6vx}UJ@mM(TKw)K&}Yj~ z%AdjggSjnsWFwNrKv`q>h$^q>&C+qa8_~KPM$@v$niShV+`%l?+cvU^9d{3;y5@3V zVM618}W+e9$vr zy;i91!Bn*MLQ=L1B}L(-H2Pvi+KqHe}6KY4F5I~|5Zt9mErfs5AID?5+SX;(auCA1GC4$B_+!y zoO1ClSgrLW;dyK?0yh1d4`;a~sGKTZ?v)DWN`>y!+CTqMJv#}9ns>h6GofC6v2+wAe-1}W^mj+4g2-R{^veUr{2V24O~HB zPNZ5+ZWhEMyU5`Iqoy3fg8q&+z2(oJ%8mm`lFbR}Z^m`>4w+C*ni6SS_*|?8VBh1d z{B1-OHom5z^A+as5Q=Q@uH?Lg>br* z`B4mPRu9b_M~yqXYf4@%jkp+?*+S7^!Y@?Ezb~P4gm?t?RQE*^PYp0&^`O(bgiTp| z{hy3jpKffI$C*|rZe=_(QvcZgr5w_Olk4QC@{TzTOrM;Npaq3?m5xVDk#zOD#bSh^ z#}yz!=Rvly>cwB+y`vSlh5*C#h&{qmnsA06^MFi@%Vw9CaqKv-3tRmzd9XwYld+Hk z&eRW+vfw{n76RZC7q%6Ur_h}kgN|R-vuDd}jnDFaJBnLSs=Fs%!5bi<`z#~)p7Qs8v6C>Ld^Red(Y!8(()UA@={cc61tlJ>7x&waBHaV z0RSU?D6n7jzf6YfeRx$*@1 zxmOk7NKHdw4aNV_l-beuJ}c)d8AA|<2FfzDIQt#Y^y{E?{aJi$_R&bk zyQw#{y0SC29}mf4_k?7-?lL}nf!CmBCze6oDmRtN1DB}~L-2P$-6bXS^!g(D3h!^u zhBg;o4G{;-=gJokzI)=NA^lHt2XXVidZr(g5wtB4+Vl>YgAZX>T6ntuI|BZZ3xhf9 z99Y10riFd1+p;`|cf)+5f0LWC1CD>)1udPeAPvlLFS5bY16|P%>Lqv!*upz82z%Md z$vE$U4iKr3KMbUoJz=~Jk*S}K2SFyrd{&i~x^B|Z+i4w#{sjMcK?Z({GH=D|1-yLRsDYh>-$3R^X*lJJF>N>+M(No^4v4n-w1JD z5_P|pRgsh%Wiuh zB@py739xhYVQ6ed%c#qPct=~-76g3K13$Q7)`lxZxalp5)sm`m#MnAxuRqxWOa}EijS(*#M5j zpRLL{I0T2k?wMQ<{$`(Itf!%|+Eu~j^g-PVhjK@th7ygzCek~?6RUNS1X!schQb;} zI{!wdDeri05YI2b^&Wlz3qbz)xWP#;A|TFryypy~unE6Ipl 
z^MuVEt(GToXaOGKE)mODh~IFVTunpmZ9p>SIiCXNRoJCyXYE#g_MBKKl$S8Tg%F_B za_kBo5}7UyG@HnkA80|co6w$Uz>&BO{)CHiwwZ5jzLZr!(5*u_Nx4 zgB+SRQ;ZIM7b_rl<}w(Y4>dAnL4_fDjSza>Le5D zN#bg6mX1;J#I9Cr)j9ucTqQ>}O|A-uqZ~8&Ajg~us>BMeHZ~Ep2d-H$1AGhz2Nc}t z#_2}Pw2BRk^bL8^v$ac}Dltu{eej)WVyK$A=lqn_pcKmo8$$?I#UAKqaZ_!HKKYbb zWgwsy3Hq3m?}4C1f{FbCX9AR};|%eU5v8zbfF@;43)It`>?M{mgCW3Z%g2!%^ryTL z-ZFgt|FNdl{9Ao%!h0J3xB3>?1b#31_$B`U{q~KIIkFjZHaI?o;77ABh}IS=mnDy4 zjH|;{$M0hX^vrMm`Ns)?H(!lr?*oQ?Oe2P2m?=Eo8N8E%=lok2n&PaSZl6Dy*ySGR z`cPya`#Nja+*HVW>#5?{_>+$YH|5YDRWj4vEWfNubQQ*NAC7sbA}Iu+iu~dGE2H(a zaUchTJJD*xtynZBJn=yip{lV%Cp^gI+k%NrD(AFK_e4wVJSEMn)AU(WW*?M~@W9b0 zJIOYxYKDetXd8tJiPKW>hHz@@*j0@+x^HC{d5VzOuKUnxLHRLxuXhCtcIzYl<44Wh9If zJ>w%vb^G=TeQJouC7Qm&!9|9(PZ0dPtR$yRLxPhlvK4J#MQH6h^N8D%#R5kjL0L-< zn`@5tqwTA}pq3>j;6o4ydKugFX7qWCt3Edw2ipM4X0cu48vJH4AY;-JOG$8|G%nGN z@hZ8fhc`%&d&F%z*&2Hm(4H|t%sglZqlz%x?8tK6I#yCk0QFFdH7TuBie7&) z4!yYQ^i7d3?z)jb{=lPYS+z@XpmHW8qi%j_Op9TX4yHX6eu3-HHDTsRqyw4rPOqzCr>9x`F!Pe3J z_{?~{@(= z07WYu)U}raH_g4JUxmlCgT=#@ z8PLzhY$LcwN}bE{iwOw_=+a%lH;rSW4zz)!{;VWnq_6)Xrf3QNW z>=R0zc$m3*Y%s`i_)tJ$TFR!k{W|2eAw)ca5zV^DDkmEvT8{~#HX(N2%3d+`t!p7e z8Y$Gls4vFZcI{>e#^ALqzk0`MYiL2J9x(;3_;W!fPy+gy2F~n%&52jkrw+w_slif_ z^t>ZYEM{paj3H@kC$Fw)J^4b>`-3_vrKAVid%?q}m661KSw(h*8ZeR{WUE&Kz=@@m zjLQ-<*>#Jn$ZbFKBw$oXh<1DuBZKLBLo!{qt>AV3!U^lnL+ti52t+w=6;(3t_KCNg z&+}`+)?9v11)Qm>HM>Ts4MOo_D$z2v<-Kt*ly_G^&jmFd`7(P?-z!3Z>q=WZqmf3V z(9+Z^2GEA$vtEa>j_p~Zyg+S0pdSuqu77DycY~&f@Ki1@DAwH>ZVv&jOg#7_#m3Pb z=j|BST<4>zMGj50dkNOO4&`jgPz6xJ|Y zkv`5Tf8bp-ABPe$8X_?v372J0QW%dl?_Skislr}L<%<*ttm4Md<~+yZy1a_ewb<@v z+7@9YxB$laZQdX*Gp7pKi^f7!LqDE+9&{7#-_xzU2iZ2(`4Y} zLQdJMixk)qYCbx|$k!6JGXW1_5dCoEq`Lz&(?2-?CMX}ipTG0rq^*>kne2DxzrPMk z>-&;f2pezBHRgODxVJ$cJI0~>%g4ey7b^Y>P8UWvOXe#;v?Z021k{QCOi+b6W+$R~ z$S52}T_CP0!$m{T)_7vwO%pdfm|DA`q=T*U*b7%tOxL!1hLb2TXhpHmaksfF&rF1r zsP_w1<77)C@{=kj>u=^mr0K8$4CJ}&Ghgx#qS9BL1*4h!%0B6=vO0&y5aMfa~S8HmAY zn0ZHRUqxciROK%g?&7H6yno7j9mq;CdXo|6OZtsZCwX~BQ}jLDV7LHKm`bWlXH1pC 
zr+)IK3rNuHl{!FgDe;foW@6*JtYTqctlurjFc@zjmTGs2dmDuDcOsJVldVvt`^PSi z)-jCT|JJN4iSNDbH{0jM`TL2_#n^O!O$X^+nmd$7P{cs8s6{Ql8v*v*iE&sgLBCz22hV>}-wT;J9 z6E1&X#WwVB#bAWd`>ubLr1QroX*uhJ`3lrDnnqq5P?; zEuI2xafaKlL4ojr7=BYZ&N+0;NAyciTw31m99J!D(vVZlBRuZ3_}oP0L@>B(I{@#2&1c(kn%e zVqY8+t-N8oSO%4VU$Hw9({G}ptkFPFT?RMllx))7(kydO6i#0sV>`=n3!~1q?&b+P z!Z+*z)~B)JK2Gd-EPJcV>r)5}qdd#NeiOa$N?Cma!$Z7kVkz*2&=qm_n53`W{FN=i ziNgSO9^g+bnvegd$=P?A;?EEv$;rETiHm%loexvCQ%=*lxcq#L^g8R~mMe$;#?ZEh z#OVLxf`704+iSD=zrBrr0SOV)Jo3J?Kf{flGtVDBbOFjNU zz?rF@!8r`YD2mL2wzXtRM(o^l+z7HJ-5t~@2BHCk=zM#(=x0fus;g>u%mzMTnE&^DdG%$Bu1H+Jk>Q1VI{4>DiW7; zyb+Kc>>gqWkT^4m-dk#}Ic2H6pP}#YHj5F}#!6%-s95%VL*W3k?Y@ZIT^iWOePS%U z4~`q`Z2uczJjg~WAZ%jSbfs-)QH|`Es;JKHLXJErBb9`PzlsCiB?2d)(^xEYrTc)c z+TX*JVj@_jgu5bcZXLKGn`=r!GcOFj1-FeiY8l0&z}CgURQ1jDcegqHIl_YgJ$$Sc zzC6ZLqg~FS(P$hTB2}idOiqI)=>j8_M2KG0`wbuJZsl4Qv(D*^3x@;6Di=fRA~?&e zTJDG3E#)w$NnpXxI zZYf#tWFi7N!v#u(4rRT@YIJ1LJ|6r0w!)f9?W`?~oW_I@Tm&}a)~~F$460Opos%Ms zAW*miOVQ2EDXjnjW?lS(bFOv$TJ024{(GPB z@`ktt9+#B^Q3HpliK5+%T9IO$!a)>(yn!FekvVrC%dA*>P9kWDj|6OR*EbRcnw;<8 z0t>;sXOq>6u!_}DtzH&>v$i&WGkCsk*^|t=oGHdJj zq51M3!TuA8;O1;3IQ~96bZ`?%!NJ+9_+cWt?(oLw+o$uBD|<}Y58?VhSSWt8VFUV# zcNV7ip}p;S=y;A}F~UyB1z?oSt%O&HrdP1u2~~hR?lT=#c;-Zqc;nEy1RIA@R#RPk zTiuq8@`8PeqA8ew+J4zN?U2%UOS=ZC1S;hyGSx^HKxnqxQ*mY^Dkwb3Pv*@sOTI8c zMoogffFn|OeUyPkT*-h(aoQq?gR!`9PgLa)ExdyWNhJIp|91dRxIbzTS3o}nRn|Lj z){55Kh(1A}qH(Nj(6+p3wMY$hu~QcuGR3=)IDg4q*-9YYPK5YYx;;u=0J1#|*=k@M z4K4GCcK>D|9eD!ds3r*su}os1F{*+82g8Wg!~*G9`p<-ssO(O+?7)5=4A1{Z4cO_#VjCjE47|R}U=rjhLP5 zQDxvo=4dUFQ+i1izlpEd6PrPwN_sEHbj7G~nU$$O$EWP;MN}meieQ(afhmvoe#f}# z*rutR9CKEMx7oJS<9b4pIi`)cSrexRt|3vo>idEC(dZUBT~q3W>2!WJy_?>f8Z*yN z4~5Yn?5RK1vf8uQBvSPvWgeKBr&mm#T9 zTjqj>j4xpy%Mjv#9|^579Qx(ctW3dQW{MW{Vi~kshU+X~Gs%A69PkVO^Z(Kvzi#dS zakCLWU#R^TC=<8?J`d#Uev-~;*&td$o(CkT?dP2jK4wK>35bE&?&Rdn85da($$DwBQ1Nv|S!JIC?-@za+8BFBVSNS~J5)sT!dxlNYs&q&bB zye@AHA~zpPx_OO#FL*l*&+wy1EFoT);%|D=gLRK4{BxJ_Fjdq<`qJjUip6Mu@Etw* 
z*ONdMirvT9x(Ie-GhFPsdQP@whI?9II=RL&uCjZ7N}CPky`Ls}i(~S!tfR`rzAQZU z`$%RS<68o>M~9)zvR22fmL7JU)(1iU6a1Iy0)xlynRc4tcUNg$LWNK71{XwynEr|I{X?|2x!=Ml*L|rl7;g@O#RbF}L}jLAzWxP~XrR z+2rGqmG(|N5elVCUgm<=NuFC3^f--wAH#7M5(?x~#ga6OjSY~d$EAylu+mBgYI_^8 zcc?gACVHNqMWAQ%_`nHH``RZ|_a-;Fhi_PHt*UDCxAw^sk3M;`>_24#NJ&W%< zs`jf3J|i4+5q932%IjXEah%t&Gs3RvV3Dm)Hq~v9oS#j#ERt5vMQCy0Wjl)esx^xr{rn9 zc1OJQM_3F_G)dkTZVi+->Jh16oR8^&IG#lyH@jndk`WPfGjJDEhHQk=|ZYj z)#P*Byy-4qYTbltVZE8Mxv1W8t|z4&BkhfXuOP)%JASGs270|$IGGf3DM*y^0S-P=a;byG)IWxLsVvCI-^W@SG z+4Z>kozNz$wb}K#-f(;VhNpzF!hZ5ax0IqKCsJb3xvgMxVg&RZ+5r)JZ3S@u-PhBQ z3zuPa3Xb2bh?2t~b)~0IZ|0$O4d$Z}$QDN=^%%RMJF8;t=DxJ?$B+JlDBmb_00QZy zzJNE2Aswid5+fhQRrFVEx~Mlo^!ZGLygobR_zy(v=-G2qd~%bxokOT$Mu8!5!(eeM zr|g{B9dRnOeay97sU4aLN$KY-dWs!f4654{T%F%41~cjUh3j8@DA&E!WZ88kYWWJ| z;AuxpvQt%udy6l-N0@l$opzn1Zj5COBJeNxV(Q86ud5)ml zH~G96F8$2!k{W23o=D>f{Jul4>I=L;^yYZYdW3O4gq3P`waR3&jk`r9e~)A>$#`BZ zA7BEH@l#tA7XZ6sYQB*}5cIx=m9mW=*G)bKLbtV$h<=M5eL-B^%_3#m%1tsC!Jc@M z;4(ywtGMWPWvuPzyEu$5W$Lu!hNtg6Wg?!L!WFC!a?npLo@=Y65B3WM8DV6EVKTXq z2N=Q=u*%3bGA1#;CG2~U?XGi_N9={*ZRk)Sxp#5AXJWj@Uy zq3^E@%x}mO$k{)m5K->m-qkd1zfwFbcAt)YPK@gRI9L=}c`JL8$b1{Sj)xH1Bo0PO zJY?h_ePrPoB_vL>a?Ves4(vJm=R3y4Tle!h2?uYJu_j!?@uXzRh-I-DRS;nzz{`Kw zEcI*&YA+C0h}cV0QtQJmJ(nL9vZ~guyV8t9(M_SYz0+nI-=dN11R;lV*I^9#xlV75 zA)`FWOEl(+H63v|>^t0%jmrXD|3|U|Y(wc01CA2}b=j771#T^bhLpz2^j;M~z6Chf z-TU;oNmk>(9~KQ0d#$cVo82>I$$*Lj_VdqS^|{ZEhtF_!;h4s6|4$J&e>35GLP(vxBu%(B_mgH;3Up?|kuYl(K8k>cJ8`(5=Ev-)qwzsXYB zc_BLUP2C;EK{zFF*YKKqfoFEYkT#r!KYlk;7>L_VlD21aGeMy3>0Gip?CITLP<0+&nHmP9PH(6?#NqtKtV_iBP5DKs9na>-UwOCkW8 ztJ68Q_U~IMNMHWAR?**R<0YU5D!xAwO5BAxqv-t*IrWD$p1nFU>{kaH6>dWUI^Ncm zaHtMq9YT^{89X(vSSI-N7#6duiHs2~?^6xCF4PcL{TXy=00R?F6h&Ep$(lBY>|$Hi z{(VNDtS!|Pma8=-$wc!8S(;mG{s`)2sbpY#xxu{(FeZ6(f6x}PtJxA^F=4UuLQ4jhVHD*{2E zIVGz~>szHej9iq0-!sPnts*w8Hy2KL7R^=yIA%svEhg!L@T_a@Ds1n)Qxeq{qkz?x za|4$Xlg7y2!^XUJ`_}M)k)e&>&m7Wm5X|N#&)r6NE0l*NxNEuP=EooV3~PgS?`X0r z1dL50_C8o2m`TF?~cI%a#cdE{|CJ*Y68Z>>nfa{ 
zWMM5xvwGa7In!y&`t4_wnfl-3vt%`J;MLh)_V7NsRYTFvb-v@S*0SMArwBVmC-!^~ zHO_QDaCJkQ7iTW$Tgi4OKL9oln-g+1IFlDW_?WJ+S8tpTH>g|Ua!8~hJaYx#J2n^- zB*c_DdP{aMM)xw2xRx_AqCz-mlU73|lrH5|U!I_#Pem{!X-A;H51&}p0GU9oD+|2c zgX>JM2m31~a9&2^DWmPq=0v@aXMp&)KlNF!W?~37hF({Kz*Jv@Df?3TTQDCaz0WHv zhGnx^o6zy)?butCZ`)YC`D<8mn38&qI;YwWdg=yc=VnEcNjk<`jeIiQ-ck{}9xiWm z)7u#5k>tU3r<$GS3}zah1RoWZ$SF+rB{WsBUf|kx%xoTN8Ss5c_;TTFBpH$v^2Pjh z1@kLDEB;%~{I|Yt#`Bbcnp=A;pLU;&|JA*Ic~VQi357Zn`^Oh- z6pZ}pHpMHee9YRzMXq&K&P&}L_r4w~cp%w=^vMWo%7aTRuiguOTE+71a2ynWvQJoL zi=3GaZ7Ndlky2Y4gC$~{-)Q@p7|9DZyc+$kU382qukx>cS-x@aEi}p&VwRmKRau3i z&dNFkwMj=_aMSb+AR!5&y$ETVbg*Va*{Jr%0yIgsnD#CBRT}I_w-sn^He1KC^>{VoU&?H3;lnmG5GufEeI$g`--rHU_r*2$D z{j0{5|BcZ<3VYbz(Xnt^-j-qhjOA5VI%6z)`sQ5&r$Cq zHrOW(hUM*O9IxU=bi{3TRUE7Knc8>H8skuCE!xiDofz! zeI7q8K3ZC-{D{~RlwA_|J}XLj*9Sml0yBU3LtwU(MPH?%+WLi$?V9%5f)i)|HX{jo zKT2HRYN!M7Zc3W|v4(8v-wkmqQZRP7`6YLNxPXy9jk{)&I_>F#-!jpZ*-E=X=?z{q zOPWkU)wd2L4RVWIA>uCxrP48@<)o8E3sUl9sHnlWcu z$c%x`6auU?M^bpa4MqfGJA~|L9+2T2iWsJOEvT^SM`?lvrc&6^{gc4R`_|Gpukp6U z@y_A)&8}3>9@Xrv0b35M=FKdm!3!&!<>D&yqhDFvVhqst?%v+yp}2Auq{Zt zYT5=un>wTXY!fBaiIp6h{i5?S>t*39pHW`;o#>s26qJ-f7&)n}j`*3Ek6DB|u@I># zFg@2B0lj^_=s`yHsyI>QZ-IU`QT%j|xHXJ}AxFwXky|U}f>D*1u(UTtiW58MCztQy{rw~6OtPrs;sx_ zcgVM(4+HttGZ>N$MrRNIAXO;|EIwjrucz`1NGz3*4SqK}-sYB}%QFI>&v9&_6J~{A zUQtSMcMdmBf_+h&pnm_}yVU==*7zxQVYL2VNvkl?%kx(+?r-yw_FwO5peu}vjqIx@ z@sZuC8r>P!y!DH#^089_gX2w|1dJLJhb?K=2>DN;OYgcBu<1P9Pct+fDtRP5d6JphKkBTY+sty-+hBL705iPoCL*ZtQRY7%H$RMvyP;Piz|G}u0~H;Hk11AZKmD$z+(nZ zY|+0?OGJDpfT{DUeX&gI<4_# zI%RN;uP10)b1<1;JQU-h94sazoqFUxvyl<{m|_L z;is{EC0iKu9)*3?a_DGtO=KxvOm$ zg|W4tZuJPe^z=$SuT&#B=)3et#i4B6Ta3S$Y0RtPpT1jYx(LJas5Y zjdX|GD1-pcyAuS-F4|FxRFyYf5mnq$dHc6$dvDNb`uCf-0$Y4gSejWDC+kW>gz_Xd zY+5CK4?JzrpY`>2-&&50Wcmj$@1HB3T@UHdWjc^^oJMKe@f~fTgM_Bgu$j>2RyzMx zi`;fxfZaFJ>Nu@Ge+wK$7NitUs<^H(Rj76?+Vlg_u`OoaGpn*VtY|ZR=2hSRZ(R6Z z`TywSGXHN{%D_3;**qVN%lMJqc5R}wBCDFFGskMG6RmKk0EehK z%HebTtfEf2u{Qsigd1HP0igxkZaV({WT#wF&A#{}f&{`sS7q$J#5&eB+;!&NqaaC> 
zHedYVVHaJX-XI@ZRvbfG2vgltp$PxG>ZZ=3z*Iyl#<*rmS>*IoT6(ts4j@P-5g&wm z8Zqk+z-h|P;e^J#_T>@R&J$?@73Fo3lw$W_>$0YCy^K~1p-qgM;JaFVgffFMDK!p? z0VU#-OWYWe* zo*v-J*sd@+_#o)UKb*CpeH2UU@5)g~-&5!(w}XX7iw}x*+O3R~;~515_F$ zVPsPSYgVr7uewk0vk`w^!6Xp@BSB;CgrruFD$~>|bu_!$NJ*qE>1;>2f5)X;ONQb- zS#P8_P+M#W2EO(^TO5%;m=fZsx#T*$5P11as{|YCjx@Hz7#zz?RF#->tKctCsKBl& z=auPlCzD>)uRTupk4l4$_aphWI{cb(6@5GE2Nx#V#+hsLynbu-?PGU6yD?gdNd6|~ zo3${moW5@cQNq{9nRpV(g|Dsf5np}2zznRUBt*SWb$E2|x@by8t9VZp3ZC!~VC<3n zI#vDB%#MhE`07DHE4r-B=*cp%4O&HEe|OfV%MDCgWy6H42;?rCg22j$g}FEh)z({&3R)QRoIXY(;pOm|Kyx(4pt-2PVa$; zBl2k(o1bThC+dZwl~85OkOy#^4T1`{^=_jnz6t7J$Uixlxj?FDdlYeujz(RI_IjeE z{Mc7n`npg1ZSkDEM~4%Fw@!lTSz@!%6&*BE^^ZdG&geWm8ObP1FEMXeck8oC=NdYe zt4N$b@R!S_ToP2js#KxSjhW9e(4i_f=t~Rsv|r|6bG*Fh5h@SfL}?)h(Y1=)jI5I4 zKrgahv%-he)ZC+CFUy@pVVKk`ev1*;+qb0CxSHCk2;Ol&c}ca=@km3GO}6tLSa0y* zo8sj4;}ka&IEn-Mtjv+BN_RX0mTp1H!1&T+W;A^*QW}(!7Zz!2}`LCgTVhA zeb&c+>o}17=l%5A7wfV5zn1jR1mx}CYQzviEhhJ2XJ~UoeVr0YPC^sjurq8<$bIoM z+ocCYs2IYRLg)o2$YIC(`Md#g5+W7PcqtecMA^@X7j*m+UnAJNyqGaxyPW|Hl#BSq z2bR4zJqB+gF3eE$@6_T($}6E(?xu9@*cRALUG56R_c!$AW*vJ)Dh}%(&MUm6%a;v(w71X9lJLh(tiJ@{p6hSI0 zn{%%7anEcUGF?=(fwX0@0(+|RtZVe<#dG&COr8}vP-3lKGBU^K4siL}mC?5e-a8W@LxeScPeYK?i{iHBGO8JjKFEoU!uvFs#(=m+`178!OcI;I z%)0Z&{`xGI_)E?) z;hm9%osEq4aCC5yae76dQ9L+-fE-{=`(!uWsz3_aM@u=XC$gKf1^!Wstea9(Kl8!? z{YMb2EyjG~6jNGK(s!jTH|^?u+crXcD>;;~KLa7yc-ZFSi4QY1r!r9r+0YMr@O+Rw8}#we4Z_0?x= zOPFT63|EL4^UFTwV#c&Ia2*UG(}?<@DO|*4BKwhD8vCI$-C=!8BC}OFVLPAN8aGrO z30)H=k6y}9pu*BUuK9G5Uuwz{4o-yi&FPpg zqwQqtu+w9)(LGEsoTdxuVp*B=(nqBsnXz{*J(bA89lE~J;mL4i{A@$Skm>ok@cKb< zcXENaX(G4BzIyP{JZTQE_rPs3sTG|lB_AsjV{yPAtOI~iGF0Bg2E!c$S9xb_B8(w! 
z|1{rpk0Y?@y;_H+FKr7_*H$w(zyn#4$~%f`a72F#Fczj6BVp9Lm8s;R3=@%v-$BL^ zAubmD)M2MzqRLn#?8-{RNYHlvscA>x#2EI+_gDnn(?P-(4>^^0a>c=kL)x6~644$( zCAUqaA?X*Zu?23OPzJTZPtX6B_!kRO3g+K6&l36PYoC2!<-3s0U-hq^-|>9VUvu|x zp!iqg_?P{4C75`ASE{+rm;E^2Yu+pG-~CRkU;ptKNU0>?ubb`6`}lKghs>UpSzv5f zD#b~h+aN={*hT@46z1xQ z5SD7ltq;l6+N=5yzn~?tb$-KW^k8Vjk+l>Jui;bSLcV0u6hcQf9}%t zphk$qsj7RwevgDlaL>f*w!Srm>oSzJK=X zCL6cVPYfo;NTwMnBBkmK5_rv5w#i+Vw-m9|Y}wRHgHk1NQ?&K}_CTb`{MbLBcSuSn!zBf!t7P$T zw!(8zIG@`wa|hxpe?`$7betrMgxcuXFGED}9OJQB8JLtn-Pnj)Ly+nhzL$t)-gPbk0{&qkDRGk`k$?99^V?WtLs0hXpFxZv zEpBLL%h@s?-!-yxK6vz=D9DO?d9ybY#7gR7DOnYNg$rJ#g_y?E)6_uUOB$RdJ^eKM zt$;((lT5j@-MY&DX}SCPanrYKV4$H&kF)ce!*!o@A}y_kxlKBT^Z{}|wyW!#RXl73 zZq6~ha_9j>fASJJJ~A6H>xB8CwPdCIE}rO}wN;8{_dS05oeA($C*DPR0DxD2o+|ew zvDF?OWdhfV@fd{O&84d^DaZ;;+maVL`*XZoLC(Njwqx{qhuCqmml7U z#U~W6CEDdSEkzn(B)csR>##PqT0aG(6eZ4Xn2o3$C6fwzQ20T;)4qbX7p+z^wCJ`{ zSfRkLlb1{yI}|bF1a_q~YEtq>j#{^0uD(hR$u(9{Y`Mmrv2xz{Eud~f{hLe|ov*_@ zmRG}|D^vl)t$y$i&EP^`ifhox{mgS0#5~rmu6sjaVeFlLaHV1JZcsjtrTMlm$L|dv ztE~EGFW|d?T;K0nPS2RGZ&&d^2PppvkvH=@7cG8I{|}P6z*Fj5WHvAynj@D;#DxpP zYw5Vcp6jw>(lpYb3jcQey)L-;CC?=X|E&@l8To=-6E*F*&CBCc_cSwA%_Vq=*=S*cRnl|!>oT{%7`(Rx^j%^ zb+xc6B?@&S26Z#767?4L*Je>A-PP#;Q=~6@vI9<2bA%QsS1qqXscuQ;=ClnxPdF*H zM6rfN6_53?@$ByE-mEztz0_e0#j&qSvf>`1RlA%FQz#(U>|+~WXnN@>)kLOK0ZLQY zT|F;(fM=3HktPq0zWq4LY?$VnU8%&w<&1!9bdebu6oKXSRFzpR3whmjk z%KnjISjW~;*63cHyDgMKJPoU0fMZ8Rwc>>=v8a>5ufUbd5DIwAah^}y#tw>9;})~L zR&NlK$00#jTzDFA;mjg+m$FWwPCunLQf7%Mr|Ls15Nho#pN97@qBGxO?XLlwo3h_F zHPM~3E%CVcwk8%9{wbqG|`N=JV^6L3TC=nHzywJT2A^wnY73-d8aw8 zmso?ncRqq7Q*F}E8^O)H$|XN6;`#!&^YVlEO%SRJec}g zuqa^EgkZH?cq=THe>BOm4e=Qt&j(1;=KfAn3JpwiT0j2P3eI#2F<0xXruglgN+{x&+#wPQ_|T;qG{*sDCUk}tt$xTob8#E{Jt5Xa8SiVMjGLTWJq21%f#1@EF#wB10F&#KuEJP6DEOcK}jKC^KwVoM!5P2Nk z6)wpfymE1S-Iv|J~eQM`P1?Ft={e0GtGL-H*AN!#V77W6ROaREe%J)zRE|-2BHz z-{D=!Q{e|l!MBonr*x8$eI`G0*&-d%t8=eS)zAQuHvgbm+V9evV1NZ94E>k%%x}5^ 
z&UOl|c8~wQ<9&(T38E(=?-gw$am6{iu*7Dvpj>ll5*o-A^bEZJ*oJ5zG`aOQ-7QTTl`%|7njjapw4E@NBU)S!a&iX9$8Fttu)<{A4I_Z*ndimtSVnAM*R>68ir zH6gJHs-p{_>6T@ufF08~2Flv_{R0=b13^PKQiwBBl2Yf38RM$IOcI*6V z8J+x6cAlvQ#A|906k!c%4gQ;;uSH3^4fQ_1i(>0Hg#wJyEOhnx>8pd9`0EvGeFzBPqs(I%$TW&&ieaS_eyMluQu0B%xNoQcs?<-~Pp zB)klgM+IUiBWCiZV?ipz9R%BEdRK+DSd~eV`PGC&AWTV3N)!N|DAS@Q`iaLpN~db@ z9Mz}RjU%p?fy@aMZNZLYtQKprubx_(EMTN!y%~{&yqRwNm%#9XeFd_et`6GS6xgN< z8j4a|5^Xu$ZU|KQ34#*YYGqR2To}r012K6@y_j&3E@5~`AjjBY)OnkV(4biq!cL^c6k0tnhkKkDVt(p&HXwk%J_BREBw!fc=waSfaAmGuPfh3|mSnD&5B=&TpPu3$rP5`X|0k+*l3Z=S1C&Y zd!Zkdm>p@zVreqL;zLK2RSUUwBK9Ehe}`$mn;Ox`F@yiZ8G*Wg|EnK-1}~_!|DS)m z7bn)s14=dy>>r6 zWF+MSkzhDF(jI?g8hUU#{&8UXeiJ#I15_6d#c;rpwo@FA*OzL4&v-f@xBMOE+CvvC z$1uPBoyGwe0UYM>?_S5*4nSW=<5=?9vqOm-+}=jPV9*z_^*)tw)cvIzibqSw*jFWI zhIh#&@ep{53s#*jc1Vf@WWdn8i=Dj(mF+IGi171E!Db8yV@_+9;v#nyMOq%d5&`<@ zvIah>g-q01O1MP#ua}0~0UkqmOWz|U0SgKRMCZAhbX!fsNlC4BVYs*lHWKVyFPe4p z7DD9I)fA;JBi#+NIW9 z>;lpRy<3|m$rnRz`iLFfXtFKL+RQ7`HN*N)7aWEnri|TN@pCc6nG`_%50qM)rddcG zUQuKlTk?G9G2&Z}DtXlNB$DXr4rC}Sols5cISVufp)H3?LUucRByoN`h#EN&8f6Ol zIQk5=+prNv=C!{#ineqaV2Z6mcAe#%;4SwmEX)9oKQ?mQHJHZt_0bEp=}Zeui`?tn zBeWN3;9E>_AG5cEk{fO|#tYY-wd#NGmO5ii2t{3cets|_q#*7<0|4P0hFplS^pti6 z*V2UYu%cT%7`9@I#n*yw)V6sB&>e=~utfeM|8TA^6aWI*`0suriC52BG7`KKPLtg@ zJXA>2YUoB%$B7?Y|LvfsPBd9dQEp(H1@>?ZOLNXaWQ3-}p=tbUuh!)b@a93XSAL@7 z8tulck{o~JfsMg;KuUZnT)@9-vJfhx4#qzxb{S`a>kxR6U>eS1_PcVzG09q@8x=YX z=6nKevoO#?=iWlH?Hs;Vc!`n${SomwT&^$?d1-4Pa$TUjf~_RQ$EYI|YQl4-1WAKj;kB0|uX}(ip*riH0GRELAd`wB5MF~y%%Yl5OIcsFxf;vrO6%+`Oz-8i z-!x10WUN9*G?@ExGG`%?g^dw=94dpS!po$QM+6eP8O&K#O~ zN#gysC?T@hX@Ccll#9P=y)Kq1Z-W5x2&4b~`cmpAeB%?~mH7RXk;Gzg1AZxM=9Re_ z(E3He@O0+ElbUdv@K;XW@n-_h#{F#O-~1TP@aeqmi|GJ6pp4^_7NWAgspb9f8l{)BzDD^ zl(R{m6~%`iF)~qh(Hfp6MA)Qa9qoptMU=cM@6)Epd1 z_g}G-qS|&S{i$}nPfeI_Ak?VPe5h_rGxm3(QcR6lKg4x8hbtAA|bVZ(KLY z^7ZII+BwxD{&5vJtJJI1qU{K*bU`~x`%pBu5Fbv2%CEc zT7wD6%wB`S@!K)v26tOyQoZpVw4{L8TpqkV!2z_j4Om7>V-~N&l-6M|{3tmo;W*6M zCS|M3mMHvgv}S`erfK+^loFT%0B?VUgPJHtM?Gt*3_{?bx6j6s3d7V3IN+Zk-sG@* 
z-89BJxii7ww$-LijmMor*)34VG?GHlouHMv)k^yCsR*?tRPGM2#p(`Ak-IvIOaecT z>DE~f72t}NJ)E1q?J}0Ky|OYFG|nZ>GTBGC&iyVLbJ47n;}9U=I5;$nO0T z6kWq>c)Ot+rW(IAXaBB9ZnnJBm^tfS*TF=m$+2Q^R^}QjlDfvAnayJRQ)HD6G|0b{ z#7LCkWf_}FF$f9BNb%xv_cYhLw}YxJN||4jEtDDCk&5_(6lei31BdvdjS;Gjm@;Gb z_n2-oGUeHPn)dyAQme=e(#19C9v1pOwt@}dwPO$nM?>t$2iSenbbXMzDs3fuCU5NQ z9R$Z2J`~m@p{jjU7j~Pb!8J`3x&qTK&nv?--Q=QI60OV zq$Zs4iN{JaWho^Jh;3dy|CZ5dZi>a#FNtIPHKOmWK+( z0S|TozzsNPhas=es%yQm*?MF}u2@G3?=}0>j<>_ox34FEP`LH~uJ`}*?|-fxeE1?z z8(kMteAn9Vs1jlE4MaG$n)!Oz1pla6<7wLEenf2RKzgx$7T~aB@BMIko!{XH+eb1@ zlXZeo80vWFL!k3b>`i6Aa_i^^(_FsU^{B_ zGV)nWhX|>d>^0M<{-F#kf{O0-ST8!Lqf6a)^E^bML~Jz0Jl33lM5GRqo41}TW&wcv zN@H4*s}Fw}-YZSl8tB68Mkv+yhFVL5 z{)TR-oV_Z_RicB~CfdLnlQ{~@EnWl_2>w^M#XBM;i-OLCB1#u~q#o!no%H;Y7oK6A zL4uNSe4&VVkbY0BF4^1~KCE~PCHKU$Tw}q zrYY+7WJ-_otEe7qyv_7#C1$iC;}D~a)8&Ljn&&G+tajeH9B{3>B!wCYiqPwSgkaKJ z?uUVG|b_3?3J32w!Y5VsNQ=0>L&1ILU-hnT^yDoua%M6!&hfnhqP$M8)uyGt>Yc z=e4omh{3dyg>NGWCXIR8G$QBxGd%K!?8=FpCIWzsfh8FuUF>u><3X*MG4$hbIK!R- z(=;+Y$Px-$e;*^sxk$B&zBOyNqSgpM#Yy{U-j_*{U6&2{$5i0nHQ=o4?SJ}%4|p%a zY4E#?%D<7bw-{ErIFA3CR$i({cQ6XJ)T#$RV%sZIHe8ASGb;?jT7Tp1@FebA`=A$6 z-+gGii?P`fH+&PhjU*t#a@u>k^9#H^GHkPaV+5VZEp6=`HW4Wc#dDwuJkANGc@@t1 z29Qy^HJ{rvr7Q?HEc7PEI}@5`F+R(c*eJtItO5WsK>t$sd0A-l4Vy06mKf{XYNMd- znR@qa=-Ge*i%0*`s-h(~FswNeDx_gwDr#mmdJTI@nMQOrq8#<8HizUfHFnc5)ehR! z-p=t{ROn>0hzodBC*C%kR+n+Mn`i@(*0$fKm%uwlnQy&b7iSk+Dehec%~dH;i_2GG z>w*Jvs%kR#T?YVRMt*5M$zSBv1)kAzuQviHbQ8$#hcwJ@}0i z4ft4GD8^e5leT=Sc!luFNUAoOz=iZ^L&Dq-1%e@Y1@(BV4%vtNWOI>y&yRySg)hR9 z%7ep^wLMT01*F2(7|Pw4zT;cD?y=&lVAT^=z1+4fiQ-?r)@^6D&5FYV?T-$@rH~< zD=<}#H{w-IxS1(jf)1^3)UK%iH=A(;f29x=Ol@%RscooarQMQEyMVvu;(nxHPf$P; zy{y*kCnF}7O!qEV=-7-gSY!V$jr2dWn(WD*s;jIycURJ9>U|v zuB6KIgU|;?{+iR)g#Y1!vNeH2-BW7##BB#2;hg0T>%xlG8mV*1y8EwDA`C8A_i{Qs z=(e8dAY6{nMqO}Bg^%-MM*wxq#yIP}*eHk6lY{cppkp-+5Ac@E809A$|B5S1dT=#e zq93)?KL-4oyI09F!axz3S?*XeNWr&wKy7MU}tbg@>8Hbc=vD7oN`#-}z=&qS21 z=~{Kwua_Ihgp|rASvkl>{ied0MnIlOV0_TwhxN@7mb7z+%bn=by`y=_{{G`Gym1!) 
z%L3>Oc58H`T`|}j!@evA4ZCSCm|k@A@jkc$*>x+5pbP4BIy0sa$31%J~Ma z(A_Z5uG}X@MyOrph3VCWGEg|{GXpAMJRl3}Jh^_0c=BS|Cxw8C$ML}cO(|e^h?yW| zsr%VwBlOXL267%@r+&5Tq4%Oz@o1~vuUDdMAQ03%kdqW#|6zxdQ?{h|&XL0U2JOBq zz>46dLAuEEIi-{vap?iCs(o_>nzj$ro+Q8W%P^-INSS0Qos>|Z4+<1idE#?G3JJAC zUMGtz1+-_axNJ?=H`OqM zmbO%8IP>=`7E18G1owOKfn4k|sb{}_(6HM5of%_{3(d|jk(7$@d!pZap+^QJTdQds zWJIhXqf7f>^g@T&$1Jkp$AINnHR~By1L?XN@vg>qsw+csWP>VbaK*U6Nr;&Y1lY9^um+MiF#bUyqOT) z8kg#25@pr>;74jfI<{+C>&fA`y1zYd1!DwL2IL}tyc_7--`BQUguNyufl9dQ++QW#Nyc{0YW7E zm`C@oA2Xm<=SxbgV#LNFn&)HP^>=M85iLI3`vUt6jbCxuE*$&GW>z^anuesh;tm}v z6uaoutVdMN&*`wEH-Fdwo$k2{# zT|HJ*Ef^FUisoKx51)&PvgCHw(XcrV$o)DQ00^gxC{t-iGmW5;lGWL2t5oT{8yWAf zSHVB6FpXh7#_6%=kIWZimxJCjfDl^dHvYb8sxKXA^I_{ST5G?DX7R;B+G5j6(ayB; z*}TO#6uE|+BBZ&k$YN+_HHNxD-aOrZ6Y{Dc-6??&puiHgKxpJIxbws$a&-2uAcZM2%wE;%519o zw>r~blw76=Pzn4uXcG8n!^`+d)Q~iW^d@JPIMD|5(Tp}iZ;E7Ae_G{1QtYw*I)q}% z<WudrcC;{^^00xd>U&V>bnu_uD3Am$Eh4UoX&>Fh@d zKkLPDL{j}d#gw$HvBNWVZUO4rQKIC`8FwbYF#W8|Iw_1YY?zmRgK6iUIoe_IM`xh` z(JHL3XFSH{_D>(9ZPc3=Dg8KsZAko<;fuy8tJMzyaim18ZJ7@Lbb`L4udv+y8vAWD4Wd&EA#9n~M&o${w)n$r{`x*eJ z*WW3>E+HmW)b;w+rEB}Ko~%?(Q89< zH!hSos2?-NgArxumDKZ+1oWuAS`{Zv^y;cWmx~Hn2-uy*=usZl@n4>mahCdG_;z;_ z+6?KKz5N?E3gNz;d@zc4Dn3%lm|llK^VAWW8qGf(#U{b=<_;m))K9X-qt?}_Q&5;r zZ4!nr_>{k)QO3zbhbM6{v>XunrF*c3^<+=6M z^fNgmK&lJw`E=w%1cwUL5TUnXE#eacQ~8bVg08|v3Zd3TA>Zu zC(~6JVJv&6m%QE96ew@`aQ$sljIH4h%0p#AxPNPhL8>b?r&PDD3)x`x2!QmE4(nT=~}qfm)<35XXM9H~D69a##+ggg{-#S7>$ z>@4{590TbtX!mRt`JS!QfAA@7BP!4qFr6Uuq2OHYED!UvBzBPLLk>vw;yDghbRU4J zN@^3@m4FeKXSS_H$FR8+`vnI&yXoRqqv5@B(HXed$}ucjj^)v5y!(4CHjA!AyBL~- zPFg){(WQ?L1>64*P(ZK0p~NwUDPNdqI)5!`oLHPqb930)Xl0nSjs0u}3f?6$;kkk; zVZ6#hI$RV95+vkZqM;UPG@8+w)f*d??JTpBNq$_ZEop$)z&&;P68M#Ek^i*Y&uc&J z^DO;sDsX=;kofCn*2ZOnWUqy@|J!hTM*X)}U=Ct)0Lc8#UY`D*+<$xEXTawQ{2mop zSJJ;n?1vMR-Tq%Vi$5Ix&rJSs71*bxWPV+OFhq5${lgXKX(hZTcX!P1k8>?@F6OhJ zS-z=>H+Dx|Jgj^F*IPCFK1IkA5O9_ua*>LhZSMxJooHDmpq8i0Tr!Sso8-PUqp(;I zd6>|1O}D3Cxedc8r0xSRWNSS49*}4TQLvdLCaytuhPWd$^#9pEB}I 
z$}0=-ya7hfKk7Ms)FonUGYx=SnxlXiV>%c~t>BuZml`=xSA)K=E!`olH0_Ec85%gH z=XSD;QF$z!jI>zdIIrg%b&yiuvF_N|7HV$^S3cc_&?&(mi#W5P&)7JLWoSFau5U+p z#8aiyl!j#YD%mQB`=KQLM5(>RQso*q{EoY#1aBp+w_)9b)x>61xRGagWCf$RR`2G{c!nQbq%JZUzAdW z^PAj-pq&xt=~xvZG)$DNnuAF!V`Q2FQIKV`$wE9sMNS;XZUlP{EZHC)=2*kGuVWU~M$eVtcDQ^^_$6#-YGo@Jtl5Kq4xULxT zZ(Q3k$7r=&ZRfHCsY_DETK|=P`wI9s^Ada+nto*kB-Ye$B9@|Pr+!yU?oW+K1(`Y*F$0U3w zN1Rr&q9hADwk8GSO|V%ZST0}z+NUGJnxAn@01X0VA=1WW5i{Z)oD47IRrY~JI~mqh z>#d^GTtEp9zE!1>r-}0~I1BfCbg=3W9pX^t7B4Arq0Sns|8hTjHHsSMM zxLw!(sCF|=(X;-GAPo^z-7er8y!~!QG&NB1O z!0v$Y8ZyGkTECG`Aj4&ythh3KZcnTN&OX!pGwoPeKV$W{RXQ@6lX{vun3H%h;mPFBG2-UVmU}7jw$E)|E)PE!2e{j}0C!d;2$%+{VrcbvafQA{zIj zrbQb9-n8b^;(nzjfI`gse6{qRGk}8{=BO(4mCwNBlcN)^cHYyDM`P(i6Kvq-n7;v& zX05cj5FB}HoDWaeU~jJ|<8GBJOqimtvx}JQp+enn1Zi9e2gFk3nL>xMz&n*x(nPey zBXK)5{J%K!|1jY`T3&Tzl+_U$vdJxd!mi^+RDsOb{`_BdKx@Ov7N}xf)T%T#=@S+mFan>iDo5=6EEZ1Q1M{8p$DLE8~e0+pd_iC z7q(#Uxq_) zizWEd1EGP?d0|6{@ZI5xpl{6RAYt3Jo&GWrW_|l4{)@nNGK{SeyUmcGdt|>>DK~Qc z%Q5nV4yNBII?{~RO;X^{gp)rG7sLh8Fku5JEdSWXHC-V<8XWXl9m^}`O+mN#t``M~SDb6QEyywrxCUED z-PSTNgWX`^7kU>(I+A1?+0^r<+Pc=po~^k|Cz2A0*>KIAsq6P1ns^`P=Gg6yKlG36 zcBbv@;Z3X(XxzbQVKJYH9{UdO2s6!kcQ%jc3sN+u#%+MKO`t_t!)h}Dlf5Z-tnr%e zPP#xL>8nku-G<)yMLI#lle1g{4T0^IapUQ<@oU!*a@Y%EGaZX0x5AcpG08h8tY8FS z(sIcfU#F>0Lx=THhy^Bt+~I6RoCFGs-MK_eb69PhVb3sI%!y#%VH43bb?bSf`>AQ>Czyp{9edrS-2XE; zkxQ(mnHo3ApuDjzE1Xh8ue_GT!}W?Xd@M|e#eHw^T87H5en>Fh?NTKL+oV{sry)4J`yc{Xd1JlF7aj zA?sd<(0Sz#nVM|ds>x;lxO&Yl2WW^pv9DgedHv?gFTeca%@_1PeEfjiKXt+ka}dvu zDc;#j9$OtYeJ0dL9He7W z5t?I)v22c*u;6v{-p^a**~YqnNJNc+uYH_awEZ^Em>4q@63#FVi=%u==z2H_jji3C zI4qpLt`WPd%n64iL78PJeSA-zFWqtbjR691-0Har+m)tIrs}xTak!fd*4Uk6F-%f- z344cpoax+;BG4IIAwxJ(>xn-anwe_dB(8`M3@@c zYX`IPxm={I+562Ya2=wYRueik3d7>r*GEG6--K;RRDH9Amv?MOIGgJ>w@6l>ciLLkdL@BAgH z<`;Gnat3kM(NSO0ow;ovi$-CKWDo1&8x;Y%+z*I`sH%>;PuLuo@U}(%^7U)e@om%Q z=((wjlh@)Fz>?^NbP-3=oj!rQNW*|F4g^}AO7O9Edy7HP*=R3nr!*$2+A>kyD>JVv zcH}FIWgMJtI`yw!zxv|M8%{v(!Mt_LuKWR~Mw)Uaya;p_x(TfO3toOxnQFE7un@1^ 
z$xaa;tFb$`;Bid3jBCYA8tUi&^vg#bk({R$%Sc|VH55iNOG zftaN;1uv{nKl0|N>p!`+T=pxu*=9eg%DiKfjYl?zfQHd&?$2ee|PGf2kCw8TryWWf$aEMg?tA()Ucj@=k6)YK{$GvPisvhc1zW8<<(5{S}| zcNnrCD?4UE!x!w~rM*bVa7f?l;2v}pni<@dX*^_AYBL5z8j?YvA3tNtXsSLF%D92l z9c>sM61vL<)BOJUk}KyNwQgXsv_wTML<&~|1a+ppE>Xl>~YI1-5 z>NWR7KYl2XTu}OAK8tk6kr28^Bn<+nvV~d_v*_;3Jgg#U?{P^}JJvI^W0;{TRJ0My zpduwJN>9N>*15-!&M#(RZ9bHX5$D_2PO@G!1n%JLLsXgs*V-oHY&vLR?e9cn%OD@h zON7%aYhwzN=s0%4$izNUNyMoP=`KOII8fj1WDyC%$vWDA1?b*}$8#Tk{E&xq$?l%P z(-7Rl3KUqj{})Tg$<$*f(EuA^GMtosao%Y|-+o^~&9>&;H}B!e-udT9A~+ znzXIgU9PR!-Ve94rWl5#HA&ISLr9KV{QStrB$((Ny=wMw)A^OmT)G~NU-@W`lP`b0 z+3UePW@kmOz1Rsq+BNlu@VWbNDsasg`(}QT^xcF`_Xqz=k{55j`s%BTbSA!2i#2vE zdd^-R);PXBjb>qHDSI6ENdAUGjUo!Fp(eteL$yyPp5oU;h`AOR{A5_U&6Je|Y(!_kmZwKb7Eb zxd{J_#k)=5D?;O7exs_0^85ROL!tKfzUebQtxBllh5H?Nsl zBQnNvqsSZS7D7f6`Isdb;Ca-Dwl!RjFw7!T9K}lT z_V~Ft#51VnW*2uKiTW0e2WKCE@LcQl$Gj(ME<|j|tkp?Ml2QV|EkBX~Oh~WI?M&GE zYcaE^f?rIuAc8tO`dE)>8%cvo5gH-E6!HUGUs8&0#X%V>9^^QfQ0W2Hgc3Q>%zB?; z85v_PvErrQydxfr(l5OlB%6jn(A>Bd9d|6vDpNc<^H8EiW^Lmva9&%4HP;&I)mYms z%=I2gfpCrYXiF{blP;<9L5DBCpl)15&M`T3K7Qm~ChD+q54Njfg;*V4yWZ?H9=$q- zYDN_$&t{}W0y$hkI-7f2`H|y5)p6t2U=Purm<@!4RqW;C8~=ahcQ4-Ya2f{e#rsT@ ze1%zn)JtZfEw-vhQ2ws)Wn8Kp5~54`%s`FTVNa8x}SnM|=P7{kwPXiYY03^2^Ckr06>i(i6KpVsZQf83yCC)suneo3Pz%ywK^C z>;lSYg-t3cQ&Y$m^BA>Op%LdWQq!4=l?uRi1u~jiHEe0%<8;&~(YIp(V=Rv7%Ya~v zaAG1@%q;ge9H)3!_~mO(P1?RjsZ~7o@*9a*%cwul0vxHX|T=O*{bpIJz%Dm)9#=C{HJfe{)QVs4_okY z8G2F%V-K?yW}R$S(8k*ATwVsCdipn`TwZoq$?liJB-p{677cCCakwFks#?!SG11E4 z;VGN3S44N&*gv#*5u$I7PAuc4V(oOh-B018>bDNeGV9q*SMNdihFPpDw}!&-b9mOtR1BvHF{@zxw*an-4tD;1Y`m z>^zJ;)BK#(a^L0%^}xDW!`ZWE2XFs@vHaKRc)aTJukTY^wE_}--kZjF&&M3ALB$H z8C+f79OV(bM=(>O25SXeCQqct;g{Lu`3K25v#+^#^nKG#<&&9EXNv{Odj9yMN9L~Q zFTOy+br==kz2Uu^|H9O=7S6*0eI)yV_Nm%H5xUFdL);X&tICUe_L}1+nS%GY(aN`9 zn5t^Lw`C%Th#{7{-^z(d2aguDMfBCuZD+Kd zNC+oPwHG8Q;^NRnD;oTGMtQ&Oa#I5JF_X3>ykyMos$G9!(Z(PADo ziaSHZ1TvQJc^EWNq^k9f5=^lW9zo+1vS7+ojv1R4e0z>z>@u^zR-7s^> z5UY=4V#%-zVL)PFOgiqi(n$uwjE!AehuxZoWskh+gm;T24~7_xMQHdIW5;pR!jw5& 
zu>W#UX27nS95I~i33uL2X#>d#9kdBy!K(#54|={>f-&Eei}wmYMUF)}ZxmpRLEt<5VTuO0$kg6q1kQ30*+t z8pDPsdEN^0{DX+dc_WP{xq2hoyU??>)-waB}|+CbhNNVKy*5zm*EhqBsd z?*@duKIxv;u{vit`AXG5R|}F7I~{1*B5gAir_(Y$?UDpW9+zIc;FE=XeD=#PzvAy3 z{P|CRLd;Q%OAhb*wShW@R?EefxQ&tS#7zw~huLi^8Vu*Ly-i>#^}>#$sUJ1jrsCaC z67`5{xJ`B>Iy3>r-Smc>>zR)@eB_#$w;Xy>VxkR{%RI}QlNO@ENzsDS8U&wBkN-bK zG8C4nuS83BYsn?koTDb|5*IkX@pbLd$?<>dkI*qnsKCw87p^jR!;CW#v;56B--`M2 z<+pFYG5?o7&uOO1m-O8+Fxz45XN^^2fW5z@AGJOH73Xx1a5gQH(A|xehk_L42)v># zn+0*=r605{B`H9%*Z%nZ+wZwY;MpZ_`V-Iy{Odb>rC9LoOou|x`#Ga?RixXUW@x1X zicQIMY)(=R5aSwQH`^v=V}I!Bg0DNsJ==9t&LtQ@D+a*f7vhUFRr={3e8tj%rE3*@o&D8d`w8O|3;r`oH!k1dspuZcKk{;*!1 zXu2S!3=g4_>$xSi`J52xn z_rL$}!w+1Pc&6`}Z&%^Yabe3JY-devb7%T#RD>1JvrGE@9Ftcid`PFK{0E!MXAolr zn`zETO_>qN<>B;e-YVx(yU(d>^BIs2(qygMF5prmUuKZ(zW(Oxzy9T~Jon-Lf$i%L z`9o?wPj~0fg=`D;+otz<`^&Fl(pB2YB^%Ze*lIQRmx4y65sCQ;t`&nBVyQaZ=;U@@KLuXeM_2OfBAJUzj%j49 zc3Wv|G9hftwnQH$k#vQW{5EUQ4J*$8-f+vaiLtz2*c>(qEB?C37z({JU-HeIJh1mF z;^PahEA@K$;!of56z>HW7~g;Y{mU2MA@4V&<(3`VvoQ^)yfY!s+qSdV;=5@s^3AS< zFWOpXCz_ShnFj3YT)oC<&F9O4+#*a7eEb-G;W$JAFFuK|=Wd z$rI%HisVs{m5hYs95RW-^m6LuTAKI!ndJ}fKKRfo6a>~hPrGy@$G)X(oYkxYRxwUQ z6+uWLl6W(Uox151fnh2YaYRkC+uFDt$hF&QK8{jDn-(j35sVaWGT_5^e2l{jW#3$R z`SvCE2pH}_Nzl}-Z^`QvPdfu*wwcYSg00Cc0DEdvihwX|ONm4;olUvvCu|`nHN2YR z_{+*T60zVB%hgVUY&Mk|u?HglrpJf8Q&nqm^200pHSLgyA3V0Ex?1~pBu(m${bUOG za;Jlk(z6wBM~}e<+4hZzJ)Ln0nwGfitaEU_0pv-)59PgT=8u-}sH@yaiiPb4cV3Fq zFxjeV)@xrL<&IVPu16|n-DrmmqYc&0Y?HWkXrSd1e>ce4!l4XnS?#D&E467Y8s`^2 zFxoc@xfA5dS%|$FH`};~nVnh=D;*@QvnDFCTdiq|U!Bz!dl( z0@pCxK7aZXpUC>wH`BiQ@_+xI|NZCh{__3z-|@63*BZ_{ z73e#Qp0I{5uXmb&v@C=~G=5T#%caeZ*5L+Q>5SD>KrE9grr_#>JBCSLXKPYRUT_*$ zMUkrw9bW-_`5*uJA51G3&mZ|t+6!(4U-1=MEVQ?3BCgl?&e~SIX{)eJou?CvQCxj_ z#Bm|dJnj_g5u-z8DU(CoE}O++67MjUDZ1pX16PbZo&NSu-~9Eje`T84h54f_%%ks* zl2N{2NV0if%>^{ITkeg#LU%fv(v^-?lCY8l?JD)8Rt}XjTub+U0qebTlE#wIPs{pl zou$bz0pUBs1|u!5mie|UANl_0cmMqEyYIMc_DhGQN4_rkpf`hsj%=z|$!s;Epv!g- z>12|Y@v!M|SvyLoCeCPQQA5eEL@JrBDP9$*S5@{CnVq-Ski3xwa}HQsHaoL9*kHyH 
zjIBv6K0OK*ISYL1m;{qr4pWA(9K%*3JOKQWzxL`!38z_r#V}#Yif~sHYqTB}hC>7G z?GBMp3t8YD<`V86T7LV*4lO}0nQhTdB983m@b0)hpdn@9F2N5|e(^>BRM-?M1%E(9 zeKEK^dDtpetxTpPm z;D>Ct9m(>MO1{ z`pyD%_(hOMWWdC=!I{+DvFAlGLZ|pnyC=A#e^A4{L}BGAHDl(NXY6#j=Y91(3)lZt zsI=dF|K0cY*p2f>iwE&9eHOrTOj>Gw^!fNBe*=LIE91~SvhJDMKC993ZXqA?;xAhC49)Xd-x7ZJu5<1D z)AZ-M&+GD${B{NSy!CKq1eZ^;+BTsdo7v6%Lx1eBeH=i(9K>louIl-x&D^6wh0=9E@mFV|M1cy zmASut$I)qQ2zuogibDL#OZk=BPrX{nc}kM3=Zh)1cil0w4vhh&ZG&bB;b zPYnU$cp-d(l_6U{D$k2s4U&00xmIGNO9@c6%`^qXz6^~UdahmO*l+l=tWcUI|1>B_ z4&!yK3wk{a4J0S>E*M}~dM*WO|kJRZDEs18+<*h*$lF;EZapP5MEStY^ z6TILpPgCSz6okMj($u&~6Q>=ntkBW|X#QcUZRP2JPyxbD3ic%#_*}2m-e5G|bDcV9 z84CH{R01__1Em_)F{zEW{nBPZ_OEiS-J!nK4zA(q=$5w}xN+j$f6haEG@j?Rqyz`1 zTVmSoomnvHr9A6pux^Xv;+)b1#E3}%vmhuPZmOnUGMwp4Q0teB&uszEZM7O^DtQ&% z&BQ`M+M5ky~eAoOk;U&pwFZE!6*Y#O=)dI{+= zk3L4U0FK{-)w5q^{)M2>` zyR^k`tep{_)v{28(L-+%x1Z~yt9 zJfY@05Qfruc5rf@99IWr!;MWMfxg}k_ps3s z(&5%d$9QQ*hr+~T9Nk^KRIEMQUO;k)CU;K}9$xcPceb>G1gC;-cWCiimTpfY`sKW8 zaw<*#7#F63d(loi%Qr!{m?-|-@wQG7V3vvX!#Ce>jN*2mKIdH$)5DVSk7K!|&Nk=V z?iW)(V8u4&q~kp##*`}x@#tlrqT@MzR*s-CNof>Epvc@gt_We|)X<*eTE$0QIyr}H z?Y{=>z3h@z82{q)U;Mq}Gr=~zQVY`vS}eNKCSu-m>h(`@Zy@awFI!{|1zGD~d4{G$ z&l-l&xeF&yId4aY{V7_j*8&S5R0_dUNSdrInga?F9w9Y>lhdb|@?TWE_kW6h|K>d(=HS!c7&T>YDormxk3G#J z^}R16`wN$aeP?YSXb-5HmFOz#-QtEbyzJ>roc{oA2LnA>tL}hmXC&b}8pJ zvWe~;{u8_X?^udsmHre9cktDkJAcg)0asld$2q8Z1(ur%GG2zhKCDbnX1~7w?$tZa zP@JbZW93NhrgMsXAF}qs`1KKfe-9_u8xYS-UQ+Y_8h&(gdy>A6RM&F2$NAoJdYEgV z)5OGaSf(-L;M~&_)5>)=SKTgV?S3ZPd+AqzM-R~?-?ecbv6l+lmRhINphMMaAYcu1 zqUB}tY5&}6{WGnsFv!XGhc|w>fn<3iz{!Z|oUw}BmNQxmsQWW`7QcP>pYR?xo{6x zA;3w>D7b|HXiBDqrF}b?gibjr<^O=yHP1v!zfiN)MtAcAU zNQWbHN5g`P3?|pbO+#mtX{n5fF2RzoJJ05lWD_lh5gNlly>=~`4Wt4htPdV@4NH!Z4Q7!N+)TRJtq%|m7*rhU(P7oVymH9~t@A!`Wy@d@zLTS7Khv^=)OTv^8K!Ykooo&XDrdMA(Q!w% zzkNkYlaF5{ea9J?0~9m+&wu=ri;eHT`zN=A9KcdeRw#zxaslQy(uTn#oOsz2ceu=* zTvBT7YlJVIAcPqX)Oc|6YN0|jmiE#vqubaji&PY~GF?{nj#1$wu$({s$T8WcwtPfs 
zZhp~C9QgM=ycE*R)B37+P6e?SDhX<3*u2aVUGuOJu$Xmjgg09)=q^hm<#6bzGoVLc>9+3UAR5+iX&IlQ?mNVD69;+`*a*0`?udk;I$-J_>JEWyylCaRGXqb>&?fg0}Ijy?#NT+&lngbl}K22bbc zdyNsH2k@1!I15DI4%y~11lq?X^kmT!O@qe44Vd6~6?e7*MmrUt)V>REP_K?C0J9!5 zJ7F?>wl+jzU0Q? z_a59$^wM-~9dW|M>DtpG^MkKmW!X zOD68Jo-6hqHh(?#gYh^PBy-nv$j+TN+da4EEI%0bzk*2DDck(djXH9{%tw(}#T=H+ zHy?>^ns53ohc094LM`}8BByFxc$UYnKYqIQS1@=Sreu^4fRHkTsZNjMbe>qUXFjvm zId$=W9{ASb`=@aT>ym4xlTjvjmN)M(t#_3;sIkt^n*8P!_?P#cpFX+Bzp%15oqpBq zRsNCW0(9jEXn%)YyX3E(tP{<+r6GsnA{XyHTpHU57U-xU;1Ys zWXF4HxiR*NvyGMTFR!tIcrY)?)Tw*_;{D6_%q+|}GW#Wue5Pq^nS zW_xeiwYp<~WxU**+^eptHWs3*llwp><&-!?UdL=}pkw5_BdN_W9V}n@-z~p-_u?IY z#FAuv-}Go848ECVm(LRgaQHz|+xE)}$yhGtm;!QR32Z(qI^5#3x$TH`nbfw;*%^yl zGOJ1`r!w~Z^9}^%BIAoU?^)F(>t#kR#SER+6wE7?H2u{V!bc3dg5xs6Q7K$t^~hvZ zG&NWjcZLh^?(4uMy66{tD!nCP9A~FJjS(sHKB<25q|)fVw{$}@m~2YPEGLSf0Uis| zoCij=UOuW%ND(sUdZY5N5!g=dhTMU)vL(*EM41TotnfPc9Jc6<6_+wdU_J71%lWfg z7x5X!hizs}`_U)P;$T91&f^}Cg+x8gZSo6<9vU76T#>eagF8VJY=uq+ZUQx#y8NUO z9)?N;2?tOu*Uvuw;7th5M_9sZr4ySF)LvF>1_E6*f<|GX=PklSn7 zRxlpg8*NMz#R_amNo|X9yVPoEVF<~TM5@Yyg6cXqq0rTcS8HZ>$=-y}I8H<`mQx|V zoer~&m3F6m58Ej#`qLUcgvFyR$+8GqSH-59EU-_(R%Lf_BGuN4wcnzLWCPts%EfI~ATz+W_Hq*=F18G~4c&U2?dPv#vmf0p(Q|9!(h|M8E1eD@EISFHwcIYTgls~>uy z?Kr!m^s`!F=282d3|EN+n!;E^M#)QQ#Ef{Q&VEgBn~qZJn8!AD16DWmwN%A56}Ln= z8M(fU=GDl{_nekb*G)k0qt&5;_7nB6uv?_NLZoRs?yYOpE7eIshbRh}Vw!BI2Ti66 znW^I;bT1t_3vnjmX7K<0|Nfu1yfN%i!M~|>>=XrRhz$|&A~%nd)|=frzif=>y6nUCIe{U^d~Fky$+>`q&>cNXP& zu832G_fEEQx68WuY7@ygCU?WGSZJpjsrF{2`>Ng*<<-mn@50O_hh^lw&d_jn{Nv&Q zY2Ojslxk;qJazE+|6{I86{4P+SVH)8)Ut~TB zXU*M9crT25n%_}ym+dc-Yz4~r7ma_%#QufQXFBxz2SRxFfa8mwER6bgem%lGiAGvH z)?fv5O~p4#$?5<8$N%MAjJ(I^Cy`|?bb$Sja zS>8qC2l^bfSkEr$+~FhfYiBzzuCYG8_NjqYdgb3-_6Zk{FTFh5R!^dnb1-S{BR2_> zZ+G=OE1-3JjA`FVWDaCJn9f^hE`-Pa7U_8q-b*w>uUYvEmM{F02XjZJuKTpU=X9qy zzt#M1R{uhPGrRll8#!j&6@27xwY>b``)B!&(=)xf&c1Hu=dS)f*yEFU3os=xU;l@T z{80n3&2AV7bUOqXY0L}@x-WDPRS|FVf;WA-(NqP#F2R5K2={!C8 z@#X6uU%&ssk8yl_-D9&8-6E7NTmQdOFjD)3&xeB&r$a&w^F)`G4LXb4GLHQ2Cba9K 
z&$!n$fO=btI{MN{m~Aq;%<0u@`%cD^#Fz69e?}dvW`uGPUc|)Jke@S!;t6j#hGz^I zt8Cs}uol~Q}V!(U?;zhGNZxqmN?X~FyhOH?kP~mx&;Aq?m*RR7e@W8UzSgxa0ehLL~>~BN&>GK^+huVo&7g%eL zq=~E1V2=M8?#QvUEHwN127oa;+#u}5N;*Z0PYdD>t~)vjOx=w+@+b$qBm*-Bv69~h z|0oTwm?tSG&PxnLZoSwD(oXZHWBlcNb~fg~79z$*Ssh$v7;bu%B)pTxg?X%hlS;mg zlA*NIQms`6vRZPKqpGZ-xz)v;Pj&DI)0!+-8@CJ#o2|9&8a7x1cAboo@ved#o(lce zS88QajZw70Jk5{(*Z}M_4Y#CUBPA&ZueJ`g9Q`n)W|HIYcKUZ{zIgrSZ-4t6eLtnd z$Hqe}?$l1UtXqcqp37rRTeLyZKeE0^>>(>pczX>Wgc%eM@9kMz5Le8-Zstp@z5SFf+m=R}C}vGMSWTgat2NH6WH$*?3430Jb3OT>Nq-KxHL-VWds*dY&YJCB0JXdI za<$WLg?9iS$Z#1zuo3_OKmbWZK~yHOY_flx#5^iaDvu_klmAWs=cfa04rLyhy}lUh zPPaHYn%w|C-a#@YA>K&iQj$FQ6boyc{g=dVmc--D@nFs=!#C1$0wp_+R5UL$cKi9Y zvCSTMC;M;HhVui@_gBz6Qz83nw@&vS z&{Nw7j`W$_M{#DlHz%JGTno7um@}I!-D8Gz+{1^S{L1K4*^bN|GiE-Vy}b@Qy&ig| zC)dyHui|umyOOW8&o^l#xc++*40~)lqSKl%1A!CpdlL1 z96VAil6GhwLrTC==qg2*J8rnFKCV*YDbk(^oSPx${EF0?43^@^1-255xU=Wkj%o3y z;CLs^t1!*=)T)hMlP!N4E*$`6GM8>~p?I+=jbs%2;4DvGlR|??0CMp_vT1B<38ytW zx${J7wmiGZ1fnmoQW}>gt+)9aA!V!B8ph47Z7h&e6f6xlvx6hj;o&2{MF}Th_DH8< z<G+y=9FnW=u)44;i?4_!{bKH@P z4d6Nid1ak2$%Yk+!aAoQSm#%mHXsFX*BD9grWRtVQD;5X(GMkG0sf6Xz|CAsQjDLn$o7t9T&XY0~pC8J5jz`ay#^<}HZ^3=+dm;hFuSq--kpWM?g0Wp;-DjByaR}GMN&0NE}q!c4u6%Nb|ZG8dE zWVU#?IO!$d%G3sVq+%8*+7eKh)lxEYTE;TZT#QX-f1h4OU5!4?C)6uq!K+88d_@_2 zk9*knEYL~U9Mi}%4T(T@9dJcOHTwlKF^@KZ7NsH#zBTe)4bKHmg`Ng7ySy++ngv-Z zKe~&Tgq*=CiZ(ugNXZAvDMq?#8}Krb`8AZ!VyJ_f^@h1tLr${r6el9d8anl8q%{e%MvBsRvY3j zH`}hay)a~pysM>`Esk;Z;_FUwigc z>$_;O+57T9^~^f30vOY<**e~x(4JV4#++XAvmo)SXAx6@feR))vi^%2qqwb@$+xQx zlikEgYlkE&;00D(L9(4ph2u<&eJY>1z%{Hi-;N6$hl6sud)Z?gwI7%>%Nnk{Q3}$H zXU2@ILW=E+^U>F3J7$`$u;3QT*{N{(Fk&|0ph{T(>sAxyaDgE7+&9z3l1|!!b^sxU zs_>vr3MTm2H?{agGDQlWK7rsZb^B1syU56*(iSJ3>DWSQ)~Y6;FJeuzL;Txm`f|8! 
zr=*y=gZT69F3itcKXh#qf9NcKk>5FMouDih{8$FAgq-C+3DYTeF`OWuH2s}~cNXQj zZe_G-ua((3)vG@TZ3K7S5AEaYnv7}RO*Azz*?lN~e7W6aybo&}tGTp7&5_x+TfFPr zi$JeC!@EF_XY8KaV>|hg)or3rB+a!}{Zx0_b*Abb?Nq<;duqE~`|!=Y&(5&T9(`wJ z$4ob6w!Px9bGzsteX=WoYjT(5iN7+->(q58_nB_50H4rwlW#t+MpCw1!FG!2Kk>Ov zdLGAfJip0z?d*@T0_VJuNOJM&qZ671{8@ROIG^S4AdIq#7n7fzN2k6y*6i#D;O3}~ zb`_*+#^F8u6LYPoG4EX@F4@r*Lon3)ldB8&K?K-zo3+U>uieCMImuvn(v=|^mE1c! zPPj^~OopfjKL(r1B8*fZT4?!x1AW~q8-|A3hSADE@JkbeT1iTCv1mhj4Ha=*jbgSA zrOet5rWUX1=W&2G8>?up@G=LR&WcdeF%RInlEcqZc)PE?edA$js+of2EO6f8Xrd`a z5?WsIX>M1N%X_^}6oy)kwX?F?%2XP1;)m+_V5>ICY#Em(Dj%r&qRpD77#UBI%H()y z1&)0RwL-;ER}&1NWpS8I>NA?@YiP6F!Q2n0u2{8@1E66TT_8h(jXI>w*e>S5R-&r| zdl8nL6^c5T@i#)AAhG^W6~Y-xN5uF;^VXK#_-UNe0e-S80MQOQV>8MxC$nDUk&oYfOqz4(2vQ4QkCM zHe&mAPIfT}uqTtmG@OM?K5WMPidcXfq;SQ;TgCVxkd#hVBcwOhVq2{yQB}VQ)_DP1 zjNPo_V9T9I#!!+(XR^vf`QciFc^hcNIEl!gt6viZlW40pf94jve z!IR(}17oH5deK=GFcF4{yM>Kamx&UW-JgyYH*JG=4Xex3-bR^k0bUc4lsW@!Q!ybi)J314wKpP$uy;4udqtLNU z4%>R{5tXyssEwtELj{KJ>u3(83ax0yZFIL4FDi>|Ed`_J^^e$#ouVQnesD+IHIP`b zo|x^&$%sgiG?tE9kx2z{DSo6Fz>XP;QTDG4B1qQqnVn78z*ieosp=dRlf;2H@1-e$ zFZigl<-N2#+mVPIOGssgCGI-cHL|=`B2Ue3TFBf;W^kIusWH&XHCS3Ya1>xE-Hj06 zn<$H{p0qgG;TlPxTap9GOMrN>dec_VS86m|RKmVhMHO)jrq6#5VU$Fxak~pn8BXKp z=xUW0^}@dN_Tp&$YEYbGn+!O%C#7rh7YLRJ{W4rCa>nW+ON`}?dey{b5?_!_-$#Us zQi{f(QDwEROB8X+VwP1tVUzj5Jfz}K_WPihGA5i_y_RYwMDfoyGx&`5nP#7X^ngsB zWKyXwtDgn%xohA7P03LC7nw7m_vZ9pgnD>2xy$HYrWic6WXW+3s~q?^w%JHC2lu=6 zcnV{Z>$I+{pF&dia$uG2d2}i>%lDV=n|%>5x)gp;^WbT^%QiSgaX0hM)7GsoM8_r> zlryJc?{X1=p}WVO-C~&?R}?(>$gbI9STv)UeVc`miUz)@rQe|E7G>zw%EzqMqXd$k zUh7u2eU&2PNXOJ#honCI(P^Q2w|K-E;bL8_03pgSG{Y*s48iv;l17Q6+4Vb@8+~`M zSDnbMjx3J|g^hh;9z698=PK+6C~{M)7c%;Li*PwQr!q|qP02E(KIuoY7Q>(sL0BCV zE}g^*(a=fQwF#iSnq9;t78+$78p~s0WEmuyn(OnfLol+by{W~~ zKtW+tCL*C891Al!idzOT2@ozgCQY~V6ctr18%%SJ*wjDdBW$R4&Ba)QmMZ1g=yS1h zt12)`FbB2K^i4Iw$au-1VRv9;zB4RZ_}spgm3pTvdJ8!Dc4=J0_zr91U9=l`ETw>} zAQzg2&QKPkl2U4O2dW|5Vu-Gn$|WTSLvI_<9QJ2y#t|FDa!7fN6&g2R$OzI^kcGr+ 
z?G=OxHVry}K{kX7VE&sQ`bt`vI8M+=lWWukPj+aG?)jx(lhPd_&D263$7Gh_njK}$ zx7H>-W4)Hy#`*u*dz)X&vMswSB3{YH6cQN8GOg;;!A)8P9YhC2g9c6h2>&Oj+@@*K zq(ujb9*8d^#D_$cr4qIZ<0>c@uCnXBmmiT4jNfm}x%R#{GT+O5FEd|!oOMsEz1Ey# zjydMs`<#3CIdLO~b-2A4;@c-|b1l=@XM4-i0{fcNHT=E=-D6p&jC3QKyb>^Yp&=6u zCC%LH95{En*{5oryx==F5w>_U#sLP)=~(PtcW#TMG3EXmu~8QX40Gb1bQm&64(ge0 z%%dSz95bK(U(}YfJcU~1nhr9+Q|}P!OQN~(63sxv(NE@fBdaYTfY_{v7#7BkJX}wH zA*pe;KJX;)`O-^tf^G8fgE<{z<4Hl1xte+eBqb|Bzzoce1$u|x7rJ@Tqoi#BjMeVk zrvtUwbL{Qf?XjH@IBa&FIl9o1+=)o6z`0@ufp)4{4G(Ew!1mKcw<2(vZ zK>kWRzkA5f&*h8vltnDpuq6;iy6W#N82S`bwUgHxFXZq*RCGf~RlPK0{%vzz_i1{O z+!fOhco5n)N*v(!WaQPoMqUa6fyZFio2eF08pH?-8ekUv`KRm^J;3RVyg*a;L>4wL zKGJM>jw3IaNLhKtU#_#w!uDXG16QjPi^f!Re*RA;t0a9}6R%VtVe%@f?rjCT!W0UO z(`lFj)3N!79O4?uq4nAbsx{9gq#a}a^rbQjkQHFAW?O~vQ?}PkmdG9*K2xfr5TFig zIw50RRB;w5Dc%AO>_e^dSJ&NVWS`W(I?MYP*}AH0s_Uumxz74y zIq9j60`C|0JE!B`jdz?Mj=@tl_kw)K$vK?4KLQ-fhf~tmE9Lvz{Sy7=YxTG0I=-F$ ze$%BFXMD&P`!~Hpaoz7|vGnqrJB9b788n4buX8p{JhrYnM`P7x33=<8=E& zYP_xe8O&$QGuyAb{n|vo8tIW2&R4q=mBg`wd#Zu`z zSHh|1kl~?mHIP#~Oqy7fvj~1P@lBaQX6J2e-!3;Jz^S)1P+mj) z+>KM9uXx~L^u`sek?ODHia4#1&VJBNG*VV8g43ZRE%1E^ExFaELwU!fe?6AnYBYIJ z+aNxX*dh0BP>k!QmV&cKD$1dpx~7Dv7%z4e8Zn$Fli_raa;j|<3c)zpm!G~}vMK2$ZLs0kJzqeo7cL$DK@9+O+Q;IcL3=Z@2-@Uo#UJr zTs=((!COZfQ*Hvb;h~6LR8(zT>5r$!)(022Appl^fGM3O6rL zC8vAmoA<&@O~hvy4NuKa=c(cXvFZggS!O#0tUPHe3Fx2hcibE$9o!o3p>@! 
zM~`59eVe)G&OB9O#8`txtF1VqPU}dam01il1((Hm!Bt8%Vx?Av-%Rq<-T(pov0&*B5)id|uh#_^k`RkZ>`m2>dk?+rNj= zPbYdR>P8(<1Z)-7dv!9-U`Rd1p*Ewk?KuvmQ3J%K2r*W^64Dv35Mu(A8S9KPQ>GaM z=X1Njjlqy<7!cM`7FOfSBiS$#GOiUoJS`~BkipD9 zAj#T1O*TZjM|RvDW*lu@Pa$v#F)i(S`uXnsR$NbRfWiAX4Yz?!LPylZr>7MchZRjx zY>Tx^Sh;R;bXk`HF<9I@i;42mn&`doHiB$(rk=U>bhK({QS3Q;id^RJctqvLIqazY zr~9|jlAjrG{tt0Emq(>>3=3Q-d1!L&j26xHYbt#=lMD3E(cz1 z7~DU70wWclCVUL#e)CD=^`Gw}*GZpx_v!q>l)p7{t)6RmjVY7iH?LOoru&f=>R{N`OP~){m#{Sj6A)$ z6`3F2y!QsTZz$;K_x@qYWz`o22z!lnVN?u3mo0|=gn$N!`hy{}aVnygo4|NR3XbxF zAlf;kU{pvd|1_h?Kz8I<@a!QF|K#U3S+Qh6)tk%Iv6ys}RCwAIVn#CsQ=MCsF8&@X zu?{~x1Fxpc4OoU?5)+zygqFd&v(~yY7luyN#!{oSI+X!t#QRfV1h@rB1M#zfO-83nDlShAY=NkT zRahcOO(lT{fMXQ9Rexzkz>!hHkhoh=3By=!7ezBy>BSYxXnC3$ZCn^TXF)J<(xrWJ znK>)W;Jb9rx?5+W!Nka%KN8kxJ)Bul;1&!;Xv^F=rq-W1W17(CX|>7UWF<74%S~D} zA?H)He3Hi=I%e0ggqaj`y32<(V+b4O)*$Rk0#=bNwIRmxLNVJrk48fLnvYnjrGiXQ zNiLZxIE&`J>;}SQdhC^&Sz)6}=B=7dct9p{8(Tr>$p}R6lz%Knm=jdTks2yU9Iz`Y z$9yqhyhgSKANy?YCvM2(KoJaqg^am`W!)KL)_EirMnrVfMeNF~s>4>!!WxK0J#5XL z3Cjx8cH7|2AAXyS%s~65t?C=7Hth9Fm^IzYkUB4ju&`l~IipI7oHYp;fE`^!^K=lN zmqFiEj|ae#R|!V`S4*DkkU+`2>ufVFS`%@Fn|u;Bd*&F)*wZ9ps7hn53h4DV@kch- zTpq?6-dyKV6%X1uvu?o z8&5}!0^>6`2jD2dWw=MCRYfO@ zgAb1rC>Z-uT~GyFDYTY22`$u+uHxM)0JeKIo|#G$xKhK#Ry0gBm^sFltujm>QIk0RyV-&f7Y-`Xut_8JDsL)!cKG<9wePEWEm&9!GWlB-R zXM_ldHue}}FbswCl|5?@1?%EjK#VgmUTtK`1g9Bxp*-PVJC*5~tHz;O-`677`8dAU z9r(D<-MiI2yL8{{bMyU)+dlk~r~LPq@WJ%xzz^Mlum2$J-K_gf@mzB_PTtN!TrebYbqKHv__kMeVC@M-=a^WmJnoS$!Viq6yA`#Sz=DtC9>wspaM zUGD*1u+DG9cx$IiEcbzQhWooF&FhFLu9;ZaVk!DS^bEtfa|{UF{0YST+-!DqA**le zjg>sOcm)&E%`IlK!|aXvfF}XKi=km!LTshsH*yW-tSW62nx4l934a{3@cd%% zt+cu9lZ&kRYGA^8{Y!$Q78I!}hSR6|5tn!Js&m1_VMJrCT4~On$~A* zQKVLesrTw>4&w|jNtQxQNKu@`O@<=Fh;ki^*MByGwJ)fQVChds*Y_lc5SPH=JPyc_ z0g=WjhgMvT8Lw#oPX+hB8&2D|pUVktsE$2*T}jylbN!P}8HXIn(%vrYF~I?tbQIP_ z8O9Eoe66TN%$k?jy&G4zZK~{NSKuK^o>Mzp1Uo#cgjMX-Mz@g0vl4rYxh=38%SS0b ze*(N=L5q;h1DL82NCvNi6uiNI)K zyK5LcQ+1K_Ru{={{1?yK!8=YZwTz@V%jROhMkH@T5RlvghUAJNd-*fxuO@J^qdgi& 
zB!~Mt+j~f3haafde<;IH8TWDmHE&A9`e@b#pQMy9%$%3nqmo7b(99+9V(8C5 z^-WEJ<=4oGRCz&VV;I&H%a2Bh|w9+;(Q6QQmR9CiUKS@eY zbyPiO$rUgLnQfn>;--QoT3mvBP;|Z)gL}w56hlVOV`hgcIS*%#l5DeY!F7y2QV*Jt zBD4?{!z9nos~z;Q_&ECojOai*Q=4DUEKLbc@}cDH zUzU;&00(2$ChSC#ku2sNm{kU|ahTN`RsuzbbINOtfNW&8NyHpviAjwAAiJNfOt>B9 zn6nv!+Q#*54`|$ivObS*8~?z)XC3GXea}SwfO4KaBe;S;p)s$1pHRhDsj2FO7Khw_Q+e@OdXivLB7--YML!G6o9>xWg;kJGvjEX)1d(9`F3u7R9i9_L5A z)ON>zV5L6fes3M>S7VgC>7p$XoO~Pw+P5yxOx6*+uQgt7~t6 z?IU-D`H?_dmfR4=KtONB)AqSPB(pgErz_WPnBZf$Wdqgjb-+$?limwDFH>2}y{Ul- z&%>KnR2ixqU*DR-ESru>{32geH%a~>zy!b$3&y@R<1ISjO}e_9pgX}*sYKRJpvcOM zM#^S#XJ%*zk(C_=t*H@)#&i82X|elOQdvTnK|#zjREwe6sv=%eGkPq%mIFN;;GiYV zHr#dX81EehNFForHbl0xmVSlN8o-P~SbG7CN#!Iqz_Zdh>sB$>wIQ)Vx`lB%jq z#AH}ej?JlwHDoQ9l_{s6!v;`V)W7~ZEOXf1(H;vLnjm%C4h?>9VH)o4$1*6;NAh8 z`}ty->OC0p9@@07R2qxHi!!=YZS2Iwb&M5rsImnnPmO>rhu;#*h0{{DICR2wiJ2@{ zr_HeP5)L_MLQ{F_tz`sdb5RhIE~-B>kT}^dk5P3EKw+%&b-f6y}wmRj)x9=scH>ovbQQS6>GJ6G_XMcrpe?BrIDWE5}sPq5|ci z0HRpuk8BbX65IT*iYptV7-EjGY3Y@zGY1z9=yrW90AeS4x-1QdC8ly|lU zf+34K8Zw1z8Lt|!+XxOJ=KnbyS~(_DTxH9_-1Y$H8D!2TJfWcpLNZg}UXyY1ZV7&+{fKqMIz#7FztWxS$hZ2e6{mF$V&@~T4ABu%Sh zV^xr|3T$2Y{P$D^Y!s2fgyZ}*wd2pCqT*&n8Z;xWTO=#MGzF2@>&gH_CwSKPLq$fz zH;yMQ{6^Hn@#wCI_oI`BPqATQ3p=#}}N=@-0!-(er@`rH0S+6P^%y_ovuf}pSmwUW?aZ|_OUXSgRoo#I0 zx(2DQJ8m1fadc+YXjqqUT#2g4U|IX%-jPLbHA|d*1_1vM&TtQ>&UyvLS4uFXXqm&{ z&8h^~#4T>#pqUDCbmc8~S3>0>4-!#mX^X{n6Dg)R<_!DDn-j)m-#04a31?I}R89`L zrt0eN3{iE4Z{O+AR&)Dtu?+?3k-Hd#uaV7K07n4=|=8Z2kxHWKCj zW@MR={{I$Pw-jO!KywZiNcaqqxS}$5#6gC{@TSJJM_~N`FMigH3C4FtVTX$^CsG%i zxhd{}U?j*4auSu?vr=?VaS#r2kO%w4?151`Zv{KYaU@^<#8$=_PA(#rF0sR;J zHe>OTGyrHQD@T?MG)<-}8O^Dzt9wJWu=XKrM#nlrPsTD}*MqiAJ+6Ik@i2Fhxojw8 zGut5&bO6xqg%=|mN7nbPK|r(U+(p6l-sgjSwrDNHaw#%mx$1h_gun)~)MraLg&oJ! 
zGGX-k*W7eA?~1vC6;K^uvQkEl6-Jhm$b>CQAaQ&-kms&}osSV2eKyG-@9^06GGuP{ zx|roYDGn@3i%CF?{vO(it{%@m@c{xqu|~wY4EXSZ^ArzJX_PboF(7?}n=lnGHhVx- zbuAVj*wo^R`0O1v5q zs;ViodDeeFDkBEdh2oG1>K<}xSa&g@bfH~zP)G;HaZ4*Mk{V}=@VX#m`KlXNh9uJU zWvbT9Qq!!4tvwd1zK`8{`d6&Ske4U{Ssf~`E5ZxsmEq(X; z4?DwGhDh@7J>-axHp7$!fwMWW6;M0Kcpl@?fky`(9e8x$(SZ-G17H6^8sFtI`pxu1 zE9s&4J9J>q^W#i^hn_v4j}AOK@Sb!a->S~NS-)-7a&F9eM_o)bbN}f*TVUXQ^lO)0mUQYhB`~UMO*~KTww_nthi?7##i(aK?YThdKG4! z8vED??pgt}FUKg!A#5>^-lzOQ^OsMHv8aR-Qtf1CFcY5#@0WF>2TNQ?(vO!_h{eK%CihZ9WRWtH*~HX0g@%zsi}T>2)|%@s*rg{Rg=1D z6Cud=5quA0kwbN7wB{)>xop6~r@EN$5qQnD?l@^480`946J`~iE%Fv4Ud(_A<}qIq zwpFl@C7!a+b=A$-a)_H~2kGd*y;Jpw>T((;a^l7zS(<~D+zj6Cv_X{^>LC}PL`xO! zS(%G4QtAy!&lUwOo7@D3EurPaE7)U}rNUO1Gkq|;#@HzMb{(-s^0eZTvE*u{tQpwz z74y_eY>eP4#ci|J9v8uB3M2pYAisImvzM@Z;guCFd>SC(78bArd*XrW&tGYey_PK2 z4tBERcT_Kg@vN06(_xVDxP;V8whSo|yM$c^Gsa1YJ7COO4has6W#Oo6j3Ex5G}Buk zRk%5eJQ^yga|xLX?n$;ywMiPmm6Lww;=wWvFXrA&g`@YNQ|WIq=X-#-QdbAQ8(p)1)UM z;OeN!U;^?dYqx>mEKL>;ECA-5tU7^lq00xVmZpU?{>*rBOy5#XOQ0-4S4HwVO&b5U zO7V)lB&KNqGj9nE=3*|b9+nH>df+GLAaecVpz|8D0aki-S++#eZ^|eGQK03KY7%0A zn}k_F`ppB9$9QyLJ1`edFQNBiUTJfDrtq2fv3+#lqw2up<=NA@AEc!n+mU|jnr~oh z{HV_TqnKL<9zUtvn(U_^ zVp#VkvqshehFEM=W{XzjkQB>)3s)+*S`J%i+6{}Kgl0aA&A6df9Z(sJ@sa7?6qr0MyiwY>ksP+bhx3OGTHE z@q3Wcq~wM)7UX499Q$m@QWBV4=SX(NHMw$-H-7T27?0RTC;QS4B8PI7-#6%o5M3s< zwvbqLv`BplPB6`&F(cg%;Amuo&GGnk^v~d&_rQnj(?;hMMh%w3xZHN^qIAvS~Wzx z{~8(M8vb-npXW3EUWCfc1-bunAC;?g2~fae`Ec&_?>a6-`G{e~(rD^7ebzIqISOLH zw+!`ak>C(cYH)+GX{H=u9?&W@67y8Vi6A5<$26@^F^%gPF^S7+&?>|-u_g>zM=tf!k)oZ;+|SJ5sFs#^ zbZsG{2qu@8#3mbUc!6Ye(lEjO3o*VF*h3Kc6ZL%<*yud{F*?$d9+~NngVTg$)95F_ z^t-d+RLUq_!t33pC7cJ1QAG%X$mR$eJeh;oiv$_s)QL;t8c7G1mxU_KLC8o7*Vu3R zpI8uKW&>BVA|R)#PD6_pGG?AvsKXq4Lk^2ITeW|;Y%;+ogv*0^5I{Gk&0IdLNu(|C z2HffKVfM%hNE!lNf7a8JZCpzLtdH8^_=YhdL;nq+Z!n)M@8{whR6oogY6toh`cTzB zbU!-qT|3b42j4a7qr>k9X&+r-kAfZ@NC*1!)1wuS4m>*Wp?BaHJeQ9vKge>|cmM7l z-`>A@cgKqYKkst0*_%cIxvkTZE_qi>x42#8k9;n&9QygERLB=Z?()cD0Feyu|6OEK 
zk$1dc_ok2=R3w9vA)8xvQ%_zJ0m@`P3NVJ*SPo;GTVE`3g+V5i`!-^5%@R09Z$G9A!D1E99*EvM^j_X$Y$R}Al(=n2XibnmPS9zQWahl>YkMOBu|mS zY!NJmoNU=l{;Ifw^$`xt=oWYw@zjlEQ-3UoU_Z3Q4)}4hh2^nEL9a7PN;*@-6;GU^1S|VGOX$!P_ zgp|f3?^QKLXWeH?nnXholZxvyJ=!q)XcD$}HmW%O;UcDml&dwK>{+ljs)C24=+2rl z$=)1t9+8sPn+TLEnFaZ=uy!;H^I~8&By;V>#NI7#!|yXXmXpP@QI4bDRu`UZ$D*cM z64hK?1ki2ivhITjOn4yyuoJT5&PTmcX2uK92aE$~Wgm{lU&x1Gv*v%@Rz7v^2j#@&o+i~PT_A|P4Zbm$cI247n z(1|NWXL6NvfLGy5-a-^!(=_K&lGRNW>#CUoCV>Y}99&aKg=Ln5u)F1KSv>j1J!)+f z4I}4bLSFF6QcLDIm<{WKH8!-alcm}ZWEQ8)E;!}yT>sQkwo??*=CJUXv#CO5i$?z7 z@JbsVOdL|aZLdTtB{`XNgW`e3ufh!vL??YhN48mJafX2GAgl}TL3q8Fz65cekdd8y zoFF_CsgCBsxpXmieT?HW9Y(MyK>{-QcHbQ)*KT*CX)yv?g@o04WGr@Kn>vyZ`)p`n z!r5cy)(G`>Ad$0-yN@!RZZudU=h5x(AdJ%iMSr?hHHB9a4Im)xOUSftT(pS?Zbg8} z3KL+=&Hy0l1lJjIW@lWP5+Zmp;ZXsc`Dlx)w*n3kmPCO{X3iHWD5{YZm{As-pimeK z4;=0+FfP1TbTCsCl?jEQ>x(s>C!-i(O+zj+F_d?9wLP0J@H59_`{=-Lr~~J+{|)7T zqU!O!#T+9h$Z2!XE#gxTNnHbUq@AB*9ie z3|*R$1%f(u)x1qQ>(C0kBF$}SBpExP8$V*Opqbf$e-ajL$XfP@Fy4t^R9DtYl5w(( z0SqGeYSRfhveKFYAR{>x(8Z3xi0jICiW`bbTsS12SThcdDP*4JLkfFAWZj_z^J0OV z#-gwM9( zIF=y?h~aa2GIZArgL(C;g_U!VFriwR(uD0*zPPrBv(B(IZx*9!^!JIdV5)=d3U`gj zTcSSG1rbh&5{^aDOcsqf3;|uV^sh0+dJ1u?C%rf1t~t1bT-{5w1jj-OIWxzYYvY7p zdCA78rMeMD0JcHOJXIZ;X;S|dzYKV}C}5HuBy@%0OSKNySr!!hgZdhYV672y5z zhT7$cjq-~}zcesrJ!SxPH52w(ojkDwW3Y{-@_@d2_3BOjZNRuPzIyXjU`EOSCqn_K ziK^fF<5#af4+K<(rw9mofnTZ3#&$8*df&rML=aGwGc&XaQpgrb%FWSCL|q_QN(BKj zW~kfPQiccSv1FB(5UBPN3Tc8{#^wYcg`EN0?eGnc2R07P!%!ZSGdjCuD&;ey4k6=6 zuJ&j0!a}>?ETlZf9O1J7P|&8EePgIvZ4{M|mY7YRYyRCMW4Ae>;ES#cZK#d6{mB)3 z+ncOSL3>w%Sr9UdN;af{%+Z4kPf!6{U&LgYnedXNIn!d5JqgRkb0!I_gi0;Xa>GXT zeS$1slF!QKf#~O z5;CbfBvUBV;YW)p*(ogNc>7vyBNQ>ptb8)aRwHs~R9@4XsLYL-P#utjq4-)lpB7Z& z7_Y2S*@U519Up}KB&8Ts+RRNr5?B#z+D*ZJ$M~{sV_Ul-$*mifWC)K%&rf6|iIGSa z1#{eb1j|6U7cgjLA#EB<#7Mj>;J)v%y1sAT2j36T0WXywg80Gt=)il@fyXP-dvYSa zcNyQ|gS7iM!tY(jgYeOTAEE<~SFRtTtM3VCPVOguetYNC`8_!)zY!Vt?@e4f%{RhlM6fJMayj3S6T`cMx$$A*Nm5JDW_O$er55z53n#iMMLUv*6CmN{CNjo4LUL?po~&2*qiTi%%BM>Eg^n)-=l93G%sDHtR+ 
zX_hA%i_%vh)uJND4uX`OZEKOOc(I(vNt^{)NyURSq(*bbl~->N5NgK-a83A zG5fVx#?(-%8nSB#vZXDz&$<9Qj>7C}3^GFz4Ab30V-apxMEWaNTXx4+XGF) zH#$Ju<2t7Nm7{Y~)3=u|UU%2()NY_Tn~It2h0@eyNL~+<^4Nq2dB!cN{Q0XzWXw<;)fe zu>7s7X@hmuYKnM;kh=>4Afys{p%9ELnbAb)xSQyt2BQ+VTp7bp_}TdtDSHWtfi2$V zlin2kTiK?bD@1Hrne&I@XtH{Q#>iLGSdRKJ1+;i%v?Il4yItPMrI5`m#5C!O?|#$K z0c(i4b>qq3Zt3L&I1HFr&YFO;*B0{q&LpnxV1f)kI{Sqo^fY1ePJux2y6nh#lxl_ctm3 z1TXksTJm@JAT4eE#r^%JzCY}b4*aMcI6r3fr@Zg_Q`(QJ^^xp*=)mK3>3eA62TO78 z#PZ#aZ+JfITMhSad(R1M*Lr)ic-;;5PIDpL8^%Hiw}7zD-#^7%dXjPNxouj3m7qbE zNe9?B3JdklyVUTTIP;xu^StnvMNV3Xj)Ds9mKqVo-TP8mfH%8ftfslxp#&2l{7JZ3 zM)vID;eCu-2cA?MhfGJ5`oP0=%She}6p`JSUAPe%H*>pzK>oc(yH#R3Mh0>SYJ|l< zH2^e)#7w;^va&9kW_9d>JdBVh*tZE>$J4HEE)tm$t{MYeqv3G&4U3_$A*42yMo&M1 z5vCzo_zUdT==lvf9u%_gWw`_m?@Wtjwvs_=VZFb6Y2;N|gG$##S@(;#?73!<(qCD~ zMkI}DYsaF%Oa$V|5>tl_iuiEYDPb)qfO!3N5F%{C1`-BA*w(~0B)4IQz!|X!bmtXzG>(_t&`t@IY`S#1VZ+@2FL_z;FfkZ@+To$zX49%X!a%TOfFaG#M z<^Qycg#@YV6@TIV;(S)1nB%=T;qq!6)${>NFI|P56yYcXd2XliLi6Hf`MSw!U)6yn;sSFx54J(gS z#5K*XhTbMN3i5&Z`Z9R0EyeKY=`Pd(9xjG+2%+tMdexwceNi!pj$b?bVCldhzmKdG+#N zdimn7BXRMLiJmD3sV{RxK)i2?jRU|TBeU1wgMv8mmLt9?#J{!+&ombNXWsbuWOLuKJ6Q zZ|H;)d;K1mc=>Gl)<=0I2YleSa13neoqjev_nLbaYzE+>PrJYlp z#oguoC(ho;^-j28EPiq0%bNw<4fZVqZu-1YJh!Lta1Y6Q89?)WkUQJ^8!p*mjvko7 zh(h1I9?b_TKp-hNXjA*?vXMx3|^< zplnl=#Icle#S}Ei9AuyqSyWq7JfBxAAvq&_0A%dYbNZ7)%|O`TauMt;9OHN}>wMy- z>DJ&INbj;ayi;k09o z!)B+zlMK0+p2n@_dSh!~XG+_sE|^dE+v&Db0rP`dE%#jY(oSMRp5bYomd@MR>V!X| zbC-J-+|5Y|!}&Qd;c>-M07d57@oyf);tp3n=a{6iDTycxJYt3c>r|QrCqme;^-6d} z3H|JZ)Le!mpk>CwV@1KdNn3BXL}_n0YJCR9$jRF9dOL@yZ?B_=WGnCRd*zVvj2jR5 zKYQ`&Kl%L4-~OxlEG>}A9NDVzv-Z9Im649l=t*2<=ASa#rOFO(KT--ndle9zEm0{@ zs?y@dvRSfcT}T)hmzNEQ=@pc7Eg3-)`Ui~31E=Snab3F2_2OTC`Sbty&0qVw{Wl%~ z!y9Hl1Ecw68*SN91z)Eq60^ONgH!tb^LQ{F_8}ysORk>Hk$vOYq=)&^vrJ?W)^wPc zXUMmMeX*)k)p5@3c4&RTgvshmmUFCSHa7Qe+*gsQUiZ>*5OKNMIEVf1Gi z{;G$IVm33^tlf#(SR8(;*YNV-L=euIRZLO6KI5Ty5l6gHvEpfkr5XElgG+l0iYNV4 zkR4%5EwN`#G9KH@Lx#)K5HESqQW3~%xS@KM?Q(femEZ7?^)#8#lLK>$FibF|!)WT{ 
ztpsARGa~~bBi{65ADy5F0TMp*-D^DOC|&{Zn2ao`H|yOJZ}vl0C7E>{0u|CL?BnxH z4XkZk7D!E9Lvw1e^KCIbvUUZpLinZ?Bx-Y5OroPm@3D7r=4NQfWB&L$aIT<_s+Zke3tth;r-{dKKhb`d=M zj}AOK@aVvM*n#uY_Is%9A?dqzAZPRIPpIFq&UbNW`<=`?u4Z`>s24WxDsknzd-vWL zPl(_0f%FbE$IVfXiM+c7Gby=G_TEp-GMOo^$y_r#+3)x%k(+H8NZpmU%~~2AH-P-< z{WB6S4N=oPj2ZkBx(X< zG#QBmpbS7ho@7<=jevu6O3RU@bhVIcR$+r|hXK3s*c>eN1>cT60pY}s(>1dO7QW%M za;hsn-t4Jnjt-Ud4U|}FJc~kX63p?p4{OKJ{FDn-Q$D3>Mm&xlr^7_7W8KrweQPP^ zU^0@t0*9dqNy+81KIx`NfM`)qgo2cg8NsCDNWAZ-)FEEz-~d;r=dLayRW;dcPzlTf zs=DZr0_~QQNdZe}!pfXXwY>TA&1Zk`vw!+u{OLdYum9=ifAsV7DH?>tGJVsaw*46C z!uXV=n~mSbexJ#t_D7XQByCZII3zd zyo1DERbg%&G382AAtLc(nbf>+jq+n`AOsE~{B)`zGYDAD zyCcGq6&=do;^T-x0f(xKW`MJ`C^g37q2bZjjT0{|KE6G4V1mocH4%t)0fnTi7&3We zNf?7~85p}Id>jc8G0r_NOF#$@!(n^#H(z}AAKAXmo{vjKz3nIH!KA&hWQ6Usv=)e( zZ!bS_B0-b4a~9PYw&|f1$Lo%&CaT$pU!zwCFfD^0X*VM|8Jw71S7guFl#sxBodJLK z_U)g%dGo(~`TD>6{LMdpJ;JD;90wjQ%>WI#l=|29hbUE}IAE zh}u}GO4rO_EPyz^5N!Cr7eF_ zCx-BhdSfEiulB9_y2fl4BNGu-eihXI2rXM>bOz_hqDc)h@3d$fBWXB~E`?&Wo_T$} zyq<$90D`#w(8z)?EdW@^Q#CUX_htd`VxF&zzV`-ZEDOiW+v3D?pT>lbdP^7> z8-c71;=m)*JA=1NbZ~!J0C%|va}c0T^4=J@@jRpN5Ur*6q*FGH0Qm&}tUu+qAZ~Am z7#XhTHlD4$$V`QdyffwB3hvH1s&BiqjIr+;Y`rB@UPCfZX6WZGnx>Ty+kAzz%W&Y{ zgbLJoI+e0{H$1?8(T9bj&5&Z&Y|Sy@rqWT;ZBra?j7!++FrY1C_$C5NvRPL=lNU=z z7Mqcr4R(%_9I=fR+=(-~dTd2x)Rzo7J3+D?y0gcEO-`4K@*P-7^}<#G8`9Lo9Og;` zR!$;^!UM%BPRD48CplY_reLbvPU0vp6vFI-8)HxO@E8mey3}2%wps(RwH1*4Z1a9L zo40R2`@^68#eex{KmUWDzW8!FX?GB?jTLD3609&RXCOM}0-A@U{HD=h6dJY+u+)Kv zz-(Fh#cCEob^~(3`fpL$i#mh=ID5@kIEH31naZ${C=XUUP6=z^_>4^x8t#kPR(zAI zVd@PuX&w2{h``D;^@TaL#2q|+Q*dU{x@~ORcK+D5la6iM=-5WbHaoU$+qP}v_CDv_ z{ji=^)q0t=MtyU91MfERk_AC;S|PI$V>jx^?gNNHVyWkn;Hk)+ZM9xb@ojt&@x+#Y zNQ1fv!#3$dPx?vpzT}YYfld8suD!WN9SE-($8&r0uD=`jx$Cl`=_Ob2-0Fbp88#k@ z!dRn{MFf7U2=J(B|Duu15b%0P-1)d4nt6?pmlnBPzO8Hw#YI%TTjbJbV-{f+SyMt8 zrTT&wd{`2qyQbj9D$-o< zPu3h_oeI;AO`1yqr~l^_vQG%s{Xr+7URK3{KRo)|zy66~1}^88_ZU<$Wn?ZP@hQ!K zm_>SVDYCA0pUto!yzY7TE_nbO-#CDwncF!@$8)m5*v~K{J1Lm{&fsrQo!b(|-Aj6$ 
z&J?_y2?qay6zEdCV!g#cev~VNj7OdUm>@Vjhvn5|ap5nPz9*N)L`Mk!^&`acGQ`fh zC8WY=ojaCyTF;J|v8kCK$D9&~Wu1L!WyRyqbAsj!j99`04cmp+;coCNR5hvKtI_ri zi=1Y5MX3$kUyy96nF|L*2_;H)M|0Sf*j#Q>9ibi#BXsEgb23nchQ?&}eh%fRtkS!C zjtNsKeyOc^rvZFD(V>^2jOJLL9pCwy(St+b_zo+@yw4qT5VF(jgb$Fj0c|8I>lII+ zNMHD5&*MJ`z=)?KivitfzOv|iR&OErt=D{-ve&kRiHKmXWIQ|K4O3Hp-x6p)x<0_!glNNl zkb`+79N)=1#z;Yg?RnIl^_K>CAoHR1AE*Z{olJ;-lUVXriU#0~1XRtjs|rUM(7ZzZA> z9A28wJKox7*hb1wf@O&tu_O+PPmY6$ML|a!dx3a(76S(YG6EVZG(qsIF|skpmGMITr!U66gGI!W#mvog4X*O2`)Ok^ z{EY}3j8i6p=yJFv4(txfk5-$A>euLA=0Q{6M$4eU4Oqx}9FNJ7T?2Q6qVkaPoWwVp)vE?e=;=dRf74P=7oDzH#n;Qbzw zF|CbQk2x7T78j}N2EIzi;FZ_XP_?J=#tAnRtD}F5v1dhl>5bzZ0?Y7$8_K4zw6nXS z4JX$y1C5Dy>Y$1j@C21c*?)Kd{9Ev-z-MFp9-k@wP9|Ce24d*M?4g4X>p-QBu3>#${EjQ8Av`OQJ_rl$U6Ng_ za+Zr^DkOW=_q}V`_I(FUFOwL1^tcquFimYS z7LK9VtO2~%_6!%+)`T#LOX*6?Q}7j?yd&OoAGBLKp(fK%<9@GgaP<6e_hdgmH|S+i zXdcIbxsB^hQ)wWBN^Dw!KsTIeIIiVe)`&D)Xkhm*w=vC{OtImmUfAq44VWD2%Lj{D z%;`C%_A(c;Ncuf1jMmQ~JkZD~QZCcaKgwvRZK-f^YoXU7Jdy>&kKdnN94#!;4S95k<6N&6=bo$~ZZIB`qmWxK^ojv=Olvg6#&F>O)I;oQZ3>wm@__qcl5U z40_ifC}qr_Tk4pRdIt!f4*rG=M}}1S*4K(cRXUsK4>74W1c)e6T=KCcWY$a$_LqgD z$a9`4u{U5~Kl=>^S0=&78nG-9v;&B`Ah@^%GZ2iJH6oXRR`bTdS`H@`aZp*yr7>s% zExw-~{B-K}aVqgNDAS_gF|#74;>nvU_X`2t+|ngC_%i*2w*7cRn?|sso!!J9c>JLV zhob+;U~1o&J6J1k%uiD7ayqFVMU!0Yol{><{z!z)>b=;490}x}BGBAfNXwogRK%D6 z)3~4Z#;yi@#LPbA@^|^PgF1=0`U3t8o4a3sb*7JW|Ln=#e7@%da)iaTuXga*e^|wL z2>_p5FW}C8FIubFp^fc+mD9mA;#3TdLOlM;Qz`2B?Q%^6ouloN9wLe94YAgmSq!Wz zKuHWe`D%|HC+Qd+N<}DaioTvTppO_C?4k(Yilx$}8K{Ku+$Lv#N)*}=0hDb>9l>P3 zk-+M>!xMg;bJpqtLAr)S$o<${ok3DB(c+j*eQ|k4pxB5~I2BD3 z+{Qmje~CD8%$aVLQ6&trr%qm@0w&faojiZ4oxIW{@nCaRJ{XOQZKrLg&s zqs82qLR8zncY}}1__rj>q&>!_HnB|T;ulXeOZlgqBV}GaZnmpX4!EwebYgrjzlzv# zU?cldJAd6 z?3EHz+&0BydoB#2;CpdWLu_Lg@Ub!`C-BucV%!PIQtcI`DTew@UPon5Ky79KVl*e@ znn`W+0z}}Yr07*qYQAT*CY1Whu1rtnJTt6;p25oL=xmzk;SwW=j=Ut(8ql5RHUeTB zH}Qs;steEX)HdJ@#e~hQ`NSu33F{_&M=&uptt|uDpz!^}&A?6P(+S;g6tR{Vb=87> zV8MCANlrv3N!zo%ve(?gw4-=Z7#s8Z9Sjafur`GEUGKVa|_!mmGiGQmHMMph6^ 
zJ_mwzTo0ZnrOP0XoWZo8WZklWwzOP4a1>%bay;M=W_rBY3jjqq4l*%F>Q_z}MLxue zs6zrQg+yg7!j}S%wXbe|5P;4HY|G{d`l#TL;$gA{p5`5qo&Ke|rMbavnx%6%WTX9O zTXc{SPAj$on?!){@Ua{ggRJUy#w22o-H0qiKV3Wl8%m}|Oa3YDofTVoXmnKv^N9|= zzAu9a@{DCkR8H%c&7~A=06Ee;cqE1wzNTWqr&=(%U#Mt_h!Pg@NRHfYzIY_G7C8*Z zRHZw2jh>dou>nz2qED#N@4+);O(^mjuae+4Ikl7#jys@oD0R)jDu{wWu@<{GkgXay z9SzNnZtY*q|D}OrTn+SVe2qZ}BK!ssyoCvivh7y|jJycT{!cP50omOR`huJ942e$V zc3qwm+jlzNPNcWm6$0yNx8xnRS8!{d(N^<#eZToLMURbHB__NR8<+mNJ%86XAh!lF z6=SFyR(}4%XU%B^3{UlvILTGd0U)6s@lt6(7+EBb?*E{VZOOiuCiXiU=j`iZYhrZ9 zHl*lt%4~X9FgH2ot>3yl8nE1RT^@y#fZ#Gi_Ep~9uW|Rf(EEY_Lgi)|hLZ!k)FQPv zjN|zpZq*~V3y(GgV|B#`!zm_8yT|H_I(?Q>=?~A!QzHECi!G(VH9L3ng3|*_7YR9k&eWGdsrE}^Lq!hJ@$WQ&_m?Ml(<;WA%IFAwRiaGplvIRlP z&)5AdYq=$%0%;qX1We1W@=39k8K%NyVBXrh;}(8#(If@s+s(VWg0*seI>vxyNV%Ep zJXv$N-gylUle>HphYfz{>fNzl1}vdOOcYa_z`SrUtpH=~{3F@d6p?8-Xmer;I0E9y zyw#=XEm7Xg49ACkE_?-23|~JSA~MaxmwDfrnRz1SC+SkdMgplLY_1Rs>1Uk6bIyb8r0?ED2^Fvc=>}#NMwprUtu8HADf89nr)&=B_}T{&NF6hN zFH@L3v;*A^(5UZ5sfj>ou+@r-o$I&~E{&<2Hk%*gQQr zIb(4EB{-{i=-=V;aqiaP{xKM<6iclGH=PM6EdG=XUYq5Y9!y7xDqwZ2zV=M1>` zMp)?^pZ%E^T}_SuP19glt9}P)OT#C$Pu#P0%azcr)Ni64*q)<;BR&SpZ((M{E?($Fx}D!XahFS^|;xuAL`G>zp0zv(v5Le$EXg*i z4#ba+mP8(rq2@)|3*qo6v^hLXs#(&lS_DI)jka0okvcLcV+whnqs@Io*AAFx4=%;H zpmA_ZHDHgHhOz!6F&fkEH0=7pBJKFDkcs)b?U@Uhq|BWbA4(GwM@`NHp{Y)>Pec7p zqLJ09tOdshzSL$;Jvn$Zi+8|)+dTKHPhQdKf$W3mGKiaL(YKc>iMNd7r$mnjXIwTtX<}0h%h~d|Cr29h zow$#z8y{qvlZ+#K_301LGn0X^2I$%(=oeE7)e(A}>%a%DNwTc^J-Tp-uf^~QfCZIvy$afiJEFbBpL4js@9?+djBW$8o7V_Ur<1(eYEFc5+YOS z4j|bv!EK|BEd#x0#T)~BF8K{5FJ5}xq~#!)CI?zz-NF7Ehr@F0`k}Ai56csL+flQI zV*=Ts!N5joMoc8k(XbP*Md@jh9LJte`(xHdURrqf=j)mB=S_X37+A_Fp?Mfjd1!aS zNhyeEcX?P*qO%YZ*tpE+IKKdl`TBu&R*spOhB}>nGbBkFOY1s%*)dn-TuS3)(edp=L7>l?o9Bl;Fb4RM zgN|6&0t3?XKqLzj1alKO25_MB7o#sI_or|Q`4Ee3)r%T;6*mdyNFY5s0cDXl)@+#W z8-~^8lU-SX4E|NTSs!N9Y0~nk`~B7+hgHAQKsh?@HxXrp@}hv2G$vGlJnIX$FFhpV zkhqF2-|tDX;z*#SknD_jF6O7LYJzb(`zd2u(~0QJ$=RarOx3hO`-&~o%OVOfV2E^| zhRC=#46)nR@L>@ChCJpzTXM?W zIVmrE^GSJ`eg2OtcPAgzw}Fm4Dd%T^0G@@Xa 
ze~gh)ERg0I3sI1Q<>5d{(f!+Qd&2bGhlqeAIKqy`ZmyGXQs=&nCpa5drE8Pi4QpuF zB+$X)8rY&GWR%ZC82?R-Pmcjy5lJu6t%PtZ zd2cOH{1G~KZW=AG8C}A!X(`2Ps89uXA)2wal)?ka3?N1Tp9-W^lbERKpp&=_(|g^l zqtkY>;hZ(S?QN|9Zb?*?{pk1_V`~)|PPpL?Olpzan}$+-Cd?^wnYvdNFO{PML6as7 z90hTsQg%>y_H2JWl9V(TusTh@j3sY(Kh8m-|mVKmIFx1D^? zmQ$rSJ)R71Whojn02_i)#E-8BCv3{v*GOu;1y?>hSpm2<9Yybk{WEP%9{_g;RUxXn0r4O~I{P-q|Fz*lZ`OsQkJUs|U1lLK+-)f>FKWrSCiHaUEsewf@R0lgx=3KF3cf6nBuD zWc>?9#CqXl1|_60LU z{alpn^^KG{Sosc)slU^oWhEN|_|S}m0=Ix}VW^`5ZVW*lX-=M}C!_FUbrk0gU*ldC zTQ|hpXCMThuPTMh6C+80D%NyjR$|mNu9ZIfAc87FsmF6a1Hp-WY*%!7HYZIk_CVlp^z#n&s17NR1I($l8c zr$`x61mp>ne&dz@zBejyn6^Jn|C?g>a*{jHq$Wpp34Al&_I1YxXhH)8$acC!b__ef z#mo?jbB^ioCU+EGIgf-3rZe$?BUxAsB1z&wL~=FZwXuLI?#no);ZaE*0F?6^%1S*6 z6}!&iD;0SbXfjcU0`Ge;j;e6GdIrz<;_S024b&I#-^+<_9Kt2+6Qc0bbZel6Xsu$=5CWpfTa!c@dqpyImX2-Q zBoP>+z-J-x)cPX9@_Dn?m{(J$Y=N05%%^aPDnBV@Fcc(lcwv9V)uEW9?D0mFs0LK0 z1@z`Y;k%S1q=n4>R@Qlz%7B6qbsnMO>2)*Sj)U4D!HjKI-6qrd35Z8x?Cpx83Vhsck$Y z-HMv={rQ{wGiM%nA^!!i0yZVghy@w@-3V{RY$aeVj0YaiaTLZWF<3j?Xp{CaIww>T z9i%>qg~`|$6UWGv-xLrg^!h}TdbM}m$wpP8>7&c zr<^uuhA(zLbf2=^Zh|;s_9!Ht~nm$93fgPIY)hJC{Flaf*s8oIJ?i^^uYfeoYocP}Y zxY+zYAxHttu@qz7;xNAfyLYvWa{bp_YZ7peR`!0<@=0-CC@=(tAmqc6QMi769h6I-lghIFZ{?H7WdmjM*`j^{l&)x-h+uq}Ly70(7753V1K#`H0% zVEvqKi8?9`%rkdWyYHUWW929?xYE}HZ}O1w_lnuLi`HC!KZTUWzm^T~G7zgqBo2?& zf#A71Q=>|tNl_>ux2_`-4TB&m%L0haxMw2ze#H%#fJ)x+Q$9ZfsJPHd z7A>(^XuX{U2mgT-lLvPFUL=HUNS&fujmH#vmeI-7d-z1HP9!zf-|M)W2A+`7Yz}xf_0|hGR)hCnpJY|tM%wh^S{y?AlUGPnwwZ_n)ee*+X zRo@sRl?0LIN`HV*I6X9+^nH7tsDK|_l8SW+G!o8@-T&-*FJFF~Kr$a|%lMT8o6>cU z#YrKK62M6!#djFPJTT}aWwk+D0|xiZFd!aY>pNqf&M7#hVn?H%hoZcN-LRymPgLdl z`oP?%R1%YcYGfM*H6Z#?%To7Lk&F&#Cq2eBrxsolay<<BaeLHGGqoSNADn?guV+&3z+b4yYF8QS=ubVJ9euBabe{5!o=2eBL=c=?ny#TwL^ zpN4g=zd`y=4+K{sy2s5e{a$3MWIsa#IwKIpZ@DFkiq=Pw3!gy=(13IUWK&NoFG?7b zNZOHn;>0K5Ru@jV=5zM@jkAJd#_xws|2}hJK|U?(TFoa9{;~uVgL^;U^t_$_K8A_4 zXKYy~dW=X3xweGez;A1xR)cOuT6-V?{afkrmkSpViUv*cm4&b`57P<-VdjCF1?QnU zmI`Cc3%hDpSZ0W{qn{teL`n?jZfKNpH6`HvbsX#2*Spl1E`-*Qjorx?hg8L8#t+;7 
z$9TEsrB*fvwIk63V`|fp* z&+1yq#RZpU=*bLwDhiqsmP?nY$98M~6B{PJ;r}jA3PA-NAPESIzIx$(Xa7e&@}F1U z&XC((r0?q+`I{Dw7e`&DK)kK#4{p;3HeEgEQ+VN`w&!im)=9se++j+Qon+IUWg{c- zQ3O#U?-Al`d)ZFe=<9BY{t%62AoJb`9rh@Zx`J@HK1W0p!w2N)=!S@}f!_XWa5Z}X z59b)r9yd{p*OVq1LMtv6)Dq^k!Y-H(uMEL93bn=XPSg<{sM>hmM7#bqj&$AnV$eeUgVqJWY$Dz6=5DZ)HU5n@2OIv|ZkNC(9y6FVX1 z=lsOp_xt0!$70UTOVF$i>M_>FDqu^o*#Aq1=K0}H7FeV6@Ys)^l-X}~CP9fPtBzXm zvwp5LuU~+4g4}*SO2n&~tbFls1q<|fX%^{xeVrj;KQQLv+I1rmsW4Vl%+4DXUciE< zbK5RrNknd*DvH>4FYt70e zdhAd3t<)Z5ihNPQKm(qX#1e*5yo(g~s){Ny%SEM6r-U|=3>!!}0=T;U8kfP&@Gw%x zM@0e7jO9KzvZ#wf^-b5f77R;e0tw3t3yKkb7<-3!*SnX!@BU5kT)d$niRRJJbK9|Xo%3S`b)>b!LrCB0F3ld`cW36#AT!-pq&H8{L?D&S19S zx2x~J(F#ONpcNM|rA#Sn{pc9?^$u``0(`o(I(uA*)>S~qHZ8>lcv5q~J=bZ({}}3j zyy(}y^SKaC*>44KI!1@+sRBqD;sTh$X*05;C$_U_;h#gLHlKGF?G&YoZdUi+dil#K79&*W3VpJcZm%JrJyONqmUfEr)O z>8)u^?`w|rt&awv>#5e4gX8fJR-!a_cl$}XwBI^*&OymxlJ=vme8wxFau1=b8-He9 z@F%sN^(Bp^^!!qz?fP){dx`?@36MNQ&hg1K-$s@<4Gxr?nUvXp(7hHkQWHiKU$^yg zGG(Cvc^E8lF7Yr2p=u=#?tWLxdeliyvYBUkf0$av#DT}ue~=AX7zT5ajG-A3(cGx$ zWN;l7sxaft^r+dOY5$fFZ zdGlJ5TDKx;YOy?MDHYXG`>|=~(fH1oCW+_}(W)I|L){Tb$o)w;Uj?HTSRB|Tyz4Cf z>g_j!Qm`2>jJVQiWCr$MK(p>$^6(1HVrWBhFXk2&ddSH-nj@JC$8dXM=UK?h2_oIo zXN=?|rre!}{d^GmJdb_Oc;Y0a2Ay4B@eJ8A+tmo-1N(oh1+p#EQV9@7!17&;Rs4$r zUl(;X!?oOrJ`-8vu!Mpgjhf5#Npadmn4_NfWzzh}&K__S>}VAVU0TrKnrcE3)VNeB@`~Vy4?TRT}F_6X1Yh$Ax(}mI+?=doZMB zz=qP5C_PH5pJo_aMR-# z%~P+fYYE*36KwlilY>xJa+|hj()w(Aa@S+9u_1HrRNq5en7T^J5j=s1F#74kC}c4P zBAI#u4St!<`jcOMW=TwR6$cFnoU@#Bfw%*GXD98# z@1)iACn=Q)99}J2CZ5u^vYV7FBxbY8EP`2x=NF6Thdnbj_dVyn4E~_p=1Y&J^y&cMaAsF7hMpkjX9rWz7gn>MowzC2y?&IOutX(FSp%1}!^+)3gKmDh7{u zG7Tq5v0CVx!*J}7WJFYF8JTK!&V~pRa6`S0p=E}*RYp;Pbyp)zpwq3N;w}+pjDq7b zfh`oYv(E2Loa@+>-U4`kITsEz=5mU#G zBhw>((g^#>40NXm?_tU|39r&QWY+U4LrH-vJ8dW>ncP&j7NS7(d{AWt6KScpZ}}@^ z{$$T??G8K&45}eZGpBw#+7uJ9%>509@ILnY22tSS?m|gO2z-GMd+EAjc-0=XCimoZ z9a8^exuyFl`^Ni(1Fz@Vy63s6#)s}c5K&X_FTc>y$iHr0_o!xr8IXw-55viF@#>US z(&i~bpCHU&2w(CvL;y$RxoBdYvUZOW%rBXBtLJKxMKLA|n@rH%EW 
z9n_}Cpx7v_7Es+zRV`pgRfrAIw4?Azqcm#c>C0JR0aBmQ(PJ>Pn4#w0%p#U}kO*Jz9!WDrc(t}85aBUe_pw25pUu(3^gcL-DbaTT(92^wRFWQ>c zy4=5&2z?#C5cVum(;9ePPYJmWnX%Be~r#@;-<>TUEVK7Xv7z_v9Xv+Rpgb zY0t7q1I>(rPkIy|?#-Wd{eeWcUhj{} z9Y?~*Dok6^;J86IO^v54-=IRnA2&U@lmj(>Ul%*C!yNptyFoA8UNzFv;b!EN|6GiT zTX2gg@HFOC-yA!BUu!*|tYx?9cI8UV5GAgEC^{CN>S|YAz-}3bh(h(o(>5bxi%Mo) zl@ukR{;3$jbBD4Kk?`ql5rWhCm4EYBB(5-6axe@z7TDl-nXyEmW`%DwQy=0@65&d6 zE6Y=?XXIlw{Tn`ML6hVhQ51z?;i7M`uYPgp4&c#?(lmW09??z60gLUrO+!SF*VmL6ZBCBBYt|`Ut+#p{*aNDNMs>G=hyew1Q?nhS!ETCt*L3&BlDNvBE znHY=rxwK+~=~N{$77cc@{VN4kIP+K}c^prm`wNp9HIJ+bbyt&WF$aDyp+`s7ZJ-6;+V-!w$Vtz`vuIA1+;d}na0LD^1n#& zKPYnKw)@49`fcd35oH=GQI80$bIA3Rwt2XtrFX z?Xe3SQH6gx3AC7+foyDw0bbUKxb$!Jhmz)zjYk-qIart43d!+br<&;sihU~3=5lzA zW{#wL=c5<%BSpcY)ZO!F65%I~OKL&kpB&UmC{E>S#KS&8ZCgAMi%<_}C}f`&xXsW+ zK^U}F>4tYN>O)})2>zGc(HTw6$uuoLE}(u)W@K*kD2@vXG6@(FMmWiY=bVvo<*cPk za+l<#vJ)>fH3%m$4V~QHU#sHK+&=GTe+hja=Ma5wgH~$vH@u-NAy0DNYVmJGN3Klc z)C(R`wA~=Fd1A(?6|4+j+NgIK#vYxZCJ-}$dJ`}SyawhPR`QwoVu7*$_2J?>3)yjf z5N@wn(sOk7Pe%k^-RFp^sulr>nwc;Uu@Zmo?R_Q>;3$E^>qbRkMr?aVNlp_Z_RTFI zJStsU!bq|YvRXE{gHYRC~G373l1Ytpu`a&?S+gc>R)}35N*=1tryVAbI z-lN?m^h@~T{mgS?sTwy%5}kA+8l5>n?BMgR>Fii7HHY`?2%3;`vzaRffs;mX%-$K!EWJQ@ZHU7hw;~l@7Y{be7m7YLrI;k~D z<|*w6>V2|xSHt+VBK`Q z%0MbTOew@$S>YqfV5VvrnfvF-1_&1#WeBOSAtZ?}GRufQC|UkZ0oMhcRQ~3tp7EN; ztYI`#2}o+bb&8=}O4jTiF2p+^?E!@FLrCygzbdacYKuybjvGjY4~%RobTg6-UKhcN zPWDd&_Om}*u$ZX8Su8siRmgEJ8%UO-JNj6GeM>$g_=;l&#g)u1r;wxd4vJ}5W`}C| zm8>sQXt3P-7U-l&RbYW|9dvprS;!{@i(Y4XzEkPa0;GByVc1M&1Sb?3VHf8T$wcY2 zO~fOu)(+1mDA!-1%4r>(b>IO!*PKPV@TR;8lc(rn^}=<02mgyK@{rw!2(O2qOwkCE&?3mZ^Z4T{;%C*X-WQcLlCf! 
z0`zh@Avi9aEaR(95hE+K-*H=VBnyM?8HF@0WXG^Y^2$q-T2V#YSqL112)BPhS4tx5 zC3LvrL(ZWGMx5;Z8!F41u>KO$I-ai=L{?vt_MW363^ex7+Ai7f5)FrFxY~_-Dyc+7 zdfj1|f!ed+LuE(&?W&+FP3aR>ovhLVUqEV4GOZXYNyN(uELh!^FgA*R*iyqsAKfc7 zTDSs2k%t(g=qQ5-!e!8x>y`rr*X@i!Skj>Z3ixoP?;@f@$|Vj(-0JRCgn0la<-DIq z7u!23wB%Z@0E%F#niB&uSl3u7Hxf%H%~u>PTfaa91CwQ~QE%v+?X!G4Mf80?7Qrp| zN{h-=D1{}^y_wXu@F1lV60kok#KL|nT95VnJRtnMh4j#e4@m_nKjnm@E1!y>m72kk zyI_MAqnm9!@#&%5v(1WcWp!ct7vyFpU67NM^GjD%c-SWlQ6Zv3 zLDpJ;Vc|e?O^1?rvmo#MNIe?c6z49D&&M>+9)>pb4bAnv(7r$?7iUWdSq7qux%*jZ zNX7=7W5rB%bZA#|3gb5I@7o@rUbkM$C84#cQUwlqA<;hNt#%rv|KBPuIrS+1pP;>x zkNOXvA}|NFVjD02%^-Yl&%t;sO}C#EA4t7LG0k4pj`ww~6!9QG^ zuZ5Mhu5X5IYcVNj@;qExl0`btg)m;fu;mg|p`>g7(EcaF3e)I_{UP)90EfOR4r`e> z7K2y`q-qWJO&2kD@ma2&=p(Mbmi5J#MhYM%Kb>!v8#kY?Js*$SJulxK0-ZiQr)o2j zl{xXq8*kg#=!faTi(^uEyu*66Ttm0Nqof*{X_|swJ=j4gUFD#V^M^!SrGC!|T`fBcX zPq>EPj3vMp1d<&eiP0c@4d0icxc%DMfTqnxM!cOOrp7jQ0>^kJ2yd``RjT1%K+vh? zolzohj*TZb8HEKFx)s$MbpbeXcHrvH#7T>Gz_ekF9o%3XI4`L-qStVTb?G*{;X0xH z!KEx0Zl|VsK99^-cM{i zv($v77?$-BMkgW<#pM_4c+$yFxxd{fY8+ZVjQqt)`q}c!5NZcz=f!fUUZN~j*-#58C*+!BWJ?%|3CV? 
zZX!5*u)b(+OM5Up?_ZC8a2a`VP{`a=wodeG4?aaI!EO6L9Y^8n3p#IX^3-liStYOH z3S4(G@P{O~a~(u8-oSLEb$Uj0Znde>a)&AuL(g~tS!L#lOL<+0piu(w<{NR76o5A= zQ~dxFuK0)sn7dbkoL#xLk>Z2Yl%HP4csgQ8Qn{q1suH}prvE<+0JSiRbZIk+B)1EX z&`S`HzKLudWgMi8tKHVTPArbnjYY5aRTiY&Dx{vtcplvFc8uJKPN=A|P0~uiy24`}6MTX(59fm`4-Mq>n6kDvRg)(9!dD zW$)MFo&9j{=@%(xD@qsz2V`8l;+M+F8&xTl8^3MB?s(5gcf zuTrxS+{{5Z^JwPq8>hw3OA^>Yj0%`;if*$^dv+BTtKbZ5Dl1@Ks0y>^dTjsFp0XNH zZD#v5Y9@<5H!BT+m)a;bm?^8~UQDI^qN@yg^e^d@R+C*-&)AelUuBHc3?Q8W^3{9} zu3+1QINupj+=x*C5%;!^0Uo^uXxMJwCFa(6r;ZfA^Ndgxh?f*Oh%Y14-Y+M?xk&Ky!La;*j*G z0ASF*dFLuYZLQ8mSW2)HJwlNo;N{AE)ad(N5O)1c%ilG2r9X(HIVzEpT9X=&H{%$r zI|8b{Y(5~a_+ZNMKd)+MQ0j=g`$5J~FuKJ@GSxj0CgmMBE&lkuqZ;>m@X!&;eH<>x zBmC32BlINKYU?t~6*L~{cl*BFbRvpMk%;HmN+L}~mZkZI0-AfR=QN?X8wyzHCw5UB z7(dNq?~c2 zC8UoKH^xPh3lb^}F%9S$iA@OzF)FMdIC~VIq`TmE?2zkmNIulm%G$zPyc1XvvJD1~_2IRqB;6{A?f_0iu)tcFe z-+?F`;gL{D!0UwNr*}LXnZvQ|WB)n4FX5wjn}mW<+ivEW-qCKrY(mZnu!#TyohGT~ z4A}~#Q5Q*x81ZGiF1u#r=>BoSSF+KTjcYFV4bzyMC5g2wKx9?zj(LuT&Zys+qAPm= z+uAf)RaT0-LtKJFY8yYE-JfoF)Le&M$Erfl5z|%#G6O|tVbWT)mgsDFWM9mpj%Wl& znl##7N|szK>t(JBWhG;K^^kZ+HtLcf3?U$twE_aymJ!NCz}6_RfYj)~6?4Bv&^SdF zJ>-2_3tibC3{TDw=#}w@pl>*_b!ip^4CoD~%p#E6({+@X&BpRuQF-&QhSBV7)0LJDPPBkLQlC^=0}4h$Da0b0OguRSJ>cMT;+O4UXRrgxcv)wsT*7$;a$!+vNsD(u!jj2Nd7vqG74Mm5=U`W1%^xpZuCwu~^@>jV9tYIf(_6N9%k-6h+)!XF)- z9OmuXJFvU9Ms;6$x$-mQB_@+>Yry7Ozxap7KQ{~xlL_Rg;5#BCsiNdUnW_ZzzJ_3& zjIWK}{w)_pHO$~8I4Tr+Jog%!)O1Of1p=eN6k!wov8cab+2Zrc7;@uTG5)-M-|HNH zSKdf;jegUV!*PcK*qtA99dFdF%D>tT6?rLB3FRua+&gRdeIC{b{hqJw?~AhOQu-cO zk4LyvKz@<)nwz>@llc4B^|RX8@KnzGhbX#rZ>>iH{3t|yU4?W4PFqNdvG&WW!U*xo zMOHmy{Q)k)pwYoeA@PbNy9-pTQbv{(xJ{yKlR!8?@$q%H&T?WZE>aFTzE|s1NrS}; zoKYH#SLZ!`fIlTmOER4xYK$kC*gPem_ahh#pf%=5!)0!y6rIwvlWoNsE8;-#0Xs_Z zAq0y|31DlH>0<1P#ud)H=2#l?y~eCaNZgh+p>xWA#h;WFZ7HtWg6d!nD%$ZRFMjc6 zdZD{pPLSO4oGt{uY4`fEM_c#VXtVu%w|$L{@0%#uS&OnXF0Y!Q8_}s3o3rVy*7KTS z4z`Y>WixKfT~hs?hFF7y!mgSo_M=*5*uk18=kDuuUKix#G_JdW3M_5csX5`||Ib*o zbS2nh-qZe0VA`Vq`40C38}hjX@_`uFqeafQ^j~&ay%V(Mqo_@;eAoH!AfM88g6Gbm 
zkGjK?*|jd_FUDwR++o|z?b^CvGdsM13tS)piV$@JL$~9m(Mn4E<_{t<)L2!WlCF-5 zomyGE3+j00jzQ9;?JJS`vU)eLTw^d-rsZ~Ngs#dCv93LlX8fQLdZM0|dX3%lNjd6@ zGB7Q6*@x2dJykF{(hgiy=aGti1@zpjHh48M1D$7gW>0JUYD1YyEq3E)iZfqxS$Cqz zQq8JMPx+cKsmn_Yk3`9TifRYzz&0#1ZAX`-?qZS3ebu@@+Uo!*rbP^%s-4xiI1l5q zsj@)tQ;J_$db{b^nidhs^j3FUBQ`PkR5?D#w{{twEg|diB$s-TB{x5bu_2zVodUFJQ{F4&E>P zzG`xR&vm~|*?&*Y zS`OcEOofEzX|qJ)omF6^k<=ECP9$+*IAX8$acD&;lMWtrqdfgif>M9f9w(~R&Kd=Y*R}?arLzP!azLo?hY^Z`=s0b95V_w|%Y_{4G<5f%8D=Z9J%I3XrzytLa+fQH96Aqi9Mu6!J7wqh*Nm_cIeI8q6Wv@LZ?MpWV zX{>fn>*GCpP((-$SlBuuRX<*1fUYc}x}vfvPacF<%0?z&Be5_@6yo3%EDB(!soS4I zh7;g<05HxzTVHwh=!k@@e%80mh+y5AY2HZ{JJa!EQ<-?tTZ)qC+jexx^zS&912PGz z6!nD!cdG>zcxpSk(wLj#fK2*2#dQ4C#d@LJWhw??ai1*G z$Lv382TediX^Li>v{IwBzKj9Rf&lFY^ZS{A$WS)CIRl*%|NMXMOuYCnWKKJ6_g`eB zzKZ`nRQ^jp?l#09@s_{t^k8smDnZ=O^5mZM^c==tXk9+9T)-ca!E!a)Xq`mN;4s}H zBS&T-0E)W(KBB`tGRo!=LBpJ!qf>^Yi^qo2_(4laedF?j>|k%`PgG&$zK^H$10#ov zhPg6<$V$UhZ)W9aC3*27Eg`*=+fC}v4h&cYu~VdhxY?D$F@>Ozp#UXLFxes{hVUP_ zSt%QtA{ixQBVc%4U=E}l#cVyri1lxGD7$yPDHhs#A1H-7B# z`gbU@3;GwtkI^&tKX4>9nMj zcSJs|cYmzAD^(tCzf9R?J{l2}YGX&4_g1y~X(CHi-wRQ}(+#1Tt!i6|8O!L2zy*@U zdI?|4g&JnW!QRg`QN2!sz1Es#{N*KVa7w1nAwBTLln5jWjCN{7Ef)edYd3AN2q7rc zu^1;(I4b?&xYm3ivG*i{kiG>5DD*GrIaX&cOKlhA zpb#JOs2&rH2j4KWv!Q0j3fHIQFem`E0#5tW4j5z;(~dD3qHalCL?R~CZ(1y+G=m)T zoY`*N3kx0o;Rq~dAEP<4`Yff!S2 z69O|?fNj3nayKdSxo3)HSMMd9iS)R#Wi}fJk8;G!lt+BI`>C5L3-uxZzu(Ak#zu`2 zZFO)WL9Bc*CwbvFrlAJs8g+6e57JcDyXCA~I~zTO$5^VqjTxD}?%JiFNQ@B@tILpG zG%yx&8Q*clRmP0{zKW~d>K2vx??5$%_~|k45Xes$butRp^}B&grcW6e#r`vWu2Hde zLChKWpA1^1$<{);q}-DAW{YWPE6#wU=%3S6;WvktipSTX>$*6E7@Tj^+CJlZ!7OeCU68}NQ{Ef%4-F(4vEMJD_s96K{U6XX=FlDqQEgTUX#M7qQ zV@yjfw)Ygz0-8)^s@RKbc&7$|QfAMS|2E6Vr>>m|WfBLIk7pXEv($^}r}Q%%kI#Wy zsORWx?}C^u#!AztpV0QV9=kgzJG zirZG{t$r32_s&ZpvbjY>x=}M6j!@q*6v1Oiqr!h5=bLH~PDdPyP4jsJbl2Ey!BN&2 zZv!MBNa%#ZRYe_5s*>d?_V&*4S@38E@eD4CyUm|$k+PVwUQZ-M)$=YN>3}iS}r04Ve-AJ z{^fpO3cF*pKroNJ!}@KH*L(Z7eCJ-*cV}?xRa5cy%W|rG!gt^HJ^G$}!}jyYR`)P9 z4M*W?&j4GRZKjec*r)W1I4?b2r!=&FW)ppw#}u{}D2)s!(O6;FUX@vjqv?pz5rsWX 
zsRB})`a0u$O#9m8K2jbl;A}M8l~VE!YqDN#F$8hUx9=Csh9`1c0;^-EaP9cJdNgvY zjrnl}UB+dlttK9&W9{a_b5dlarhbJFvLhwg;PoeLlbI)hvQGp)qn{9X+(8$jexIma zwt6ImZ~FoNJR7EJ(^_8ayOrVYZum}a$P_SMJbPTx(2)JC#+k{q*Fs{&lEht~Nzy(| z%?O^M~AJ633;%d9!R9txUE~G;jv@B22pxc`PXhnWrdWh zq1}4=wzEfF-Oan&(jYQV3byl!nh<1gw~on5#u@{Ct}2gy@7KTf>R<2EFBQ7L%f-0f zW*0MG7e2A;~fF3^6^5 zT5BlHtGUn?49SYqt#i5B0a11*&XgfyyWwPHR4}p5*$X8Qm?!&+opn5u8t9&51Klz8 zR*?=iyp0?A`u@lFa(vNqFSeFbukhpIe?^p3B)k0s5~ofq?vDHE^PCS37d*#qSImv@ z$_X0Vj6k7Sw&PR**obBMDoVJybzV?W$s!R4 zW5J-Fo0o{Q8U<_@a4~h-;r49ujCc#Yi4ft8YPgg-*XG7plb z)=C;ugX&Wv;INBk^VgKITh7pX%Tkv@GA-!UJr z#EN9C_*hW9M4H0hgBFlev%JdDBvbQ>Plw&6m3>FB+)e1itr&CaQ7g|gF8R?aoUq8dN>|fyd@)7lD0}cA(*2Yzc43m6442(7l zxu6*R{GYFaY5sq5{29$Uou5Q{Lc(dxH56W0RzbnH*3OL#ZbD0L5=RRX2*NLT9dCy( z&AtW`93vcEpV4;Ht8q`-s0n2a+-LRTxk3AT)EdF*oM;r(G-xbbH=@##wQg&Kps0?!fzPp} zRDOD=#6Y8wfN5KOO=fVkZ4`AFw$*$`qm#3{!AzK~A)TH+vGB=A77#|4f|NfR1`vOH ztg}waqgw_uBPxcba+@0PXisvZ^-XnZ7{lz0n1VJHotX@(Bnj#UywMi8+rqTIR z!iH&0;OI)$&Pt-Kk`#C(`N|!Wa|aQJSQZN{^VMpt&;sM(^D_3_GRR@3;3)XOH!%Fm zwRCjnn5n?Dg@GUd_>`h3dJvWy=BjiFlleW~CG3dtsM)`4s@fCIOw)FY4cu(=0sb#~ zAg(e?k*%q!3T|2`aOZi{H?JN;)k4tOERW@O$pq2=ZH8OLciWP9tM_~${9o5x@9-FlZwD&l)d%h=ThDunV4s?B}2YWKN|kELb@;Z`dnO zreH*NFe;p7%nuLhaTC;o*jIER7j%A6h(lDFAhwln>0k)iNc)n_&5d#5swJ3v9#0z3 z8y9io)C6IRrOA)N3I5&h8Qepv%S@3Bt^FgB+pqC3a z5a;kgM_sJjBSIE`aRN<#0CcB$z8f}@zt1%t1HgVy0}*jC{RxKv4k)Hu_TP~5P_JNC zio$-;$<*?JDr1FxHly}B7#G+|l5TX4(h~T8{>nxOr>{q($w&_?a3F;8gN}_;%!whC zBwJ1~XVtql%-DV_c5ejgx@RBm*ZJOeqZGam;=T{F;y;bMMmv(jCrxMDrX!~&7p=?M zZMC3E6~^9#Nn#^0vq+q2sPE*BQP{zZ{qo^a<|TP%VqqVYb(0})ni6-t63em>DaofF zvZ4$5KE@pmFH7Y%>KccZin@a4ARN_qoDmUpv8rub+#P*+D^;zjSNu?*>U=pm&}Q zXsMxGJd@;jUUk=CW=&90xmL{m8yrIFNI#mu((@%c(cr5FSn3{_MweN{e<<**dWt2- zxZDDB1H-xStzyAcOxmk2da}Sg$ z#g=iJ^kX!caZ%UEF|tWz7J|Fp+q|!|X6|Eo$z9>`!mC8kTR_`~{^=#(XCf$!LplRbQf9@GI@re0aKY5T%a^vWjK+wMJ&C1H zgfV;kwQI2k7Trdq@njx^#*xW?&+3RSo?@**B?0f!n*styfhEvxfQW%U-$)t25`~fE zZ`*0zFDW*TR9?DjtN>bg4q5Dz0Kw=h)8qT_iipIJJUNv8$VbzMFs~56oR%0{_}oL9 
zOkdm8NWYH#r^wG=6in3U!auT7mP`vjYcv%~g_FnGSo69M1WR{K-eWYjA1t15SS)?z zGz_TEa)}%)9+DZDko-PV7JAKtBZU?guqleNs*yOPRG)YdZ`cZpm?nyGJMmvb;(zqc z@$3KED^j-iDf<7TacpoiKJ#vs4Zbf~E80FEHT~rfPkSyt-9UKKEprX9dyW~$t!KN+ zS{z{S)9UQM(lN7>g6r|*>8P;n3k!bQxMw>Y3(&83_)&@=Y(OBc_!q?NosEJK&l=`* zf>$!^xSW|AOY&~L2a?UrU^ZJujdq2=JsCAsoD4&jw}*Mtsxicc?~!fmpVX539^KhO z%cMl-!7>5%_-z z8C|z(;kNKdjkEf2Cvix?hv5ULVo7TwEzd%R`>_0V31nb)h=ps%$uhOy4&hpl7)JB2(QtWBewL4ZLq`qWdsaMe7739y`n^ z3UgY@`|0%qk!yw>JJcF?W-Vnr@W-IJVVv8!dxG zNTglQ84m#IxUgm-#4xTBqBz~DFvKbw9NMmaVBO<9oz^B-1$xxQH>MYtzg9SoplSD`FRSTG*IfgTSSdbx!1KvVr zk?vAbI*Id2K=qGeaqvrS5tStxNdSH5~h!(~W{$Vw(!m(!t;-|XP_ zW~-3xjn0eeA3*~n8q>trMcxlTyR+|C7)h#1#bA#V1=CZ^G{7%4_TMis>?7+aUtqS; zT@4ND_*(Mde2{)qt*swpbu8Ngu zaWk%oqX!+h5pfcO1m?|;I^ARVq%FjS5A**(ChJDvbOVNqa}hq3Su@T8g8iIn*P~rd*QQAlnK>X2_KT($Us$wv; zU5l*tqr}hYZz);5`rWJ*ko(0v@SYq`I4UMwBN_U$Bu(zi_5)>SxVPZO`?|NGy6iE!F+C%L3?ASM9(&o8b;;Er-DT7ovKRlfYsCZ`r6 z*OeQNZxF~zSFP%}!yi~pM4(0ibITrtp#f%kVoW3`NuX#>{X;Rcm&OlO)o3=`eL&FJ zR-%dkY#nJht|M|r7XqmG{$;7Qas@Wy;3I)kkgh3+IaH}suzPNE8v780aHDb!QJO~P z0EJKanx*)b3Y=U>B7jIQ@?L@~Z!;Do2qrbxN2@OZBos@qA|6O<#wPDErp|X0pS^fA zCs@st3(J|nDZ*kpEtHg5wFMPo1@{>rPC#p=%*q0%*X8a~j^Gd&RlD?&CAE-2Cvh8? 
z;HOk->+aa*eZ2hMX;*=S3%?ndNcIJ-6cWu*xzRMBHdUXO-XA+|pHPh@M$o66l61M} zM#9;Fj4Qu*0iyn4R#cQjGINgIQlR|xBrK;kpx*1l&}SFfKlRcEWE1*K4TJc=(*TZ+C{YRLf6q znip&R0qJ1ZDbW7VjQhaw>af$~n&?X9BlWxbQ!oFFqnx}fA%g>EFA8i*S5lOVaCXDV zVTTS7DcnA#+*%+h>3H;ako%{A8BIY=xJ8kEODR8`zV;L?HNu`2tQk*gfBuM}xO52q zNu7#_++TT&4x+~{6?k4PPFAhDM0YL!XAC#`MhGPGbg=IT@siX!`aQcEWY7R(#*$ZE zmXu+6s{%!`=syE+@R9s;aKbVF=S80DkF9gpPr2>;EdmE!=6{kh>8ybQ-OP;~l|5|p zIN=~I_U#~k*bkfT`^%J78mv@0ykZ8TqzwRzG+wTOUtv}jm}!)U6)H`)iGZ!*k#C9f zReo*QH>HGOkr{euC4VYDn8*OVu_16p26z>apFeCauwklKt##Hx>(<#Y zBUy_x-VgwT{eX5A3(Y6IdQ%vQ80kVL^5S)w3xT=Mdg0X~ox=VM+8B~Pb zr${KRT7r><{V!~Kn2oI?sI#20iLzX3D4}-`SR>t9cYUl4HUgz-th}W4vSKP5X$g0u zV+00Y{?=N|BbS#m_ssb|*1~+kCp;A2jh+Ma)plyP5tZD(Zy$#VbT-Fa*Tfb7#{cdN zEchpga$iMB85EcsDolHO+mp|@e|4sP{;PwMHhdWuHHehX3XhSprH7W`{7@l_+fJi~(GvvmBmW*&EJiH)!oS)`wL*Ii% zpmSjXi=VifijGw}yNb=`cb7*!2qla9d19bIZ@4KK$289h9r$!0saYQ^2;0~^Le*!- z(f|mJ!$$SNgde&@sNsD`&WU)(;XA(@Nent*{T*$yOKFKIsUSNOl6ZMi8c~Oj%BYKh zr7q@MqNXsBhcLx#CHs^>TT%epBh)uypF2QUaoCz_;@5P9owqXxqXu!%ll#4>%>xf#Ynpnx3YrE?SopqHa{# zYz3XHDb^UeLh}iU856aFbeTly(#_0f8+W-fL+Zpb1U3P>%YGk$&mUwoTD6!)kZnw( zVnQ1UOQhT-p78jN*^oVB@Pq&bRR^{cK$Xl=UoCnonlHYZwCZeyc(FG_9KyXb8Va-O zq2E9ctlMRW9LYKL?KJx1nnapxH<;u80t-_U<)RLXT|P|u0u9#1oi~T=P@%QVhlQPV zqdNisZ|s?2paA$;in3DUvU(R_C?dq+s8Ngx(%FmNU z5_!@RXh(@D$`XQB(M;&j0-n!*t;c3k^fNq#GyS4R4aVFOnN6pEv_$5NdmNjH8 z3dCE!@6Pl;;-pWD;O^wOl3g|#7TO<6e+e<+w>m^m9cE_}D5AYYTi>h06=uG(07T-m?s$)rkX%)qo%P)fa5RP$ubQQLs@+!t#TJ-nsR{|9?td>ZP-e=m-EKUe1cyTR~xFh@K|=|8ugr=nG^8#+qHClzg}IxW|5S<8m^H-X*m z2Kwt=0-l>qgK^@(qHAvr(|nk;7BSbDGJ14?a`T3-fxcQ-oPBpf$J^h@8B`af{tblw zJ&%ziOl7Ex{Yv!nKZjM974YF$g5l>a?YG8^v(cWKq(^ecVwe_9OfP!@gD*%UMrk3b zsfu(>X{e^}>J;575LoymN*WKPwOkdS>9XV(;piMyF8u;zJEr>nlPi~$8F(ZtJOptI zUD`zjeAV)e5R(POp$;MfVokTGT`77|dn!j60w4wr6K$y~lsFz zO+4u+UHHnoeijeW3_E!kRs1Exnk!fBY+=-Ol|WbLaFJ%v zue}UQ5T-Zx3hZH9l!Lm?PqI^C|@xh*rZ&-^<Abybk$?3ZP{CYp-`5K!pD)k?ATssh!*ug$PwosEqj#c&G zofY?lK#TN~jvn{ONyc@p1EKj@j{8j7ql$m&&Ix$60q6N+;@?`lq!bdr7pZ2R7UC7P z`g!o{OdOx1Yco(u^}lLo|gE#1tTd>*Nv< 
z+~6sxhEAA-H6FHX-I;jW9#s?}%9Ll|Tpm`ULa=&2&{t*&Ij6cMmgWID%Nvc0?T|=E z+UklT^5h|F4QI&q$a+qM;N_P42*(Ki5`!+SUUk0brqb>}1J+JpCh_Xww*$ZLVC$QG(Gy6>~vfw#hS!PKTaru%_z=d;4< zE3+U3fb&_-5uRL{^BWCwH|=awtG!gY z*FkCgJ1es^?f!5fVcC(_0FBAhnNW=0ycZu?iJoY?>kx^~gi~)OsyOQC^tk?bUg_FH zGuLtY?Em%Nf9?DA=K6a%$Lnsi`vvQ3&*n?Z_uMus=LFkxi}SRd`MUFMt0c!;^Wapt zNu2K7rQH$D#M1i(zp{Y1u6aR;x)Bu%GuTvSZlX(2B_Amdep5A4)oW@~>35fDe5xCEE4 zM$E=+j>WX86=f~}7YCB%^jMjFv4I7$2_sq=6MPgQ_SLgr*K{T1WQ@NFqfwg7bq@O2 zw%`-aYSg&j(i2J+1=IZ|Q<)xJydn7!fvev;gNpa3=$4AcXT>C+(#R@gNc|#Sdm`If zVE4ArRY%|zTADgxwaA2MKeQv|>GbQNH)A_H!B|kJH69S8__!+i1C#zI*)C;ni7bhY zSeA}u3tb6hI*W=5Jsu_m*kpk11kG4^ku-JGFy1&ZN)}#!U8N9qbW0e*>I;W$*X^0y zXNFwYN4L+-)lV?1`?!T^ob>8N`(k-_MFpX*Q?m;$e5Y_Q%0i>0C7FRx5m}HBwNb3k z0f|}ivIrwV^|QN=4qo3XVqX9J@q`vfKWlQZ{)IY2ZeG|CoM*PMCHAL+J%JsR2QK;x zNMiipDWhAQ(t8opJhyfWZK2|OPB4zXIvI;-&j8IZ^d8!d$Jmt2Jf3T#dOha>D+!Vj z8~Qh+GaS~s6eR2g$~&+T57bRu%iZ-mX0>m?&V+$xC$G0`jiI98ITqD!U7%?vRv?s8 zDFmrbW$@QXVl&?Tec-4=oL!?4Ct@NPPb}lOqrj*&p%`dwmcU+&eTGg!9^z{=~-3{6Ji3wAptF-@>FTY^`EB+rDFTbU4<$wLZ=*uZcGk2%>UQ^k#WQ#$!hf}v@ z*NsBvZ4Rr+Q_uf3%W>!?u$R@hEF6fI8n*Z<5@iR>UZF0&>=5%4V0=HLIXG zY#7#?;2F$u8EawdEeZeb5IAY`B*j5B*imCz&U~RWgKA4|NLAph*3q0bXHre?e2y2S zpVjB+B9UhYWjP++*FhJQyJe+Ty)MlG2)PXE7IhPFaVt?3&=*erJIY$CE{cl`(nyR( z&NJh#i0y3T7Z{_CwsRE#VD_4cW&G=k5m)8Tg>@9A2me+V56~)w<%+Po6-)tuWC z28B1=Bw`w;-J>C4KD*le_w*~Kjpt{KHTt2D#%fJ%k&ZX{s7=_UZgy=4@B7Q} zFCX)l*7oMuV{p7zA``WVIG?mcd;mKs%if>0v3levmx?P$M3i-}#vk6jBhDL426CLo zvXH;mfgSj})9_dRsg_a3l!!>G0WsTyTF06p#rp{W{(DNHJCO#(6PTZf?Xp0nOznZA z^3w8qOg3k0Pj`DRy)3kPLm+9QMLW}>wC&%*)mRj1mg5-~zjY!6+p}?x<{-{6M_Au{ zTi)B@ROqHCI$o_UPnrx;(}+kBSW+a%rb)8fwS=)g3Py=hM8b0;R_&p~2LyIeMtyz> zY*NsiUCU<@Au_pFi}xZ=R1G%|kN^lYgX>%VU7ySZitS z%p4*Y3=)_Y6vr#P=M&Wir!;GO-%ltb-;(`4&NzsqHIQFFLwcqChu)TPuC6%R+{Z}= zN(5H9Yiav=;QH(Hf@hvCJ(a=|l)XU$$1F#+vRn76iWlUC<1$l_d2~l4hL+YN(PwM7 zvZQ@FFzS--;BnxEUM?z(=!1-Q+4onM`!RylVk2nr{5_WO`E3r%Ke^FP>^eTfc1?nJ z2sI&V;)}Bj&@`*(So~}WL0&>J(W&rMPP8y&Y0Hw`Xs+2ze5r95LgiPn{X}z;2vbu@ 
z|J1oKp~PTD!)84@V)!Qatu3!bP2x__0Qy@0PT>1!k$T<}4o!yL>%OB|4mrp%-eGO` zmxD+V^2}=1su`^}HWfLPAm#k2E+B|Rf&kl?@9UE)`y?u)32zH#TLE!Z1Mujq;~Nc( z`dc~ECfmXI6I@akXBQ;Us_kp?;Psgq-Td8ek>5v0_< z=*c^=9~Zom9dBYZ9Gmb$TLr4498D{zCamRPkik7L#<;GCUf)h2sV}^ts2?U-7*+A= za?&kdzXX^CxaC1p@urTyb*;km&loQ4w8 zwLAN>2|eAt_u=l{rHego2cQ+#VELmoZAG>Wf+9iwY1cgZdyLIF?s3cUsaTee9of$Y&O*2F7017o8qLQ7fgoXD2#Qnxp3^H&%) zK*uCoTlg^TjHaeTTei3^dV9S8T^24VsG94I5jSEMG<>KFLT;!ChRc%KcnT-ISqzk| z4=oe$uiSoxyA6gj`V^e7g0F8AO2{79?55Ly#SM3#z3aL2+It^&`(=kc=HBFuup9ld zef#6e-Z(A?di7F6UZbwVIc;?srp8ctkh{P+xes2Sv9{%rGUFqE3rdzkSiq3!X#6MC z3R@yMncsBUpwW>A@uysOlFirFp>J*t;q25>Jlell&UFMO0g^wnQrOyn<11e zD2Au6`rcc>WK~Hy?%vx9yM_b9j^GAGhPMWVJho-+YrkHl$@DJ$f?N`IL*aLI9qX;fz4L|es zP6=ZUpR#egblLjsd=2CFKM~=6EorgaBh1=d1&BQWdR!GAwWI8Qs0=F5_*5dx9$tgN z7^gAAh(`JNLI57NZP(|SoX>MYSBlGWEZe$BSH@RI-L<-iaQ8DBtYd2_JfjmYlp`i5 zHq0o@dF7TPyM3p}qwUuKOx~LO9v1T{)JM3nk%$Ca0=r++z&Y}PCa$G1WH9t!?-SG$ zHdGC2?rBs~n}LhJ6!d}_?VLa;+EgEQUFt?1znMOsT3nKB8Jbm*Hm=GBgB*!h*ddT( zy*Q0|Vo6gz{U-YGvd~P{Tp0oRe@k8iPaY(9!Mts`8Jeg@#)Ay&$*7hDpgUFea%u9_ z7N{p#LVVqK#86myMx)Y;;AZ{##eraXLr=H*dTaPd9V?u{~KKPe@$0 zm^e@)KE}d+L?psNE`F0668(9Kk{xUyh6`H~9ztS~=4(rkF78o65^ScCl+Z&o3f3Oc zbQ3lT88&so-Y|7XP&fd>R=Lm6OJhsjEvI#4Jftn-gJ)u=~ZR~e;ZBw03RA6v`>;@C2a=&W(b#yn3!>pW8Emt(#m7;Z_5nj zB}#&N2akN%sQrb`Uq&R5U^CkylA&uWi3to7)KxU0DUM}Q#zrULkpaOZ@{;q4v~Ro472&@VRMhy289a4^r)L2`k$hYQ}>F#OVTMN zpgog{fR8}V*H2?cF7%B-(DHn?AxBJihRYA%n}73Pw;yU9d|6)J5g^3=!%}_M8oj;L z{Zjt_)U;uFQ*fh8eaD|W8m!fofhT`#$UypxyIc6cxS<|adXgjp4A9|KTt{fxhr$0Q@H*#R=$3i&+H!T9Xr2KJF zl~g89`LE8>Xtlv9O>J&N29e{(rUkD#C7g&W%GY}YkP`5DbV&12iGuO4Dr)7z&ZU_G z^UT%GE6g~(p26Me^6o)!Dr~}XL&<6G@8v=jtu@GfQ1S8Bn&!Bujzp`D z*YI9(>Aw^RC5p;`fC{TbzsfT6&`PR3Z&kRq_7Sdw~a%8jb=2R9& zvla7&_x^^R{jW|AEwrA?m5mioj#gKFF*gz|Lf6!Oxc&|IbdjMAVj-0cxKINChf*?WtxGHSRU^qtYw8MFWERRH6ez`X>X657Zv=xIezk|m z?D(&T{v6DY`JAb}%N;!ZCS>&STF8cY z{+5AFt=o0K3pAdV@Xjfl%c%hd$a|j@GKry_ybjaDM@H?n24~s7DdW}RcNvJZ5^U77 z5qq)AiHxd91ym?WFv5u*tXnPM$#>D1GWjXpW^rd0AwxVNbO+q@Q;^806MveOnoc4D 
zP9E2rxY#Ay=Y68MDUV@M(IBc9_IW%QyULHD-^2us(sGU{#3agLsdd7N*%KYZ=Kf@( zZS*WHs$$I(3lLgZ6x8rgY6HkfbFq<1u&WWBY?TcT$7tr)UmtRM-oB3Vcc&6U5J`Nj zTIf&l&6!GxZQLrqDJ#NvW^#?7KUk5=`1ks*za|K^*acYGtzcVvVAunIZ!wUbU`IVe z`uZB9KX1rH;IY5I7u~ht>Td71-JIHZkU7ecd`r#CB3+(aFey<#zcm)LVD*cNqUevu z^VgqH$6S{|?~s_GL8gP{Qz|w`?f8IMOdja=oq=Tky*;c1w1HsPpMmQ0yK)j_GvN-N zH7}wWzzqF+k5Xk+w}wM*zsWx_D5H?^a?r_`XQX6(Q%5I|2f>03HdP79<=>GeOeRJ@ ztDA%11j|_Tf4nYe*2+@=9g5-#4oZs?0rOP|4Lbno5cZiO*%Ok)4TAZHOU!mA`C>U4 z2toUhJy8(b;DSNHK(0l!!nhx`i2Gj(n(~uJdlE79F)mVPbqDhn7B_QeSGb^whaiZ} zDJ`WGZPmOY3>BBwn+9gh=|HfkevVD6~ve9zve{Z zP;~4cM}`4D1@`Cl%GTQSHxi_Gym}FqSJR9Gz2jOtgxW1UKXbHD7A=in(jHF}Fzo%f z3d~B>wm;%g&BY0n#a$!Jr=YTP5GKL$CRd+JgY38XyBq5+2!-k)6?ja2=qzv$5-_cj zVg2;}&@)B=P+=o$pw15KglNh>a^4;&48WNJ(LhtPk!*Z+HHu?$l)5}dl9n6l;ag7b zZw;DneiRC*XUT<42>-q)VWx?jyqGFD!3Y)R>N7?-s1kFLPTTu0o?7)!=BCyW;lkaJ zicAEL!X0J^<}$3p=S!C5!G(x)09f$ zpVRf(us-n(A5*yW-FDE0Gu5Nw#3PNZW05VtSl2)OtO#3G^&p7>OKHfZv^}j6 z%g%#R#zoP7=j_}o_7udHCWMn*4$u9$BF=`amLR$S(c5zmE-UYFDuKuuPP2fRKXxj*UGqI0CEE15C8 zZZJV2P65KOqChXH;ufoTV`$z1giFvcbgg7#u-t{spSyzNcrj zKl>`C52-T|7(}wIIWzk9GMFTKIz=+*4_VtGvqkFV!94~ZkeAzkeMd4zpzX?EFJ;kk zLzhfvd6m{O=NMOIloIxeN!dyCv@gXSNK)lsf*%1PGebk=P<|LQB)Y+H&m_ct019~DT_~|9FJ|a0xw}M zI`$t2`I(MBS`{*sXNy?xWKGnVG+heM{+hThE;D)z@FS%@Wg6)PXp(1@Mm($e+QXfS zKw3T3S)+Q}s7sF10|Q|r_zlfzk)XBTFc4RYm)`ZamgK)^VY zEsE`($xEgVaMJi$JDg>0>}ed!xsNoZ){e+GsVA#oXYhthqQczOQmG0&X#^~xX~R!! 
z6doYvZ|&a91>3C@|2-pEFPx^=HA)MD^o1~HLi%xGW^1C@?F1+&G;8f-@r2OpaQJDx zX4rNHr^1XbfV!X%1$(G9io(xG^OrZ5|4NHvnj1tNCB8GiS?xOVT5ukwbLLQf8s5)e>%@ zxuOGfzpISHK=4TwUVt69&K*2wot5`|QsExLQ_ZCtEwHqI!KMgM3f$!;{jDveM~Eyh z4cs__OyutC@3f>!9hYp2gak=O_(M0f*#W`g8gFv?FM=YnHm#%f2k>x&!%5V*t24ib zHU;F`Do?3GxuQ?uw${iS-Njx*EZj?@+|X*CKZ0zliSpHe#1Dy;Sb$v#A}R^LL#%D zSMVW@vh+rZJqL2vXtBmGj)Q`zy)sl(DN#x?-k_O2chM{H9A}cLu91kUN_C(vB6Okn zkA9dTDDc}Syl|d_d~fpu*N_wGgC$GbI4b~oPN}Bm{yv5^Q>1XR(Lc;}s$FmU>+|`V z%4gQu=kNaOjwn}N^U4#Tlp!J&!5UHm*RiL7od|IQ4OAe%_x_UhI-bK8W>{A9Y;eyN zLdPK756Fxvlt@)K7>BYURYt_34}y5?%ivZY8-{2x(OZAhfv`BK@OwV4^MC^+vnYC= zT$#0vTXs$~=4Bqu*Lpn*l_^FLXJrk%zeR7AT!FU3|2RSYKET|^^mFTUx!50(-t&Ax zs+J4J_Qx`q|1W12@f4YwC{NNxA`$`5`<_^Tk?ykHWQhyuLqJQ@EM5TA5arKdM?UOW z5#Sve^A@c|MM!&=W-_w+BLf?4Gt+ua(EXopo%4rY_xiPjhax63NWYNLl3#nd?1iu; zw@`WtNI}hng*L^bKS7|4DvN|=~|9(Q&b&#z7QwF(DRGl0cc{odu`=x(>wutpI zkoxB=pQCgl88zlzn*bm#cel2ay*=Tg#V0#(o#dRgtzHU6EcPd>l6iy%E=bkK%I|k) zJ(MO%_d=!bACl^YggkYbOeFMa&hWhn&AOY{0eHHm$)AqNG5H#bZUucX*$aD11ULvD z95^7{eUl_iB5hte*U(8lFK_IGnU056tvZ5_r?-5U!onpwxU*7Zn&i{P3bG0vKKl@pD){?>Ne7(24>P9`RxLEz-RNe{y zs1bsAPg?)ty8Yd^!7rqvEfM-LZoO9{wBuiBh;t;}IeWp~t|gnBQKpLVnmt*e z{RomMRp_Q3Uh?QJF4-mk4|dp8p3bdqe3P8=R5hO1k&g~qz|AXBH*n|FH{$#!$1nJ+|Hfj#il9w$olizAoR4nP=uED)!(E4suQ$>&Hbl5i2GPL%F?rr zt5(fiSd)BwqT6|jQ67{{KNxa6fTj1H*g{^ttI34*vS6SML>;|W*EV8q*wrLUirp_g zTQir6Yy?N#$9Zz$U=ThwM^UBB%F@kHn+{i&=gRm zi(cOMnZP@81k;-PfB>KXO@ z5Gou3MBAxs0(ABkZakiw;EnriBd`M4>G-Cf>a7^8d~FvygfzyG_7n{YR$zvi5Jw^x zB>*<1x3eN8he+X1)Wr>k#Ti2kAs|YUOPgTRNEcBfK_*knmIVQ5P@qIalPS7(INN)+ z-mQ9SdT4JSzuVQl=La}ybbPJu>F)jEcd>P;cWH3@w(;l6?}yvPwjOvJ`?$^P=i6dp z-{wSA9h0BYmTn1Rn}Q&^$NQty16J|i`kKi>`Nr$3^YOt7Q3Ij(eS9~|#lvbJ3tj|; zhH_<U&IZ5kVECVGlKN=5Z8glm|P-&FSXJjlnMbwUP8UHKnz zdu1YYvdeNafu^06akx)D5tz}NG*y8uOAmMOHR#I_a4$82{fNxL01E*0c|aprb%*|D zPW`0kNpL;LGOUQAR^8kpacVP|hpvw@sw?`izu9E;{p5_8AkkjMo=6t1o}Z34SrUc!)q>80QEp7S8GG=2 zA30vCIti&}s&IYOHCS^v@W>+PY4SYYBNseG3oD2&DJt zvDJMLJbBRWg1`A#Q*FYv26e*`IEibtzeNN2^m*{(zv8H;pS(51kw;#@tp2WJWO<8` 
z6{i3R@uaBY7#Coy5ur>rwInrum;d`Z_cc(kKlv>CnefSP_p!k1GmhuyFNdG)7gDRY zu73~keqG-&h(^#MN4?sq z2yc3tH(Dd160JT*%j_QSD|FD}1s35qsra~OsK{^@s&!;jA_CHyjuGpvwd4zxab0xr zWbsKsY;WsyUShG9N!sef6LqqrQk6{udH;@janM9Rq_ab%SS}gpi~@V%PZBF=1msCV zyqGdX9ypT|jb|IJ+a7o%&V|3Fv15_Imz}jyWUzl*v#}<4PXf|0EN~Bv4=-wAX=5d5 z@JFi}JLhs&OpftutYe_@*Gbz@xWteUCz%gso9E46%s$xnWlGj;*E<#jySh2uU85hI zJVi!ohPCsao<)QFQ^m!mIQK|Jxd-k89spIZ{*PVoUOcX+W7iR>WY`5`@-87?c|er&V_eSKkR! zd4QN>+~1cF{id*$k#Qb1!qoL6NjMJsN*7$^)%0`P%dIebwKhKe+51!{of=4KI^I8{{9OaM*wOEWwRSEj4DN#@Kc0q1rV)7z)G?ap)viS{L zi)MLowiJ-P=asYTaF(B7llnPup8g(pgb;+jWIgw4CpS%{k+`5gq=t01JIBsW%drzxb>2zUlh*0&5i3$KM!ReTe(f>C@UETXEmoO zTzO==4hJ!3Hd$<%uS}tY{{LeHkI%XJr6<+}?c)Dt%RHZ?2ZbN$uj~y=54T>xRvNM} zw?k{IjcNa$nP;^(S7O4k&>z<6dq@z6$&_2WXOYU|-C3zV!$HnGY!Ju1YySmzlApJM zVZDz_qxN$OR&9u+w$A`CR8fWyqEU|QLVF%4)SjskXELg0hFY78|E*)-U#FN4M+Fw! zS3c9yEkk7Gl(w7{s+L8d;VaJ{9gz~uz>9mz$sC$g%s6Nf!oYJa5gz#yUyzb(Z*jld zlt9@8$*)PQ0ml7(2~@s(AUcJ*$;)oxgUyE|PjvRrh&m}os#;}op7O~BUIjz|6vWY? zj?F{&=QrTr@K9NPB)#HxK!00vw1NF~FUfJ@D$`A8PmL0->26+SWo%LMqNVbJIKw`(qdadPMH zx>;htXJX)GD7s+hl3kbY`N#pDkuN67jQq_nX=>X>*NG)dDuA5jK3xyMiy=bf_41la zf+n7_q6&%R0!bS+m^_+66CZ0qq=wDb=-)72-*X$$zz-%C5iQyYm3k+n7|Oy`B&!)g zroDWbUTG?j*PsmZr0cTJLt*Dq3Uc#jc}ixW2RfbjY&a7Zd51GjP-s#!C5Js=coaa$ zM=sD`Lo{imc#;U#Od2FatBg=a_w!nB<9)-TCb)d=S+X*~YC( zaikJyGpN`sQhfFKOMwN?t^|-T>cAf51QLrB5liD6#Yic)W`;H=i~0%jYf$Qgw4H#kc-I)qJ>Sk+o$~5RnVYBE z<+Rm)r*pS%_GORKdqSr$b6l>8`8>kGAg7^@V?t}L`va}Zd|^eV>X^;wOj243Az`6; zrzP;?yH!vLZh}Td_6aks6_=fGG(^6$278S?x zV#b5B6NP6*Vit?3>+)k3K}d|PBY~K*hCT#ThK@+ZZXmgDcs_*zCpt@ye$xekZ|A-< zT-AKDzNc3$h41!v2Rhh@Jp}Irl5Rb%d zGzobvzUc}TVFiks;C+2l8Eo=|U{ACWc`HN~e^F3hdQ#e746N>Q=!dwTm0mjV%d=Y0 zo%|P|IBs`am|Ih^EW(L7M~ib;NuIN)bZV!_79xE7Bh;ZF;?g`K>T z@tQLEqLNLNK05oD8-9wU<+?l$)Qqm3F{%<&m%LOGEHr%l&hr;q>VW${2yRrXWwW&n zE8_I7Qad-$u4#z}Sw0klU;n6RiufPfizpdrzNa%i7;dIB^{{gc;6*`hZ49>)rG^*c zMHA`9xB|wP3!a;P#0b+sVlRqFFnQ<1E6;YpyBgZhAu8+X8>^ZoOP87E8XjpOa3ys? 
zNkxSK*{xZGbM<**W0?YyUKWX$?GT8|5{}efxKiETnNTCS9a!dh!Uz9lqs+fE)HIxq zR-L_~c4Hsz+IgCIu%srA0Y@k2Tl=Tn7aX9S=;>bI!!&XjtT26kTadT5@;W13e~Iry zI5olFKq?+3>5>R^B!xQvUxFI>Ri<`QBHRCXx%M1ov=8|ylO84aM2y`a?7r(gg)k0rJSebjOoDThA zX_pObWXY}DfU^Qj7Ss?l`>^4UB0++2ZF>Jy;}nE|u)iKnpb-V+wqFsV70ZZYK+1DY z2sRF4IM%Jtug1jKDn(_gRjc=5qajQ<9_dOlk;*5nz@vj< z7gKaAj=0(5dYX5I0}|D6;WdKyzNGH>DhNL%5D#$2Cd@TYq+miKRsat#eWohAMVZr1 z=xUNIK$zhb>qy+$tw`ZUOu{Nf#OzxOOp~3|3MXjwhH%Z{6toJyms$or@cZGi8+XP? z+>`D=OyV3gFUHwo%m)A&2Yv}65$Fu1Q#~UheDHbW#~-pUG)r`gp0w0^4-+_rHpiAh zm~w>x(X69}*W*m1f}b;0KbNXrMh$(>K~jC!Wm=KP57~46;MFb%aI zQ0dd@&Ny?y5nW;KO}gSFq^1oNz~^{db0WM*-!E<{U&R?Ggk`Xk~-b#8;Zx5=c{tL%K|&EiLc za}(2~WTj+zgsQ*N8tgCu_QrqNAx5L#R!3fh{skEF*`;u)RzEE%u=cAf6K%YKhGIU0 z=f8aBv2RJk8m20R@y{$#EoGc*_CeMj11W=rXG*u7wmVrq5AtWYB>Q4BdWJKq%h(yJ zd4ox~H@d^F&0!L0@UqyBO+!VIktR@JPUnNflHtjf5g;PU69(d>?o6PMi%+=2U8CFW zp@jE^Wto!8$$}KgL#~Du+@)NfO70;e!jP9IWfy6zMJsPXyjk+j{3(Nkhe0j zz@)R@zt1r)U#H3ANez|i1Kf;jMzGC6s-V=kET7%3fs3-(W4^M8T<&tPL*#=xX$qYX z&$t0HeIO%c5Awocg~#{sCgVnIUdT3SFhGiK{xV-(`deBOR+}&nk6M&CE)EjvetRhN$cw*rh1QkzdE8uR8Bi9o~iAzMq z$l@m#l_fyl(#j@Z`|Z0qsxd{jWK9f8y;np^c_Nr?6a*>Zf`xYL4vX<4y$nNVDlWzZ zH*_|=g3NNb#@c~g&ki=tFjjYKtoghH#&)B39yx;>@l2Pv9jVtNor_8zRtAK; zn&QIVj>ZYMR2g69?8-3;wosLC{}ms;ah7U}7mxKsd?XIbaGx`#qYpf1Yo)@3IjL78OBIu@=7CVwI!w4}hGWFyshA0?13 zIdjVW{FnO*$=}8ZJTI&H@+mNZQpZ@ICLdbn1nDl_<^=HCK7-IVe*CU|3Uel(tR76p z#{7d*6+W(DtN@*t9xxKrkfxynIT*JXiV%8h?clbr>&)3D5f`f`>UW0hDX)`aj-BUh zeuy;$V+q~B^qS4VUrvvmfQuJ*meSa}$t}7_jmLwSy`eIUs{kRvh;>S1!9H!1&qlAv z7N!6+-|tT108U96`N~$1o=xYK?>dbyiDc2lG;jY^H2xjVGLNGwz9Je&u*f$&Z5FO+ zq=@ou2ea24sY4H@yp&8nEDy{U&eMYP>x^qi+grU>954TVP20)d zVXk(LOPUr{bfr{txgO{BqxrS3YFbIa$!eR?+UX*U7gmYV>9HC>PL|x+!VKJUjM({kiWy~FdZ{N{BW`DkO6?%Wnv3H3PNHZ@sqhBaz zn=Ldka)z?+bbG&sa-H9 zg=e@;n7MN_goZ09B@jy<>V!rQwyI7~u8dcpzbHRN12X5z7FReTL|j03*J{>ez>1D?m%F7`WM0t;15r$cH)h#?~ zB~}R$y2Q?6(d8eOm6S0*co(wXIFHWimKHjs*4-^>>2f%uG~o2->;`eb<;q|m507sw zIHC4W;B|EV0)fTsARKawihafQ5QlF$OjAc0wWHw{3mOJl;;FF)EpBCbOHQ>DVWUa# 
z(efjrU|HCP5#kh}+o5%wIcx35?Wh+??MXrws4C;Kvo{sD`AGUDHdrt zi=f8*AD-|5voQL^*Z-XupGf8!P5;4*?8b9E^yB}2wT8}|nQMoA_(juGQ72^onHg_M zwMx`ykJWeh0Efaaz7gz0#K|~=Ls-`=LWh*02k!G?qt$b{eT2b`^Ky3&N=j=tVYH`C zH^WM9=X^D1YCBa`EUR!Yo*kZ!rldg3ky_6PY#K?nPJYQw`w`~qz-fV(v%H}B3)42* zE&T{(uVXfV%qK?9VKf;qUWQ$IM9-01|MKHT?pX7KZL=%Krs9_<=GWKbABc3i8Ya0D zsA|C)Xi<*g@}CuXi(|nQ_}|7DUkf)?I*5Ly$J?98f4Opm_nMfzK>hMmeMJW?RjP-u zMfL>yBiVo;N6?V{O#6se@H$w-W$2N)SGJ`V z(7d{>V^Zxne3BT^xjQLMY%xG5#^@_`T2YUbGl55vJBJ19bcY>vd*?nciy~(*w8v*P zu26^tLB&F)0_!Pz26pVdip#A38NC};JZgup+rKP^2@u$y1pdLYGsAdI`8sZcrWL-# z?0~>0+Vi_tZo)4w%wHk454;-a&`lk zgaKq->4HuVt)Wx9+;`b0|8j}TvZKgE7aB{aWTgM0)zJ5_q~#>w@2jJji#N+{0~X*I zL7h+e^#pP-d5ktRMl_EytE%}b7e0?S?I=<~Z%;^aUwZpK&J=KlSLXfj{zdDr(d(g6 zdSw+i8#G133~ZOec^F4|eT?)|%&uI|+hF?T*Iy8bsjg=3vmCte(JSV_C*ZA@3D6o_ z7>=r>JmTqQXhh5KHHI-d@=GB16qmQz@>iz47VFmnMNj|oJZ3GMi7~uyzq{fz5+{>Y|5nsMzAD%eQNBWO- zV95@ZGrrlhLDZ=-JQUPAmm3PLQQDy*c+h(N^Twxj`0ND=I?9G27GGp;2U{N`&h{#M2Ef$2=1P7Z(HBCo17^ag6U{!*F2@>}U;?y;vzr=N`34}kPy1S(e z%Z>sGFgS6EYMWww%GXC{4inC=xu3A~15B6BB6=bxVSx?hVLqE&z&xr60Ol^=)c36g zLC0Tb{XgJy)^BjxfAc~4vyB`6{{JwNv0Zr`R^ldh7uDr2#BeW#7RqV`W3PsG_2Y3| z*`Q7BCR8&s#7HiGR_~SB0QINNv-xb8aeDxOr_mxkCTwC$_qG&-$?MiMk}g1&(DtE3 zJ6f(fqu4(IqQ7)jf1RL>mtD7(RHEg_Klp{I!ml|WEDK+nIjAs;PX)xHCud;SOw-8C zfYP9k{na1a^w}42GzlcqD@3M%*Nvl3(;+sSu*xLNxI^Y8YhvggPea%FQM5O_u=0&o z)=HQ&m6x&cK1B2^RpaC{X2n26RGH#?l%@8ywTx_}Jh8NOYk7uRx;+2;>@Sj3$A*`9h=h2;&K zBk}hz`;;(kXpngS149rZiT_Vq)2{f>ZfajN9 zYmpQ=Rx~>)xs&l`we=@_1o#U^M!UBg6OWcoi=BcLk0BT!;S2RBh5>SyNT4x~Qr9GDxg1dI>35BpYw^knVGlrNkCz zEIOb$)hU(@#&Ovg{pYzRViE~-6kXoSVOMDF(?Y;>$ir}%a0K+8i9WssCt{5;uOfE2 z*GW6DSuN8ni`D>ZG!{gTD(lKA9e_1-55c1_CMGc&$i4eN=;vO+Q3=_7Y81)0O$%(-CXV(5*+}OdR3OxF4iCWvN3bDQb_|4WEsQE( zU7i{=*oxMN&?qe0B4ki}7R)m+EWc}D5>6?cwKXKAIwt(fTrx<#PA6(!o(=IriH3s@ zC{n@LHtoJ_%oK1i_C*qL-Z344_2YRx+;}x>KA%IDm5Xn60s(qs%AR?JXG^zO0lJuD zodyvOtMh7-^BLt4AM>aZ<+b5VrrR%sk|A`X;wk8Fd6@Q(<7wAp;<}GWenj|Dc>pg- z=oO=!MO2Z7S?iNUw#VP-wJ&_m+}P*YxhZP2^r7=R;?-o3ZkuOn5owS_p@b9U#Obbe 
zQ{t$~4wPtGj8FAhfis85Oue6qSEAsEklkz)9a|BY+%NF~SDT{sC*|2IsXbB%` z^7hl^ZAI9@60Fzp1vI)~QL!+^cKBuE7K_Nxm(3YZKbmK7Q&CB>zhZL^oIhGmKx=>> z({sM)FdOD=e^*XOp{r92$9@nv=W6g0b#_xaqxC&nVUI7t45k!vn zPc+v(QgJ?lZ-(ke)<}+&b6!2QB^{Rc(CxAU6p-2>9}9@fDZB zJ-hHP#OL+rNDZ{vtvw+c>@Kf@pu2fV|6aEE(!($CNa&PqkXqL0rGbM0!nJ$Iyh z2AHFN_j^W2(RkHXcKus+lvjq{k%6;yLjxh-lD=ZD-t8i%fzyA2En)+7l7`^c%=fC3 zHM3gp@AP>}M?2}a|H%OG;nW|4z{>oBed*S1T2n{>p@(Lb5;6#M?XavF&zZ3;a+eLI z&z}uiD1w1WG(kI^3My|I(b|86#&>2!u1ZNYvo1lBO2*j;h^H8h(84563HkTZ2@NT1 zVL62aV6Q;j!Q_Fu9nX`U%}hVqQ*lA62n|HJb3(}&&|p3s4oQweK)S%RfS7QWTWN&& z{ex`chweZywPOQw5>H)!7Ha>n7HH40v$1->xAODC$~ojncr(Yi+&VLuw@CdMR`8Wv zE#YZlmLoS*#!(N+BtsEOQBP0!ASdYFGC{IM%)VuR%>6&?N#9~Hzu0ZMeFA?1ntv&u zb8qsm_?xoVDiGE~3!UqD9dVrY#M~!BS{RlD~A( z!Lv+iNaN_p1VGU3s>fd*B#_1@3_J4VOh@3E{8j+95wlu5^{pCn*yPeKK$L6Kbi}-s z8Ix9H&N~i;6^-w@v4iN0Pm?-73qv#qRsM}$TBUDUuc*gTBJFvUhv$fQY46JFWRxoG#Zi=#nF)K1)2<`B1h$$FFZ+gq3 z56gqZQ9ds4U-v)!uJr5zPCO3ahFwVuhreP1277LnJ?VvqWo zr99@G*8WjEwp@|Vak=CxZg&Sx;oy_X46_o#o?uI3dreUk!R2$Kg|y>SMMM!`R{WR= zH01Vs>CvB_{=tsR$nO~L$L9TL4OhF@!XUMgYJKNmZY}GQNS^g}aAR?=tHc-gC92)0 zl3(S8LdeC&gab}-JK5SC$i#nR2z=xzhp|t_S*{_5$V4i(V0C@MQ0N)iAwr9qD7r$=_$-odSQ275F`~2wLHy8Yj|MMlQ;%=GwmH zS|tknS{GaULR;3VWn}i@EY7|KgfFn)P)r$pFP^icd9bA_r5gM3>G7+_`a-i%h z+C&Zv48kZbzz&aF>NAM zo21dd4G0HcgdkyLB@}J4g?iDfbSdMfy=Ph_rHc*kfPHY>GwT}RT*9L*p1VQDQhpr$L5)zwd9ZU0H~IzT1%8 zNk%IaP@Eu^WK$h>tE8!*|FnH-&PWPuM;gCdEGR3^6j?A^ahdG@o$*@9hV3-b* zJ(8Gl4_my+vI+d0RI|ZO9^4jo2>XLJ3B;2WbBr6u(bu$#ttF_S#@Ko8zW7Qi+TtpR zmh6!-sBBagp9oG!SG+QxddAc}B2DH{W5#B6;lWHrV{YBUpiO9YKuLD^nP#Z?nhpS7 z{gOySAxU=qU4kZ>rZMIL>|uV0^}(NvfzCsLS!GQM=r$=|7GzovWg)B*_WzBpJLaEsT78fiEFAfF!bA&%L;l)yxlaE2soKUs^IEiuqvYi9gZbo&s*6O`}D&4d%dc0)HYKJ)|0Wtmu{ z!p($Yls$;V&A4fj-O#A6@eNJS)S>_e80UXV?HYG`Ip`ULNdCf-eKkq>)iwWR@j-v~ zS?TdD=6GZLF}d@-^+?9fB4K8trL*vyI5-+CKU8II9M4o{Bxzm|7X+etyY zz40Dl6f(VLM}EH*q=L~5Xa+H=YWnJ#sT8eH<~Q)4A)rJbH#h#C0Je3Nc`Or{Wiy81!QUorm~A7LLCNV-@>t;*&+GB4IXCN-U;oWb zI1pmI!vBcK==x`;Lfb%GLKvhMkJN8Nrj4&qL#@({F+h{T1ZOOV5M#>>*WI{)SGX1O 
zj4uVh8-w;_@kev`yO!BWmrsMG*V@CTqoNnXu!#Sh`<^#tyqe4QKf}|IjtDCH^rz!l2>robf@=&STU43%d(;32dQ|14f0w zam=_sx+LpWui!PW?jYOS4eZ^R76?bLycCLmE)lBD0Ef9*9&1hPbOR8~zvfhvTE{y=xvC+HdHbx$_GJ+H@9HsuNR*7kPT-hI9k5z<{hS_FB6zWj1i&*SN=r z0pe82kUXFn67LO``>@zQnOdlIl)~J69`M9DZL&ddw)nx*qis|%n0Pf%p{$_>W;w6= zSOm>50gO46s4$U47qjn?kZyH#BYc&9faFDtS%v^fHZ%efGuQ@Rls8dAFMlr1)0l05 z`nW|pyDJ4GMZVw(^$%X;5TEp-6O2<~3ONVS!fP4krU|Q2_SDBIO8h0lRuh&sQtW!( z&0y{{L`cU1S9?4Q?)1sTav_5QYAOJ8K-XC`zQw+()2Eu1Jgs-A?w>sMvnx#D$MgMy z6zK}QNQ&?}&`!EO3MbTpy{WxBf#o{Ql9tmK8{71Mvj9j!7KkHAw#rd5v2M~@_^qUi zASep$Mhz+qWLZ$J5$Gm5$EtZmnUmq!5^4lpIP6d!(4Zh)S=gl$_!RUQd62S&8!oZd z%{c~>>Io-I5}avX%O_3r!qa(L%}D+kvrAzZr^5?{c(ay$?h=p3w!E2C+l{CdyKqqXWG^gDi|s&2gEjY=t0fI45a>swSikR6~b@D`YHt ztGF$Xi|lRZ9i#;}BYVnZ7JDmMjGJSOEAP1&G+8c{rS=-XZg_WptprkpyKue3n0(gC`=X=nicanZ~0PU`)p zMJU4+j0bWXu0&(aPVOhtRb4FSR@F{I{~C$AoQ*U%<=n8dZ!P4%BRc;~#jq=KZcR*` zugn@%#SM>aWpl@!*&&Wb%P65GfbdY|Ly`#_1k)W=gA7=;8mnhnvY!%D zQ!$8-uo{r;!oZZGi2|DSA0VW49Zu0B7*`c+GQ@W5h1ykYxQNN&R}DpF>n!2fdg^TO zNY;LNP*^RVQN8myk2>FE_~d?QLG?YJ>}8sVr^zt zv?#(Mbf)C~RYo@KR|6$3Tol6LZeNKn;!95IH9}}sMhY@SK|C$Q#}H8HlAq)HZ;TGd zK;kV@Y;W$`S&fZ__Az-{=@USA5XA)8VBbPao@GL60#TY=L>>_4vg|4fQ)H2L>XsxX zm~i^0jJof9qi>l#+$_Iskd|({9zt-0KzBWUj|3(!TX6l1BfaqW=06LlhK!_b^dRl; zBFpq1VGsMHB@IO+yL?f}3|X&E-zJCh4ywCK80b_QBulH7thOslA8aKf_MW2VnyaCh-T-1Gu zptv8eT4R%1MO8`IT6lebB5}$9dDaIparB=!ehd7%m8y{6rM7tw zk({9m26!-&Ksw&u;W~7!&TJ{Vn;5s*$l9&0h#4?PTdT^V5C;7>k(r1G@(5k-{T3nU zc37cdzvY!##UspcWd0|1G(viWp@NYVtzNNao0-FTp5blZ2S%s1vecCMqur z4)ui(m^-p*vKrrK4e^r*o{^Mu5Tl?x3|5MZYoGnzjL=#_sBj z;+|m5L_MXegE$$Vm<-#e9CVkV_@`N(;enW2HyzxSrD|Araa>qCo%#c?{hmXghMJuU zeXp+AEF8YyI_vP?wzv;~9o z;6FO2lPCNWJAXm6ljl;Rx03;U0@vbh_wJYE|0<(*GnS*>;YBo3)KU^vZ)=`TCuOQcLn_lSoWSgDe@7b7QCO6gFMYZ2iH@Y7t8w#QXl z08jrI|7f?TWo_JNivTQdCAcQT$W#{8^LeJ_;R2IFqy%r;>JSV>cwtj?OWC<)?@;(y zr5a40>XVI3a+#Gb^#TdiP04KH(ol$}8GmWwDG6!Bsl-ivG~6$oc#B zxdIlD8EQeDu#E0#IR4(Sp#7xt2GF8+(Z!o{Ry8{oKUO$qWG3* z_R=st%k29hKTRZg_%R3Db>W?9hHT^4x+LGI$65iNtI_}9!r5t6B78z3K7)1pt!I;K 
zXl_CYUeY-y%P?PwIkU+9PrnHKtrzkYmm7R$!?ZIHr}Rb*V!Mtk(=1^$ub)~u(th-x zHn1#3-?;wAsJkoScM-01`s+dO>p#99_f$V@L@T@uzMjr~oC#gh=f$=1YGRmMMaw{W z6}|l;6lBQ5S3*L7UA*<)?#smeDRQ4iPPZB*T(wh2&5di|G4|0C9G@_gr zrMhZ@aze$k#R;~BrFO6rJ{D2mD$5{Hy<3>H5?W%@RLywxw_)Y7_;DMI-gy>`4GDwO zdZ%a>suBVq!)t5@3BQa@$&{NfyT46b%=>%PF6En{%kav2+i1jwFd4Ln>$Pm`fLQk( z6_%Ui*fil_yReiKA4BAvPuiI_VlzhejIe7~(G3-%con|e!8T-?4BVXaI3$iqEkc0B zE_}23QEuk_dtN%8)zVZ6io^mJEcw;Co5fk`q7BoI{a3AZ26}dbj|08e%t#<_^8bfe;5$7SKl-oD>g6l^I;TyCS`VX>!iG?j z0xwg6gW>XEW3egj^wMl~Mw2^a&) zK^-~Wkc|q__r6g2xyYV6(_d?9(20rgy0(bm@J?QsAJJKuxG_`7SQEU8hgRZ5?`iYg zy5JqgG^SiVDfgTeEGlF}tsay2MJhuoJ1rv7*`fLiV1y~8h#+Nk8=xCZY8)u4#=!oK z2?qa`Lu)!XR|)3TuQiveTEsOuBjGoM9Is9-|9Pf;JIzx8)?x86-^jS;CYJi zZybL50?KX0XAobbIg%Y8#`dtwl_%c&kzfAMFKiK8;$5`g{n%44e!9T8UaW7VN&m!O z{)paABY8&v>4Ymu-2=ppBM&}!+OwP~v&RS|VLIXFSU)2p-yYWeb7 zf4*puT!0H@k}wp3BUvN<NQJd29_n$=F}c{QwQ zjH3Q9hgvdA)H+zQGfCBwJf{hMCRRJ7pOfMe(-4!ypO6(Gongt@sgBsbEXT_QX=58g{@^*;=%n1sklXU3xAU&c@tm`DE3uv5M3DlC|xbf*7lPN{u0=Z#A z=W7O8myKJ2yF=luAvBNJV4rL&6R8Lmw9Ekb{{&Kj;DM%CutR%{qXS0=jt(3hI6CmA zbl`X+?M-R(5#;E=(Sf4_M+dqCY5^f0MF1yEVCM;=G(4h{ zA77nej zVhskl%gZAu(?X^ZdDRm#S{Fs;8q&~Ww=q+pD#?Jn&=jRTBP){GDqI}|a{^?){H8L} zN@C5UCCOUPoNW9qn$k32$ZoH;@`R=HU@O0O~et7%6o(`W+Iaz-?w z8k2-9gNZ&$X~sJGOEqH+)$or#!UP(UgwRrgtN~<>avaA5$oa3rz$C=FY9hbxJjn=| ze8y!p?cPis)5INN+*eJQPL4vUW_3ta+a^njhc%iI5 zm}|Sk06rbj=qlA4|B9k;7179i>?1ch`2`H;X(G6oSjaLe9f1be)d^7X`4*9WVuP?5 z-0Bo`nrjeA!!+<$i&&TA>J&~4N{pUrWY%FzhOFiRBu$V`V7&7UO%}&cTtagNX$^Kd z;0VB`o`e(!-bAr5M}NXzea>$XmYRp7_}KLJmA+_63wzkomiYDvK6Nf^aqn2na6J5iPtv7|hyMkPEpOnbUMG=dl`$5>g{>j?;84b=MScN6dsOb-I zfe?7Qd6a_>GVz5u|K#YzMCM_SgvljJQXb_|9d?|G6he9Cywuz+D#qh*bl~W~(Sf4_ zM+d(C4%D}{ONfp)(k^Mrp*lKnbl~W~(Sf_v0q?%p_-Lz?5L#p-q$Tp~#@MOtvf#CV z-S!LT?B~3}?5$jRvLW*ZP)AHGF-g_|lOI^DkHv(B8|9Sjh$~5E3khNyCYw|QBeR$Z z;zU9}EM%+!&Pcfw%*yev+-_Z5_s_dtPc1^1}MqGmM%*0MiQ`wR36!)cV}I# zr#8Ei4oX!?IO%O65>dGPM`E!iLC5X_;iOhFtoR|+oV7dA7^4JCtaj!&aWUCbk)5dt zw8$Ax8sM)I(=Ve)aDfTPY~LtrmJ%Xb$t(bZKWiw7=(doVgjhXK>n0ZNW?`x%@o=`d 
zyE?P-5eR{0B!3S2vKKRh8ga~UXY5Woa=?O7UOt#Z_~tQP96*`eQ~>cYvI9sdb>3PY ztiwa@2zJ(F9L2O6Sfg|dzkw690P@IRB@BN43wsydSyyz zr{jdA%CQp}06aD&Gf0Giv}lod`ICX{3g!OrLl5Welii<2gK!yxUv!%o>$Yww>8*2H zPP!_97y=u>aE>shAf=BfbF4d(C=Kiex~tM$ii~odhe67WN#hQJkh`p;5k9koSO>Gq zX95j=70#{anGEy93PyU3VH@R6SeFw{%5mHY!Gu|}kc6=is&$Tj7*^Ar<+!Yli97l+ zoeL|otZNRDFU@v%;;v;hng>5}OlzQ>7w@Hg^5Hzeb2aFhsGhb<32XmQ4Fz?=JV60Hs(a|WBvD#RO3L!(KO*oCM3Uw0&V^Kyi zv%JD8ftZlg987NbU|gc}DwH&xZ}ADn@Q)d?^5tO%GM2mO?Z-mH-%q3qFgoVASgY@T zHfLgXX@DB$*0}&YoGiw~XUUnuU8 z*-re_e?Z(M!b=&@3I^4p2c~Y-5WC4^))VKvzLf zj3*<7TIE1U)VRl;I30_l14jpr4jdggI`9qb!0|@fH?Wxp)zN{Y14jpr4!nK`wDs~< zYePztEs%F*y(jCv6DBrUY+l*l^$yI!1`?fcW$fJ=n>6BOCnh^f4%+lSZYASlN(C4}38(B0#9&*ZWF#4k#JE7=lsw<`16% zida8!N$Hf*-}qk5n^p0vY#mUsXi8q}usCkTQt8Xduuj)GNR#%Vg(_e-NNXM`kQhMp z&%-s?B}?W+aNBDY)OP)YLbr9V_#IwFCpl0+JtwCy5kbR~5PG;KGLMJLDOqiFMv6GJ zA!TVtt`HYZso1R7CjpSmjv!_A!LKFB#{<2;j9^h3jwj>62OfOjfd@n~jxr~!%vzF$ zqnvnNwiuT&#_@LU2^#Afbd^PQDebCUC@x_xL`1mJ2@H1p+THTnw^Rf3l`GY?@fC&2RTHTS1MCqW_ z9LWkU27Q;=vR-mFi%^xRu>$Jx;`e;{+3&Bop zVvr^cR8q-YbFsj1ORmgob*9;aHN%fTZSo-Tx$+NjL=KvWr;Gt6=%ibl#Wck4tEq#ZeJ zegL)yPGLbcL)Dii?mG|%?Py{%8pC3_FAtMQr*ke3qSA6 z*l(d@mlaKf4e5?&3r9pdSm!O9H*eDEho0U6JC@l(Zg2%@B;m5hGqXZBW%<+(Q6yty z64**(16jMlxRQXADFVA&Out>DTkg25zXl>k?6Ug+%&KmzFhXbY}v9*pCf(&=2h^fn8lggDV%3Nnb!~`{%Bz>id zHatFt*io+VF4|RQ#XwJ99_8T7!kl8HsMEZt?KnTj?Y5Uv{xUvdj0zK_=-d$fh0(oIhuU}MR|f* zUB-8WcxX9Pkpmd3gaL$bT?V#dZf9L-!xP^Lx^s*^7@2nN10EsBbsmNZF`7GgiDB?- z^xL_d>nF+`+%FBWb4(^H$p92|7|s)r%K&yHG0iL?W5u`bjUFeHp*r`xG|9l^KEl9~ zkMz-8`q9i_;y#X9CZ+QGV_nNF^AP-*xyFy}(1 zhaZ0U#TW5qm~r(j*>TH-)`F8sk-e%XyAwfgF5f>}jwbx!X|^gN!6D?C0=u*E(ktl& z6`=W}FwpFFBs8$xBy%O;mB=*%-&pX-iU^6(-vU6^@HYipi-l0WU=TV)I>SmRzWGLw zUqYrhR;l#DKj%0BVkf?8*CW|<#L5sYD%EF*nbwKE;q9YDN^??N+9rj`Yz`1M(zWr30~S+6o$Q^oo--76$)tk{hFYn$Q+igL_9Y1hKY5f%*0|QeR`Em>(9;DG(5x4b(aqqAS>1#{^`bxGKfW^xmG1?ZrwF!YXAf$9) zO1F*lqE9Ba%Eb{hXQaR^lV_YEH!@06WjH|6y{3GmL@4)FPKHf;X>kU5wyr~^nf4;k8VEuS@{JeF?4jsm$19znZ zdEPPWyOExUy>rtZ$@?j`gP8J=oF_F%7IkUX-C$te*sU+ApNLG=t 
z9#{28M0D^N0fz-VLD44#io{Ka?zt&Y_6bM9vH*Z_$&y;g*y|XKMuD~l^5mS5B*q7w zaF-?%)l0!5{#xcEVtFkhd62F?>c)S(4L?=H3q`#-Cj#>X;@K%!f)}X%!ATXcIuUZP zzRw-h1i&FUvN64K(90Hd4-XqF6UaP@&FsOUb_oICp4UF$$1({3be3tYostG`od#yM zVsaV)NQM%(9C+;uH4~$Fv(U z2qT2SgIu1F!#0@+P|0*|ao^1$8O~T|8S9RGtV4CK8Pf=BVp0z003;+_=2aj04h&&i zIq#6t4mQ5yGQyfevbxkXFj47DY1Vr@JLI@0oR%8nID)LJUUKjQScTOzv^zl9LOb{s z9Fx38SQ*x2R{iVBJI4tv$2Fz!NV5a1%Qdl;Hmwdr@ZmY1&x5XxK6h!PO`srRqh)IlJtDBeANkN29yze8odlyZA7fnRJ`FDPO-b>Rjc%0wy zSSc-sKm5!m|NEyux!p!OuW3j<>-x@vYp1%T%RNRi@uP&b*0fCNk!XYO=6Xu@Jz@;}7;ssroO;l3)&PoMj& zqNAK|#tzhN_JHlGJ1(5oym89k{WY`f! zjlP%mbP1AhbdJbiY_K3%w$vk&XfbQ}5U^LJrXhXZ%4=koP6kN~Pz}hm|D+X_+!EM% zVK8Tf^B#Ch!(8E~4JInOFr}+WxKpY!=}0jthGZ7~W|-u8I00ifIBCT@uZwA=ca@X_ zq$bue9oRK0dYR1VveOM1lWvHS^{-SMb8Q%xmLXQvNQ!GJl$)d3^4b9j!RH+C0M44i z?6RJ2LjyN1F*WN|{i>qV$TGD{7M+Q4Hm{#19UW8hz%1kh(#lPysEU%!w9G_NDH(hUEu z#@L9UpLCje7Rhlf4CNa+_%w*Oc!n z2n-pxp}gL^XivTPX+3`GKYZ-@SDugK9cKMf$DjKzKQDlo*Lzo7=ylTJUe}}ZVU3ll zbQA~ToS?v>xpnc}_r3VV_q`;KU86>`>LcsE_s~NRK@6xI3CHc4J03#9IXNwprIT@D zq~<|(i%@9?SP&70E#!jpJy3&y5#YdD;kH^!FR=3wkEyIiXNN@y!|0PiqDWcctPN)B z&?j#o&2M9_;&NcNBb~`lELvIPgl%qyDm4B=n$H(lKMRK3!O4M>X9t{}RkPKv zb7bkL2+pq7@MF-N@<*ROIRCfbv?jmGZ@)_JnDOhDCcx`r?ik@P-n$(*&g{K=ZjQ{p zrVgCFk>+n&cc7YYS9ic1rhB~u+{xA-nC|s_@7MhFY}!4({z|{;o}q8-LOw3|um95e z1~SxD>T{=4qakvX-gI%Y^qpwF8Sx~+s2L$Ei+0SWG%pYsW-f1my^+I{U82EtxmYz3 z*y3trb_lPfV5`}#00e*sz#be;~^v}7F_Kwz373}xu-ZWT1vEfekG%cXn5 z8GjEW=})O5Xg)30Fa^Sp)+IB|JOGg0SikzHSFvxiuuCJD>yGU62RR&}` z28TOElvfiQ(mMBp1z9D#q#{m-L^%v6wS=CR2HgBqVJ|ss511K#5kY|nkBei5-2AFS z7=78UybSazI_Ck8bp@!YIu~-LTYXK^YW#$kGV+uey=J^L^P-K&82X@61nD*Krc{?jCvrwm zF|@jJMSmKNC?3{cGRY{dW*9o|B{-`BP8zGDG+IV8B(s#p#tO8OC+N6j8Ar{b9sH`# z+lr%!kU+t&rpPP#L_8NJrSN1P8-9tA6GDn&ptoJ>;3+IGgaPeVxzU1Cw5`2^3+<9AO8`a>uz6 z#`O4qdI#^N`5Ax<&(C>Ko}U#w(`Dr!kle+2R{ItqH4j}u>9nGJ^p2lJ`_v0h!E|w< z_tHN82mdtk?Pk4;_K*JkPk-u#Pm_bb6eC}9xOMU5U;FrzAN<+$#0n`fZ^Jh?l2+!< zlxEh8??#QY(|_ju&p-d&FK_Pf6`h2U1&FL!Pj=nPaB*S;^3W=02c&hRfvV>SdFokP 
znG3v@AU0dYBTs@9MNWjQ497%M^`9~XAO=}eekF+tXji$_3z6Zr`ds_s$ntasr!Nf$ zvXsjM9U=T3iX;U{L>0lRT9f=;xZ|StmzlS-Xa z**h|`mYs-f^qHk6ms*LEV)$wfeG(OhG_6WhEe$Upl!>A_1eHc;stH&&$DDMS!ZN2o z!m1Hr_bt?&DIt9QoAo3inV|JGBB#l!2y$?cO1tw67<`qc<8pBlunMg1ITBuEVsA}( z&JorRX(Sti7*Fv0&?YB$plJbA>{jW>DKoz&PAD9N=7EFe355dAq9p)EmdQUZ_J%}4 zHQXfWcJaCMCVYEMlywYspxbc2HKgm}xsV}EE4MR$uGwt&m-Nk3>HT%yj%v0J_^Ir_ zFl^mCw{!E-f3ua^-{!E~Z+A4+=I5~L@^@}3-CA=WORAc4S;f~6Sog>6EGICtE8^!k zy)1T>pT?+EN?*x3*XaPK-uOG6{d@MhNR55nQi4+Lrz*9#>3+SO=haUNR5x;AyDe4n zoVz~tZ{t?1b-9dO!*A=etP+&}6p|C@lgVv4zo#T;S>KPO;k%NzRXlNPJL=t8EAu2s ztN0J0*~jIq^OVp9k?q^bg|OTQ_Ip%GY+wCNNKPKScx`do5CG;0w}O}zf>>HL1L-76YZOaAR!!N! zAuj(anZFaK5mqyhhgCL2aui!l&R?zI@5w>v|41KZSnP^n&P!k}P?hny!D=+AQ8OVB zIcnfpJgQ*S=#!dKpkLr!zJGeeYPIz4l@?^pRV=#Z=a)eV(EO0l5~~X2C2V}j(ienq zgmtbQDjYC@sHsmbnGwYdqIRq6C#)_ zXI_ANaix;)x(qZV6I#ku)^c2S!s-~8m4D>GhIU83I*w@qjV7tC^I%q`ypWktD@#0J0`xQSsw|fF$1=Y-x z)i*kpdOjOd7GG(8YD>)>=GKLNqvJ1q=ofH59iP%i(0=!0dh^Wrh?~38A3ps_{o2MK z{q`RbR_~ep#{d4;Kk)AF`|IESBl;U@!vFsN`vYn8E*cr+R_Z%Z^3hvpPyW!)66Ds! zzxbcO+R>K#5y{HU5Z~ME-9StNRB6$wiqHSh7azEB^|7ZPR-RI39wA+gGO}j{MKd((yIa*VIrT)L7+Kv|=E_ zmKZcqS(zog(BjGf5~xp3|4QnE8p%*Kkg8pKE^ytJ^xw$u0iU+PYVl8t)xtKU4j? z-BONAzIQUb8=3Gf3p?Ab)b*0)d)rua9!LKF?7dyAZP``ddG0y)qsmkj*hNDq8xoKR zF1M6UOl%{fX;89HR8? 
zC@4R{W0G8~@*{p$xsVwE*JTICHKW=6U2h7XE-wWO2c(^c{XKMu#0v#H!ikZaE32z+ zpD-mW*~n$;Lyv~ZADj-wd{i1;92F#IzYL&IG(DKhRXqH!8uTimISE=>C4`8fMoB)V zbu?TYEoB2Tk}U#Qx#%3n|Cy}VY?YxnBP_}=CIKg!dY9S7aa7=Nj->3wKUQ1rxh11l zyGDUdDGpU2CML5=8#$nzN&`&lKuJ~u{R)8U=vI3CfWcp$@$cR&;Sk1H@M#&M&bO~IFQkKqS`SD2&1M06d~@`mJBzM#7ZGU-~YagavWEHhRwq8U=ds2 z%HxF4uB=h6GHFco?bzTJ8o4d3I9Dzjo!Q%39XbSa+3s>UZ$0G8{dv62qNj|GpwUXImttFwE9D-|Z-{6p%1StMcx6tY?+TNZ& zl(zT28*MLsZ zUa2Q4o{Kqp`+u0_wLd2ey@SjQ&7a^I64A%ia`CvlpeP=RQf8zci5Xq2QltfDgE}uF zXxcDI&DssV&YiI)+5KVD%8We6d)!j9cR%&Y>Q{u7>gj#>>vmBpIS2RWF*1 zfq+Z-R%N~tEu6aa%CZVn$Peb8At0dzl%^I^Q%m;BiABtemxmtJN#{d~#^ zk`Qi*BbbCJlS8aK*LPjYd_07!(w(biPM9lquOUhm)Ux-PMrQ455=Pw{%7*T^kfJ;p z^VIBkRU`dc!7;q+_SNF=O8AursD%8U5PB|imQQdw&899v!u0Sy%_-ln+%EQ9C_>44 zW$VKJfqvY0V$<{4#)0sDWpiJ6$KiNIu43nw7ccU(y+WRup_a9)R#C{~bmT15xVg~t z;zst{*7ok`W%*Ri1|JRq)ARa{PDK}`N3C||ldCuj1$*VXY6&zSth#n7Ic{~^+(TNu zD*^?o%+%<;cB8soDWjigHWRLWQqMK{(a}mT3FzB~_74s(9$wJbr}^3rr>vv;tgY8x z?wTI1#KtvMU43cJ25kFbj+ajY2s)h&WU6EBG@DtP1Zoq^&dPe!0>@fupsJsw0eP0v z`h$!5UM1=UZ&$+QjhS%_=Lj^|9ZH?`D96R8ql{}|=|jpfKTX?uY>1wiHVgq12&NR1 zdMJ?E9|s*&YnJR@(c{c4nbuQaUz~@L*N-Io-QwugoQYpm@;YL4IpQu)fuwOQwRRxN zx8n7Kp5%SB?*ag-6`zFgD~upy=)EWi*bK!(f?mMN1$-fuhymgx~2_PU^DHJa%kIkjuK^p zHbBG3?HElD8@VkABjt<_`K;OYYHLgxMh@Y&i!^O&cL`CpsfC%X@lm$dk;5qO+J{px zMUummv4Xg?6l59F~w$%vCSdq4Ht|3v+d{;?loFxs#Ew-4*HXc}>R8PKo|2wUAz zd-(Dr*YuCm5@WU{qC>w|chSDnpG6yh_x{Nr*6#Vl$Njl9_Hm9x)Iz2U84*!fW5GEn z7;qFieCg9um+n0Kr#|;hzft$nGSxld5d>Gu^s3i+$n9IVDQ%pb5Jr4r9km3^$)_hO z^Oe~E=4_)RgcQlp$KJ}@8mwx9`n?Fw9;~%vlR>F~AKjd-P`Kxf4T{WjEQ#<+2H#q# zZztg=_14s4=iMG`2K9(()sm3R4Ux<@2PER~+t?NfHG1;Akg2$FA<)S!n&x7Dq3IQaA5;l8Y(u@RhVS~#g(wuc{?3;Fy~P-N)U`J3?yx@NT!ql zVPz$arAeKz5hB~HJrZdA@%3ZD3A6|48=@`i2^n6Q90iMgXuJ!dau)bF5^mED8l6o# z>2u_fBhx5zc!MCKJvi*>u3hDe*8Dbd{PApfs`M|D^%$+#GGxaGq2}xs*%d8$I?_&b zDuLJNG$%|0(?Uy)J5j*NNoLK1p{kU(^g2kZ>imMrY6rs_H8{Nrrmhd zwsWPNm~cH_$CB;-!ktiU?az5{r9SvR-Rzead-2(Bs?W>KUR@8=vt95;b<0;fqRzgM zCBMLH1ZuZ#->R4MHXL_;;3up)i0c_z2XQ7K7Y}zWZZ{tkI=8-fi8WPyw{PF3-={w_ 
zZ9ZBd>%7u0$E!cM<5#Ht%I}$}cP@m-t{=JDlR5k1uy({r$yqwa<y-Q$y!U6^{QSMc3wSI%E9X3;zJKTbmf67 zH*Vf|>7|$SG^C?eUO~=NQu{6wkR!9%vl}oYo6|#3uE`P!JA2M}gC}FRBv~_WjadOR z66gFZAX(q+ zW}JLL@I&Mh?cCN(FS!;?$f#EsCd6~2ablX@Pgfe}We9ot4y8^=Mw_xSN0T~XlS}24 z7Fs|lLb}9vwAu-)2EQ%csTFB&D53`}6?EB2B>8knN*{}4_5ROCJLKjo3jTkXZ)NIX zi&iF2Ia8%SL4{NZJ0?!-%qvYe1h1EqrM?UikOC36LDD(rs>~o!m~BuhnOwB16nU|f zB%}=^l)bnas;^QfTF2EvD!#-uNd$0UN9qH1_z7*39BYp3R(3ln1&&2eH=4v-)(D5t=CX;`c}?D@OcJ_zB6oL z3gDPePHbsVFpN+a%OArY?=YmuZAnayAHP_hEV;sso7%HFd3%T zu#<@({X7*73$lF!4rj~Tu>wfYz=D|_+biUc{=0|&;4gjQ#v5+w2&CoGuhz=z){?wS zin3)wyJdxx^$1t`WL9d=OEkI$5<&`5sa?EyN$*n9P^XW# zZ{5;ar1J6;E}-5&aB-odS|=Y3JB;m~I!U5+)q|iM!{EGKb&!B8(bGI#V-iMLHgs%8 zE|;9Btw^1Ro{vy@I+KwLRqHCbFaJP6Y2IX2?^^gm^wp;^-A(6uv}c>hTv8bj^;3 z*_m;lW>^qn8mu6BDqr#^tEvH1wP zk}+g%soy(cMc&wufVoAlo4_H_LrL#*7wzo}w|Og@iV})@h$_xY2fIDv4+r5oZg?Cy zN+XN{Ge%w!*UO$mi3$ozy7iHa&E? zVM{D$xa>IzFJ*+?>QgB>`(&BQRlZIz$BgV&Imcz2)lpIWx;T}2aDf--e3`(S%9q_$ zh35yA9wgQr$GakOo)>wOhAt>4nRK>zsD$rpuMXD|-1d$GelccEdV$ zX^!2TjnHxgSDE^P_+_6T$aP(s(!0#++T)arRc}7^zzSj#ivR#X07*naR7r`eQ0=EA zxBHQX*H%0D1y%jP0}oV)(?L^`qjJ16 zaB^NYZ=HB0Pwi$(?3yYMMsCBhtJBhd zU-KX#@FnPYr)rxY5bNxu}5^ z#IJtX(Jpww>_0RrGjAmFAXzAolju*#aqk zpE$L<)Oo-LDZQbaj#h008r(vBw|m0Xyloqg&WtG+Llflh0==~I)~KVG<;?7n+}QL#6&TLvraT*<%1aJ5;~P~Az4y)8l++; zgG&jJoJL#JL!&K8$}k$Ci8<}2A!v|U6gW2pUCqAUe5SW#>0*D-Ok0-&;nEU#6? 
zM#>p8?AjKenPGV0sRYbTpBXUi5`w+J&4HM*L{y~OvlvRQ?1l{7bt{~nQ1negDpS*}+NDt= zg!1tnK7pkPEk(^N(e6mE8B3-qK?f+k2FpUFjqQ$9b5fcrN-YIyO(pA+U$5b?wA1oY zyLTJ(oBQfX?5EYHGL@R1WJyqUYTx#QcdGcbrvB>la(&CA^3@ZZb`N&c)aREXwYb`o zwYbX(r&c?U+3B!$+N=5!@>`BnUjoT4x?ao!d+853*Hk6a@WQkO4(`6Wx{DRAa`p)i zx*W$J>2Pvb8PEcw5ocx)|0f?){A^^ z-OnQ`QVfcm}PAzpVq7R}y_`rkt_^fn!eTOM;O=HU*N3-3G7mnuQ-422W{u2kUX~Z#<6X@;dZ@GF`=$7b7s> zWw)|q63}eSl!ggZ3{FyWqTK<56OKz}V38@fG8l!s#447}D3FXjG1Y}uN(rL~5{?Xd z;r7(U5N(*U|DahIgwlx2ox2JB>VP0Y1+3{L0%)hGn&#yNDobfS4_Rtl+(OoCUqcdG zvLQw)Djc5Dq~t}g*7-RlIzuut1LIIRHA5)K#AC$5I1!%Gh2#_PET7;glUiIt6-OyG zY^pg*^Z}%A#}zEhO`c)9Bv89FtTho<_-)v>rsKx90TN5eO6=GB(Z{16K_PhCx$4f77{9E*S(!#GZtePff3POh6tYlib?=>?pZUw&DI-?(u@AHP%ZX#q=SeX5cOgn-iYN=D&F zX_x@VlpYjjR zfiPFrB?!PA26wQ~$o}&0U;S^AN2)XOQ z$;|shki>KLCU6M@Sv;eDBFdXGf{Hm|$pD`-`S9?@0KW=*Fu{o|9`N}d&?HXVVcx{8_>POF&% zx8A_TLGFa7sWBTzV20@~p-+eT*3Dab6lcdi9U0xPEPRl}*xEq&a-F2{}=n z*9gJDCcv&&iVz~`NSrrt=qAIyzHx}B9<}S&uj}iSc_z~PFURq!1oIQu|9Pszh1}<> zxoU=f1Xn~4;l^KO+ghbX9(^1G@IGcA@e|Y+%u*1}z0eztEE9~m9$|rygM%{|^wxW_ zM3=Q71d~2c2*R@v3v&%eG_Q3Hrl4M+qOFigg5DAM8k}6mP#sC8uEca75{do2j^;=* z6yz3OiP2OkV~Ui64o3ze$d`hehXErv`LLS5)kRbze>~*`TM0z585%4e!=aWQY1EFY z5}VDCH5iCTo&n|oS~2K&BOx;bshWl=$LVQ=IYI4jFetgGIWU~4*krF`;GZzF2m+@E z87x~etP-vCm`DOw-Gzcu4PLj{AdW{T8+q;SWrzZG4+#O&W7A>v7CsYwI?12v26{Snb{k zhdKICbu0;uk7+wfYZ7AiZ5Np|;SGq;!?9+m--GJ&B|-G&T`m}1Vh8^78*S)mQ z=sOT{oX{TqhDysTC`UbswyAut#-5yE!i;Jne7yu+b|q|3hm}W)v)z zq`1=KPDnMy=F|J-C>2=MBKOhuZ`{28!V7vW?~cACTqn;upxj~2{}8((G?>Xe$9Y$Mnih#@6Sa9Gf}e93Cr z6`Ns41udvvjdY27TJF?E-)(cETD^OM9=?A1QRZ5_>4?N^GMborIBf^d- zBuY~pDXgZnk?2>UGi;>=3YpJ|LpLQ^JkfXG0HyCYG=-#l(#T6g<`;%bQ(jW3cknSK z#IEW!4VJiQ=YmBgD?RJHm9DPStSop4G>3Z1XC`T3Agj!+uWp&Zgd(wuRrAAabECyt z1tS5+C$NK?IF3b9u^i1&>4eRzBwki%10%Mh3*aEun^|5a?=2(0TBNO7O~%L=0z2zS zQRI~(dY(vlt-+KdLv*ek$HW}ln)=Lpg8KE5DJ45`SfU9y9i=rUw)CR{M*L`;*Up46 zstA3AH{5x^=(Ni(1*1_wxy02_qS!n-0NQUkc&g1-?FrJ*D~;i#odH;00UYI-19x^F z-?UJ5H}$R#{gYk&bDDf-i)rx9>^c?`srwx4kXSt5>1!x!*0E|BUNToRJUpG`$V&>j 
z^OL#?*5y$8*qNP40P5w_a(qJ$q2D>%Ax0h}9VKk4jLX*pSluXU$kEJwtm-4g6#T52 zT{>cfCU3li6<)*HIy213mYX@`?JIyZYIBIlQLaoOl(GeZgAbMDc4=9fDAmU?{S0W5 z1_T_(R?b39?HD!zR#D>8Jb}%anWt~s&1~B)bCggCBV~Kd@~lj(IeOB∓Biu7t29 zfo7gDNv1>0`Lyy#A)GNavxc+0atyip2($03nZ1&#tWpM}&!znjzw)*J|6l$5?T7T; zD%oeS=FzU5i06@dOvoBn`!;V`8S7q}K=XGzHfme=h2Q-kN)C6muZErfi}qH&wsF;T z7wrdr`3JxD^20y>mw$HiUfPc+-zPq*?|4+Eq8i8XLCevNqY`>mL==z2j{{{b3EY0& zooBxLbC3Pzqq&!s8Jp*Al~qiH$~=s3Bo3_R%Rz-KUxOD%(s1I_ObQ8}^nAp)u=o6P z&o7~!G!Tb%UkC81G@fFpu=q##BqLv)f#9wM_P>+jEoeMl>zf_*^rRLC($KGSlal*w zoS;*>9GPT~m%F5?I!+AJn8rsJ^W{y{~m6n`57{oa8bfb4|U=D-w z)Ico>_sr-H3jm|^mV{#VQ^;;qBM!0eQlLV4)WcLuny#XWH zLe=DpM+S~2Eo*1%6t2C=9WML;=*xqI7Xv4|LI+TPBILIbooA)o8l_q z{AvVluwJdeN&_fw%dOwm0wa?<(rGX^Qm8YL#L(@ns#IVH50_pu+s7y?y=j zJI#rHUE_{baa_7%p|j6qH_(;74$qNwnb5A;KeM}n&unlNyz}kG^&9N`{!y8(ZRd+R zvVg9R_4VaE>$v;vgeB9#N)O@X)vDbu_TDSdwB&(&?=EKpc~MKgs)YSO&r;lf;BL7d z2^wm4&c+1s0GOVAb(mJmL9Ne7PI!=Hu6A`QRCwM)&ADZ@)#~r)u$FA|7_D8GmC^zH zaR0__KXvKMq|e@VwAAC$R-s%!^8c3VAii}%Dac*-?c?ZAonDpb;Iz*t((3i-aZ!;) z-#)S(SpwkG-*U+E!79m~h241jB7wb=N71k;wDJ)NfCzqLHAwr&yO0CW`IB}I zN-kV38m56U1Htt~ZElnfZL7>6#4fj*9E;Wi(G@d2^JcE#J(HZno0Sbu4lWWvNmZ9d z^7ap!rWr7MbNm4#d+xgoZMI#<|mG+cD5K7EZzYu zI~3%Lx<*o2(k^ReRK*@aBL_?C~d0FB-|jRY~&0*{Do3b@r0wl&*IQFfHvchQhIQ(N)GrM*_x z$*;r!j)cw1Ig?qxO?X^-VOUw?-pZL-a$Hsz4P%nffE9S=Y5WYHeI*~)X4>X#o^2sK zt=F*S2%` z@HfNCv7<{kZvS6}i_2WaDqMyFbWnXR?Hm8~*Z#p@{KBn=c;}#x0igWwHe3=l)}es< z`oQ7ZvDLBU@QMHMGgn`^`V;T?@dP@qf9knUyzl>ge`w;WLxq}x%uqdp3}Xw z%u*(^r<97wbsRJ! 
zWP*zSK;g{H&O9i3^*t*RF5Jq!G~PL+D+PU2%Z|$bq!(qY?jjt4$O%hoN83A_bMHb) zvnVXp%w%Gvu`=8A%5%*0Qg~@>$+9-9b)IlIxdrIdbk~6dHRj8dRW3D?q$brN98Czd z#Oai4HOj`*6QDS;+ zka$Amk+{CQBGHt|k39{$Z$`?(Y`_Ofn*^@MQ_T=Nkz;`XgWm8c+{0oLC+C1+^;l8Z z-yDFBIEh%3Q%qwr0$HZfVacJYE)Fu8H7v2_)F*G`?1c_yhLL~L<0 z?^g1!D%(`2T0W@YClMz6yAKhvtjx&^*jLm%kR(t@X0UoU!Ly~-+(hqe~nGmyd~ckMei z8`@9FShq>J_*d1OdXD2*7q!EJ3e2NYHTZrK1@70NDCMdhNqtOAH*~bPhqA2J%Uv#lC7ldqfmT<2&h)qg zi7J{RK!>}26n6Nq(=v7%fzw)>6VNP_XdR{4ci^`d2?sDpWAMNq%#yd!2Fa3p|Toeu=< z1)=lveO*gzNX>`chR2leZ z(j?XFS(E50)sI&8mRcq@B;@%gOoSq_bsQ5Ets3ePv5>=b+;)a384X(_K^i%xZ+#CV z5HV}y7_;0#n<~PQn1GwSeI*1hlLWPnJ7XjV5akS(%Zv=eil=>7;5g3UmL;gI;%eB+ z4w{6cZ{?9fW7?$)5x$d@9HjanXKRzRu!$H`h#ox-=KSGf8h%^`COW! z_|Ypti_61qYB_ihfDB;f{_w&-d+O(&{BOVhbAREVzUiUmI~XU5y^lQmvH#`Q-iwCA z7>pd$4);#@EZVz%nY(B{*zEm(*Pi}?Uwrq!_}a30Y)^djA3L4!@{q_;gIJ>w z61FUJ9p`HU%I)x#N3Vb8PkjCx{^RR2pP9^_(&>rV6QSH)g?wxNQ;%Z$K%t&W{gVj? z=_kIp3^>6Hz@((ynjAiekc=IJDwdkCRExn()m5=V_^Hc7IoqY&Z1ZhkyCCg~HXa!Pg+Mb{o3$?30?)F5-~9a!G5IfcwNQ<#k8U(P7l zX)Aj&Hs4aFf+R6>*T*I81|~&O5DfG#4yQNc;nT9tY%U9h&kk(r3mdnImIFw zg1LZ^K-^2o5}VuEMX7N}60Hb5KLR*$IC4@%89kHsA1SgeLQe~c(1sn`IF(dkUQjz= zsKO=W&4`?0sytoW>oKl~-Z8@GDBwqd0+hSs?Oc~yJgs$ty>lg;oG=UAP1Wi?THZpZ zrDTCw;$y8*lE!jEs z5>A$tm3=ks)#6_b_x;D#etiD}oyrIYf8@`78%L_%6?9-r^V>jp*5ax1RObFgB{d(2 zYroGKjrW7y-j(k*A!BJ>x6fC``Ow`wch2tWc-B>8!;>>DPt_Llz?V2|(T?8FaCq&= zua-!)>O1kcWvELw(|7# z1G{soJ8FrKo~xlfJ@ux9?nzLK=Ch6z$nxsCUfupa{~FC7Nz<7^D^%*lQEFO;KxEbyz3o!7qk}Ye(lC&5*8+3M9YaMy?cadO z(IM!CxI{sW%=^<55I%tk8p5<)FEPkEwr z&{HFv7}JaH5=fk;liL@z}#d;Hx*T{`04P{^=Km1|a2)N40ic=A&pdgr%( z&m&hB^6z@%+f>nSJ^j&2jw4F&qhr|tUhw!`$tVd;vjeB~G@{!j#z|PhUIn00fz#bHZpjOEvp;4`AK%}8J zIF>eR7#WiM)iiP95wV%_?Cen1wYhRAU^ewMl%i^4u_y=;?d!|~k2uOP2uL#5)Fe-8 zZEuMxYES{NH5d)$Y*xDhq-AYvWn{2zp0mV_-xL&@B1__xmmOp@v3S}?4TVNfwW;Wr88NmTAgspK8(dn-Q{SJ=l7mKPow#vUNs_$L_Z4d zouZgVU9nDHwrf)D>D|wf9gb7w?5hL?(T+&~w$@T|MK74$y>Ag^tnxTAn=K5Gxakq6 zvyPn4+~6N7^Pzv(96VDhXz|y^%V|JhCZ+%*hm~DUAb-KbroWV3ghSPwfu4{(Oel~P 
z$i|(8Jbvxv(qzFX1#^oJwz?vcyMCr}R3PTjk_Y>ty);|_25<+qS$zu8FJ)|0x&sBA zn4-#|z^s#yNUT|%a=Tu4n=D?|Gc^-l-^@m>@|jxa&E?A|izs=nw3SDB?4qvLRCUc| zZdJlgzRRs2!aN=6*z8x!bOcUaZQbP(*SV)CJ@e|Tv-4!6_kMNBLf8L1EWfd5Gb4V3 zOMG^H;pbUVEt!;M)koFr-n+er&xKm<(l#xcfwQ!xo5ih1RVkbCMDA%>hJH>jSiFy! zw>9;t8v86<5AQr?PgQF7BN?sNK9@m0vc*#JTO_YEJpj&rDUC>O;HrC{Eqm z^;&;=b_`{nqS9+AsU@#kO@&jTo}_(|*lK^CROPEkEi=73{b^==wS^$C40+=*lOX_C zXozwg_q_(MFYyFmQhD`F;CxZL@%ZFvfpGAkcEad$a}Pnk;1Yy@fYg$PN4}(#muzm? zS4U#_OXjMWnrd;&Dm>&ejLNk&&lHeZ@i1w4Dh9?|muRNMr(%ZX@gQo}<7D5^aA}B(( z=A{NlbBAbk(U{)OAt8dpL4vV*i`9)_9`?W?NgfY+-2LZClvq#)0)mF5)blk*xVjcd zoI`T~7Mc))p5UJEK?@P#3IZ#PYF>S;B;vr}c;U#jg~DVCKnWv&?^qLp#>Q6pG3{13 zt1O<>n|Avgly=At{gjW{AO8AcJsY3UWE&?->9 z=1WE#PlPulTjgR7t5LZ*0YT9+SIX0oT_E`6BS6w@FYwbON59>}>8d02Fi4_|mK#L@ zx3$vSWSB{2@_^;rB+VaTW(%f~@oKz=9c9L}eua!)y;`C~P~XVtGfdye2Cv)&3r2lY zHK8<7#wvLxY*-63I7;EwV43pFQ2N<1Fli$6P~aIEeHa36YYQ`NkgGn(P=%yjnq=j2 z*I{@93tRY`FhZ0aPWWi_;L83vGqiQa2he@Tjj%$_!vGR4%_M|U3nSbfaVJ`#|;f8lq3@dLm6!MD8O&2N0@ zv4<}|@`>ku=c&(q(mdWDL&5Xwa3fyv=F zM>9`q8$($0 zG29h`5FkOTFmT_JaGq4%?{)w zn&n8JLJbaA3a@a%m`$B0mQpwePzFFrW}_(tS54VwTJd z+(;BMm@Tq2J+b)UtZk-358U#1Dm*d(tZPXIurqs125zUTZ7j`{T2hrYnXh17$VxvE z++A#vyF>2}BGomw@~HzgyBSN?CJ7tadKnwAVVZqTnS5lzepF-eN+B0fDU?i<9L63(CFYpU84ZZ6C zt@*LpvRTdDR!)eA4cS)(QaI{TA}7$k)JHXlhoi@pW^@8gW9(KWmMySlMhGfICadZQ zq`171CSfJmYEE=xgn}#7p6A^8C=xrD$sV3fr-kHXayf)m**23Cqjoh|!W4JQPGZcz zoq;B=l&Jy)SJC!EQvA3C;bF$L{AaZ#I7wk)FdjLN#KjHdJzgeh{^LOAD5nb(6l zEwUqHf9zKM@g zR<%$_lcu2r>E~gng6w|AM4nA8>t0AxNhvv|2tACl^J(|ipNy1s<-1DhIGzp^_c^P` zvwjA*wL`BGn86)gg7DoM6G3w|YXVzo=3&c1O6~+Sl&sc*AF+$)gx-avM?P4ug0+50n_k64;=u zp@RaTg9PJ`FsZM z*9;u@6m`GieMNY?s1_>hat`|y$>TH!(Fw{ir=+Jj>-&}4>E1E=?P-*sRdd314aVuD zF4Agla;@g)Hm=-!nI?N43hJ>3gO{D?OUu|UzVLFFr%44QPSl|68Yn}xkO|2BHxhIx zn=m>V=Pi}5^hzTb*St>B=!}c6HE6k33{pK8j|oy3LPR9ndiST@D%pxYV0as!Onk?|Agn2FRY9&}UHoq10J1+A=Bs@N z7Sx41re~t^O5~8*m~?USCngnMVFw3@3Tnr!kD1xm11xwIDx*>27-(@OZ0LH8WDho_ zc2cOFk78Fdln80A2+ZJ>dStzLpeaYWlCTNXSFS*l&B%mh@XOsJu}Z{|%3rKv7MuA; 
z350Ss5L$qJ$RnUW#D;G^d}gGE^=KLxY8eY%OcSR}QsU+L8hJX{Ru@3e-F+L9ig( zE3|@1h$$`sq@OiGuE0?Sc5*LAwZndl z{L#l5kY^&Rhw$lZ&-}m#-~H*UPx~&~&wv0&qWZI38fOtr#CkLtIVy=SmGLWn)%*On zU)Alj%md|5`@H0MjOul~)U%ww_Ko-$%~4XorcQuT1V=@vnajuwsK|AMPY2{FD7-av zh=fbvLDfkH0k<+7UjqOE;pt&Y+3`iAeWU)Dl`wiEhBeFHf^XbLR&Y(tsT<*15LJtCDC1N{Z09#Ri{b~C7?@xpjX zM5`bgdxLkD2}f5N4q1g!6A4Ht5TXgQztlM*FtsIph~2_2G323Y>wt9PiRCK1+=`l? zu(`L@CDbrB%CuC)%TLIFrPmN9ODI#Whp3Y}^(qC=A-x5B)U@w~KbwF++XQ>?C^XQf zyaOp((kLkA1(dkNCzvpmh7=AJujFR}>m5`8tb&b)!<{N&mGP;{?MqB1 zrHN!!&yn4<4Jk!1Gm-s*#YE%CPF< zj;k*eTxEIDh0!^+bsn2VHYq{KF& zZsn1iQh*apy?>k~N^teN2H`@Axv*64l9?zAb~A-yLt!x0Y0z1ZRHcY+SOn6$kGW>{ zG}x^q5~-*q;=}@to>(T5vXU%%JJ110VU~#!f2T<}qx2<~e9bgTN7DqwLEX_MRu9cz z88bx=XP%;vj?Wraq?s3#95Ri>8KDvMr(QNEEE(MJE}+BWZarKSnM9+shgD&4xB&2x zMZcD^H8dK>CMKjbscnX{QjyX1dMG1{0MdVzqO}>NO@wn2;1K+W zOBqlb>T1K6nZsb~V4#*Lw5g)(os4C)xq6UltR5{dwTOb(eVhdWT`pU=N5-LBY7YS% z$wW{MXhN!SH!WpZFO6+o1&Mi~^Xd}%OkH`g3N(2wL7}JyK8rLmlp&f3wx=#!+SJyN zV;k0JW__&brEgQnqo0{WHEZG$jrwK~GP3#z@ohQe>1SnR_01Mx#vDl%n0i?S4nBRN zG+_soMy=T*$34m*)5j{ph6SVHN*R5Jp7E`0&h~AX1hqy+=wTeU`{r4hj&oFq74qa^ z_>vvtbKjp9yChKG#dbKidL z3*Y*p5c7-$)qP0oxF;$}dC0I;T+tI8n3Jg9bG0!!eAVIjl8 z3{!24#*RQC`YOnjYRLyE$JaL!SEbLt#IgtM?pWRW%b1?Y5cve~y zZ}%`%pycKhNJc_`k+nf2837lZ$UDjCafV|6sR1a*79L!Ec_ISOOY>AIjMch&WHDy~ zT}x#Pfs8VM@7^Qw!3${P3}g4u8_|R(mrgV|9{(~a_F#ox!z!nvQ6W066Rk8^*W*aL z*>&N&nf|NEI{VG5@i=?f*U)mGiN(utMx`8g!097Z%5D6S!!t1@RRo9RJn5XJs!phd z=eGQlp%Xyort=v%pMmokIG=&@8Tj(eKtIv)q&qH_aw<7qKqpw9lG8i5FRQp(%te|p zJZp0?6&>d_sK>c^4bgC6hG}VIC~(AZNRIm;v4?{u+ngyNtJQ^|87eXP`}(|C{CWh% z_{toY{7eNdSv!%MimFK=$UuH-Fyo$~CFB^b?5x3~M`7TwgrKNwlbNe)0Se=pSjrh{ zGEgHwuGr@$@W8|GKNm12RojI>EKYBm1Rjds3IQLlCP3uqsXGyf>B>gkDU*VxOt~6Q zLQNzk|`SxBn+662MCn~HVmbx89+_|ZECmGYa;$nhl9tl z;@^9+>oS$|s?J=MZ@g?kqoFl39GL=$6#hfMrkUW{9TyEduaq9Zth-Sgb+DD!HT2_7{t74@2{mrVjt0IPjUD zplTIR|DbE!B%Ad-6=d!}>P|p>vr_(Sa2~u)4!Y6i4A4QMh%AbKn>AD*97kO;$|&=J z$yzI6GCip=duse|TF=g~X;|PfrE-IWvY8Hsqv48dJyS0$i?X;P)hHas$VEuI+ML@Q 
z_6=Q`Jm4fv)U6_SysXFoIzoe=Gc-iVO3h{sqh?fTz&H6Nl-VQ>5}~Wyj&2Hfana7; zSd-q-Gd7`9C0=N;jEu;gCe@fFq?q9Wivw?laqE1fuxvsBM zdHLh3KZInc>{9@+s@($V3Ygoac0fT6ID$3-ohlL_Jq9=P4npkskZqg`<&CqEgvrRN z(ut0w9C8E|fQs-Y9}%z4|6Nnh^GL(dJcuu24e@svWG7Rk(wj&KZD$rLmYXv$D>4{2 z*$V6plnz1plTYi5!5fF3d1vFPm*+V;^~ru26WrW8zKq1@ROd5rJ_Gl02Ks>9!hH;O zTB%=)pGZ5cYjK{}`3#)T!1)ZE{Tb-XA+DRorBOW%b0z3&E?rS@twqy}NU1&;jZtcY z&bVBZ`RdDX#u$C2Ca@VjG)5-*4?q#YY8N}jHRAzfOCn;Ezd8nFlDsfm9Am$1L8^6i z2q12*#4moK@<_cuB4bX$xMEB`1WL$UMQaRal=3wv=qghhJE|);C+g_THYvD)*C=7g z69=oD(uPKQ$4sf}g^Y|>5;JKSOqDcA#;gXmI1;zh(kZAK>OSnf_BTq_o80@=ZxD8k|| z%hI6VyNwfdE!9|*!hy-La)Z@4G^7^{R$?U%7MCm|AUk+^ai4$;{4>+bvw0#UBCd>y zYJ6aXg2M@B4^c!S`_~!a3c|bJa6r}je*l1$kbNh8_;rFV~W15Qc9jNZE8W*$brq^4mS%k zxJhO}%WZ9!S!iqHB}mzN2}dT=w;X22mb;vGZ^FjhqY%e!_hc3Q9B!r!CVljh(=?RO z{p{7U$khUiQbP&p8@3XJ4`6m|ul8+0rXW{d8mh?@>naNWFYQ-#FO37Ta#AHZW{n42 zrR@rBQs(_Isv`@O2~jx=fNr53`B^mGMSIsT|DbM+{i{!Y@W+1rC+3vKy)?e#F>_Rj znbVIMMdg6=*WUbn|LD*C?}EMe?LVSlQkb8MDS+dYdb-qOk`WG*T*S}J(Q&p-`bZ#bh-4}m?a_cMh(*+T|X5;Hn@YLGH@F~tcFQw6A!9jN5Q zQ8~aJh!Dw23IzRmS>+ZzY^e~R{8n)9{J7^aXwGC+DL91r#T;i?orVKY9)HG@iNV5T zMm-HOyTv4NxEqG6(*Z;NJ#zXsr!vfmH_n=z5!I0#JZhBySi@sBcT~cN2_jmBmY396 z%wQ(wJl?Y)AM@Hfonx22axt%Cz(6Jp63n88(V#Ow(F0&(wGhshFw`ZcKK~(!@D@*d zgO)pLZCUziIIvF!34swC;9-pmc<11pox>_6rl^x7X=0L?8YVXWMLSYqo`FqJNC307 zMO|2;jU0?*{R)?IYECpnt&EpdgAU?BJ%s4PeBSTG4D=%IM7HNip3lImnt|8C?%l`# zS2Z(d27G=a?aVCYc_HUBa6SX)Gq3|QkSAT8UgYXX!hEU`CD)d|Je!}Pt>Ypr_*_Ee z+KlV0YIV6De`9o2R`y)U8K~%Pij-jG&c>#?LR&8S!Z=;96|fwkAsN9y)@8rx5`T(A z0r1m17_ut?4@tCK8>1Okh9-{^eI#vs@hQJ(cxy8=%+Q)5G(3=aX1cH{Kd$8m59X3n z8tav`0|wyMYc#TTSq>;qcun50OpsiqulkarvmYe*A@>Zpj^rRcY7MwkMkI~$3ny98 zX%g%no%;TjQF67S&`6}@V0P0IBKuo&7*(FhO}#^^r$)(iwSz$^g`)+RzyQ$dxBbm( zWe2Z7*~C+;kP7Vwrz6t4d1^?TQMfCRhUhRPRgVHhbgLjBEjG^zSLWg65J@cjRkLBz z@LLQ@I6!UvAqLr`tFLn`LLmgtQb-k#;TH^X(rOYk7&^bjx@H8gp(l`Mek21yWWgi^ z49ZDi@;-oKkQB03Fi9F;Wp$;HFSLwkggCw(XPx!oJdR_#G$C6lq2(}a4_lL<>7!I1 zVfO9MEghNCj2ECeB}9(bEU$QGhK98uYs(TeR9i7**ro==vE7gKl2QP*c%|2{!7CZ) 
zCb*FiI(nE-u+GR8>|TbQM!*_I*e+qk*2tA!j`5xQ#>C5znt#d3VJ4jAIiEN-$u_Jt z5|TiNUh(v>VY49l@^crz@mF8Zy|g^6H8D9z^r3>OQk-fb%t3%7f}3m{8+dHF?OnLb zU9?xOi9uc;_~b9H@1kr?=*Eq^{3PA$yoo6}? zkUq`zchWEqLuE67Y)QnMLM7ett&f=8#OX8`B>L((f-sdP7@xn?OZ@qSjpLAa%1+Hd z(;Qb~YNUjHe_(EC#EuCArAg{=5VkvPgL|plW;{(75|9<t54%tHVWVg&5-pSX}bjYP9dPD~)0)pm^5?nFwma3zoEJVsSfI`UH5U;rh~ z5e&}}q7stIG*|#*mq2SFLwcc={%}AMxjX`K6zCbkIVw-2=$pKSpnDjhSx8DdWac5z zk!Ov@iP^}-al~Q@d)t!bvJc4*^imY*SL<*k#MA(}w?<5Es60jOB4$#s)x33TDp@+_ zFPY~2ANS0V>~?hJ$OxJ4q0*QI$zP?SO({ccHV3a#7kazGXjh^t>2h{xhBZa0OF394 zH9pW5iNx zo|)7j6v`@Jn?w{kEYLU+fOc0`)MgaYLvnR<)^=LPW+04xfrfo?GCQS~dwl6Ds`Ga? zRzl=#D+vV;S#WVDocCB*;2DXNfJ{M!PiB50F_JEp-a5I$SP4N`3T*A<=;+Ji&hUy# zt_lIuvK933x~frO-Kn?qLd%fhc;DkWNdAdj#zn>#d_oJz#{?Eix~mP*|5 zt-&pv84_wVr{ydlaiJDck?^iidXgbkddExWQYm6AtBJ9>X_VB}C#2L8VvSO=1hpuS z?A!N9Wc!9Wn2r*{c__e)Oae2YT~?o$nlX|)?ws?HN`cdal+gH=+Zt2u#wE`tY@yw^ zg}8(^{kB)@Wr|gr2rI%Mqikeja2_++%GqU>g%l=%BDYKH8{dLV#F+MtrdzV{vftH3gYsTpz!l504&K3*}8BCnhm zvOAaVJpK01-g@vB!Tp{Fa~5DYj6IWau}91hW^zwH_N`|Ul;M&Pd6_*9_?0~SqLA9t ziU$+cGzkG(p`e(CF>lMzNWSX*t{}M;v>3EmQkry0kIZpThQ9A}K!?i$NhMJsCq9x727Dd3h)_v`Iq}%4&E)hC+WSg(PUkBz{z|RDL)Z zfnYqnWpz9W4uaAp)1+rpgZKR3(KHm5_Qp!W!{JM@06;JUl5tON1@cBw_|3t`sr3vk zFo!72&~~*$!9G!xE^ovwdxtI|SQ(vH*Pcq3`NkW>=xcyNAQIEQ!Ty|rJsU15h3Bzm zBp%EdEjd)}L+R} zLUtzG^F+^Q;Cu$oXW)DW#tfX_NE6c3@Uxpzw%t*qWT(6nl4$h#=4hchDl_eMi4lPTW z9F%PM1BC<14C60SH+B+e+8<<1I#qb4`?|l$MCjNrovl-g4%0B(4naSrkdS>(>gmDEt7DRk*G^k z?8|A-&qyZzOSp_flSm^yZBeLCMU34bcAmj^I;uiHMUpyjhk+Os3T@~z ziWrQX48&Fdr-eOKR%>YTg5jDhVyHcn6s@2u*?UWrGCOA0h*Z@QmSDC?5CwUXA}rmC z&>F!3m{5MYMHKIVZ3|&oiUV4R$zCe@L6V4`AHXe?r#A}G`bt)tF%L3DmY^2L2=lPz zjxuZJa5F>X@DtLl0UAjbPg6ycKCl%)!}gj5$r-j+gl2BX2n}wb9pj>2CiQAX!e)f%5!-9!2ur{?I{kFW)XgF);h}-fUG;%^n4o_2EdhVj` zrCqr0IdzqqTs`>J^4OPSGjt$nWK9nsKZkoCdG=%P`+wfwuoiyo*MIWrD_1?tW52lh zj>pY=X}|iZ4}JRDGaxhNKFWDOz^AW1{R6-7?xXId`EzNtOx``@K6Vq-V+EOZ%kD|b7DU1gRa+n6F!g+pPIBUyti?nH1Y0ANmT401mXJw<7qr}CxEz`-B+o^M+iSYcY8nf!#e7|)(m<&L*P^Wu06bafXq4vbgDU 
zQgYVnoz{(=xE9#EG$&mAtQw%5YO92MxLRD5x~dlBj@7!kFmv)Q#_Q^C6Sy&&y*3Ox zGw5Z@5-0D~;%*dk>PuEd`Q`EGY&F!eNCkQ)MWt6;4h{|;c<=#Tit6X)&6{^_^DS`t zs=fs*_x<7M84&}Nek*3}8MPVH=6KPK;4(=}QwGbRlBTOYzO6t&5*PoD zVgXVb_tAvvKrU9sJasSm89a5e^b8Ox3{aU){u>DE!!}|W7`V7HT_Y=Tke0lBpU>6T z4&Y?LX_wa9wB@MAYhfRrj28Mmkn%ZBW~^3$JXI7M+tnSYRN?mb^Qs>`0bLYxWzpz#a+eUo2u?)P zB_+Sr97ChtonHRri^H~TvC7f;jI6MxI#PAR-YJt2yrmJE&?>CFq|ZyMI_ioesP7CqzfklS`js}*?P1Xy0A9?0uVtB`6Z)<)Df99M2 z)RX`9!`H6sUYg<)AWaNUefD>rzV^&Jzx8{h`B$I#;E#X!Cu*7+Ca`>P!SaRc*M99& zAJ%s~K62%ekf{HzH@#g{zxC;lmS*rXGmfafbV;~9Ey&8&h{H>VHy*on<>`wTZWM?a z^OJiWRBKL$GRM+Pg_oFP{vCf|ud59?47O%&vji}a-h~U_a)1VCp8NESwx6j|5@SMY z>*??!+L|_2UonoPP^K+5u^~^hFpJ-~1uPt+xeaYYMhz7G;G8eoViRt9gf${Sc zW(#NrmWntbjUhPfYW<8U+3aQ;4CXCD4MU zFM^>Wa@^nv1wrZ9nv@xsY|)3-j__yZ?HvyFYB;LJ#bZ~Kpd;@tnLv&_y#qrg;P$

+&p2@a3*|w+N&U=36{~`9h_g?qf*QyiZSHY6CehnF?{`gK{ z*wCOK6IgwUjU+%(R=jz7Pae;Ld$X2P;UJv&yH5o>CWYKCKyw zff7|F)qX=j+{r%sj;cK4>~`88zU0s5Rh8XAy7H#`)on!S%1Wv$@jH&wz051ml@8yH zl3uw#(-6D3fC`W{zZbj)Wy_Md+c?t?ctPvzOCK?6oF@gvidgGB+@`k~&E1(KR~xG+ z!Bqq(srrk_kX*&_1@0_nwpJxk4S9zu%5`ge{+MLK0ivtQT~B&UdHZZ#ojD`DR8ET2 zEsjNR)Yr)*8K&mzi#;8C-v;j2<#rvF#WTw`FNhUZK4$yW`zJhJmi;~O@FQoO`J4=! zk$gDt3tK;8M|uBel|9scz^-;FNh`4a$c`89{ACxwgT9$vl=Aw3s0sV*h%wGpqH?_` za-~(H`h<3TpdTaa)&{$YGi87umV^7ysC?F*t;KjH$y)ZuOg;JI*qK6>7O>*JDb;ls zz?A(^>~=pa|DDOhW6_spytcPbs&jC~&U`B>c}ElW;N-T+hfLE5!Ahe*ILu*Y9Job_ zizz5;uB}K96`Mp4-CttR{kK=O2iL5rh=In}FR8P*p&o)PK)1qMi&g}L6M#V~f;`0p zgwm@`!X)97d(aPR5nF~w!E=Z8#x?caP2N^h(wyVr@3d7*!?)(+?QpwK)e-y>6W2qe z2j!PpX`qPR%y&%6$# zg+ie&4*;q@#J;=?yB?O5v*s%PS)e)b*hFgK=G1l$(&drPsw!N4B zBMZqeOU;rQPmRZbu#!tcuv=~`rFJQ?4m)X5l*~;Gtm$R9(?x1W*L{`oU-|bnexJtl z$Ma?{%Yd~b-iMPKpS?*LFMPGNwO90#e8x{tV4L%=?1DV=Upk*}>;9kbE6X^M^{hic z&RfNd-`#1|jPLGn3-SBx_U&yq`mBMbPX*8yXLF;=``oCH{y5IL;fw(I8iO16= ze~%@DojwwQV8b~ngIJI5Zhq@=XwDY2Nd@>Vn$QiALmWx&#?L|Mkb#AVi z3J{^`RM%4{WJJ*>BB`7Zk4gpzRWt6AXNobC033qUMDsYdB%?h;%CqKFLlLBXZcI`Y z%cP{Sh6tE7bwhw=aLJ%?hnrNMVx}~*NH+_^tsZd?jfJedBcy4G6DhA36Q%#Rn#_jK zI=t z^C-PyP8F!O!+{O~X6lrIuiz)*xuJZcl6*tGkZXphitXh{)KS-VeS#_2q1WT2-_^XG zsvbW$c$V$-9eCV?PL?lTSpcxyOTGW9R|?7ztcv(_VY8so;)<3;BORdU^_aVeY5mHE zdX*TZpb|8(`VCM%%$7E)I~qkvEkOqULlcWySYo55K7`o2DhJ>wRM~gYS*tL}j6E|NNS>@p4+c%Ubs5YV+oJnD1vGL@$E-W&CZzzXvoPIPF+OGJRtkKFT~#-y)?u0_l}Nj?9z zuh&nbCPt0ZFd1~757-gmln7bQ%YsZ}rSHCP_95N9e^BrdiBN&mND<$p<2VGoy>LUAPtmY&7=c5BG){9kI9esjwcv)7 zK5>I}ip8jsrwW@0eo)ezdpv(&*GZN_NxDnp685D|YukJhmITl=X8Xf&C2V^o~ySCNXZJ-Z*~ zhtjrOe+$LzlnneNF3F%Hw(L&gJ?L0*YI)|cd(+E|L(Y1hC-OUP`_%p>!n@x?qC4Y2 zpHat_3B5;B3qO*I4a}e-*_M~w*CnIXU+T|dO9;Ih3O%fB-Tl^NG9Z=y4^neu`;CJ* zGHW7PRwbn|e~aP=N8irjJ?r)&|2lyPlx3jW&Rpt850m#LwbV@4|><=iQ^k%=hO0~u10qqqo*ej;l%DSisl5UX4dt0bX> ziXkgb0;`m@QgGK~mJ+;|Vn6W?AFmSlDvUV9@E-H8yw?DKBW#U~ zgg>v@3UB)!5x7j=PCn~i>&!e~wzQ6|eRi4#$e~j6^@N0G13^R#>ZvSs*QlCdR|w>H 
zaFf9^Fn>GVOS6_O(?YbWC^vvyv9#}Z6}?4rd|h;9Oeqpo`_ZKXtJdWyoQy#6DHk5x zi8`YST2C*|B&}>U@<%6;+g@*xE(MSe_s~#4vF_-UL%uyMfONQ}?l1tumMR%?x~%Y6`y&AP^SBmOZi;9@fe{Z86mr-ST-73y8FJ|(8tn5u}1nO1)G~Cqeh0I|*c%7Wm z4z5$_^xA%fyE_CD7_OhKxBNL0x}csv>iS}~pNqac>s<=00@sADMcP~l^c|89U=k{s-V>`$9QrRdX z`PpqeT<3r$@Q`4{TSV;nrv(Kb3MbTv)yYERE_}@0Db?1`tzaRUUCWP5@!i4&pfE?) ztO_b~FOu6PjNyMjGXLo(y@ki9;V-KK^HAol|g-v=R)8JeV>aua+oyevrq3s1i;x>Szq;5}e zV)o&qBwb`qwHdvllZ||!xLM;C>mKh`t5i&iA&@OT&r*paXOFS*99AX>X8bZokSu8U z*1JLu;(?&iLX3O_V9@^UWNtO$CEDahxRJIU0Uncta0a=Tn9D2G%pGma6ipiui9Hj5vjC0IR3xOiS6vk8fY=Q&1)D$a5 z5^6x>_KknfXASZ1`@ri|8VG~Ddtdo`L@CJM6FXcW15O*+ibcf#Ruz3;Oog551wC(` zxc^-Gzs2?ZYvul+KO$AZlnE9FowE4F8eE|`u^Ib%rW)q~yv%xX@PK&R=?mX_3`G8Y zZx~ClY{n~rY`2yTIctv`nAPA(=*&t%*p}}_9uNQo8l!4os2iT?2cHCwdo&lEk~!{2 zA1~)%1CtMxmEHsgRV?R#Wpgl$pN3_Q0N#1Hd;mSk&o!ZN5soD=i?HRGawBc7edGQ#@FwApVP+q`?)e z4NfNE|MKqUb9CGJZ|%odfv@0O$5%CBU;n?Sz};w?*W3}L(zK~JJDSJCnAJ6?az8t* zn&m?YCU4LeVIW9;Aks}FWIFprP-Q*3A{i#-alAvE_rO$zeMF9)0X0f1#+p_D!`(;1 z{3BB}=hEnwwFU-RLaiHXYPY44LnYo$lfB(${byrU3Oh1mRrau>O3U{8oqwk1C7q-N zE^DLiv=a1~ftpY39Zo#mFqVY*F&6pz`TUcu8%n0&%Nf$CzQW|IBh+M^Ros-Fn;FR< zxF*K}Jd`yQoq`R0Qi?B{l=PxvC>brQ71yX)>5kZYlvXjFQcT$!#)$EDgQ!+W^>Mdd zDyLm0MDLG$%QYD#IQf6#_LS5=eRhurde4f&tJVAQCwtg}2WQk1PAW)+G4L)`Sf5K) z2H}W}jjDK)pXk8Bxk?gd= z-5wd8_{=j3u$D%K7u_wFdan{y+NSR#al$TxqeSXsh+6JpscdZ5u4~#zLc;^8;6&uj zO=>1~Q|evkzm8V%c`;K`Hv5RLOAZx|aMQ1m|3JT#_Qf*$R4+CoJJ=@den+~p9k6{E ztZ#e0<9{wWXBPdUH+JnM_&t*p{*DNa8wI@C5!`;!7yh^uHrw{v12#Lnf~=^D1angB z*FK2yhF7iOn^#}E&j4TV^WH4J_Vh5XEF`NnXF+D-L?|<> z#h@0Vz5zlYlchgi)qyZAH!I0gl7+_8`4(Zko01bW*K{6K%3_`fkG1t?0j_}WbR>v@ zP8@TX;oNrr+r&6di&MJ)BDMKP?s z(&8mB1u{o=mD*xKZht9(tgYvsK(g{-r-m{PGxiq^J9HJP;352=! 
zemDNcg{#r0m%U<1c2eQYO6k$D_v+m&D=(omp!%z~-1uR~l!k`5b^P{eD3hd%07QuH zfZtJ7>D;$ncYDMMmD(S-i81eVQW1B|HI;&!*TN&KmTlnVh7LyrV=D6X9X97Ol@0vD z`%gR9BYbIe&v(!;)Iim|9a#!Ik8K`VT8n=EU&Ft;444*hSq9rF`bz>^Q2K!&ywFJ3xV zDWf|&Q`>;%{BL!vrUoB$6o1-6`7bklJ3@>_sxC9jqFXq%_Tw@sKO zWWU!9ix5~d5qCfsLl)X45^D?2bT)xX`4qiFE=u6H~C-k_q(N1s@eWB(3-4pDRdexE%XC3L0X@!n$=cWB8Toe+MPh zbsg8CJ*0F9jxH;|6Q*I9WL9w!?te-B(2(Kz?T`PO-q;@XvG%xP9f9hARU8fF>I=U1 zJNEdv$$LNd(DT6dX1=TgM(4>!t4-Cv*7`j35y^iXm)e`YL`$@sghI~%AMf)%OJ^FF z+#=1JOjpHYe#b0zkmO9Uha6s%Ih4g|xPvy%3kT8d>b#%RbUdmhgBB$Ai`pi;n(tI> z!0&A!ifV+@N0wQt&FzFzV{kb;HJiTwU?2s>XwWzfmoZfpf!ZT~%PC@z(xLBwFhB~nSE#36T5)erD48@0K-{NaQpSsu`3 z)QT10Y*V(#`L1X2b#20AGcRsbRRWRIh=w^f6fr0@RKW=sHNZd>L3h8BEM&&q{&5k# zOSlS?P}eL6qJtMb zm0v$DH_v7#b*7&)_PDvM|wUwzUqe#9n*@4M3M+8d{>CgQHj+mta3t^%Y zo*dzMU`n`~l4$Z4{MxygM_UBE zLI`G9Vk|wmrCm{-&?8hcoW*v{667cBg^a#duQj*%`iP zIlK(?yh^67J8cuuCvLq#HT{DB9etzld*mjktla;>l=$_g?(c^l-gOCZ3b95Pu5x~7 z@*y?;#I*J1x%-~q|A16D8tY$%@Wa#_VLb89M5N?)e0;2G6oo>sGeBQ*j&%#948Xr^N^A> z>CtZEm6-#U35vC-FpeJYShHE{l6O)OZu+y>ad>$XjE;bp2+%As%HDeAkkq6UOCF7U zir-FmdyBtCLje|l_v8Y)L<45vhhI;#byB=h@giiSd9@y1hmCqW(WFJHUV=(o zzs#I?y~0Rpq9`SlOOZ|dQ`^Enbj8Qm@Mg6Pi`fH(@w!AkgxZ2*_f9O3*OcPIMC!Dg za47%WUaI`QU01;*m{5RR(6Ta3>X3SMR3s?vq<(MYy& zVbq#793*T_q^8(=tc!zwA{^TpJCer6RyAC!vkZ*qO^MJ|bCHQP8XVgypr_2-`nVx_ z7^T1m^E6r|N6w_JO*B>^PFBk&n)LktT6;U+O_J+QdqWS!p}{YSTJCv+Uz!K=F8>|r zHJzbdGZ&tmKX>8lk$Z)7oC~89|D8ifteVC!OpJ(w`58#11~kMbZW&6X+XT}qbT`*sAuce>`op3?3+|Mt%V++TuvaE33Q({ z*Hoiox;9;;pyXRh8cOF{MoN}7MXmvmmf4%6Frj(wS+=d&Q541z2nG~sC;mt`cssOs z9)ab|P;y_1-SFv?z5ZQL@IV8lQ-g}YgM5>m*Eh@%0|%*6{~DZ=45O(eP*6OckZ6@D zK;B8CJ*-5tQ#g&v{(=^xC-v6MYdq%=wpBIEaHrQ~lv&&~eI$2wXCGGO zK2mr>dRrUN7%MG|YdqTx3(bzOzc%WG#31>BE+3W3uJE!wppuOzGf5!cDr0wSK~=!- zA2cMaY%ciqal4;ec49C-%@*3&2b19c)ODG>)jzy-x{t8I5W0rx;riYthyzS3jo$=) zLX=>f_#H&Yfa7lbVzA3^mmBg|{7Y~B71;cBHS@VbMJwYhk7Y77zHH7vE;#Gp`Z4X} zU$hTU^#Abryn5#pUls4!xfQ>Dt11laU+E#UsNT?7ja+78?BfFT%gub?_uOx6->l_q zWetVrjJcTS?=@jXpr5xFxj_e>4*K~kQ`MWqQ;@AgCvh<^wdVNJS 
zUgTQQW>p44H$|;-;hltJ9OMVAA9%JI(RLXpbc`tJWKE(M@xpd!T2_#~G?{LM^ni$> zX_t}yFlpHG6JO*L%hj4Oo#AtezRj{x0h4`KQ9y>H6)+Thd+DdRJ9R)8&71hfbmzr? zRkvV>2y4eHDv(o=? zv|1%`$ih4h!QDUiojyn#L1)vdy*y(=U<1pG=@gsmq7t+cRh|&f533$!>-LwN}vg*oCrj*|C7yDFhNX5cC^`S*mau7pnedp>?9; zn6pO+&TeZ7+Qf7)9?R6j#iJ$Ur)?Z~de2tW<-fe$I^YzPOP3itv zvRj_nT=210^m{}9S`+?YCR_cT(TKCTfYWjA=!116c>PiPUC`v$h5!2B{SVvx9)UeK ze>R@y|6D8j)jg7r3@eXT8)z`XFBQk{duc9PZoTsUPUd;?-n?%pT9PR2S$Do6a06}6 znB+7wA^$P3{rd1MOcDg$3#XT#d8!T~PgTzo!}o`cYA!*p!jh{sy6H$G2}jK?b-jfa zIz;Tpxssn<6MZlQ;x+xB;s(fIAOr}Sg)D=Z3J_X}6&6{}B3c=W1aaxiAA2vVaPppu zURIhhq1^GZ^6GMq@18~veBeMXc`l#IhdTMJI)3H{3DK*?ye!%<3s0R;j&3mukK}Ks z8L+;}>-kJ=m{B5a7PJ<&GwK?~R6D*dQVR%-X_%tOY-vKe#eOe=ItFsd!CqVXQ94CX zFy*jI7AU@h-1I!~D^{EeXP1wE{$<34;yc>)No}M3asGq*mCbwK0adA{BoTCVqIdb! zipX*(lSGWfn8Ty|-XNM+X6gAQt5qDl@3d)<7=k|%>C{M656@NkTSEyJfYao@+W+0a zzuPqBWx%i1tQqb{{#?S}CF|cxTiqhP&*k}l*Kp=-q!aopq1S_YiVo*3`c4I{?5)Ui zJM&)1vD!tcf-u(CLeOLeXPWm76F57S^)4PMKDa z+Qq!#bS<|0`l*s;y$A6;W^i(F5#*qjxov=mYOif3iM2SiU3M*PEnr}UjU4O_X{L(W z*qb;fkMyo88f*DDI99>U>KGo~sIPgG5}Q_T0#YKjG=uSZS++4~78WLhye%t(rP4;n zU5qFP9S8ki1ZmVTZA>u$qyYh7^Z`Qk7+jap8$Ec@Ehr zb`@7u%HJQ_R}UejgF6GbW=HaD^?-uvUXu`a0Kz4SV)MS@@=c5r8oM8^X*9e00k2Ab zQtoz3yX>{|E%o`bFj$yIZUc8&wvI&{DgVx8Ub1N1F>0762gE3W8P0)pLFBZv%*Eyo zHMe#!!hNLSgb%2=4_iO4+VhbD#|{e&E%_6a9lu>hu-x*n-TPqa^ZeGG;x>)t-!orD zn5@P{lfN`{`YV9`UgaJ^zPWMgPM2dkBuY6Hl85YFQbW zr(B2GnZm#KUxkPI98+RKuMgA;*Ck_LJzs`jBlo|9v;HcN-#zSfQ7dBqw!$c-Ae6zV ze4Zb5(qIws)%Dryt9syy%kvx*vhTomQo`3mubAOY1NN~Ex4pyCFIj!=Q4~`bQrLZb&W*p<2KQk&pXt%=em+?b*u zdKR_Xp0Va)B+Ir#1(>l!fw7u$ZtlPCoElO@IN^r78meqZ9;IOc6#g)~)bC`uFTw&F zjPXhG3O0tcepIje9dYqf*PVJo$b8`j&N46c28_s^hkrd%)=8)5s6H7GkAYlgD@M#E zG-3bH63)*uB4rb_B`w7P0ZkkYth|Xcc^Gg%o^CUKb)&onX#3YX{ZDAugf^%|(sL*V zs^$DS`@gOHtoQH2G(q5z8kc3kK0$ivt4JsyzFRWk+lQ@iJ6gM+Gp+B)wgy=Fn-3>s z*%NMnymTWvjVv0R1f)^qxDT!cB7o+L6oIPi$)Nrc2~#=NPF1bIg2^D- z63q!Ma@ExW&T)2Xwx}wFn7Z?ErT=N>pB9Xx|bgjF(^ z2X=_bZgm`ZF!?*_L7_ob5Q_Cyezcdh1KE-w1p6w!x=O&Gp{Z;{k2SwhOL 
zo@rxh-Q>3j7GN48;}VvQ*9c9#Z%`(-Ph6DYDUb+Jr%k$RiMu*XUx}I?qbnj%#`}#u z8X$KLR>bV1a$105pZi`V#%XOoSQR`OLstk)n{%XE145|iWs?863~(ug4*NkgvEaMw z{7PE%eyA3!XgR)#ryjN-0>$SIh3ZkI;tMkpjWR=!xV%y<_xl5xV1<)h(y;c#_Bpm* zuUdn?kpNNZB_K{FKD{x!@_Dp!;vsM(ndZ8^J~|=v5HUp%JhEwDsV9E;byN2_=>I7D zRkJ1Gb9YJ&Jsqa518^v_SWxp|OZ0u_@7XW7E0gkyZ}VH9vuI!~O}y*(Zs?5ffTi$zonV^PIeoQr-m%5m z)G-+q=MrWu#QwX&eE)%7$zM1+D*L)Y^t54VPcqRgbcckCYB z{1F5d6=hpBs{~j$*?6=VF@k%TWKXdOd9W?BFdAMZ(F5dAbq+l%E(hoC-Z`?}N=6sd zN37DSpjnd20tRZEBVpeZ4=zlIL$(K=#~Tzy4zKnXZU3;OY@#=V7`C(Qa9<{KwxEjW z^7_#wYflB6QIG_AEEdF+(Gv)eEaqm;)v(s8x1glUnsue;q5ZaI76NhQ@ipbokltB! zl5&rs3K1bb_Ixa5rl9OgYCK^g(?OaIe8peZ8Xe^(?tX$HsSw3BKU^#rDPyD}?K_6G zxw{t^zQlND{H?;6Q)N3@91@?gkP!b_bCS~GQ`z*%WHWS}Vi^;0q?_uL=X4~)k$i{H zjjrqSp~PD((l2p}$wizU&Fp3VKNYUQZO64okdat-Ee-qjd)A*g_tvxdk68b$(fLEa ztvdQY(2wwsOv^E)z*(Me2itW^IbL0)9$(E!_1UnHRciL?kIP6-IUcG%~#%kLR zqOvNq8N(#bO9 z;kfHPM0t)7HA|GFv^&CFX5M?aA`l6EDMFNCoPsRF4kGq#7crYA=if*CvCl7n7d3(% zs^YRoCTgT9H9Qh4O{; zJ?dELh!bv-cW*>VAp3Tb;G;g)Fl@Mf%2~ja#sd&y!=J)G;gbk(j+Xliil8VE6{d8* zN{TBI(;Kt~BGE!HgHz-p5YVZ|^IWZz%Gz3~yE-iAn(f0J8Hd9NzEEAoOM81=2#e^2 zz}x1((MGAz!R*J&7RDax&f8$!0Sb5=!h%z8{DGD$pCpF9&Spa2oI~BOgy@`JmLO&t zPpW05I4SNY`;zKknJ4QraI-=vn)fBpa`t&$?tmAnYQ4>aHY--A?XvB4)8@Z#`M8{x zm+WQXtSVu%ZQM*Y_HpO`%uoFJRd_1cR3^j}gI0djO`Yeh(sO!S^d=uK{3Y}=GISQ1 zMKv0X3DB&w5B94v$&Awd+*!c$O;KpV#~LhUJFy{rlR^r8en&6pEgXKaToJdr^K0Y$ zkyRU3;7_KGc449gL%jNgD6!u3UH+}k+UhUN?-~BSh>(12v|Jsdt!f}2V|$oi)0gO| zh+!Ka%`x#|=0h89NonF@*V3AKIzFfa z-#W+^Ff;rZq3t7w>3h<;jjuqNF*FOQB^li7tC3hl>Tn31AKI0#l#IV*6{xQwTn^id z(y+JB&qUX2`-`fjIBGpbWXgL-anOhm4S+5>i3z*f7=@#FI&(z}&PR3^6B`TfN>rq+ zzQ*X+1@EAJ$4^!)3qqG$=x&~Ly*DYd{SA2%}hDCQ9w|8_EL;u z*_!=+wrGKuH@$P#l(!&%HGtRb0#@|KTG>dzhbv={0=7zqPX#a|ga;<=n zkdP>VpRQ1i8CCe_IRPq3dn~x#7y2RDJr#>6z?5AzH-Opz+tq*1g{%bZN5`(SI$D%D zR+Do>tfZnn9u%SnRlMI4EODfps9C2$7i_0xz^OlWV4Bfwa1U-ODB1ei<7P6<>VbJ? 
zCbq;QITqGAhgVmz<8rU*`N?H3s-?g|nBx?jIYN3fXEIqYkBMen7vJ1I5U5dD9T{fa zBq3LhG35-Gle&+*J5UIYFGiZuc$;Oc8Bb`&a-01D3tNinV>9JE5s@=HjW|Fu(2C2| zW5iFPJ2wVgSq)IKUWW0+3F8_zh^tAKP6qyaPW20M@f^nK*Ikc|tl=f6G|5A=1;1i#u+ zXHd4SgRc$O>d>?>45d~MrRklxVr%cYT}ml^DHNQVDO|$g zMJD=KF3UamzUz=k=Ayxn!QJeaDJd-92Su{&6>->2`~ku(9_H8oQXzah>GCyy-S|KP zyfJDG@St_&=Mia3x-F17?6HfK z8awBf-Rt8u(PLABFx4(Aql;A)XJqH|dFD>24!-b~9H*@tEevJe*0O-!d1Dlnz-fmc z@-PV=l1C!Pj-`69D3LbTQU`5T;=xT7;pq66QIfLvCUs!sKsN+P8o_uhMNTZRzbcT~ zTf9IN9R!Hl>d70bmyftaI>DL4N8N5^nnBcf-d4po4TiYg8wbGY&?E9VxPlVC=y3GN2SaAu>~SikZOo085Dir9h;cZnVKOMm3Z0HmjY4%CrRXf`xLxv zgrH*WpgdY=-yBWGFwANA-@muBUfE^P?a_PAbIPh$Hab^e;uBjZ4s!;^pOKE?r9e$z z=yqFW?fLoZ`X|IeGPsPr1Rd*OA!b=mI?@ z#1QK>rcKy5#&j>=v%E|^V`FS!vKsmT$?{lk-V(XS)R1)OgI|UYsF7~6@GZBTDK1#7 zASZL`9>VY5|0e?h?aNwHYay)LcsGFvq5t=x_Ix^h3H^1+eh_Awc|$bri`56z68@nE zUM3Q)P?K5To7pPnMrCeKOpUT|Y~GjjWsK`4Q*Df->S?%B-RfMl@=L3xE~v=3NW-k* zU+Q_bbC#vjj-}?RRV2s_nLhI+YAz6z3Td+6bTo2~9p~^M9!vXfNBGGqsNXb-E*o*2 zF^q1IOvGYS&?Nq+K%-?xAGyO%?tEG=1W%tmx!OWpwayKiSAg3c_3y?YSglcpXM*E; zc`Kh*K@iGZ=N1I#8#a^eurNym<2!Q0f|RdioEu=+4ZE6Vs}U;8%bd&Im!J zl1$7`O#(_b4ytE(4POqUG$P7)sWmtA8L6mLHVOxvgUEGsl>l89{PWG70V>O00Osdq zJoO8xc$>2&t?7+)#w-K3h&#_SL6Y#t%+|~B*YRV3_$h9y&aw7o*DRsrUv`W;`;g6U zD0>-ORpKesPDj|OyKe{1>=(9tPtWbX;=QT;c!@~lUZI^=o2;_S%(+7Y#Lc`2_TN(u zQOAin2&sxYq1zq6*A@GJJUPxM*vEVs)?uVrrkZ{*S}L55`5RqNrk1>eg=ne`f4;^D zb2kG)flc+z5sWQ;3;7&llS2t~RQdj#mF28v6(L+W0rhM#B#Z(Dt%iS!)L6~fPraOT z=&%IoEvkN1(MDFk#z)_~|We>XKSU#0iC_wsCaPop3(?+*; zmk#4zDac(~AAc$)#*Qs3!LYnI`IF2lQzdJvIJzAIi3rM&>8trS<#8v-LY~U8JZnZG z!?k#wb?ErS_%UghnZ^CGRZIH8o8qY^ss~KoPlQ@{Q2*6ze`~Y#qZ+($6&E#=a#?rr z@=}$nnm#sLL!C-KbB7UqL!(4!uC9c(HtKe-H@sJT?pF63N6w)HKPxODMihtxFOZO} zy9{7{R35gpQE3R!4`v*VloNCp!LcJhszsG_DQHEVf$hOLG0_S{!d0+ah0k%@<(l5! 
z;XLDGNancdB6sEwbLvrlPYB`7q0q0NZgqSR2bl(Gk0+y=)o$O<0;bQYpMQVd%+|Ky zSM9~fHt3(8r=K#H*J7>zSV*}I+lZh(#&3N1U*~PX3Gl zq$|1!kv|dvrA5yJMiLcCXXM!t_XtKSCIZfkg6QWH(5e&TE-_)%LO6Yn zivGHTgeRiD#RWaCr0laN{i9T^mFd(jhLl{ub3n$y0l%istNrs=lFf3<>}qrZfx-|R z3LzpDHJ1zB{yhz4V-q$-RH=ciO7qhC#Q{Sy$5Kn#>OQRhROMzDlgM~zAD8*-Up*qi zoX>*&tAw9wx8W{oIlbhk2f~M8e!bcXQ3#=EQv@?mc7c7P;&FA+N1!83x2Ab4+G4v8 zkw|_Bp}&g8;dU_Q=ac2y0s=T~Ve((G*xeL|1E?u;X*q@!HnqWVOv$fXf(0<~%nCE1 zStMEDtPwt+Ekca%?cV3I%QjDkPYB{Y&ESP|fyp@y6QSMY2XuT0+P^+pQ#f}-ueRgy z9E};Hn=yxflgB-oU%vE|>83VB!Nqva8q^DLriH45Ee=H0@^M?T>DF~lw!}km zEXbfVDu_0P6Xd~)VFqXQhQ$LmO-oPjOfc;yAEY=C6Hl6)R&5^? zFvF}qI6Qlgf^SemdI)dpSHkrdspXI5qrqBE{+y-1IZy6|;X)XgunQ1a25>A%S9*lt zD3g$d^$05~8oz-mG_1;)Eo(A|7kvmWDWC&N5WjOIEk912<%w{eC&mFHvUX9g+Tr8A zoUn9GvjW>3;l46F$Xl#thWnTcp!&<2lLmjvYbMzRHvAh&$1l@~(_r(C-N^ zT;+)~eIfri^ErMOeTqvBQ_25%_QtP!Kiyfj)dv9`$R@tX&Z|O|=I1>BesgXYo-k9> z<2w&45^Z;bB#)2?qJ%)>Z9+h|Bpq1v)etb4 zNTWBGWHo!`es9-ciTH2NTa($0%Buaf`Nx=`ac6(dNHV&LVmG6PL?n71+f>_A#38$HTJMTLWMFS}YA_a-dX1!e>% zw=V`&VT!u)1+ohgf;XVxO%R`~8xw`=mGf7|Py0e+o<1n{VW^8i&`})S9qnJ1QojUm zg>`Bge}QJn%rUUr^5&xUfTC7t*E8Fezmt#P_zH!K4`S}_?Xp6Cjg8yB_jqiw%9Bc!uE;JJL1Af|IB;Gxif0yhxxaql zS}soQQL~~vpS^^8{L->UE|u5&`1a}P`n})R9Si5{GjNH^KFvK90Xy4thZ9dIzI4&6 z!PlM&R9EJ9_|fg?Dn{R?`)vEFLEA}GYJTDo>e_DTRDO{>;&|zo;+=Yfy)>>vLs#o( zLY`8*)#ZWErC(zBii|DHH_Dmlj`^{r;eZNM>lJ{j`c*kD>Nd6E%>D#JHuJ7EpoB2y zO2_aCg|It>mK$~4xnt4>gRlX;8L3Y*N3i8_p-QF$^%fGcT~%~H2CL6to|eHTQHZ#h zh@(@^B`roa!BZYL0#6xo%iE1EdT~zlxsWnRKAl=#kXB~A9SC8ygKM|+^%ui(8Rr__ zvDVk1VK3;?G-DZ1mL-zM?iouY4B?DuaSxQJZyriT@ufk>;J9T zoW7ae6Vq8cZ+M49z%1xHR>9o=-!K3FU+i;$KKjudFG8^Umd(jUv&e8dCovZ&3VvJ|Mv7#b^ON-At->cGpB-3>{< zB~pD_^iV8v6;_vdimuN6=Ome4=i@6$MM6?`_K)MV3?V+45%{|Ky6dK$(&D}=157Fc zrkzJnD`G=ba_oUR*y2JvS?Ol^5f%B;3ZI#$&e~0NVV}fhZ&DDu)(*bYew&HzwuZQ6 z%F$4Uh`*5qI-?d1Ew&Zb3P9xi_=EEN&lm6x)&wHgz-~q=*Q5f%?x+=VmKPD%S0I@Y zm$!DiU5tWC$zZ*q=pZRHqPu0K;8sMGlB?j?>!Fymta76iWU%v!1zl!iNWEtzbdrhA z8Y%~=ur!z`b>Mn+4y2s>rAnZMJXzDkg94@{g-wxNfaskT@!WJP0jI%JA4#tTV2T+; 
zPI@Js2I1pw@=cljRU95}Jimc{D0YuKowhUkhQY6F?iDA~e4BPX3(+Dx-v;+K&pG^l znTPb6vS%vFk|U(pPj&;*dmvl{~NlbkCQq`*_ zOTebWv=pojH4hfecW(Y!f`6*5`&#h-s{7be{0t|4{dD+eWSit;C1#;0EJy*nmC$d! z5kKSq2%qQ8Eu8lMQFV^(nLyo=j&0kv`NXzu+a24s?T&5RHai_V9b1!gX5KS1-}W!q z*IHF~)f%<;aWI7xjK}r>sFNQew@^k7qO}Y*$8?0P>!#9sQPIh;hUL`eDP|EN#01Ea z%UZlgQI)fI=yByyBMp6sp$4?vlZ&F8GkdoU@hct?6_li$( zhQuAoXkvwqF@Xv)8eJ*~PQI$>3;7<1Dv9?ARnDAtVVY?<_m9}}U}0w+@E?yabzAI}ecInXn&FX-t3W^>X>#RuH(d~*`>y#Gh@ z$2^}$@B82Ro2eSmv4+E=-vR_$Oz`eKrIi(*_Ww@N1+lT4zh5`0@i4Us@n&VIzcTy!n+$$xWj!TyvC z{2W>{bD=nkavi*ME+s*t;aamziIA>B2?6bExsTga63cB^)3vz+UXdfwMW5la-iQ^G zwreYyZDBxf-K=gp?@SOci?Xlu(xU~fR}l%57OaBIDi?6Z`ds5D=jC@N)_oo6H) zl%XQa%7*v*c$N91$Er#eWW$^)l14JyCOZ)}%JEgduvrWEVoDivZe$ z_@28qNnF`?m{3JmdJ{-_n?li((|5Hl^fm zOWEag(7YT3aukDRlt?}W=^!F@Cr@=IDcVVOOCC*|2ZW-tp&8(Gr+?R@qEDLO;pQsQ$)Ys!GEOu zS_DAae>wepX|4Jt|3QHq9Uy&{Nm$Cdyp5b8Ca`5K=w@YdTAW$#3!{Toa(kVB2Q5p% z7lU==RD)m_sBlZM;Gs(rHo72?I-KgE6Ga508sq<|9zxQJ;l`t|jG0m7r6$HyK!KMM zi@D6SF26^Bli|GarWWR?8qm1^jYy0;e-k%tQTw4|`c0-%l!(X%J-zoZJCD7iN)~BU zZ`H&WNKvdAqqLiVm6OqGC{~y>7pE&s&ges9Gm9WQUQ)`zU}$3URM>N43J!$0mYSL~ zb8vBl6L%B}>ISc8_9i8n^~C~p8*vO+XJFJ&AnkCs88}^(0vknQ)6f#K_s#Fh>}A{Jr6iAuayI53=q;Ka**IA zPw=6?L2X2qcOTJ7UBWd_94-Yrs}=5gIk5Euv(vu~>8wrPQF7@{J6z419+01{^{8IR zs`^&r^pfPq8FWio@phUgIgp}yGNgLn9r(~6rki-+qCe{5{KUXGRK+&Ik~LV0ew)YgV|q&LPi@B_7J%^!~QU%RhxTM1Vt>rlk6y+%ZS1v@>d`v z{V^jsp(Us>Oc${mgz!LSK7w?bW}ojy|XGX3v~4ILjod` zxU?5<IUJTX;Fn0!4Ens$sohEwnNi7+fyJ!h(DpwJK- zUk-?vtp01IX1eM7+o@SJG--;x&x;HVxSBE{l%vz?fM!Xsy3V6#`AHw2ZhG-Ch~Dx4 zleB^R@1zZ!To2X%c>KipyJ)U>{Xi2cC;11vBhd7cTHK0G-Ib^YjyVkI#skPOx(Ld* zB~Zvwj{P(Q`M1tA_ZNCVLD075ERyt5NPE%?VpMZHSrp)&C0-e#HvCW)^@QIGizM3U zSewcnbb9zf{Cb`7r}qm>d-Im&RFRe-z7%5JB3@{T%)pf?*`CdR@KR>6ZX?dCND<7e zphy`JWvg?w;(=nI;Z$j(`B*=RmdC!TQD!e-?=m)-^pRW^+{A*g6VB`~bVN5p)x&db z+>A;W&b*M988Cpl&kNMaL`3Q@6^FAevEbsc8D?s=(4iv7q{oiRoCL1aCjYolAK_He z71|g)-8>w4ofke`RW@pgTQaE7Os?Ma{YK1uWu_l$3eBQOCQZAxh|95}_A95Wys5Z= z14A~6(-K3PkCRqmbZk7gd+r)EzHbjkt2Ocw=p=3 
zdQ;;&xZOBX{Gh~jjT3g|MqrdoE4!#-e|czKk%+<^rZQ_sixalcgpKm&#ZEyrY}DKj zODaLcxsG`H3GqJCf2g&`Md7L67;f(+^ryH3E_gdc2w|lsL*>oTIOiM&Fa9_o-U5Y^ zeDnp9`a4E7-!pg&TZB3ag6JJB*NiDJ_n6M$al2~gY0BYKB;U&}&&)Y$iWAmp8I|#4 zJ28S8=@KW)H(Oe0PpJD!&Vdlv1n~MW_V*q3`ov-ZYZUon>v|XJk5iD}&F+k zTKxCN^G9{eKwM$I%=?$X{Xj>DREcXL2T!Q%f~x4(>udboU*Cfhf!CTj!1wu^UEp4u zVe~&Q{pK;bD0AACO$N1d6y_A5!0T?o=B3dzNzRo5CHS0bAE6XaQ3Rb8cxN+W-@tbL zf8=LTC0@2!LJ;!7PBNuoe`h+AWlX4WLHy9_*x@XkbVR8G8>ESf!!IxxwD_W@JuImA z#)U~=sR4dINy0J9l?$a{DolT6fY>lF#52Ze%tv~pHBnwr^M8aPPw-u0gH4{x08h?@ zg8=?&Y%>*20_g`ud12=SM%z|1Ut}$9eCqnZMyudnD<$(A1x2TOoq*_4a}5FkG-|K= z&%Suj@-sxX)|)$;9%}`~&Er=_W`a~#)AZogX+xE(44HzYO`J&deIb9jqfZ4y7mw;!5xmjwK5*38;g5!YgXEm{MhT$wZ$5`L_cvT>|UK>}x z-E;Bik5UPR31+}+>*ae?d=L^!iJ&NtFY$4wk4-0)6bDd9!)ykwQ9J z-*h`eTT;ne;Wakdz;GF+e@CNHg0N>1rIBHul7r6;d2rL~A4&jPqr7zFB=>~zy*#o? z;QtAt=<5E)Wz|bu``%u6&7$u7SGh!k-^$ygmZbb6`fS)zkIhRq1GNBqSQbM+8l1?< zgkshx?-T#2UmpAe8Y98$A!`qS`InH!TRC6iaM()8x01{@SA-of6&e<()txOdg54SVBXtKjB zNW7C{g-M9v)#^mcdH=o2nf%h3(Ud`77ljesHB%1m>Uk*h5VAJzsB;*A;FM8$C=5P(IEKZPKp)M*pZgbQ9A z{X%VINkbvyLLB3q_I~M0-q!mb0RDFBO75*$m4nZ4J-UFAT#VP!93g;3AXY1EKFs884W{)hg}lK3DR+*s0Xlr)1RK9$GD@kylLsUf?;Rs8<->);#xCuG zN*SXi`NjTUk?y9Q_rK)%c3z*`|1t6xfM*4`ZQqGXn$W{ZW(6vB@T zEMnVcO@XG4Pzg=|i$p7bK_%@@_~t-YY(mDTcxaRUJsMi4y3Q7IYYi2)D3oQ8RFS!j z?t9b>h|x;t6;BImXNwZg)CqZ%*^&dW#N;49^kr6@LE}45spa8DZzU^(RE2CIV^9>9 zvlQFAlpG`k9r|$^^}4|98cC~o6|Ff27;@uE*o1eZILgH*clZ}a-= zS&QDHX_AG-G(0B+uk$h{jr8GIcn9uirzG<@CJEH`(YShy88E(+EgDHl5U1a0Iko)a zfNv{dDTY|jLyUt;NuM;q^bz-o*p02QJ&yLlp@aORV}-Qf_ma}|yyHlnP#%Hx%*rF^ z3w7hPA7d(6sKxm+gQ&OOO|3SIl9G{T{p4xHqTc-Zi!n-BN2w-=K?4$SQe1$1)yo3_ zR=)u!>II~*Xm)^QK83_180 z)64A%9-~H8Rgn;!hNC&>1N6xfzRG-u>{7T-#Vo()VmPU!ybs`h`gEGx)inVS9Lc+$ zWuOzke+GX4&IE2{?JFbW^ptzztt#N)$FVZBN~z@oeVD?D3(8|KfR=)QEX7!{Y=;e! 
z*v3rULocz*>!gE`_Am0HYIjwmbLCQZ9?Ecp7wirFjb?U;0#>&!^)$pNxd{^7MT8sn zMpg+Iq6gotP$W<}f>_AB4XLLPI7o&#gaZaFJ(hd>`v@wK1vj^?E`{oFhy}H5(WwEQ zvqo&tKR1rNbE6(C86BCTHw2YgBh0EU3c3zu+97GSE=#uwjhBCUetmfqdW; zc4XZ;c_F0Y(@fRxl;=x7b>-x0?#ajzf%8a(Ww5zWnbM;B4ct__0*L4Qy>aN>oio9S z#zYp)VVw<&K`4m^aAnd2LL-ol&mDf-Gm=x#M=RO&5Z5dVtBCnudw(^FQNu8GBo?4` z#h10g>gbymca-dGP;lKN!=bIQ{~rkVzbHME>;IDJOdM|2{!c}Y2BaJ08`a2!<@;P~ zkSF*6CAk^)pOboRnhFHxA#$*ayyztf5iFmbB$YZStfA?oi4&y1g)fN;o z-Zkwf`frzoowP=T`9sa8AiCHJ5VDuJC+zh>>B|mR1psT(N%O}%R&ikUP68tJ3reDC z;kmV09PIE}7aq$l3%0}L*3!vx7nmK&c{LX#jfEdIHDuc9Lz{H%qAghIcRFF+56OAs zWv0t-S{hBcA>w7QFyqB7+|VdhnbE?rQb{$I*6iqm!OUP(l`qWq>%y-SG@@R-4gGUvB$X3qf$F#-hUq~(qsFd>S6r)T8c z#=DdaQ}^3Fq@*4lQXC!PaSWKwL3ucjA?*$`NrpnW*~tFFO5y5Yk*9_pspA%xY{VN> z*~jeK&Z#Kkp7oX)$Nr86mYCi(8P0t0UQyn)XFbS_N=9FYt}X|}@8&<)l?*?{`0BU| ztMH7{f$sJ+`#-F%@mJ-`EIqEko8L`FhzRky1{I`z1zlrs2|mFxo`x?z=xhHV_-6_k zi5B?oOrf9Iri57PZF zA8ZJ|cN{+EyxifT-!2MyJc}d_K2BGb(I++k){=(J0B$e0^4}l&KIx?K*YJj39?vdo z(8Nb}Cu2kJ>h?!zHEJ1Mh@J1Pj>lu=(ICVmj48?=^hF_{@h%l zG9^&Q^5P}P`J1b3pfpzCYOO$~h#CHRF$gc2HjW(>Jen1MIAx4>#;nMME}}4lVT}n0 zt=Fa~e30Qxt+P8en5zfXPRf!;8@IjMKhF?yw*h5?4dADWU4gUc{T>(#AJ|qW4y4SE|%G!`aJD>i^f%76C&ufniA{u?b|3@G21&ypwDlbfwp()lZ$14#~7?hJfX z!=O~Q(g}C^wyGq^m{6B;P&6J51W9b(lY}n^sDVOKWlN~&yCfrU5X2*yFpz4r`d5O= zzM1XXB@Jv3AIWC6zO+-aFJGwrA6xIgJ2c48`fc=+XfzW4?~EV_`RjNByEcZ0*%>2B zfM_EpmuvLxvP30QU-KFZz^(fluv$ioU@g5!9cMz_L9&~%LP~zl6`t%ufrZGz-c2?~ zSm;!~j=ryZ0`fB8t|mL^VZ*z#TWLhIKWn3U$bxBdRS4q=&dgV_F+Us5l=%yx39t@B zE!W{SF|jxao-K=NjsQEsCtAk&Daex=KupYpuLK?mSuLi+nw5i`i>!p`z=Xx4tKkE@#!>0 zv@M`PAyUP-YBPguvMI6Q0B)B|f$`KKNfZ*>B&|(yPXS$FN@oz+ajKOso6>JWvLl4e z22LmD3fd3UoOFwt_bV22j*jz$jN->_0*!9qOMWDYRqe|)Fzqzt<$NG;tk(@=((Lo{rJE273O>Mg5iT!MvkU5 zHM$vX$Q{8?J^MsFSETQ{2;kWRdG>2_NZz!OP3xZuNh~y;N zI?=uGx6sbal-Wef&{Q#j{fc*+$Xzry3)jB@mJZb&m+#lUAPsYoicZ|N7T=y``x`$q zMWTjuq$|`PY zAre;k=3{F}Tg*z}r>>mKaQ2sae^%c#R3Y(h2y!tKfFS_wgRlw%|Jv(v2<>K6Xz9*8*glE(c`3g2VAk*q| z%R52IM>v7A48Uu6sQZb<6S_u6xDw7jXdaO-Vo*-70Kef^q#RS}o4|>L1;fDI1)4Us 
z_||Z4OH+drp;l$R_$WVzC3Nl-bw@1mn!>6RqfjCXSwZ=%xk4zho_3BObb5bZ7V- zpqa#<^j%8(XQG`L5)(+2($yXQV}p-RW|31Ou3?GuwAY29j}nBh7J>9MW-v%jWOsD zEWK(ub_J{5>DW|LLpc2?%@t5C;uOc5Low<(?Rc)&&WPcg6!#8^y-jkH?i#|q=p)N< zG0K><#kFwK+{5~dOU(|?P0zHE&q#WhVPh?hjpNpDci8dRtlmZPwx96hNi`WZys77A zH{I?)R{>MZU}04vw>AVx%!g>U-+80Jkc5>3W9?@0t4~w2&N0N>cqHTL(3Tg&#>QoR!rE9iS65HlT7QAxX}t*o&wOQ{>wdfXKCP+q z6#r&Hm*_oB{+J}0r&$U3QC*@dFs#OgE_=f9TOABEGSCQqT7S~_^)@A_> zX>l!NOmMGhibv1Tnr$$y65hm)kY5uJJ+h+g#8PqDM;}jX3?V6bo|J5Cwyz4c#FdOy zXv^XwcBiMc&A_nansW*N26?u-fE!q`3s0B2A?yoZb5$VSSCW{TH$6KA=X-L#(C!AM zlpQTjt}V-ko$KmmFX5c1V|TdlIO!-3m=>EyfjKMHyk9u;V{6)HtB+igMIR|Fn@<`3 z9rOX2jsc_>oGGb1!BGKd-Hk?_4v7On2)D{KFNV`=>!YkGl!AZ?XD>`5r|7Lt7Tg(rnCK6)7a*>}dC4013tZkSN_j{p)TAx}CZ)GNxy?Q~Zu?GY^7M+DS?8p> z+EI(OJgl5Vo{zI~Ox#de;b`fzlZI~IbSIY@qWmRm5CN~<-cT5mSJ2w;57us(H79Hp z*nA9-g5E6>tn%N~IDLbm0ut)D>qrldAd`A3VPZIjZb^V75ur3EZK{W_1flBFKe3be zbxXYE&(Mbe06Mry#v%0vH zGu!9-8+n%emO@I8xifp~3gHP2Dn{}{e=`Z8HwTtrYud{W@J3Ji!P($8l3k^Qsf1Ds zi*{yt6nlQC0mTlM$t7@Q3IUOM&A`+nGzwMsCOfulm%VExuJ03C2_-dAhTz##6ID#R z%61f@?lYkc?cuqYx@GWm{1NVVczr|9JcaM0#rx=00)GS=^vADTk4v+L3fp+Y(LNOr zZeaOwQA(KwYR<5t_*tC8p_toi=WGsT72h@G{lhqc(P;@r5SIqG`d&!v`Y^( zIq4$G^@@Fc3LEcXq*m(YbjoAzsZgV1J1~JfBV3Wf^(^;4$%A{I%wf*SwcO1rG#=m+v89SX-O~U=yT++bP}G^~^58MvkED5MBA_dyc(H@2?Ji z{R)8FlfSoieIJD1!PoC9*PLUyQUZ=o24BJ7$Bduxe`yay{Evgn+!r1|t^M!Q{bb=f zgoMws?ao3b%(_cO5%X6d24*q^?9*BKKjSR?-~ak>WDT4Wc9e*#n5%5pVPDd`*YxhK zg*QQAr&?`&z&V9uMkse$Npv+rS?$HE^%Fz^5sla6!8TVG7ASwbY6itZ*;L%A3uaBW zSkYa23>n4R0?n#Xc&FnI5s5_x*BBWk2P4b7O$vLO=am?OlGr0H=+dv^jC~cYoTZ)@ zFnzRHW)-d(tVmJk@F5hsbNpt)9?%u>+>)_A z{5GaWQH*O`9-amc9(rdqArvn6&mJd%3%H$0W_eR!X<-$-&GV2lsj_)Q2R1(YSaNsg zZBjHoY@i{tiEzdi%=M#!M$-xGNRKB!l2`C3gq(;nLVEE5(xjH)btn(#kd50Mh$2q8 zYuA~(5q8I0gNz~sO`{tdJtANj25F@o49v@RW7ekg|LMoA`)}31t_|~l8uorv|6I4@ z^`XSMlYG3uxZvfSf?tBlvP}8iTslX?V9fUU>k(OzYeH~T?YTrlmk%G$65Opbm;*?x z5-1ROWaq>d&ZgN?v#Q9^x5(s%RwPp`tjVXomJp!#xybQSu}q>R`<7w0OhpCQeyTiJ zh*hdiE=9WEk5K#-0V#_tEY{Q#i)pnU+Cc31K0*Qz6c#x4+RVsG>ke!UlE_p&umpe< 
z@@5ws*jrQvsCZzqVLWOERAdP#8y_p)sZ-KZT1!97o7PW?sx-P~zSGBWqehxJ62Opo z@gy{z;guN9G%--TYaxP}m{s3C)dQq>Vmkol4%k3Rt?@An@Hz>)8_2|o0TPX(S4ljh z5B2$=VP!qe-O`i7%1gdBl;NDDGUtEvn+B|*xPeA^dTZT{qsVGl8D&qBl}}@9dK!&; zV?)xWq@ty`((-pREUPsM3vZ}m%+X>mi=#EDGcQT&B)hhXz9D@^=72Zecy-r2(EiSp z9W4!nXDe%~uNrxMts~%fX95dLoTkp*7+yvi{PRo=@U1&RJS9Bzp9@t>7;uiW!FbBq zo|s(S8J)wPdR4@-V}UpJ{C6Gy+x(B3z874UkiHd1nMR{Qn-IGXt;s%@Hka_jzi?8} zT*RJk|Bkl-w+;%89DB3AiLH_>5Oqh9E|_wstI5?9_^m>}rPCTXxt;IyuHv>E(TaGn4C)}xB0z^fji=hDRv+ui6mV$tXFn4fr_!*^Tn z)%)Mi0qBtzneUFjPnTPMWf`ge zg#7M?&y07JD3u^N;sF^u49E%k3rOBpN_mTdusG6Cfz-Y}%d7RYel^T78|_izK-_ve zoz1_L%ahjx{7zOE(smpLF(lNLLK)%d$jlV?i}aU6_*t4XL}*CjBX&^=YI4sT!LHa6 z|5gYSP(OqtLFrhry@lR;yQ;0olFDgr0OJbwrm0bPb)mBWu|W=B?n~|uvD{D*jnYg| zO8OY_Sz{mdDB^c^Hf*sK>nfh$60NcF%hp=|*^!rA&+%;;tBeErH9;$~X?@m1q{i*n z>4kbcbQDsV26;Uxd)4ZUv4Vt#l^O)r2p6=0M6i}$ICY=@cWaj=%NVGBQpZ}VJI%nc+ebj^}e)sMFw8OvJc+Yd4jg26tJj57@A|(-Z znBZn7(MaXL^(NSh_ZHtNSUT(&pv|I4BzX0Lh#qk-0o&{pfFWKi2w}@@9 zE)>XPdHHp~<#b6RNUoz^Em=vYgdrxy#x0^;Bsu%=$~UKN(|3}Co#f0CX?69y^ZxI# z_OaT+_tJN^=X&FLJ3F@AU~*zL`A;+dk zZy6xX5*q2~&3t$qwo0p)VVst6sCqbkrn?%PyN=4f=d`u+Ib3by#8Hqf{+W6gTs*}I zP}g}0;Vuokut4BWFDT-S%S<+@EG_i`yu#g?xxsOwF8v%U9i;d=cE6h-xa7FUVS;7Q zYDnexk6j`XBBVn5=kbiDXz;)s#j>QLhokg@ND5aC77pyU zY27Cl?Nn!1@d7}_PK+c@6m>aqkUIg)|VP2BeG<1)&5&R^ph=4d_V6R#lNZ4#h3UF#~8*KWLIY! 
z@@&TI(cMq=d_Yl8Z;+<;l$8idE7rm;inl6X)s(o4(i)2Oi)9J;aZpS%*31rp2}AYM zUyFY)#zc4D^|F1IN}S!7u+E9MqM5TlgEdeUeI7YxV=Ju)f*`Vl zo@$F^P_{CJ4=`X&>cT@eoKVqL{nR6RlAhI+oSYIml0h*-z)UHBI6A1tTQbd1M2+#p zc5_L~)|T;?AyENga119VeQxQnWF8u$*)ZDuS$h!?6H4p^mQLO42c71@CnD0C5?a1z?m$Uoi@udG#c5JiE=Pd#@*oPA#bQZDU}&gyQ&EWr&SzC zmB647F~ZPDyJ%x@Q>W%%Qq0S$O3-2yN$Q4u4o`fJH;@O2bc9<;0*oZ2W^l9Zg4ECt zFBZ-oqaF>zAO{GbMZc6+7@<~&C@H*x!Cl&i!*`7e>BNf?ieH5^3Al)P6;g+y8c~c` z!~Q~W2?hn%Hbz#5RyvAggM!xkUgEFw*1H>C_wNMy&*9JK|N1+&6W9al4f%v{iU0Dv z415){4&`~F+Q{~5n10CCoP6Gs2o{Ug-8Ol=@!n*c-xw}(tQK>*WH=E`J!%<*MR3YS znj%XB?BParw*-@h2WaXE4g(B?%}_nCo3096?_ML=O{!*<>ybI4Hc$f6{k4#-ze18h zUiuKYbP6VUtzehD7Y+kkMlljKFUC<2%ExDNS7^9JGqeU{r1M%d7me;E_o^p>0)WRm zDk=W_a>9M7bKc&M(%;KP+<8ri_KjSjv~8Fzv9tcXONlp(`5o;DKL`oZ3at4 zJ*48gh%pW5*8kQeH$jG;h<3p_+8&RBqsT}<3r>~7wLekC57^KpV^O{X(ZRlB8fO`! zSH&Tn17lEvihbUtdp(1g>i2g#%ZwQjM_uAtLuI7t$W??^EgcnNWWd{dpB|Lyux%v@W1O4cry8Z2yBq`ibD4c5>IB)_y8Jp zuO=OS9w9nnVVW4?9(3&W*rPg`uT>I8UkAf6f=4r()Os9Zn!Kq~8nponjF9_mU~ObK~;rz&rPIJuuN(Bs$7CBI^+l{7-6zba0*1*FSY>3FQs884X|FENP1?+ zN@$T)AKr3-a!ss@iGja!)5ehge{Z%rbnlL+CxeJw<^SNOADH88=a`nf=$ed7bh6lu zAxt0w`3zialSqp@8exPGw+$=o*YYMG%&Trwt#*_!N7I3m;^d>LudWB84Fgf23qkEn zGcy%m9uwmZ#y|lJFR}x(>>U?^N!-4yjVM=pqo72Mw1R16rCfkYo5{1OuF_u~W~(i4>zTKNpK1xT8qKEUXC)woiQ_MO25D2D4Ns4- zHjo^?Z&r5MLqnpV3zi1BL@y!C5c#|sAEEBg%oW(BCoCi^Wv}q%Eb|nRTcx0F{b*au zMVBjoYOyzr(?J_0gqKVoN&!iBSjh=!hXn4X+_H(sb%GVSgK3!jc$~M9GN$5WGu;r# z{3!JMAqq2BDy4FAk#K5`7$oW9O7}c*DvPQEGYpsc>~Q=zoW4V*GJ|G`EVtyq z$ww=|7C%h4e9N1d!=hjeBqI+N5qy5aH06%g`T!;k&mftd^L(nYippw8DASZRPP6<1{f5Ql@+)~M4B`MU-NgK-2;(stC(;66zrXJHk z&Q>vOoP_ZX6aDm}9FJl&j@uC@pd>O*#Za{G?OyA=fE$a@foD9EY+aLVHOuOl9@ z4k2ybPfC9%5174LqcckSnrHD2rge(%;TwaxS z^9iy;!1(H=1DO7Gtna4j7j)2&9H13oO^OjsJ}#H=;~4%*_$e+UE8-!-lo_y=7mWcX zYdb^#$Ki<($tD_E1_WJ##0?zMzL^ZfKn*JBDkAWmfKK)bmN*wDqe#+On%F{AmvT)j zJiC=HEY1t@8TJ;X){nOlt8Zo#%OsV-oWk1`i^O0>US7P5A}UU0tAi>}te0c!yk zcL3du0ZmktDb_^)1+pI02ujh`?*vDb#F|JFau<*=cwkEft!f&bl}4&%Ij&Ggs6Kwf 
z27S)*a(Qrh8P#(~3?s%xzF-xy_Nr>U&=1rpeyRdO!vJ)y%N+YB{(p}b|228+dbt|* zCA#(Sblv~^&nRLA*$kTnAMdWzf(Vg$g}K!@qp)k6)3{*VSAw4$&fWriA*XeXwlOG_ zOs{)AD0nuNQ*)shh}zv6Au6ObfwYm`C#y&vs4PGG;wd`5v!fPGn;%<{0b}GY?SfR( zDV=0kKnbZ4;k+L*V>ck>NQvLJQDxNDVa8$c>n4l=IsMB-=1G4NnFgMa=!& zIo2%@(e{^u?D;Cc_&{q)~7ikL6OTHyc>sP~tDaU#N#i-~mxw5{`U8?~A42kBwmTPCQa^ z%gT{!Y21RoV5A9$dFxU%i?d+ z-M~mM={;LcPrX_O;=VNY@fDg^F2ZM~3?UfQLnv+QoUk-=C~(cA;?u&Uy9f)zD*fF7 z$bwR^DjCh&0l3{7|2ss2_7mkhY|S)>1wN)+{M7w-(`&CGk?W7&;Rg);T{|yGZ5uwx z)Ah8^y&tsKFQaoXNCbb(y~j9x(w^f^kquG)sAeN2hlN1u``*gcd3}tR99hO4uDB0E zvyvkkuE^@B+jOhT>6+)^LEP_$N>STNG}KPw9c56!gQqHsWK~fRsXQQ?d#X96&~~tC z>rdGP!5~dTImeiDjgcAQg#&h1$m6Ogc3;E=0I2qCC~RP`%0ToT=}(N;{Lj_iBS$C4 zH_fpWxz?Gfbqjuz_`@j__o=j<(N79LzeL|jjR5icef;y+n82rqgHO18ihX6ziQU+X z!OJ|P-z+sT9wgj9jXV#qydUDD@9GupgXgb2J%i@r&{5ZtByo`eg=g z6kiB*^&@2EEx;kob8*xG*DI>OvuP7mOZc02)&!6hseoa}pw{V@M)3;90_%Hu`9AL-TGqnpM53>j7KM7%|fp6)BzbsM2tr@8pBh|n9EW}K-V zCTu)g!b`1_*=&2=kTyQd;Hd59SJ0&sJJT}T>;KOFMYsKOo7M4#IQ`sH9p*s# zo(1RO2!WJ-iZ|z!Hm6s`*B^fw3taBDt97I%(f$_V8KC%O;v;({587}a(kG?va~EkP zL*PCe0tz$hd>hqUvy$nzQC`SU**b;lWkO77Jnz^uuh`EX^ME5wB2PC|%sx@7JhM^c zou=#r?`m=s_?-|T?8S=0;iNTm82}v&saXZq44LdQpt*-!6f%e1s*inuH z88h9=-HH!#KZU6nP8ME7mui=57KAPvPL`rYzE$8}vaw%z6u?*qd<(L4e8$nfR2nPS z#Gh<)w@Zd3S=_|f^p3+hj6ESNgNmL@qAHb2BoxG%Le;ZQa~Mgh?Ll5J9-&1bM=Tzo&xw( zevc^LqY>|@UmR-*$FiDqoW`_Y$vlptx2n5Q%x?{s|5L@`{q*naec%1T-`fhpB1UU)JJ0 zehcAin!-W(iSys^|S|RXN7KXw}K*o>CaRl{Kg?qa7T)uAnYer3c>i@K9>-a%G^~6kGir@ziVJ{e2C%tJ7HW_P;6n zzQg@{o7eUo@~^bU>(ik5`Sbk!9_%~-p&+vOuJXjgM(Zv#uWojJl=!MyX}vKu1LhH_ zd+!dj?_$xV4L}L>2R6_=!UicqX-=DDVGtP~e~ByrcAJD}e5By8!yKH)02U`8PpLXn z=GU#WHIg+_9aD)I7`Ju}b{X-iu@o;lj4s)3fEV@v(s)jcw;hYYp{bp)H~E&S2`v45 zBgz>Zt$6X0$Y!cU?A|!aubcFBv_hI}%^a$xnzs%~y**s(4EEfq3X zxo-IpIJI737^Jz9g^68`-n3AY_`M9PEIw{__3WsSiqm$CB04Q#Y5pjUKyE>DaDn~O zA*gV@X7S_#@hzQ4;R7V{b>t)DqZsY77I`ZDQZ99X1855~={;bhPzg_AlY)ZN~ ziQ>`YX=43goxW39CX8q8R2>l0rOYV$Y+6zC>jim8!jXWUbDq6K^H!xFPuHxOb2qNk z|1R-&-6l*ozG!~^aB}9Wy_0p-ye7O&u6ekts9H|F1(KURZOLA4-`k$y{8POq{&@Sa 
z^ag6M<#`O8+e41aRu=%zB-^b##@A@Sk~ zBu>!|Wk{)e>^yJ85t1AIl<+{tC}`;ZCNGvON5LY-i!3-TZ}IC_B?viWU6Mke-==Gj6hxhqGqxWvxWzI(w+7Sxps23QfsNu(!SkRV$jUrU<)=eO zi!j+mb`y4#dM6Y;V$|gJV=y^>DZvUa!Y(f1o>ltgR&65!1gWNg6@;bfS(!b%p^;ca zhNfHaQzfHd{EEw-&a6L7&Vn&p7C&sNc_D6eq>{St64zHvMIfEFnapQ?xA7px0vLWQ zux{u?t&nM?`#q?XZ}4`SYzcEDs~HHbypymZV(!h2i2sU4hqbz@c&QuZi*Cf?LP77) z`)cl?zfBY}P+PUc`v6bGGZgKEr_tXo?Kz1(qbLHI#E;21$@4V#pd;`jn?IeDCnx?_(TGL28Jtf3iZwTH|) z?NC_>?g%%rQJnw9)H{V|0(5D+v2EM7ZQHhO+jwK!HafO#Cmq{XcXrSG`-`IpXN#SDzds<&H9870=6VIy9KrIG<$rhXAJh=jv2h()3$o$2uW9o=VVHg z9<~N)7>>-2+`!KW*n<9(!ve45-;RU1o!(mOPjHcN^%{b`koHg*hNJy|t9~!2F1+>K zcEkFe&h&q;IQ~7K|5-Ksd*1L(Ncd4E==*!y(Eni7)aPFC^DpPl6MakEy=wpW??0b+ zf69Hbyk68ctH=v#%Xf)>ote@5QGCbgS>ul-de5beqOmIw_)vhvV{v+qKf4R$2T?=1NfRGl}|E`%Uix;f#XXG0HNRsDyzqEv>+eI88efwCoCx#|p8T}Ae;OvkS1J42lL`;?Zz za512Q?9yzxWHRV(0?4u)bN7|!*o*!y0pdHBXAEf#H|uy$Rzi7Ored~qd6#f-NM z9YzP-K#5@qYGgp_h**BK{otP_lgUE5Ks5yQ`KMCQ!*Jz!(R;}?H=Q&#Y7d2H_7ue1 z?9+_6;AGX-Q%wHK07Axagq4?Xk+Z9^01?U`z1f%J8BbpRsxQg%jG@U-8&|c}&pF@W zPFydj0`8|BbA!eusQGr~4;ydK@BgD7+wdmdjePXc{;mlGU6Zi=D9^n!7<&E1z$LTx z#`05{RiN;sS~2sNL(%3PX9|ssV89+G@h<9NG_nAiWhh_WHDmIYLYg95xKW&p-;0+r z%)-u*D95f;2N^zO(+vnMKZaa3AP3Ek8>r9_@7>#iuGctiY;~L?T3GnN?#ncWVk(Ke zRHDL;jzgtHRRQD%W1WH&uf`x~unIEZRf@%BAd+#a(kVwTOodBlUjnnpYOQS2RRYGS zUdBU>JhLSxG%|z92$nRDSQEf)YoHu#>|f>qs~6P6?b$PD{E8QLM<>}&hPtg1uzA?} zP{Q!z)*n+tEA9jt0tlOI9nM;em=cey^UK(oNzJnze#q4;JlrPv6h{g%I*PsbQmO%y z&t4A4V$!h|7ARF|m?|RCMh!=VNiZ_20n&iL0b&6l+)TK*Z*yeKEi+<~SKRn;$L)RfHia`acVHp2uz}+Ym8G0%bpj-GQGnFEDT

#XddsO)oD=VTkKYVQY_hbI= zjNqAI?nS&+;MLFHyPv6_*IPxw*Tl-!?}Nbi+mg4kgSNQ9&+h)mC&EL1`_U{*2@E}5 z0UB3I$A+%M&!HT0L_RIPHgs^hnZ;l0mur=KNV`U1cjj2^KX zl_Z{vB@r5_GD(ZaLMp8%kVP=8fwNKtW_waXPH2oGC^BAtw6d6X<=(xtKEo_wP;WLy zn#?`pV+R-m^hFE{F)AfPw(``tu@;Ua;R-YjG1V+~8L(j*1-;jEQqs^rtG~vVh4x-8 zFFkr}q?pOMSZe&Y5+7bY<;VWEhqSW7#KweD$$Vo5%&mZ~b%@%LE9s&IO*TL$?UtV7i&wA`=R$fw%!zAi)ugD5)ZwVOTfa}uhJmx_ zAIju@f}(A;x&C73&k5iK<}AuNq8bHCdqE$aF3r!+qBml3QV}q&2cpII;F;3wmy}zh zHn4&21BUB1!;*`V%|nnJA3e?n3r_LiQAPcP8)}9Rin!V!j|+;X!(%=c_`*fR7|X?j zB(FhMSuf2FJ4y>9wN==0k;_t$&enpYCT@f(K^#`Mmh5bfcflgby)271JspUkc~@;9 z)uA6Jf4pRwLDA?vUKIO*_Y|0vr-R5yKGmHUd`tQL68%rqxyXk#4A@uC;F87S_JwGC zv~CTG#hrLj9p;x1*l*KLNfo@`lDzHxu$D&LdFhz{)ro)J{xrYibJXG^tCwOwPL}(v znI+U$%wc6)jwtz#j?F!h*zkn>$hLsIZ^aEQ6-d8z4(T+KXzqpLKWm(}u2mCoTmn`~ zn2`jDH%phYciety%Ckzg(;)?A$!2?A!;(96%wkZ%z+Cqfz$GcnREDl$7lw1}aKm-#s^WupOkp&CT_+vE~Il8oG^Bxpn$se{>; zK8MObNwIx$#-{u0tn38s%(wEoDrA|A;T(^=#cdf+CNsutt+Il?z{K1r#%KbTHhgem z$$Dh`Em(>I5p_0z6v05hS=^-=1p*~AWSTB-G9I>w4ThHMw;D7im38KzoIw_Rnc5?j z%aNo9fQhSlWd!tE;vx~TV{xOiQmQzT&dgNy`EPj_YJdfN9G+F)hvUzPUOn zw=Eeae)$)*b4`hZ2AffVE0-k{P@n~T^7F?W%V+DvY1bGU+Zll1K<1`2afU-cHH1p< z(bzogbR)N~hr8}nNZ%|P$Y82|fggHea^f-qR*Vw5P%ErvLQHQwmk7EhAmii_|Lbb0-wSrHQFXDO;P^Hov$7-yMVkFCO0e2c9xe6P@USLgv-y2O zzjNIK!|-D_(?iSJ)8hl0US{lsT}++hdiVEG-Pbo6b)#xymG|RO#snAgGBv%^C^B-T z*uRv?MA7}AF@nBc06i}IKK{H}c37Xk#e}n*y=+{YTM=i~z%=P*S6p4J@=}iqn3RoD z0iVW-8S9$O?6ekKpQsEdRzsshCAefT4X$R%3Km4V($&1ROe~r{|qOj5?E-^zPIMP+g3_lNsX^gA@&oq{Jkz7=Wr!ebj z-79exn6|Kb3(WwRu`O|CG^jg*E1a(Jo0Xyt=vYKOjM+t&8s_?RQAHUG1Vr9FF&Rr% z-uTz%i6pF4iBU2dIHR*S-Fm;a{T71`lSF-gd7jwBEH<{uq_WfqsDC6jc03{G3A<*k z9#yGCS2lRMxfORSib+~x6A*I3UpBp3mdk@rT_7x|#4(;2cma zGE)UfK?bpqqap2yh~7op@r?t>IcQnBc{<5FufYo4>~t|7{>hv18OH$mTR z*MxxavgxzfQWYXRLaty+ufq*yVQ1nW0cV|8E-U;zR+c2#;pGTyIR|qdIe48<*$da5 z;-?!9oHI==rM&tBr^B@H83kN<5uCg^+8+tn{3)t-7gl8rGq?n~)?Ym|VTjR|nkGjU zfqGC3d9*NdF`plus&zC_NyS_7jU>q61(HqCDLc6lMhKa$(bgEZKZAr0Z)EmOj^1OD za>=e%8M4roT)&>Kz8*xI^pTb+}*Wu6E7q_m(zH=NT@QG|t7g2xHz 
zdF~-oQ4o^kiV&SJavZ`C_unR%hIlPkYfi5^tBkifBzn9>`gP;R zas3kj;nJc=W-+;-1=+6ZXvWt3AjYaA!Vo5j7?!^l>Co> zNiz=nw*j2B^ARbg75|I{d4uU}OeOF+Ro#c4w6%L7;>f)j-{5Hh^vLf-v!$>O4WE=~ z5i*ZMhs9q`QqYyA>I>IOyb~HgPT_-;WkK>VvLuBIU%Aat@u1l0L*qv{(UX15{{qlD z(6nNRrDZ=U9>wlcrKwuB;*~=V9MOmKIS@lMjb1dB%rF%vR>uNfsf|T3Da&NF%5pbW z5EWD!^hhZsv`_G*Lk!R`Dufi_T23OLsOvDDC)ZL6yQtE~Q_sC{e9J5r7%aR63TaZu zh;}T`>~MxdS!}l^z?PVi675yi%^ql)Qc6H;L{kL{I;ln=Bpt|UIjs;}yvyUT6;8q+ znVPAY?(?KZ)bWpG%n!Qi50!Soo7rIPY`KbG_L9IM(J^EQRSN8(aY&wnWO0tiRFoT9 zWynINW!Wb(ynQuHe3A?mSSEU%E_dM#-*vzIo+% z|M?|&ffi;&h5RRN;w|7V^5^yK`<4HfpEgdJJ#4x9gv#1t0z1Q}WpGV*Dh{x1I$aG| z{D7S$%zSLYFw^_JRp(`c3~vzW6&?5l)}wl&b$ap%`Ye=be4`3H%RU`w&iRo$nOW!D zForqSzq;TPR0UvZ0_WRCe-pwLCQ+jAUtk9o@wdyJLQprkNb#|?_{kiz#9SDl5kn=C zn*(_VjVMxx4=BioV^hH@-GW%A)S)_#uw>Vq%QTt`j8FH=o_Y(K$P0+vgp)Zl85Cq$ zoGX)${#C@)s_-JXI?hx%q_f5)ze@W%(g3A(ZWj?xSkhF>7)k_X>`D8_V|OS`>C_>q zkSl-N@R3w}iEPWxEc6j`O0=G(!Ajo2ODW{sLAoanDJR+R3O0^^H`Or%zYB$p@92+Y zlXLr?D;1+3n^=Iuv?M~~^rWho0VG%lXNHSvKyP(%Y@~MN8{_&7@gY8V`rB_$-1EN+t}Lembb85bBPeo|3um=7fWxX zN^*!PK5A)iX|N!iBi0BFJIN-95YmJ0oc-^gP)RPzIu;%CnEI_!Th4`P8!T2jnl;`Xzf|#|MQ$J+!bl*L;+4w|0yB zCMF{jITT@%b-jHe$0tAmcNpe+TNMeFO_)TDn;7kwK&g8HxO?G7iS~6 z8c_K=T1`T>y#CH}5~tu$c7qd^$H+7zlf?9=>cj@;LxzB)Zv!Si7f} zl}|0)t5E}I3JnhPhHgb+Zlupf&*+S>>@U)kZIeucp+X#*$U>>?741DVH)IW{KXL&5%2c0VbHc0sFsookNuveBeZ^NuaB{ zNKzZc3t=KmGPhB*^+_jO4T^zQs@;u@!?st48oHyDT!uw3jZCrozhP?xx3cA7I2I! 
zFxs#Cr2V64`12KbM;bUaiJ!Z+DAWZOW#AgN%BGLMY|}PANM@_Tv(VQeo;l91mX$J= z8NtY)gP`>oG|uzhRG-{)K!jq^_v|1y>P-2xBvOMg?eir&?9M=$vLG)=O{8Elpp@apuECrwIPM@ z*D(bOg4BgN6jd@;db$H8_-3n$RKX73%5M5yLDzyFPK}6 z1*Eu6zD!tA*U7NTl_qoaO)~bEi~TLD2(O&55hIu`ob#aaAvm0L__gpj&CzPnn!NIq zkwUQ|vPiNSTiU+q(PY~8rRAxL@{L@sLFsxHR~Cj{_{5R{?aE5Q&5Lx!za12?2+P;M{#zfB(VZ_)PBEi4WGE z<`q^|O}SL9&1OMe?F<9j;qBZJC0>CEtwsKUhr|Msp0Z~;#HzH9q`0TqwEIDlpYg@^B1CyY@k85J=xNsO6I57rjg94R8YX$*TW5RYng7vsiI{8b} z{>~J{jH6m)>-H557%#+~4Jxl{nK28l25F{EO^H_pA?~mHFEKYBqQPW5>-EO9DeIhA zZoBRQ2LoSjD{9enu|Lxm=%h{I$9N8aWDudW=fQuD_uQN;^BTL`Ys5$+HVYRpSqOq*Yp|%%p{zDP>sB?~ULNlm4fZK@Ql=FZ#H@-ow^IHC4PpT0$< zgk(eu$zb=bi&GV-AL=xqk%g(Wh?&~K3TAgOQ!3ABPz3IYi8JW3Bn@^5G0)#|@{(j! z`V?nvV3Qq?N0^;Cfen=hXB<%+UFx7>LCk#~z65Krq{?`yis;beMn@q4ONyt@9iX#S znL+0WM^|bGqvmr<^Kv%O9hgQWl(~OcQ&Q!&Y-9N+SETC%M^Mfg(cxo*Q{hybO

` command (#7257) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- Cargo.lock | 59 +++++++++++++-------------- bin/reth/Cargo.toml | 1 + bin/reth/src/commands/db/checksum.rs | 60 ++++++++++++++++++++++++++++ bin/reth/src/commands/db/mod.rs | 8 ++++ 4 files changed, 99 insertions(+), 29 deletions(-) create mode 100644 bin/reth/src/commands/db/checksum.rs diff --git a/Cargo.lock b/Cargo.lock index 0ad542fdf9be1..afd37d295ce63 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -84,7 +84,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", - "getrandom 0.2.13", + "getrandom 0.2.14", "once_cell", "version_check", "zerocopy", @@ -131,9 +131,9 @@ checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" [[package]] name = "alloy-chains" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96c81b05c893348760f232c4cc6a6a77fd91cfb09885d4eaad25cd03bd7732e" +checksum = "40646aa7f01e396139cf0d6c3a7475eeb8094a0f41d8199f10860c8aef09d2f1" dependencies = [ "alloy-rlp", "arbitrary", @@ -271,7 +271,7 @@ dependencies = [ "derive_arbitrary", "derive_more", "ethereum_ssz", - "getrandom 0.2.13", + "getrandom 0.2.14", "hex-literal", "itoa", "k256", @@ -622,9 +622,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.81" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" +checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" [[package]] name = "aquamarine" @@ -911,9 +911,9 @@ checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" [[package]] name = "backon" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c491fa80d69c03084223a4e73c378dd9f9a1e612eb54051213f88b2d5249b458" +checksum = "d67782c3f868daa71d3533538e98a8e13713231969def7536e8039606fc46bf0" dependencies = [ "fastrand 2.0.2", "futures-core", @@ -1307,9 +1307,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.15.4" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "byte-slice-cast" @@ -1752,9 +1752,9 @@ dependencies = [ [[package]] name = "crc" -version = "3.0.1" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" +checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" dependencies = [ "crc-catalog", ] @@ -2565,9 +2565,9 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ "cfg-if", ] @@ -3033,9 +3033,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06fddc2749e0528d2813f95e050e87e52c8cbbae56223b9babf73b3e53b0cc6" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if", "libc", @@ -3426,7 +3426,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2 0.5.6", "tokio", "tower-service", "tracing", @@ -4233,7 +4233,7 @@ dependencies = [ "either", "futures", "futures-timer", - "getrandom 0.2.13", + "getrandom 0.2.14", 
"instant", "libp2p-allow-block-list", "libp2p-connection-limits", @@ -4979,7 +4979,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" dependencies = [ - "proc-macro-crate 2.0.0", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", "syn 2.0.58", @@ -5648,9 +5648,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] @@ -5730,7 +5730,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.13", + "getrandom 0.2.14", ] [[package]] @@ -5823,7 +5823,7 @@ version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ - "getrandom 0.2.13", + "getrandom 0.2.14", "libredox", "thiserror", ] @@ -5974,6 +5974,7 @@ dependencies = [ name = "reth" version = "0.2.0-beta.5" dependencies = [ + "ahash", "alloy-rlp", "aquamarine", "assert_matches", @@ -7541,7 +7542,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.13", + "getrandom 0.2.14", "libc", "spin 0.9.8", "untrusted 0.9.0", @@ -8756,9 +8757,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.34" +version = "0.3.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" +checksum = "ef89ece63debf11bc32d1ed8d078ac870cbeb44da02afb02a9ff135ae7ca0582" dependencies = [ "deranged", "itoa", @@ -8780,9 
+8781,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", @@ -9427,7 +9428,7 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ - "getrandom 0.2.13", + "getrandom 0.2.14", ] [[package]] diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 2f155e2afc843..e12da0d9e051d 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -92,6 +92,7 @@ similar-asserts.workspace = true itertools.workspace = true rayon.workspace = true boyer-moore-magiclen = "0.2.16" +ahash = "0.8" [target.'cfg(unix)'.dependencies] tikv-jemallocator = { version = "0.5.0", optional = true } diff --git a/bin/reth/src/commands/db/checksum.rs b/bin/reth/src/commands/db/checksum.rs new file mode 100644 index 0000000000000..7079f4e46dae3 --- /dev/null +++ b/bin/reth/src/commands/db/checksum.rs @@ -0,0 +1,60 @@ +use crate::utils::DbTool; +use ahash::AHasher; +use clap::Parser; +use reth_db::{ + cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx, DatabaseEnv, RawKey, + RawTable, RawValue, TableViewer, Tables, +}; +use std::{hash::Hasher, time::Instant}; +use tracing::{info, warn}; + +#[derive(Parser, Debug)] +/// The arguments for the `reth db checksum` command +pub struct Command { + /// The table name + table: Tables, +} + +impl Command { + /// Execute `db checksum` command + pub fn execute(self, tool: &DbTool) -> eyre::Result<()> { + self.table.view(&ChecksumViewer { tool }) + } +} + +struct ChecksumViewer<'a, DB: Database> { + tool: &'a DbTool, +} + +impl TableViewer<()> for 
ChecksumViewer<'_, DB> { + type Error = eyre::Report; + + fn view(&self) -> Result<(), Self::Error> { + warn!("This command should be run without the node running!"); + + let provider = + self.tool.provider_factory.provider()?.disable_long_read_transaction_safety(); + let tx = provider.tx_ref(); + + let mut cursor = tx.cursor_read::>()?; + let walker = cursor.walk(None)?; + + let start_time = Instant::now(); + let mut hasher = AHasher::default(); + for (index, entry) in walker.enumerate() { + let (k, v): (RawKey, RawValue) = entry?; + + if index % 100_000 == 0 { + info!("Hashed {index} entries."); + } + + hasher.write(k.raw_key()); + hasher.write(v.raw_value()); + } + + let elapsed = start_time.elapsed(); + info!("{} checksum: {:x}, took {:?}", T::NAME, hasher.finish(), elapsed); + + Ok(()) + } +} diff --git a/bin/reth/src/commands/db/mod.rs b/bin/reth/src/commands/db/mod.rs index 7975c79ff2876..f28f8375f0c02 100644 --- a/bin/reth/src/commands/db/mod.rs +++ b/bin/reth/src/commands/db/mod.rs @@ -20,6 +20,7 @@ use std::{ sync::Arc, }; +mod checksum; mod clear; mod diff; mod get; @@ -69,6 +70,8 @@ pub enum Subcommands { Stats(stats::Command), /// Lists the contents of a table List(list::Command), + /// Calculates the content checksum of a table + Checksum(checksum::Command), /// Create a diff between two database tables or two entire databases. 
Diff(diff::Command), /// Gets the content of a table for the given key @@ -121,6 +124,11 @@ impl Command { command.execute(&tool)?; }); } + Subcommands::Checksum(command) => { + db_ro_exec!(self.chain, &db_path, db_args, static_files_path, tool, { + command.execute(&tool)?; + }); + } Subcommands::Diff(command) => { db_ro_exec!(self.chain, &db_path, db_args, static_files_path, tool, { command.execute(&tool)?; From 05d9cf950b85f2af84a04012b9d60a1de1f36978 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Thu, 11 Apr 2024 01:37:09 +0200 Subject: [PATCH 114/700] ci: fix iai benches the sequel (#7562) --- .github/workflows/bench.yml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index e024602e0fce1..9291f7a6cf20c 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -9,6 +9,7 @@ on: env: CARGO_TERM_COLOR: always BASELINE: base + IAI_CALLGRIND_RUNNER: iai-callgrind-runner concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -19,7 +20,7 @@ jobs: iai: runs-on: group: Reth - # Only run benchmarks in merge groups + # Only run benchmarks in merge groups and on main if: github.event_name != 'pull_request' steps: - uses: actions/checkout@v4 @@ -33,11 +34,16 @@ jobs: uses: taiki-e/install-action@cargo-binstall - name: Install iai-callgrind-runner run: | + echo "::group::Install" version=$(cargo metadata --format-version=1 |\ jq '.packages[] | select(.name == "iai-callgrind").version' |\ tr -d '"' ) - cargo binstall iai-callgrind-runner --version $version --no-confirm --no-symlinks + cargo binstall iai-callgrind-runner --version $version --no-confirm --no-symlinks --force + echo "::endgroup::" + echo "::group::Verification" + which iai-callgrind-runner + echo "::endgroup::" - name: Checkout base uses: actions/checkout@v4 with: From 80f8707a1b7641ea94d749d43aec08970b2b1424 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Thu, 11 
Apr 2024 10:20:42 +0200 Subject: [PATCH 115/700] chore: add `reth-exex` to CODEOWNERS (#7563) --- CODEOWNERS | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index 7ec66f759789a..8efa8da85062f 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,23 +1,24 @@ * @gakonst bin/ @onbjerg +crates/blockchain-tree @rakita @rkrasiuk +crates/consensus/auto-seal @mattsse +crates/consensus/beacon @rkrasiuk @mattsse @Rjected +crates/exex @onbjerg @shekhirin +crates/metrics @onbjerg crates/net/ @emhane @mattsse @Rjected crates/net/downloaders/ @onbjerg @rkrasiuk +crates/payload/ @mattsse @Rjected +crates/prune @shekhirin @joshieDo crates/revm/src/ @rakita crates/revm/ @mattsse -crates/stages/ @onbjerg @rkrasiuk @shekhirin -crates/storage/ @rakita @joshieDo @shekhirin -crates/transaction-pool/ @mattsse crates/rpc/ @mattsse @Rjected crates/rpc/rpc-types @mattsse @Rjected @Evalir crates/rpc/rpc-types-compat @mattsse @Rjected @Evalir -crates/payload/ @mattsse @Rjected -crates/consensus/auto-seal @mattsse -crates/consensus/beacon @rkrasiuk @mattsse @Rjected -crates/trie @rkrasiuk -crates/blockchain-tree @rakita @rkrasiuk -crates/metrics @onbjerg -crates/tracing @onbjerg -crates/tasks @mattsse -crates/prune @shekhirin @joshieDo +crates/stages/ @onbjerg @rkrasiuk @shekhirin crates/static-file @joshieDo @shekhirin +crates/storage/ @rakita @joshieDo @shekhirin +crates/tasks @mattsse +crates/tracing @onbjerg +crates/transaction-pool/ @mattsse +crates/trie @rkrasiuk .github/ @onbjerg @gakonst @DaniPopes From 681b1a598f575bb1ab4974552a815eb28cae4384 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 11 Apr 2024 14:27:01 +0100 Subject: [PATCH 116/700] feat(storage): non-optional committed chain in CanonStateNotification (#7566) --- crates/payload/basic/src/lib.rs | 29 ++++++++++----------- crates/rpc/rpc/src/eth/api/fee_history.rs | 23 ++++++++-------- crates/rpc/rpc/src/eth/cache/mod.rs | 9 +++---- 
crates/rpc/rpc/src/eth/pubsub.rs | 5 +--- crates/storage/provider/src/traits/chain.rs | 17 ++++++------ 5 files changed, 37 insertions(+), 46 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 7dc154a45b83c..ef8ce91087117 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -194,23 +194,22 @@ where } fn on_new_state(&mut self, new_state: CanonStateNotification) { - if let Some(committed) = new_state.committed() { - let mut cached = CachedReads::default(); - - // extract the state from the notification and put it into the cache - let new_state = committed.state(); - for (addr, acc) in new_state.bundle_accounts_iter() { - if let Some(info) = acc.info.clone() { - // we want pre cache existing accounts and their storage - // this only includes changed accounts and storage but is better than nothing - let storage = - acc.storage.iter().map(|(key, slot)| (*key, slot.present_value)).collect(); - cached.insert_account(addr, info, storage); - } + let mut cached = CachedReads::default(); + + // extract the state from the notification and put it into the cache + let committed = new_state.committed(); + let new_state = committed.state(); + for (addr, acc) in new_state.bundle_accounts_iter() { + if let Some(info) = acc.info.clone() { + // we want pre cache existing accounts and their storage + // this only includes changed accounts and storage but is better than nothing + let storage = + acc.storage.iter().map(|(key, slot)| (*key, slot.present_value)).collect(); + cached.insert_account(addr, info, storage); } - - self.pre_cached = Some(PrecachedState { block: committed.tip().hash(), cached }); } + + self.pre_cached = Some(PrecachedState { block: committed.tip().hash(), cached }); } } diff --git a/crates/rpc/rpc/src/eth/api/fee_history.rs b/crates/rpc/rpc/src/eth/api/fee_history.rs index f0cb62dda2de4..1d62a4aa1941f 100644 --- a/crates/rpc/rpc/src/eth/api/fee_history.rs +++ 
b/crates/rpc/rpc/src/eth/api/fee_history.rs @@ -239,18 +239,17 @@ pub async fn fee_history_cache_new_blocks_task( // the stream ended, we are done break; }; - if let Some(committed) = event.committed() { - let (blocks, receipts): (Vec<_>, Vec<_>) = committed - .blocks_and_receipts() - .map(|(block, receipts)| { - (block.block.clone(), Arc::new(receipts.iter().flatten().cloned().collect::>())) - }) - .unzip(); - fee_history_cache.insert_blocks(blocks.into_iter().zip(receipts)).await; - - // keep track of missing blocks - missing_blocks = fee_history_cache.missing_consecutive_blocks().await; - } + let (blocks, receipts): (Vec<_>, Vec<_>) = event + .committed() + .blocks_and_receipts() + .map(|(block, receipts)| { + (block.block.clone(), Arc::new(receipts.iter().flatten().cloned().collect::>())) + }) + .unzip(); + fee_history_cache.insert_blocks(blocks.into_iter().zip(receipts)).await; + + // keep track of missing blocks + missing_blocks = fee_history_cache.missing_consecutive_blocks().await; } } } diff --git a/crates/rpc/rpc/src/eth/cache/mod.rs b/crates/rpc/rpc/src/eth/cache/mod.rs index 347236f68a7b5..45f9f8c7e40bc 100644 --- a/crates/rpc/rpc/src/eth/cache/mod.rs +++ b/crates/rpc/rpc/src/eth/cache/mod.rs @@ -651,12 +651,9 @@ where eth_state_cache.to_service.send(CacheAction::RemoveReorgedChain { chain_change }); } - if let Some(committed) = event.committed() { - let chain_change = ChainChange::new(committed); + let chain_change = ChainChange::new(event.committed()); - let _ = eth_state_cache - .to_service - .send(CacheAction::CacheNewCanonicalChain { chain_change }); - } + let _ = + eth_state_cache.to_service.send(CacheAction::CacheNewCanonicalChain { chain_change }); } } diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index ad74868675907..eb41c1afb259f 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -314,10 +314,7 @@ where /// Returns a stream that yields all new RPC blocks. 
fn new_headers_stream(&self) -> impl Stream { self.chain_events.canonical_state_stream().flat_map(|new_chain| { - let headers = new_chain - .committed() - .map(|chain| chain.headers().collect::>()) - .unwrap_or_default(); + let headers = new_chain.committed().headers().collect::>(); futures::stream::iter( headers.into_iter().map(reth_rpc_types_compat::block::from_primitive_with_hash), ) diff --git a/crates/storage/provider/src/traits/chain.rs b/crates/storage/provider/src/traits/chain.rs index 397fea79b03c2..df51aecb771cc 100644 --- a/crates/storage/provider/src/traits/chain.rs +++ b/crates/storage/provider/src/traits/chain.rs @@ -97,18 +97,18 @@ impl CanonStateNotification { /// Get old chain if any. pub fn reverted(&self) -> Option> { match self { - Self::Reorg { old, .. } => Some(old.clone()), Self::Commit { .. } => None, + Self::Reorg { old, .. } => Some(old.clone()), } } /// Get the new chain if any. /// /// Returns the new committed [Chain] for [Self::Reorg] and [Self::Commit] variants. - pub fn committed(&self) -> Option> { + pub fn committed(&self) -> Arc { match self { - Self::Reorg { new, .. } => Some(new.clone()), - Self::Commit { new } => Some(new.clone()), + Self::Commit { new } => new.clone(), + Self::Reorg { new, .. } => new.clone(), } } @@ -118,8 +118,8 @@ impl CanonStateNotification { /// new block. pub fn tip(&self) -> &SealedBlockWithSenders { match self { - Self::Reorg { new, .. } => new.tip(), Self::Commit { new } => new.tip(), + Self::Reorg { new, .. 
} => new.tip(), } } @@ -135,10 +135,9 @@ impl CanonStateNotification { .extend(old.receipts_with_attachment().into_iter().map(|receipt| (receipt, true))); } // get new receipts - if let Some(new) = self.committed() { - receipts - .extend(new.receipts_with_attachment().into_iter().map(|receipt| (receipt, false))); - } + receipts.extend( + self.committed().receipts_with_attachment().into_iter().map(|receipt| (receipt, false)), + ); receipts } } From 8d65186dbbe988bcf05f4781a899c4d0f622dd5d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 11 Apr 2024 16:46:26 +0200 Subject: [PATCH 117/700] chore: relax stateprovider db trait bound (#7567) --- crates/revm/src/database.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index 3bd922287d6e7..36a7ec96f27ef 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -16,11 +16,11 @@ pub type RethStateDBBox<'a> = StateDBBox<'a, RethError>; /// Wrapper around StateProvider that implements revm database trait #[derive(Debug, Clone)] -pub struct StateProviderDatabase(pub DB); +pub struct StateProviderDatabase(pub DB); -impl StateProviderDatabase { +impl StateProviderDatabase { /// Create new State with generic StateProvider. 
- pub fn new(db: DB) -> Self { + pub const fn new(db: DB) -> Self { Self(db) } @@ -30,7 +30,7 @@ impl StateProviderDatabase { } } -impl Deref for StateProviderDatabase { +impl Deref for StateProviderDatabase { type Target = DB; fn deref(&self) -> &Self::Target { @@ -38,7 +38,7 @@ impl Deref for StateProviderDatabase { } } -impl DerefMut for StateProviderDatabase { +impl DerefMut for StateProviderDatabase { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } From 33dea48e6a8fa6bd44a5ff61ca421210d0213a22 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 11 Apr 2024 16:56:15 +0200 Subject: [PATCH 118/700] chore: rm unused fn (#7568) --- crates/revm/src/processor.rs | 82 ++++++++++++++---------------------- 1 file changed, 31 insertions(+), 51 deletions(-) diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs index 1101eeaedff17..9015c7b91ef2f 100644 --- a/crates/revm/src/processor.rs +++ b/crates/revm/src/processor.rs @@ -1,39 +1,39 @@ -use crate::{ - database::StateProviderDatabase, - eth_dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, - stack::{InspectorStack, InspectorStackConfig}, - state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, -}; -use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; -use reth_node_api::ConfigureEvm; -use reth_primitives::{ - Address, Block, BlockNumber, BlockWithSenders, Bloom, ChainSpec, GotExpected, Hardfork, Header, - PruneMode, PruneModes, PruneSegmentError, Receipt, ReceiptWithBloom, Receipts, - TransactionSigned, Withdrawals, B256, MINIMUM_PRUNING_DISTANCE, U256, -}; -use reth_provider::{ - BlockExecutor, BlockExecutorStats, ProviderError, PrunableBlockExecutor, StateProvider, -}; +#[cfg(not(feature = "optimism"))] +use revm::DatabaseCommit; use revm::{ - db::{states::bundle_state::BundleRetention, EmptyDBTyped, StateDBBox}, + db::{states::bundle_state::BundleRetention, StateDBBox}, inspector_handle_register, interpreter::Host, 
primitives::{CfgEnvWithHandlerCfg, ResultAndState}, - Evm, State, StateBuilder, + Evm, State, }; use std::{sync::Arc, time::Instant}; +#[cfg(not(feature = "optimism"))] +use tracing::{debug, trace}; +use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; +use reth_node_api::ConfigureEvm; #[cfg(feature = "optimism")] use reth_primitives::revm::env::fill_op_tx_env; #[cfg(not(feature = "optimism"))] use reth_primitives::revm::env::fill_tx_env; - +use reth_primitives::{ + Address, Block, BlockNumber, BlockWithSenders, Bloom, ChainSpec, GotExpected, Hardfork, Header, + PruneMode, PruneModes, PruneSegmentError, Receipt, ReceiptWithBloom, Receipts, + TransactionSigned, Withdrawals, B256, MINIMUM_PRUNING_DISTANCE, U256, +}; #[cfg(not(feature = "optimism"))] use reth_provider::BundleStateWithReceipts; -#[cfg(not(feature = "optimism"))] -use revm::DatabaseCommit; -#[cfg(not(feature = "optimism"))] -use tracing::{debug, trace}; +use reth_provider::{ + BlockExecutor, BlockExecutorStats, ProviderError, PrunableBlockExecutor, StateProvider, +}; + +use crate::{ + database::StateProviderDatabase, + eth_dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, + stack::{InspectorStack, InspectorStackConfig}, + state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, +}; /// EVMProcessor is a block executor that uses revm to execute blocks or multiple blocks. /// @@ -88,30 +88,6 @@ where &self.chain_spec } - /// Create a new pocessor with the given chain spec. - pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - // create evm with boxed empty db that is going to be set later. - let db = Box::new( - StateBuilder::new().with_database_boxed(Box::new(EmptyDBTyped::::new())), - ) - .build(); - - // Hook and inspector stack that we want to invoke on that hook. 
- let stack = InspectorStack::new(InspectorStackConfig::default()); - let evm = evm_config.evm_with_inspector(db, stack); - EVMProcessor { - chain_spec, - evm, - receipts: Receipts::new(), - first_block: None, - tip: None, - prune_modes: PruneModes::none(), - pruning_address_filter: None, - stats: BlockExecutorStats::default(), - _evm_config: evm_config, - } - } - /// Creates a new executor from the given chain spec and database. pub fn new_with_db( chain_spec: Arc, @@ -571,16 +547,20 @@ pub fn compare_receipts_root_and_logs_bloom( #[cfg(test)] mod tests { - use super::*; - use crate::test_utils::{StateProviderTest, TestEvmConfig}; + use std::collections::HashMap; + + use revm::{Database, TransitionState}; + use reth_primitives::{ bytes, constants::{BEACON_ROOTS_ADDRESS, EIP1559_INITIAL_BASE_FEE, SYSTEM_ADDRESS}, keccak256, Account, Bytes, ChainSpecBuilder, ForkCondition, Signature, Transaction, TransactionKind, TxEip1559, MAINNET, }; - use revm::{Database, TransitionState}; - use std::collections::HashMap; + + use crate::test_utils::{StateProviderTest, TestEvmConfig}; + + use super::*; static BEACON_ROOT_CONTRACT_CODE: Bytes = bytes!("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500"); From dc9fc372cb257f47d7636b25559cdaffa65003e9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 11 Apr 2024 17:16:22 +0200 Subject: [PATCH 119/700] chore: make function private (#7569) --- crates/revm/src/processor.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs index 9015c7b91ef2f..ec698bcd177b1 100644 --- a/crates/revm/src/processor.rs +++ b/crates/revm/src/processor.rs @@ -165,7 +165,7 @@ where /// /// If cancun is not activated or the block is the genesis block, then this is a no-op, and no /// state changes are made. 
- pub fn apply_beacon_root_contract_call( + fn apply_beacon_root_contract_call( &mut self, block: &Block, ) -> Result<(), BlockExecutionError> { From 007e5c2c4756cf33a8546cde58a13a19ea83d5ba Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Thu, 11 Apr 2024 18:15:13 +0200 Subject: [PATCH 120/700] feat: exex manager (#7340) Co-authored-by: Alexey Shekhirin --- Cargo.lock | 7 + crates/exex/Cargo.toml | 12 + crates/exex/src/lib.rs | 32 +- crates/exex/src/manager.rs | 466 ++++++++++++++++++++++++++ crates/node-builder/Cargo.toml | 1 - crates/node-builder/src/builder.rs | 87 ++++- crates/node-builder/src/exex.rs | 31 +- crates/node-ethereum/tests/it/exex.rs | 2 +- 8 files changed, 594 insertions(+), 44 deletions(-) create mode 100644 crates/exex/src/manager.rs diff --git a/Cargo.lock b/Cargo.lock index afd37d295ce63..3a16200591090 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6488,13 +6488,20 @@ dependencies = [ name = "reth-exex" version = "0.2.0-beta.5" dependencies = [ + "eyre", + "futures", + "metrics", "reth-config", + "reth-metrics", "reth-node-api", "reth-node-core", "reth-primitives", "reth-provider", "reth-tasks", + "reth-tracing", "tokio", + "tokio-stream", + "tokio-util", ] [[package]] diff --git a/crates/exex/Cargo.toml b/crates/exex/Cargo.toml index 91fd04ae9a1a2..d501a906ea27b 100644 --- a/crates/exex/Cargo.toml +++ b/crates/exex/Cargo.toml @@ -12,10 +12,22 @@ description = "Execution extensions for Reth" workspace = true [dependencies] +## reth reth-config.workspace = true +reth-metrics.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true reth-primitives.workspace = true reth-provider.workspace = true reth-tasks.workspace = true +reth-tracing.workspace = true + +## async +futures.workspace = true tokio.workspace = true +tokio-stream.workspace = true +tokio-util.workspace = true + +## misc +eyre.workspace = true +metrics.workspace = true diff --git a/crates/exex/src/lib.rs b/crates/exex/src/lib.rs index 
411e223af0a90..638d8af79954b 100644 --- a/crates/exex/src/lib.rs +++ b/crates/exex/src/lib.rs @@ -1,6 +1,31 @@ -//! Execution extensions. +// todo: expand this (examples, assumptions, invariants) +//! Execution extensions (ExEx). //! -//! TBD +//! An execution extension is a task that derives its state from Reth's state. +//! +//! Some examples of such state derives are rollups, bridges, and indexers. +//! +//! An ExEx is a [`Future`] resolving to a `Result<()>` that is run indefinitely alongside Reth. +//! +//! ExEx's are initialized using an async closure that resolves to the ExEx; this closure gets +//! passed an [`ExExContext`] where it is possible to spawn additional tasks and modify Reth. +//! +//! Most ExEx's will want to derive their state from the [`CanonStateNotification`] channel given in +//! [`ExExContext`]. A new notification is emitted whenever blocks are executed in live and +//! historical sync. +//! +//! # Pruning +//! +//! ExEx's **SHOULD** emit an `ExExEvent::FinishedHeight` event to signify what blocks have been +//! processed. This event is used by Reth to determine what state can be pruned. +//! +//! An ExEx will only receive notifications for blocks greater than the block emitted in the event. +//! To clarify: if the ExEx emits `ExExEvent::FinishedHeight(0)` it will receive notifications for +//! any `block_number > 0`. +//! +//! [`Future`]: std::future::Future +//! [`ExExContext`]: crate::ExExContext +//! 
[`CanonStateNotification`]: reth_provider::CanonStateNotification #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", @@ -14,3 +39,6 @@ pub use context::*; mod event; pub use event::*; + +mod manager; +pub use manager::*; diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs new file mode 100644 index 0000000000000..7e202b2d72a3a --- /dev/null +++ b/crates/exex/src/manager.rs @@ -0,0 +1,466 @@ +use std::{ + collections::VecDeque, + future::{poll_fn, Future}, + pin::Pin, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + task::{Context, Poll}, +}; + +use crate::ExExEvent; +use futures::StreamExt; +use metrics::Gauge; +use reth_metrics::{metrics::Counter, Metrics}; +use reth_primitives::BlockNumber; +use reth_provider::CanonStateNotification; +use reth_tracing::tracing::debug; +use tokio::sync::{ + mpsc::{self, error::SendError, Receiver, UnboundedReceiver, UnboundedSender}, + watch, +}; +use tokio_stream::wrappers::WatchStream; +use tokio_util::sync::{PollSendError, PollSender}; + +/// Metrics for an ExEx. +#[derive(Metrics)] +#[metrics(scope = "exex")] +struct ExExMetrics { + /// The total number of canonical state notifications sent to an ExEx. + notifications_sent_total: Counter, + /// The total number of events an ExEx has sent to the manager. + events_sent_total: Counter, +} + +/// A handle to an ExEx used by the [`ExExManager`] to communicate with ExEx's. +/// +/// A handle should be created for each ExEx with a unique ID. The channels returned by +/// [`ExExHandle::new`] should be given to the ExEx, while the handle itself should be given to the +/// manager in [`ExExManager::new`]. +#[derive(Debug)] +pub struct ExExHandle { + /// The execution extension's ID. + id: String, + /// Metrics for an ExEx. + metrics: ExExMetrics, + + /// Channel to send [`CanonStateNotification`]s to the ExEx. 
+ sender: PollSender, + /// Channel to receive [`ExExEvent`]s from the ExEx. + receiver: UnboundedReceiver, + /// The ID of the next notification to send to this ExEx. + next_notification_id: usize, + + /// The finished block number of the ExEx. + /// + /// If this is `None`, the ExEx has not emitted a `FinishedHeight` event. + finished_height: Option, +} + +impl ExExHandle { + /// Create a new handle for the given ExEx. + /// + /// Returns the handle, as well as a [`UnboundedSender`] for [`ExExEvent`]s and a + /// [`Receiver`] for [`CanonStateNotification`]s that should be given to the ExEx. + pub fn new(id: String) -> (Self, UnboundedSender, Receiver) { + let (canon_tx, canon_rx) = mpsc::channel(1); + let (event_tx, event_rx) = mpsc::unbounded_channel(); + + ( + Self { + id: id.clone(), + metrics: ExExMetrics::new_with_labels(&[("exex", id)]), + sender: PollSender::new(canon_tx), + receiver: event_rx, + next_notification_id: 0, + finished_height: None, + }, + event_tx, + canon_rx, + ) + } + + /// Reserves a slot in the `PollSender` channel and sends the notification if the slot was + /// successfully reserved. + /// + /// When the notification is sent, it is considered delivered. + fn send( + &mut self, + cx: &mut Context<'_>, + (event_id, notification): &(usize, CanonStateNotification), + ) -> Poll>> { + // check that this notification is above the finished height of the exex if the exex has set + // one + if let Some(finished_height) = self.finished_height { + if finished_height >= notification.tip().number { + self.next_notification_id = event_id + 1; + return Poll::Ready(Ok(())) + } + } + + match self.sender.poll_reserve(cx) { + Poll::Ready(Ok(())) => (), + other => return other, + } + + match self.sender.send_item(notification.clone()) { + Ok(()) => { + self.next_notification_id = event_id + 1; + self.metrics.notifications_sent_total.increment(1); + Poll::Ready(Ok(())) + } + Err(err) => Poll::Ready(Err(err)), + } + } +} + +/// Metrics for the ExEx manager. 
+#[derive(Metrics)] +#[metrics(scope = "exex_manager")] +pub struct ExExManagerMetrics { + /// Max size of the internal state notifications buffer. + max_capacity: Gauge, + /// Current capacity of the internal state notifications buffer. + current_capacity: Gauge, + /// Current size of the internal state notifications buffer. + /// + /// Note that this might be slightly bigger than the maximum capacity in some cases. + buffer_size: Gauge, +} + +/// The execution extension manager. +/// +/// The manager is responsible for: +/// +/// - Receiving relevant events from the rest of the node, and sending these to the execution +/// extensions +/// - Backpressure +/// - Error handling +/// - Monitoring +#[derive(Debug)] +pub struct ExExManager { + /// Handles to communicate with the ExEx's. + exex_handles: Vec, + + /// [`CanonStateNotification`] channel from the [`ExExManagerHandle`]s. + handle_rx: UnboundedReceiver, + + /// The minimum notification ID currently present in the buffer. + min_id: usize, + /// Monotonically increasing ID for [`CanonStateNotification`]s. + next_id: usize, + /// Internal buffer of [`CanonStateNotification`]s. + /// + /// The first element of the tuple is a monotonically increasing ID unique to the notification + /// (the second element of the tuple). + buffer: VecDeque<(usize, CanonStateNotification)>, + /// Max size of the internal state notifications buffer. + max_capacity: usize, + /// Current state notifications buffer capacity. + /// + /// Used to inform the execution stage of possible batch sizes. + current_capacity: Arc, + + /// Whether the manager is ready to receive new notifications. + is_ready: watch::Sender, + + /// The finished height of all ExEx's. + /// + /// This is the lowest common denominator between all ExEx's. If an ExEx has not emitted a + /// `FinishedHeight` event, it will be `None`. + /// + /// This block is used to (amongst other things) determine what blocks are safe to prune. + /// + /// The number is inclusive, i.e. 
all blocks `<= finished_height` are safe to prune. + finished_height: watch::Sender>, + + /// A handle to the ExEx manager. + handle: ExExManagerHandle, + /// Metrics for the ExEx manager. + metrics: ExExManagerMetrics, +} + +impl ExExManager { + /// Create a new [`ExExManager`]. + /// + /// You must provide an [`ExExHandle`] for each ExEx and the maximum capacity of the + /// notification buffer in the manager. + /// + /// When the capacity is exceeded (which can happen if an ExEx is slow) no one can send + /// notifications over [`ExExManagerHandle`]s until there is capacity again. + pub fn new(handles: Vec, max_capacity: usize) -> Self { + let num_exexs = handles.len(); + + let (handle_tx, handle_rx) = mpsc::unbounded_channel(); + let (is_ready_tx, is_ready_rx) = watch::channel(true); + let (finished_height_tx, finished_height_rx) = watch::channel(None); + + let current_capacity = Arc::new(AtomicUsize::new(max_capacity)); + + let metrics = ExExManagerMetrics::default(); + metrics.max_capacity.set(max_capacity as f64); + + Self { + exex_handles: handles, + + handle_rx, + + min_id: 0, + next_id: 0, + buffer: VecDeque::with_capacity(max_capacity), + max_capacity, + current_capacity: Arc::clone(¤t_capacity), + + is_ready: is_ready_tx, + finished_height: finished_height_tx, + + handle: ExExManagerHandle { + exex_tx: handle_tx, + num_exexs, + is_ready_receiver: is_ready_rx.clone(), + is_ready: WatchStream::new(is_ready_rx), + current_capacity, + finished_height: finished_height_rx, + }, + metrics, + } + } + + /// Returns the handle to the manager. + pub fn handle(&self) -> ExExManagerHandle { + self.handle.clone() + } + + /// Updates the current buffer capacity and notifies all `is_ready` watchers of the manager's + /// readiness to receive notifications. 
+ fn update_capacity(&mut self) { + let capacity = self.max_capacity.saturating_sub(self.buffer.len()); + self.current_capacity.store(capacity, Ordering::Relaxed); + self.metrics.current_capacity.set(capacity as f64); + self.metrics.buffer_size.set(self.buffer.len() as f64); + + // we can safely ignore if the channel is closed, since the manager always holds it open + // internally + let _ = self.is_ready.send(capacity > 0); + } + + /// Pushes a new notification into the managers internal buffer, assigning the notification a + /// unique ID. + fn push_notification(&mut self, notification: CanonStateNotification) { + let next_id = self.next_id; + self.buffer.push_back((next_id, notification)); + self.next_id += 1; + } +} + +impl Future for ExExManager { + type Output = eyre::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + // drain handle notifications + while self.buffer.len() < self.max_capacity { + if let Poll::Ready(Some(notification)) = self.handle_rx.poll_recv(cx) { + debug!("received new notification"); + self.push_notification(notification); + continue + } + break + } + + // update capacity + self.update_capacity(); + + // advance all poll senders + let mut min_id = usize::MAX; + for idx in (0..self.exex_handles.len()).rev() { + let mut exex = self.exex_handles.swap_remove(idx); + + // it is a logic error for this to ever underflow since the manager manages the + // notification IDs + let notification_id = exex + .next_notification_id + .checked_sub(self.min_id) + .expect("exex expected notification ID outside the manager's range"); + if let Some(notification) = self.buffer.get(notification_id) { + debug!(exex.id, notification_id, "sent notification to exex"); + if let Poll::Ready(Err(err)) = exex.send(cx, notification) { + // the channel was closed, which is irrecoverable for the manager + return Poll::Ready(Err(err.into())) + } + } + min_id = min_id.min(exex.next_notification_id); + self.exex_handles.push(exex); + } + + 
// remove processed buffered notifications + self.buffer.retain(|&(id, _)| id >= min_id); + self.min_id = min_id; + debug!(min_id, "lowest notification id in buffer updated"); + + // update capacity + self.update_capacity(); + + // handle incoming exex events + for exex in self.exex_handles.iter_mut() { + while let Poll::Ready(Some(event)) = exex.receiver.poll_recv(cx) { + debug!(?event, id = exex.id, "received event from exex"); + exex.metrics.events_sent_total.increment(1); + match event { + ExExEvent::FinishedHeight(height) => exex.finished_height = Some(height), + } + } + } + + // update watch channel block number + let finished_height = self.exex_handles.iter_mut().try_fold(u64::MAX, |curr, exex| { + let height = match exex.finished_height { + None => return Err(()), + Some(height) => height, + }; + + if height < curr { + Ok(height) + } else { + Ok(curr) + } + }); + if let Ok(finished_height) = finished_height { + let _ = self.finished_height.send(Some(finished_height)); + } + + Poll::Pending + } +} + +/// A handle to communicate with the [`ExExManager`]. +#[derive(Debug)] +pub struct ExExManagerHandle { + /// Channel to send notifications to the ExEx manager. + exex_tx: UnboundedSender, + /// The number of ExEx's running on the node. + num_exexs: usize, + /// A watch channel denoting whether the manager is ready for new notifications or not. + /// + /// This is stored internally alongside a `WatchStream` representation of the same value. This + /// field is only used to create a new `WatchStream` when the handle is cloned, but is + /// otherwise unused. + is_ready_receiver: watch::Receiver, + /// A stream of bools denoting whether the manager is ready for new notifications. + is_ready: WatchStream, + /// The current capacity of the manager's internal notification buffer. + current_capacity: Arc, + /// The finished height of all ExEx's. + /// + /// This is the lowest common denominator between all ExEx's. 
If an ExEx has not emitted a + /// `FinishedHeight` event, it will be `None`. + /// + /// This block is used to (amongst other things) determine what blocks are safe to prune. + /// + /// The number is inclusive, i.e. all blocks `<= finished_height` are safe to prune. + finished_height: watch::Receiver>, +} + +impl ExExManagerHandle { + /// Synchronously send a notification over the channel to all execution extensions. + /// + /// Senders should call [`Self::has_capacity`] first. + pub fn send( + &self, + notification: CanonStateNotification, + ) -> Result<(), SendError> { + self.exex_tx.send(notification) + } + + /// Asynchronously send a notification over the channel to all execution extensions. + /// + /// The returned future resolves when the notification has been delivered. If there is no + /// capacity in the channel, the future will wait. + pub async fn send_async( + &mut self, + notification: CanonStateNotification, + ) -> Result<(), SendError> { + self.ready().await; + self.exex_tx.send(notification) + } + + /// Get the current capacity of the ExEx manager's internal notification buffer. + pub fn capacity(&self) -> usize { + self.current_capacity.load(Ordering::Relaxed) + } + + /// Whether there is capacity in the ExEx manager's internal notification buffer. + /// + /// If this returns `false`, the owner of the handle should **NOT** send new notifications over + /// the channel until the manager is ready again, as this can lead to unbounded memory growth. + pub fn has_capacity(&self) -> bool { + self.current_capacity.load(Ordering::Relaxed) > 0 + } + + /// Returns `true` if there are ExEx's installed in the node. + pub fn has_exexs(&self) -> bool { + self.num_exexs > 0 + } + + /// The finished height of all ExEx's. + /// + /// This is the lowest common denominator between all ExEx's. If an ExEx has not emitted a + /// `FinishedHeight` event, it will be `None`. + /// + /// This block is used to (amongst other things) determine what blocks are safe to prune. 
+ /// + /// The number is inclusive, i.e. all blocks `<= finished_height` are safe to prune. + pub fn finished_height(&mut self) -> Option { + *self.finished_height.borrow_and_update() + } + + /// Wait until the manager is ready for new notifications. + pub async fn ready(&mut self) { + poll_fn(|cx| self.poll_ready(cx)).await + } + + /// Wait until the manager is ready for new notifications. + pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<()> { + // if this returns `Poll::Ready(None)` the stream is exhausted, which means the underlying + // channel is closed. + // + // this can only happen if the manager died, and the node is shutting down, so we ignore it + let mut pinned = std::pin::pin!(&mut self.is_ready); + if pinned.poll_next_unpin(cx) == Poll::Ready(Some(true)) { + Poll::Ready(()) + } else { + Poll::Pending + } + } +} + +impl Clone for ExExManagerHandle { + fn clone(&self) -> Self { + Self { + exex_tx: self.exex_tx.clone(), + num_exexs: self.num_exexs, + is_ready_receiver: self.is_ready_receiver.clone(), + is_ready: WatchStream::new(self.is_ready_receiver.clone()), + current_capacity: self.current_capacity.clone(), + finished_height: self.finished_height.clone(), + } + } +} + +#[cfg(test)] +mod tests { + #[tokio::test] + async fn delivers_events() {} + + #[tokio::test] + async fn capacity() {} + + #[tokio::test] + async fn updates_block_height() {} + + #[tokio::test] + async fn slow_exex() {} + + #[tokio::test] + async fn is_ready() {} +} diff --git a/crates/node-builder/Cargo.toml b/crates/node-builder/Cargo.toml index 4996878fcde48..693a20ac0e1cc 100644 --- a/crates/node-builder/Cargo.toml +++ b/crates/node-builder/Cargo.toml @@ -36,7 +36,6 @@ reth-prune.workspace = true reth-stages.workspace = true reth-config.workspace = true - ## async futures.workspace = true tokio = { workspace = true, features = [ diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index 91942685676c7..00e62bc30544e 100644 --- 
a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -14,7 +14,7 @@ use crate::{ Node, NodeHandle, }; use eyre::Context; -use futures::{future::Either, stream, stream_select, Future, StreamExt}; +use futures::{future, future::Either, stream, stream_select, Future, StreamExt}; use rayon::ThreadPoolBuilder; use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, @@ -28,7 +28,7 @@ use reth_db::{ test_utils::{create_test_rw_db, TempDatabase}, DatabaseEnv, }; -use reth_exex::ExExContext; +use reth_exex::{ExExContext, ExExHandle, ExExManager}; use reth_interfaces::p2p::either::EitherDownloader; use reth_network::{NetworkBuilder, NetworkConfig, NetworkEvents, NetworkHandle}; use reth_node_api::{FullNodeTypes, FullNodeTypesAdapter, NodeTypes}; @@ -44,7 +44,9 @@ use reth_node_core::{ utils::write_peers_to_file, }; use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, format_ether, ChainSpec}; -use reth_provider::{providers::BlockchainProvider, ChainSpecProvider, ProviderFactory}; +use reth_provider::{ + providers::BlockchainProvider, CanonStateSubscriptions, ChainSpecProvider, ProviderFactory, +}; use reth_prune::PrunerBuilder; use reth_revm::EvmProcessorFactory; use reth_rpc_engine_api::EngineApi; @@ -434,7 +436,11 @@ where } /// Installs an ExEx (Execution Extension) in the node. - pub fn install_exex(mut self, exex: F) -> Self + /// + /// # Note + /// + /// The ExEx ID must be unique. 
+ pub fn install_exex(mut self, exex_id: impl Into, exex: F) -> Self where F: Fn( ExExContext< @@ -449,7 +455,7 @@ where R: Future> + Send, E: Future> + Send, { - self.state.exexs.push(Box::new(exex)); + self.state.exexs.push((exex_id.into(), Box::new(exex))); self } @@ -561,8 +567,6 @@ where let NodeComponents { transaction_pool, network, payload_builder } = components_builder.build_components(&ctx).await?; - // TODO(alexey): launch ExExs and consume their events - let BuilderContext { provider: blockchain_db, executor, @@ -585,6 +589,69 @@ where debug!(target: "reth::cli", "calling on_component_initialized hook"); on_component_initialized.on_event(node_components.clone())?; + // spawn exexs + let mut exex_handles = Vec::with_capacity(self.state.exexs.len()); + let mut exexs = Vec::with_capacity(self.state.exexs.len()); + for (id, exex) in self.state.exexs { + // create a new exex handle + let (handle, events, notifications) = ExExHandle::new(id.clone()); + exex_handles.push(handle); + + // create the launch context for the exex + let context = ExExContext { + head, + provider: blockchain_db.clone(), + task_executor: executor.clone(), + data_dir: data_dir.clone(), + config: config.clone(), + reth_config: reth_config.clone(), + events, + notifications, + }; + + let executor = executor.clone(); + exexs.push(async move { + debug!(target: "reth::cli", id, "spawning exex"); + let span = reth_tracing::tracing::info_span!("exex", id); + let _enter = span.enter(); + + // init the exex + let exex = exex.launch(context).await.unwrap(); + + // spawn it as a crit task + executor.spawn_critical("exex", async move { + info!(target: "reth::cli", id, "ExEx started"); + exex.await.unwrap_or_else(|_| panic!("exex {} crashed", id)) + }); + }); + } + + future::join_all(exexs).await; + + // spawn exex manager + if !exex_handles.is_empty() { + debug!(target: "reth::cli", "spawning exex manager"); + // todo(onbjerg): rm magic number + let exex_manager = ExExManager::new(exex_handles, 
1024); + let mut exex_manager_handle = exex_manager.handle(); + executor.spawn_critical("exex manager", async move { + exex_manager.await.expect("exex manager crashed"); + }); + + // send notifications from the blockchain tree to exex manager + let mut canon_state_notifications = blockchain_tree.subscribe_to_canonical_state(); + executor.spawn_critical("exex manager blockchain tree notifications", async move { + while let Ok(notification) = canon_state_notifications.recv().await { + exex_manager_handle + .send_async(notification) + .await + .expect("blockchain tree notification could not be sent to exex manager"); + } + }); + + info!(target: "reth::cli", "ExEx Manager started"); + } + // create pipeline let network_client = network.fetch_client().await?; let (consensus_engine_tx, mut consensus_engine_rx) = unbounded_channel(); @@ -1070,7 +1137,7 @@ where } /// Installs an ExEx (Execution Extension) in the node. - pub fn install_exex(mut self, exex: F) -> Self + pub fn install_exex(mut self, exex_id: impl Into, exex: F) -> Self where F: Fn( ExExContext< @@ -1085,7 +1152,7 @@ where R: Future> + Send, E: Future> + Send, { - self.builder.state.exexs.push(Box::new(exex)); + self.builder.state.exexs.push((exex_id.into(), Box::new(exex))); self } @@ -1301,7 +1368,7 @@ pub struct ComponentsState { /// Additional RPC hooks. rpc: RpcHooks, /// The ExExs (execution extensions) of the node. - exexs: Vec>>, + exexs: Vec<(String, Box>)>, } impl std::fmt::Debug diff --git a/crates/node-builder/src/exex.rs b/crates/node-builder/src/exex.rs index d4bec54e7c8bd..ff2d0f84a3d18 100644 --- a/crates/node-builder/src/exex.rs +++ b/crates/node-builder/src/exex.rs @@ -1,33 +1,4 @@ -#![allow(dead_code)] -// todo: expand this (examples, assumptions, invariants) -//! Execution extensions (ExEx). -//! -//! An execution extension is a task that derives its state from Reth's state. -//! -//! Some examples of state such state derives are rollups, bridges, and indexers. -//! -//! 
An ExEx is a [`Future`] resolving to a `Result<()>` that is run indefinitely alongside Reth. -//! -//! ExEx's are initialized using an async closure that resolves to the ExEx; this closure gets -//! passed an [`ExExContext`] where it is possible to spawn additional tasks and modify Reth. -//! -//! Most ExEx's will want to derive their state from the [`CanonStateNotification`] channel given in -//! [`ExExContext`]. A new notification is emitted whenever blocks are executed in live and -//! historical sync. -//! -//! # Pruning -//! -//! ExEx's **SHOULD** emit an `ExExEvent::FinishedHeight` event to signify what blocks have been -//! processed. This event is used by Reth to determine what state can be pruned. -//! -//! An ExEx will not receive notifications for blocks less than the block emitted in the event. To -//! clarify: if the ExEx emits `ExExEvent::FinishedHeight(0)` it will receive notifications for any -//! `block_number >= 0`. -//! -//! [`Future`]: std::future::Future -//! [`ExExContext`]: reth_exex::ExExContext -//! [`CanonStateNotification`]: reth_provider::CanonStateNotification - +//! Types for launching execution extensions (ExEx). 
use crate::FullNodeTypes; use futures::{future::BoxFuture, FutureExt}; use reth_exex::ExExContext; diff --git a/crates/node-ethereum/tests/it/exex.rs b/crates/node-ethereum/tests/it/exex.rs index b98f9e5fc6561..b1f7a92f762cf 100644 --- a/crates/node-ethereum/tests/it/exex.rs +++ b/crates/node-ethereum/tests/it/exex.rs @@ -29,6 +29,6 @@ fn basic_exex() { .with_database(db) .with_types(EthereumNode::default()) .with_components(EthereumNode::components()) - .install_exex(move |ctx| future::ok(DummyExEx { _ctx: ctx })) + .install_exex("dummy", move |ctx| future::ok(DummyExEx { _ctx: ctx })) .check_launch(); } From 3ffc729833ade7886bd3e1ac2bf75a93637c776f Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 11 Apr 2024 17:38:03 +0100 Subject: [PATCH 121/700] feat(examples): OP Stack bridge stats ExEx (#7556) Co-authored-by: Oliver Nordbjerg Co-authored-by: Oliver Nordbjerg --- Cargo.lock | 78 +- Cargo.toml | 1 + crates/exex/src/event.rs | 2 +- crates/storage/provider/src/chain.rs | 7 +- examples/Cargo.toml | 5 +- examples/cli-extension-event-hooks/Cargo.toml | 2 +- examples/exex/op-bridge/Cargo.toml | 23 + .../op-bridge/l1_standard_bridge_abi.json | 664 ++++++++++++++++++ examples/exex/op-bridge/src/main.rs | 244 +++++++ 9 files changed, 1019 insertions(+), 7 deletions(-) create mode 100644 examples/exex/op-bridge/Cargo.toml create mode 100644 examples/exex/op-bridge/l1_standard_bridge_abi.json create mode 100644 examples/exex/op-bridge/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 3a16200591090..b0af1abad7509 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -452,6 +452,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "452d929748ac948a10481fff4123affead32c553cf362841c5103dd508bdfc16" dependencies = [ + "alloy-json-abi", "alloy-sol-macro-input", "const-hex", "heck 0.4.1", @@ -470,11 +471,13 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"df64e094f6d2099339f9e82b5b38440b159757b6920878f28316243f8166c8d1" dependencies = [ + "alloy-json-abi", "const-hex", "dunce", "heck 0.5.0", "proc-macro2", "quote", + "serde_json", "syn 2.0.58", "syn-solidity", ] @@ -494,6 +497,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43bc2d6dfc2a19fd56644494479510f98b1ee929e04cf0d4aa45e98baa3e545b" dependencies = [ + "alloy-json-abi", "alloy-primitives", "alloy-sol-macro", "const-hex", @@ -2396,7 +2400,7 @@ dependencies = [ "enr", "fnv", "futures", - "hashlink", + "hashlink 0.8.4", "hex", "hkdf", "lazy_static", @@ -2751,6 +2755,18 @@ dependencies = [ "once_cell", ] +[[package]] +name = "fallible-iterator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" + +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + [[package]] name = "fast-float" version = "0.2.0" @@ -3191,6 +3207,15 @@ dependencies = [ "hashbrown 0.14.3", ] +[[package]] +name = "hashlink" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee" +dependencies = [ + "hashbrown 0.14.3", +] + [[package]] name = "hdrhistogram" version = "7.5.4" @@ -4408,6 +4433,17 @@ dependencies = [ "libsecp256k1-core", ] +[[package]] +name = "libsqlite3-sys" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "linked-hash-map" version = "0.5.6" @@ -5029,6 +5065,26 @@ version = "11.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" +[[package]] +name = "op-bridge" +version = "0.0.0" +dependencies = [ + "alloy-sol-types", + "eyre", + "futures", + "itertools 0.12.1", + "reth", + "reth-exex", + "reth-node-api", + "reth-node-core", + "reth-node-ethereum", + "reth-primitives", + "reth-provider", + "reth-tracing", + "rusqlite", + "tokio", +] + [[package]] name = "opaque-debug" version = "0.3.1" @@ -7654,6 +7710,20 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f86854cf50259291520509879a5c294c3c9a4c334e9ff65071c51e42ef1e2343" +[[package]] +name = "rusqlite" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae" +dependencies = [ + "bitflags 2.5.0", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink 0.9.0", + "libsqlite3-sys", + "smallvec", +] + [[package]] name = "rustc-demangle" version = "0.1.23" @@ -9444,6 +9514,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "vergen" version = "8.3.1" diff --git a/Cargo.toml b/Cargo.toml index a9d0fd7ed8e16..34c9740a2ffdd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,6 +77,7 @@ members = [ "examples/trace-transaction-cli/", "examples/polygon-p2p/", "examples/custom-inspector/", + "examples/exex/op-bridge/", "testing/ef-tests/", ] default-members = ["bin/reth"] diff --git a/crates/exex/src/event.rs b/crates/exex/src/event.rs index cc6ac4365f87e..7929cf0316e56 100644 --- a/crates/exex/src/event.rs +++ b/crates/exex/src/event.rs @@ -1,7 +1,7 @@ use reth_primitives::BlockNumber; /// Events 
emitted by an ExEx. -#[derive(Debug)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ExExEvent { /// Highest block processed by the ExEx. /// diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index 78430748ba275..eb9ef6a4b7ec3 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -8,7 +8,7 @@ use reth_primitives::{ }; use reth_trie::updates::TrieUpdates; use revm::db::BundleState; -use std::{borrow::Cow, collections::BTreeMap, fmt}; +use std::{borrow::Cow, collections::BTreeMap, fmt, ops::RangeInclusive}; /// A chain of blocks and their final state. /// @@ -177,6 +177,11 @@ impl Chain { self.blocks.len() } + /// Returns the range of block numbers in the chain. + pub fn range(&self) -> RangeInclusive { + self.first().number..=self.tip().number + } + /// Get all receipts for the given block. pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { let num = self.block_number(block_hash)?; diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 92bb0f1f1ea8a..82b6be45ad2e1 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -7,14 +7,11 @@ license.workspace = true [dev-dependencies] reth-primitives.workspace = true - reth-db.workspace = true reth-provider.workspace = true - reth-rpc-builder.workspace = true reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true - reth-revm.workspace = true reth-blockchain-tree.workspace = true reth-beacon-consensus.workspace = true @@ -22,6 +19,7 @@ reth-network-api.workspace = true reth-network.workspace = true reth-transaction-pool.workspace = true reth-tasks.workspace = true + eyre.workspace = true futures.workspace = true async-trait.workspace = true @@ -38,3 +36,4 @@ path = "network.rs" [[example]] name = "network-txpool" path = "network-txpool.rs" + diff --git a/examples/cli-extension-event-hooks/Cargo.toml b/examples/cli-extension-event-hooks/Cargo.toml index 2acac14ee78e7..8664057e7d85f 100644 
--- a/examples/cli-extension-event-hooks/Cargo.toml +++ b/examples/cli-extension-event-hooks/Cargo.toml @@ -7,4 +7,4 @@ license.workspace = true [dependencies] reth.workspace = true -reth-node-ethereum.workspace = true \ No newline at end of file +reth-node-ethereum.workspace = true diff --git a/examples/exex/op-bridge/Cargo.toml b/examples/exex/op-bridge/Cargo.toml new file mode 100644 index 0000000000000..3d87b2801765b --- /dev/null +++ b/examples/exex/op-bridge/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "op-bridge" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth.workspace = true +reth-exex.workspace = true +reth-node-api.workspace = true +reth-node-core.workspace = true +reth-node-ethereum.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true +reth-tracing.workspace = true + +eyre.workspace = true +tokio.workspace = true +futures.workspace = true +alloy-sol-types = { workspace = true, features = ["json"] } +itertools.workspace = true +rusqlite = { version = "0.31.0", features = ["bundled"] } diff --git a/examples/exex/op-bridge/l1_standard_bridge_abi.json b/examples/exex/op-bridge/l1_standard_bridge_abi.json new file mode 100644 index 0000000000000..4ae6406f0793f --- /dev/null +++ b/examples/exex/op-bridge/l1_standard_bridge_abi.json @@ -0,0 +1,664 @@ +[ + { + "inputs": [ + { + "internalType": "address payable", + "name": "_messenger", + "type": "address" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "localToken", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "remoteToken", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": false, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": 
false, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "extraData", + "type": "bytes" + } + ], + "name": "ERC20BridgeFinalized", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "localToken", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "remoteToken", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": false, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "extraData", + "type": "bytes" + } + ], + "name": "ERC20BridgeInitiated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "l1Token", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "l2Token", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": false, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "extraData", + "type": "bytes" + } + ], + "name": "ERC20DepositInitiated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "l1Token", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "l2Token", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": false, + "internalType": "address", + 
"name": "to", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "extraData", + "type": "bytes" + } + ], + "name": "ERC20WithdrawalFinalized", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "extraData", + "type": "bytes" + } + ], + "name": "ETHBridgeFinalized", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "extraData", + "type": "bytes" + } + ], + "name": "ETHBridgeInitiated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "extraData", + "type": "bytes" + } + ], + "name": "ETHDepositInitiated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": false, + 
"internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "extraData", + "type": "bytes" + } + ], + "name": "ETHWithdrawalFinalized", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint8", + "name": "version", + "type": "uint8" + } + ], + "name": "Initialized", + "type": "event" + }, + { + "inputs": [], + "name": "MESSENGER", + "outputs": [ + { + "internalType": "contract CrossDomainMessenger", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "OTHER_BRIDGE", + "outputs": [ + { + "internalType": "contract StandardBridge", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_localToken", + "type": "address" + }, + { + "internalType": "address", + "name": "_remoteToken", + "type": "address" + }, + { "internalType": "uint256", "name": "_amount", "type": "uint256" }, + { + "internalType": "uint32", + "name": "_minGasLimit", + "type": "uint32" + }, + { "internalType": "bytes", "name": "_extraData", "type": "bytes" } + ], + "name": "bridgeERC20", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_localToken", + "type": "address" + }, + { + "internalType": "address", + "name": "_remoteToken", + "type": "address" + }, + { "internalType": "address", "name": "_to", "type": "address" }, + { "internalType": "uint256", "name": "_amount", "type": "uint256" }, + { + "internalType": "uint32", + "name": "_minGasLimit", + "type": "uint32" + }, + { "internalType": "bytes", "name": "_extraData", "type": "bytes" } + ], + "name": "bridgeERC20To", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint32", + "name": 
"_minGasLimit", + "type": "uint32" + }, + { "internalType": "bytes", "name": "_extraData", "type": "bytes" } + ], + "name": "bridgeETH", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { "internalType": "address", "name": "_to", "type": "address" }, + { + "internalType": "uint32", + "name": "_minGasLimit", + "type": "uint32" + }, + { "internalType": "bytes", "name": "_extraData", "type": "bytes" } + ], + "name": "bridgeETHTo", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_l1Token", + "type": "address" + }, + { + "internalType": "address", + "name": "_l2Token", + "type": "address" + }, + { "internalType": "uint256", "name": "_amount", "type": "uint256" }, + { + "internalType": "uint32", + "name": "_minGasLimit", + "type": "uint32" + }, + { "internalType": "bytes", "name": "_extraData", "type": "bytes" } + ], + "name": "depositERC20", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_l1Token", + "type": "address" + }, + { + "internalType": "address", + "name": "_l2Token", + "type": "address" + }, + { "internalType": "address", "name": "_to", "type": "address" }, + { "internalType": "uint256", "name": "_amount", "type": "uint256" }, + { + "internalType": "uint32", + "name": "_minGasLimit", + "type": "uint32" + }, + { "internalType": "bytes", "name": "_extraData", "type": "bytes" } + ], + "name": "depositERC20To", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint32", + "name": "_minGasLimit", + "type": "uint32" + }, + { "internalType": "bytes", "name": "_extraData", "type": "bytes" } + ], + "name": "depositETH", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { "internalType": "address", "name": "_to", "type": "address" }, + 
{ + "internalType": "uint32", + "name": "_minGasLimit", + "type": "uint32" + }, + { "internalType": "bytes", "name": "_extraData", "type": "bytes" } + ], + "name": "depositETHTo", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { "internalType": "address", "name": "", "type": "address" }, + { "internalType": "address", "name": "", "type": "address" } + ], + "name": "deposits", + "outputs": [ + { "internalType": "uint256", "name": "", "type": "uint256" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_localToken", + "type": "address" + }, + { + "internalType": "address", + "name": "_remoteToken", + "type": "address" + }, + { "internalType": "address", "name": "_from", "type": "address" }, + { "internalType": "address", "name": "_to", "type": "address" }, + { "internalType": "uint256", "name": "_amount", "type": "uint256" }, + { "internalType": "bytes", "name": "_extraData", "type": "bytes" } + ], + "name": "finalizeBridgeERC20", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { "internalType": "address", "name": "_from", "type": "address" }, + { "internalType": "address", "name": "_to", "type": "address" }, + { "internalType": "uint256", "name": "_amount", "type": "uint256" }, + { "internalType": "bytes", "name": "_extraData", "type": "bytes" } + ], + "name": "finalizeBridgeETH", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_l1Token", + "type": "address" + }, + { + "internalType": "address", + "name": "_l2Token", + "type": "address" + }, + { "internalType": "address", "name": "_from", "type": "address" }, + { "internalType": "address", "name": "_to", "type": "address" }, + { "internalType": "uint256", "name": "_amount", "type": "uint256" }, + { "internalType": "bytes", "name": "_extraData", "type": "bytes" 
} + ], + "name": "finalizeERC20Withdrawal", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { "internalType": "address", "name": "_from", "type": "address" }, + { "internalType": "address", "name": "_to", "type": "address" }, + { "internalType": "uint256", "name": "_amount", "type": "uint256" }, + { "internalType": "bytes", "name": "_extraData", "type": "bytes" } + ], + "name": "finalizeETHWithdrawal", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "contract SuperchainConfig", + "name": "_superchainConfig", + "type": "address" + } + ], + "name": "initialize", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "l2TokenBridge", + "outputs": [ + { "internalType": "address", "name": "", "type": "address" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "messenger", + "outputs": [ + { + "internalType": "contract CrossDomainMessenger", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "otherBridge", + "outputs": [ + { + "internalType": "contract StandardBridge", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "paused", + "outputs": [{ "internalType": "bool", "name": "", "type": "bool" }], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "superchainConfig", + "outputs": [ + { + "internalType": "contract SuperchainConfig", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "version", + "outputs": [{ "internalType": "string", "name": "", "type": "string" }], + "stateMutability": "view", + "type": "function" + }, + { "stateMutability": "payable", "type": "receive" } +] diff --git 
a/examples/exex/op-bridge/src/main.rs b/examples/exex/op-bridge/src/main.rs new file mode 100644 index 0000000000000..814ffce689d97 --- /dev/null +++ b/examples/exex/op-bridge/src/main.rs @@ -0,0 +1,244 @@ +use std::{ + pin::Pin, + task::{ready, Context, Poll}, +}; + +use alloy_sol_types::{sol, SolEventInterface}; +use futures::Future; +use reth::builder::FullNodeTypes; +use reth_exex::{ExExContext, ExExEvent}; +use reth_node_ethereum::EthereumNode; +use reth_primitives::{Log, SealedBlockWithSenders, TransactionSigned}; +use reth_provider::Chain; +use reth_tracing::tracing::info; +use rusqlite::Connection; + +sol!(L1StandardBridge, "l1_standard_bridge_abi.json"); +use crate::L1StandardBridge::{ETHBridgeFinalized, ETHBridgeInitiated, L1StandardBridgeEvents}; + +/// An example of ExEx that listens to ETH bridging events from OP Stack chains +/// and stores deposits and withdrawals in a SQLite database. +struct OPBridgeExEx { + ctx: ExExContext, + connection: Connection, +} + +impl OPBridgeExEx { + fn new(ctx: ExExContext, connection: Connection) -> eyre::Result { + // Create deposits and withdrawals tables + connection.execute( + r#" + CREATE TABLE IF NOT EXISTS deposits ( + id INTEGER PRIMARY KEY, + block_number INTEGER NOT NULL, + tx_hash TEXT NOT NULL UNIQUE, + contract_address TEXT NOT NULL, + "from" TEXT NOT NULL, + "to" TEXT NOT NULL, + amount TEXT NOT NULL + ); + "#, + (), + )?; + connection.execute( + r#" + CREATE TABLE IF NOT EXISTS withdrawals ( + id INTEGER PRIMARY KEY, + block_number INTEGER NOT NULL, + tx_hash TEXT NOT NULL UNIQUE, + contract_address TEXT NOT NULL, + "from" TEXT NOT NULL, + "to" TEXT NOT NULL, + amount TEXT NOT NULL + ); + "#, + (), + )?; + + // Create a bridge contract addresses table and insert known ones with their respective + // names + connection.execute( + r#" + CREATE TABLE IF NOT EXISTS contracts ( + id INTEGER PRIMARY KEY, + address TEXT NOT NULL UNIQUE, + name TEXT NOT NULL + ); + "#, + (), + )?; + connection.execute( + r#" + 
INSERT OR IGNORE INTO contracts (address, name) + VALUES + ('0x3154Cf16ccdb4C6d922629664174b904d80F2C35', 'Base'), + ('0x3a05E5d33d7Ab3864D53aaEc93c8301C1Fa49115', 'Blast'), + ('0x697402166Fbf2F22E970df8a6486Ef171dbfc524', 'Blast'), + ('0x99C9fc46f92E8a1c0deC1b1747d010903E884bE1', 'Optimism'), + ('0x735aDBbE72226BD52e818E7181953f42E3b0FF21', 'Mode'), + ('0x3B95bC951EE0f553ba487327278cAc44f29715E5', 'Manta'); + "#, + (), + )?; + + info!("Initialized database tables"); + + Ok(Self { ctx, connection }) + } +} + +impl Future for OPBridgeExEx { + type Output = eyre::Result<()>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.get_mut(); + + // Process all new chain state notifications until there are no more + while let Some(notification) = ready!(this.ctx.notifications.poll_recv(cx)) { + // If there was a reorg, delete all deposits and withdrawals that were reverted + if let Some(reverted_chain) = notification.reverted() { + let events = decode_chain_into_events(&reverted_chain); + + let mut deposits = 0; + let mut withdrawals = 0; + + for (_, tx, _, event) in events { + match event { + // L1 -> L2 deposit + L1StandardBridgeEvents::ETHBridgeInitiated(ETHBridgeInitiated { + .. + }) => { + let deleted = this.connection.execute( + "DELETE FROM deposits WHERE tx_hash = ?;", + (tx.hash().to_string(),), + )?; + deposits += deleted; + } + // L2 -> L1 withdrawal + L1StandardBridgeEvents::ETHBridgeFinalized(ETHBridgeFinalized { + .. 
+ }) => { + let deleted = this.connection.execute( + "DELETE FROM withdrawals WHERE tx_hash = ?;", + (tx.hash().to_string(),), + )?; + withdrawals += deleted; + } + _ => continue, + }; + } + + info!(block_range = ?reverted_chain.range(), %deposits, %withdrawals, "Reverted chain events"); + } + + // Insert all new deposits and withdrawals + let committed_chain = notification.committed(); + let events = decode_chain_into_events(&committed_chain); + + let mut deposits = 0; + let mut withdrawals = 0; + + for (block, tx, log, event) in events { + match event { + // L1 -> L2 deposit + L1StandardBridgeEvents::ETHBridgeInitiated(ETHBridgeInitiated { + amount, + from, + to, + .. + }) => { + let inserted = this.connection.execute( + r#" + INSERT INTO deposits (block_number, tx_hash, contract_address, "from", "to", amount) + VALUES (?, ?, ?, ?, ?, ?) + "#, + ( + block.number, + tx.hash().to_string(), + log.address.to_string(), + from.to_string(), + to.to_string(), + amount.to_string(), + ), + )?; + deposits += inserted; + } + // L2 -> L1 withdrawal + L1StandardBridgeEvents::ETHBridgeFinalized(ETHBridgeFinalized { + amount, + from, + to, + .. + }) => { + let inserted = this.connection.execute( + r#" + INSERT INTO withdrawals (block_number, tx_hash, contract_address, "from", "to", amount) + VALUES (?, ?, ?, ?, ?, ?) + "#, + ( + block.number, + tx.hash().to_string(), + log.address.to_string(), + from.to_string(), + to.to_string(), + amount.to_string(), + ), + )?; + withdrawals += inserted; + } + _ => continue, + }; + } + + info!(block_range = ?committed_chain.range(), %deposits, %withdrawals, "Committed chain events"); + + // Send a finished height event, signaling the node that we don't need any blocks below + // this height anymore + this.ctx.events.send(ExExEvent::FinishedHeight(notification.tip().number))?; + } + + Poll::Pending + } +} + +/// Decode chain of blocks into a flattened list of receipt logs, and filter only +/// [L1StandardBridgeEvents]. 
+fn decode_chain_into_events( + chain: &Chain, +) -> impl Iterator +{ + chain + // Get all blocks and receipts + .blocks_and_receipts() + // Get all receipts + .flat_map(|(block, receipts)| { + block + .body + .iter() + .zip(receipts.iter().flatten()) + .map(move |(tx, receipt)| (block, tx, receipt)) + }) + // Get all logs + .flat_map(|(block, tx, receipt)| receipt.logs.iter().map(move |log| (block, tx, log))) + // Decode and filter bridge events + .filter_map(|(block, tx, log)| { + L1StandardBridgeEvents::decode_raw_log(&log.topics, &log.data, true) + .ok() + .map(|event| (block, tx, log, event)) + }) +} + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let handle = builder + .node(EthereumNode::default()) + .install_exex("OPBridge", move |ctx| async { + let connection = Connection::open("op_bridge.db")?; + OPBridgeExEx::new(ctx, connection) + }) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) +} From 39dea65b63d1acac8370bac12ddea2c809ed7803 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 11 Apr 2024 17:58:36 +0100 Subject: [PATCH 122/700] feat(examples): minimal viable ExEx (#7565) Co-authored-by: Oliver Nordbjerg Co-authored-by: Oliver Nordbjerg --- Cargo.lock | 16 +++++++++ Cargo.toml | 1 + examples/exex/minimal/Cargo.toml | 19 ++++++++++ examples/exex/minimal/src/main.rs | 58 +++++++++++++++++++++++++++++++ 4 files changed, 94 insertions(+) create mode 100644 examples/exex/minimal/Cargo.toml create mode 100644 examples/exex/minimal/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index b0af1abad7509..0d7d75f8fac8a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4700,6 +4700,22 @@ dependencies = [ "unicase", ] +[[package]] +name = "minimal" +version = "0.0.0" +dependencies = [ + "eyre", + "futures", + "reth", + "reth-exex", + "reth-node-api", + "reth-node-core", + "reth-node-ethereum", + "reth-primitives", + "reth-provider", + "tokio", +] + [[package]] name = "minimal-lexical" version = 
"0.2.1" diff --git a/Cargo.toml b/Cargo.toml index 34c9740a2ffdd..fb17f01e33ed0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,6 +77,7 @@ members = [ "examples/trace-transaction-cli/", "examples/polygon-p2p/", "examples/custom-inspector/", + "examples/exex/minimal/", "examples/exex/op-bridge/", "testing/ef-tests/", ] diff --git a/examples/exex/minimal/Cargo.toml b/examples/exex/minimal/Cargo.toml new file mode 100644 index 0000000000000..c1c586fd5c381 --- /dev/null +++ b/examples/exex/minimal/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "minimal" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth.workspace = true +reth-exex.workspace = true +reth-node-api.workspace = true +reth-node-core.workspace = true +reth-node-ethereum.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true + +eyre.workspace = true +tokio.workspace = true +futures.workspace = true diff --git a/examples/exex/minimal/src/main.rs b/examples/exex/minimal/src/main.rs new file mode 100644 index 0000000000000..3cc8c6b06fc62 --- /dev/null +++ b/examples/exex/minimal/src/main.rs @@ -0,0 +1,58 @@ +use std::{ + pin::Pin, + task::{ready, Context, Poll}, +}; + +use futures::Future; +use reth::builder::FullNodeTypes; +use reth_exex::{ExExContext, ExExEvent}; +use reth_node_ethereum::EthereumNode; +use reth_provider::CanonStateNotification; + +/// A minimal example of an ExEx that simply prints out commit and reorg notifications. 
+struct MinimalExEx { + ctx: ExExContext, +} + +impl Future for MinimalExEx { + type Output = eyre::Result<()>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.get_mut(); + + // Process all new chain state notifications until there are no more + while let Some(notification) = ready!(this.ctx.notifications.poll_recv(cx)) { + // Process one notification + match ¬ification { + CanonStateNotification::Commit { new } => { + println!("Received commit: {:?}", new.first().number..=new.tip().number); + } + CanonStateNotification::Reorg { old, new } => { + println!( + "Received reorg: {:?} -> {:?}", + old.first().number..=old.tip().number, + new.first().number..=new.tip().number + ); + } + }; + + // Send a finished height event, signaling the node that we don't need any blocks below + // this height anymore + this.ctx.events.send(ExExEvent::FinishedHeight(notification.tip().number))?; + } + + Poll::Pending + } +} + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let handle = builder + .node(EthereumNode::default()) + .install_exex("Minimal", move |ctx| async { Ok(MinimalExEx { ctx }) }) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) +} From 830478055879c35ef8aecd4549083d7a74f8c28c Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 11 Apr 2024 16:37:20 -0400 Subject: [PATCH 123/700] fix: disable long read transaction safety in db diff (#7575) --- bin/reth/src/commands/db/diff.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/bin/reth/src/commands/db/diff.rs b/bin/reth/src/commands/db/diff.rs index b7c46f42bb9c4..9c098a50b574c 100644 --- a/bin/reth/src/commands/db/diff.rs +++ b/bin/reth/src/commands/db/diff.rs @@ -21,7 +21,7 @@ use std::{ io::Write, path::{Path, PathBuf}, }; -use tracing::info; +use tracing::{info, warn}; #[derive(Parser, Debug)] /// The arguments for the `reth db diff` command @@ 
-58,6 +58,7 @@ impl Command { /// The discrepancies and extra elements, along with a brief summary of the diff results are /// then written to a file in the output directory. pub fn execute(self, tool: &DbTool) -> eyre::Result<()> { + warn!("Make sure the node is not running when running `reth db diff`!"); // open second db let second_db_path: PathBuf = self.secondary_datadir.join("db").into(); let second_db = open_db_read_only(&second_db_path, self.second_db.database_args())?; @@ -68,8 +69,13 @@ impl Command { }; for table in tables { - let primary_tx = tool.provider_factory.db_ref().tx()?; - let secondary_tx = second_db.tx()?; + let mut primary_tx = tool.provider_factory.db_ref().tx()?; + let mut secondary_tx = second_db.tx()?; + + // disable long read transaction safety, since this will run for a while and it's + // expected that the node is not running + primary_tx.disable_long_read_transaction_safety(); + secondary_tx.disable_long_read_transaction_safety(); let output_dir = self.output.clone(); match table { From 6a83391a538ef3092371661fe97cc7863e7d3d3f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 11 Apr 2024 22:48:04 +0200 Subject: [PATCH 124/700] chore: move tree setup to builder (#7577) --- Cargo.lock | 1 - crates/blockchain-tree/src/blockchain_tree.rs | 24 +++++- crates/node-builder/src/builder.rs | 18 ++-- crates/node-core/Cargo.toml | 2 - crates/node-core/src/node_config.rs | 85 +------------------ 5 files changed, 37 insertions(+), 93 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0d7d75f8fac8a..275156fabb636 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6857,7 +6857,6 @@ dependencies = [ "rand 0.8.5", "reth-auto-seal-consensus", "reth-beacon-consensus", - "reth-blockchain-tree", "reth-config", "reth-consensus-common", "reth-db", diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 16f20a47e2bcd..a05667209bade 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ 
b/crates/blockchain-tree/src/blockchain_tree.rs @@ -80,7 +80,26 @@ where DB: Database + Clone, EVM: ExecutorFactory, { - /// Create a new blockchain tree. + /// Builds the blockchain tree for the node. + /// + /// This method configures the blockchain tree, which is a critical component of the node, + /// responsible for managing the blockchain state, including blocks, transactions, and receipts. + /// It integrates with the consensus mechanism and the EVM for executing transactions. + /// + /// # Parameters + /// - `externals`: External components required by the blockchain tree: + /// - `provider_factory`: A factory for creating various blockchain-related providers, such + /// as for accessing the database or static files. + /// - `consensus`: The consensus configuration, which defines how the node reaches agreement + /// on the blockchain state with other nodes. + /// - `evm_config`: The EVM (Ethereum Virtual Machine) configuration, which affects how + /// smart contracts and transactions are executed. Proper validation of this configuration + /// is crucial for the correct execution of transactions. + /// - `tree_config`: Configuration for the blockchain tree, including any parameters that affect + /// its structure or performance. + /// - `prune_modes`: Configuration for pruning old blockchain data. This helps in managing the + /// storage space efficiently. It's important to validate this configuration to ensure it does + /// not lead to unintended data loss. pub fn new( externals: TreeExternals, config: BlockchainTreeConfig, @@ -124,6 +143,9 @@ where } /// Set the sync metric events sender. + /// + /// A transmitter for sending synchronization metrics. This is used for monitoring the node's + /// synchronization process with the blockchain network. 
pub fn with_sync_metrics_tx(mut self, metrics_tx: MetricEventsSender) -> Self { self.sync_metrics_tx = Some(metrics_tx); self diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index 00e62bc30544e..44d13becadb59 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -20,7 +20,9 @@ use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, BeaconConsensusEngine, }; -use reth_blockchain_tree::{BlockchainTreeConfig, ShareableBlockchainTree}; +use reth_blockchain_tree::{ + BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, +}; use reth_config::config::EtlConfig; use reth_db::{ database::Database, @@ -530,16 +532,20 @@ where let prune_config = config.prune_config()?.or_else(|| reth_config.prune.clone()); + // Configure the blockchain tree for the node let evm_config = types.evm_config(); let tree_config = BlockchainTreeConfig::default(); - let tree = config.build_blockchain_tree( + let tree_externals = TreeExternals::new( provider_factory.clone(), consensus.clone(), - prune_config.clone(), - sync_metrics_tx.clone(), + EvmProcessorFactory::new(config.chain.clone(), evm_config.clone()), + ); + let tree = BlockchainTree::new( + tree_externals, tree_config, - evm_config.clone(), - )?; + prune_config.as_ref().map(|config| config.segments.clone()), + )? 
+ .with_sync_metrics_tx(sync_metrics_tx.clone()); let canon_state_notification_sender = tree.canon_state_notification_sender(); let blockchain_tree = ShareableBlockchainTree::new(tree); diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index 7722938e318c5..4bf2591a1fba5 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -38,7 +38,6 @@ reth-downloaders.workspace = true reth-revm.workspace = true reth-stages.workspace = true reth-prune.workspace = true -reth-blockchain-tree.workspace = true reth-static-file.workspace = true # ethereum @@ -106,7 +105,6 @@ optimism = [ "reth-rpc-types-compat/optimism", "reth-auto-seal-consensus/optimism", "reth-consensus-common/optimism", - "reth-blockchain-tree/optimism", "reth-beacon-consensus/optimism", ] diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index aff0ee4c32b31..71b9f0ed42ea9 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -15,9 +15,6 @@ use metrics_exporter_prometheus::PrometheusHandle; use once_cell::sync::Lazy; use reth_auto_seal_consensus::{AutoSealConsensus, MiningMode}; use reth_beacon_consensus::BeaconConsensus; -use reth_blockchain_tree::{ - config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, -}; use reth_config::{ config::{PruneConfig, StageConfig}, Config, @@ -51,10 +48,7 @@ use reth_provider::{ CanonStateSubscriptions, HeaderProvider, HeaderSyncMode, ProviderFactory, StageCheckpointReader, }; -use reth_revm::{ - stack::{Hook, InspectorStackConfig}, - EvmProcessorFactory, -}; +use reth_revm::stack::{Hook, InspectorStackConfig}; use reth_stages::{ prelude::*, stages::{ @@ -62,7 +56,6 @@ use reth_stages::{ IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage, TransactionLookupStage, }, - MetricEvent, }; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; @@ -72,10 +65,7 @@ use reth_transaction_pool::{ }; use 
secp256k1::SecretKey; use std::{net::SocketAddr, path::PathBuf, sync::Arc}; -use tokio::sync::{ - mpsc::{Receiver, UnboundedSender}, - watch, -}; +use tokio::sync::{mpsc::Receiver, watch}; use tracing::*; /// The default prometheus recorder handle. We use a global static to ensure that it is only @@ -383,77 +373,6 @@ impl NodeConfig { Ok(builder) } - /// Builds the blockchain tree for the node. - /// - /// This method configures the blockchain tree, which is a critical component of the node, - /// responsible for managing the blockchain state, including blocks, transactions, and receipts. - /// It integrates with the consensus mechanism and the EVM for executing transactions. - /// - /// # Parameters - /// - `provider_factory`: A factory for creating various blockchain-related providers, such as - /// for accessing the database or static files. - /// - `consensus`: The consensus configuration, which defines how the node reaches agreement on - /// the blockchain state with other nodes. - /// - `prune_config`: Configuration for pruning old blockchain data. This helps in managing the - /// storage space efficiently. It's important to validate this configuration to ensure it does - /// not lead to unintended data loss. - /// - `sync_metrics_tx`: A transmitter for sending synchronization metrics. This is used for - /// monitoring the node's synchronization process with the blockchain network. - /// - `tree_config`: Configuration for the blockchain tree, including any parameters that affect - /// its structure or performance. - /// - `evm_config`: The EVM (Ethereum Virtual Machine) configuration, which affects how smart - /// contracts and transactions are executed. Proper validation of this configuration is - /// crucial for the correct execution of transactions. - /// - /// # Returns - /// A `ShareableBlockchainTree` instance, which provides access to the blockchain state and - /// supports operations like block insertion, state reversion, and transaction execution. 
- /// - /// # Example - /// ```rust,ignore - /// let tree = config.build_blockchain_tree( - /// provider_factory, - /// consensus, - /// prune_config, - /// sync_metrics_tx, - /// BlockchainTreeConfig::default(), - /// evm_config, - /// )?; - /// ``` - /// - /// # Note - /// Ensure that all configurations passed to this method are validated beforehand to prevent - /// runtime errors. Specifically, `prune_config` and `evm_config` should be checked to ensure - /// they meet the node's operational requirements. - pub fn build_blockchain_tree( - &self, - provider_factory: ProviderFactory, - consensus: Arc, - prune_config: Option, - sync_metrics_tx: UnboundedSender, - tree_config: BlockchainTreeConfig, - evm_config: EvmConfig, - ) -> eyre::Result>> - where - DB: Database + Unpin + Clone + 'static, - EvmConfig: ConfigureEvm + Clone + 'static, - { - // configure blockchain tree - let tree_externals = TreeExternals::new( - provider_factory, - consensus.clone(), - EvmProcessorFactory::new(self.chain.clone(), evm_config), - ); - let tree = BlockchainTree::new( - tree_externals, - tree_config, - prune_config.map(|config| config.segments), - )? 
- .with_sync_metrics_tx(sync_metrics_tx); - - Ok(tree) - } - /// Build a transaction pool and spawn the transaction pool maintenance task pub fn build_and_spawn_txpool( &self, From 210158fba8e6d218d010f2b5a0d8db489c3e22f7 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Fri, 12 Apr 2024 11:58:15 +0200 Subject: [PATCH 125/700] refactor: rm unused fns in `NodeConfig` (#7584) --- crates/node-core/src/node_config.rs | 135 +--------------------------- 1 file changed, 4 insertions(+), 131 deletions(-) diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 71b9f0ed42ea9..a92aeb2978a2f 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -5,10 +5,9 @@ use crate::{ get_secret_key, DatabaseArgs, DebugArgs, DevArgs, DiscoveryArgs, NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, }, - cli::config::RethTransactionPoolConfig, dirs::{ChainPath, DataDirPath}, metrics::prometheus_exporter, - utils::{get_single_header, write_peers_to_file}, + utils::get_single_header, }; use discv5::ListenConfig; use metrics_exporter_prometheus::PrometheusHandle; @@ -25,7 +24,6 @@ use reth_downloaders::{ headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_interfaces::{ - blockchain_tree::BlockchainTreeEngine, consensus::Consensus, p2p::{ bodies::{client::BodiesClient, downloader::BodyDownloader}, @@ -33,20 +31,15 @@ use reth_interfaces::{ }, RethResult, }; -use reth_network::{ - transactions::{TransactionFetcherConfig, TransactionsManagerConfig}, - NetworkBuilder, NetworkConfig, NetworkHandle, NetworkManager, -}; +use reth_network::{NetworkBuilder, NetworkConfig, NetworkManager}; use reth_node_api::ConfigureEvm; use reth_primitives::{ constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, kzg::KzgSettings, stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, Head, SealedHeader, TxHash, B256, MAINNET, }; use reth_provider::{ - providers::{BlockchainProvider, 
StaticFileProvider}, - BlockHashReader, BlockNumReader, BlockReader, BlockchainTreePendingStateProvider, - CanonStateSubscriptions, HeaderProvider, HeaderSyncMode, ProviderFactory, - StageCheckpointReader, + providers::StaticFileProvider, BlockHashReader, BlockNumReader, HeaderProvider, HeaderSyncMode, + ProviderFactory, StageCheckpointReader, }; use reth_revm::stack::{Hook, InspectorStackConfig}; use reth_stages::{ @@ -59,10 +52,6 @@ use reth_stages::{ }; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; -use reth_transaction_pool::{ - blobstore::{DiskFileBlobStore, DiskFileBlobStoreConfig}, - EthTransactionPool, TransactionPool, TransactionValidationTaskExecutor, -}; use secp256k1::SecretKey; use std::{net::SocketAddr, path::PathBuf, sync::Arc}; use tokio::sync::{mpsc::Receiver, watch}; @@ -373,77 +362,6 @@ impl NodeConfig { Ok(builder) } - /// Build a transaction pool and spawn the transaction pool maintenance task - pub fn build_and_spawn_txpool( - &self, - blockchain_db: &BlockchainProvider, - head: Head, - executor: &TaskExecutor, - data_dir: &ChainPath, - ) -> eyre::Result, DiskFileBlobStore>> - where - DB: Database + Unpin + Clone + 'static, - Tree: BlockchainTreeEngine - + BlockchainTreePendingStateProvider - + CanonStateSubscriptions - + Clone - + 'static, - { - let blob_store = DiskFileBlobStore::open( - data_dir.blobstore_path(), - DiskFileBlobStoreConfig::default() - .with_max_cached_entries(self.txpool.max_cached_entries), - )?; - let validator = TransactionValidationTaskExecutor::eth_builder(Arc::clone(&self.chain)) - .with_head_timestamp(head.timestamp) - .kzg_settings(self.kzg_settings()?) 
- // use an additional validation task so we can validate transactions in parallel - .with_additional_tasks(1) - // set the max tx size in bytes allowed to enter the pool - .with_max_tx_input_bytes(self.txpool.max_tx_input_bytes) - .build_with_tasks(blockchain_db.clone(), executor.clone(), blob_store.clone()); - - let transaction_pool = - reth_transaction_pool::Pool::eth_pool(validator, blob_store, self.txpool.pool_config()); - info!(target: "reth::cli", "Transaction pool initialized"); - let transactions_path = data_dir.txpool_transactions_path(); - - // spawn txpool maintenance task - { - let pool = transaction_pool.clone(); - let chain_events = blockchain_db.canonical_state_stream(); - let client = blockchain_db.clone(); - let transactions_backup_config = - reth_transaction_pool::maintain::LocalTransactionBackupConfig::with_local_txs_backup(transactions_path); - - executor.spawn_critical_with_graceful_shutdown_signal( - "local transactions backup task", - |shutdown| { - reth_transaction_pool::maintain::backup_local_transactions_task( - shutdown, - pool.clone(), - transactions_backup_config, - ) - }, - ); - - // spawn the maintenance task - executor.spawn_critical( - "txpool maintenance task", - reth_transaction_pool::maintain::maintain_transaction_pool_future( - client, - pool, - chain_events, - executor.clone(), - Default::default(), - ), - ); - debug!(target: "reth::cli", "Spawned txpool maintenance task"); - } - - Ok(transaction_pool) - } - /// Returns the [Consensus] instance to use. /// /// By default this will be a [BeaconConsensus] instance, but if the `--dev` flag is set, it @@ -539,51 +457,6 @@ impl NodeConfig { Ok(()) } - /// Spawns the configured network and associated tasks and returns the [NetworkHandle] connected - /// to that network. 
- pub fn start_network( - &self, - builder: NetworkBuilder, - task_executor: &TaskExecutor, - pool: Pool, - client: C, - data_dir: &ChainPath, - ) -> NetworkHandle - where - C: BlockReader + HeaderProvider + Clone + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, - { - let (handle, network, txpool, eth) = builder - .transactions( - pool, // Configure transactions manager - TransactionsManagerConfig { - transaction_fetcher_config: TransactionFetcherConfig::new( - self.network.soft_limit_byte_size_pooled_transactions_response, - self.network - .soft_limit_byte_size_pooled_transactions_response_on_pack_request, - ), - }, - ) - .request_handler(client) - .split_with_handle(); - - task_executor.spawn_critical("p2p txpool", txpool); - task_executor.spawn_critical("p2p eth request handler", eth); - - let default_peers_path = data_dir.known_peers_path(); - let known_peers_file = self.network.persistent_peers_file(default_peers_path); - task_executor.spawn_critical_with_graceful_shutdown_signal( - "p2p network task", - |shutdown| { - network.run_until_graceful_shutdown(shutdown, |network| { - write_peers_to_file(network, known_peers_file) - }) - }, - ); - - handle - } - /// Fetches the head block from the database. /// /// If the database is empty, returns the genesis block. From c738087f10cc7de449c024066be5fc3343daef02 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Fri, 12 Apr 2024 11:58:41 +0200 Subject: [PATCH 126/700] chore(tasks): comment formatting (#7583) --- crates/tasks/src/metrics.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/tasks/src/metrics.rs b/crates/tasks/src/metrics.rs index e619db275f400..127783cf0bd22 100644 --- a/crates/tasks/src/metrics.rs +++ b/crates/tasks/src/metrics.rs @@ -20,12 +20,11 @@ pub struct TaskExecutorMetrics { impl TaskExecutorMetrics { /// Increments the counter for spawned critical tasks. 
- pub(crate) fn inc_critical_tasks(&self) { self.critical_tasks.increment(1); } - /// Increments the counter for spawned regular tasks. + /// Increments the counter for spawned regular tasks. pub(crate) fn inc_regular_tasks(&self) { self.regular_tasks.increment(1); } @@ -49,7 +48,6 @@ impl IncCounterOnDrop { impl Drop for IncCounterOnDrop { /// Increment the counter when the instance is dropped. - fn drop(&mut self) { self.0.increment(1); } From 47d533217c2d4fdeced51a384194ba1562b68c2d Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Fri, 12 Apr 2024 11:59:09 +0200 Subject: [PATCH 127/700] chore: clean up some errors (#7585) --- crates/interfaces/src/provider.rs | 6 ------ crates/rpc/rpc/src/eth/filter.rs | 2 +- crates/stages/src/stages/execution.rs | 4 ++-- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/crates/interfaces/src/provider.rs b/crates/interfaces/src/provider.rs index e8a62e300eec0..7221c0cdfc9d9 100644 --- a/crates/interfaces/src/provider.rs +++ b/crates/interfaces/src/provider.rs @@ -23,9 +23,6 @@ pub enum ProviderError { /// Error when recovering the sender for a transaction #[error("failed to recover sender for transaction")] SenderRecoveryError, - /// Inconsistent header gap. - #[error("inconsistent header gap in the database")] - InconsistentHeaderGap, /// The header number was not found for the given block hash. #[error("block hash {0} does not exist in Headers table")] BlockHashNotFound(BlockHash), @@ -65,9 +62,6 @@ pub enum ProviderError { /// The specific receipt is missing #[error("no receipt found for {0:?}")] ReceiptNotFound(TxHashOrNumber), - /// Unable to find a specific block. - #[error("block does not exist {0:?}")] - BlockNotFound(BlockHashOrNumber), /// Unable to find the best block. 
#[error("best block does not exist")] BestBlockNotFound, diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index ad6b28f1ee50a..8ae7f6c55c5fe 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -476,7 +476,7 @@ where None => self .provider .block_hash(header.number)? - .ok_or(ProviderError::BlockNotFound(header.number.into()))?, + .ok_or(ProviderError::HeaderNotFound(header.number.into()))?, }; if let Some(receipts) = self.eth_cache.get_receipts(block_hash).await? { diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 81aef2ad3aa2b..7c6cefbb83f48 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -166,7 +166,7 @@ impl ExecutionStage { // we need the block's transactions but we don't need the transaction hashes let block = provider .block_with_senders(block_number.into(), TransactionVariant::NoHash)? - .ok_or_else(|| ProviderError::BlockNotFound(block_number.into()))?; + .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?; fetch_block_duration += fetch_block_start.elapsed(); @@ -456,7 +456,7 @@ impl Stage for ExecutionStage { for block_number in range { stage_checkpoint.progress.processed -= provider .block_by_number(block_number)? - .ok_or_else(|| ProviderError::BlockNotFound(block_number.into()))? + .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))? 
.gas_used; } } From 9c5aea8c81376a771545aec1a528c60b8c2c2c59 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 12 Apr 2024 13:57:06 +0200 Subject: [PATCH 128/700] feat(op): import below bedrock blocks (#7555) Co-authored-by: Atris --- bin/reth/Cargo.toml | 2 +- bin/reth/src/commands/import.rs | 27 ++++++++- bin/reth/src/commands/mod.rs | 1 + crates/net/downloaders/src/file_client.rs | 18 +++++- crates/node-core/src/args/utils.rs | 16 ++++-- crates/primitives/res/genesis/optimism.json | 32 +++++++++++ crates/primitives/src/chain/mod.rs | 2 +- crates/primitives/src/chain/spec.rs | 56 ++++++++++++++++++- crates/primitives/src/lib.rs | 5 +- crates/primitives/src/transaction/mod.rs | 2 +- .../primitives/src/transaction/signature.rs | 8 +++ 11 files changed, 152 insertions(+), 17 deletions(-) create mode 100644 crates/primitives/res/genesis/optimism.json diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index e12da0d9e051d..0ee2ecf0dadca 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -85,7 +85,7 @@ futures.workspace = true # misc aquamarine.workspace = true eyre.workspace = true -clap = { workspace = true, features = ["derive"] } +clap = { workspace = true, features = ["derive", "env"] } tempfile.workspace = true backon = "0.4" similar-asserts.workspace = true diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index f59e9e149d6be..032a22dd4e96d 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -21,7 +21,7 @@ use reth_downloaders::{ use reth_interfaces::consensus::Consensus; use reth_node_core::{events::node::NodeEvent, init::init_genesis}; use reth_node_ethereum::EthEvmConfig; -use reth_primitives::{stage::StageId, ChainSpec, PruneModes, B256}; +use reth_primitives::{stage::StageId, ChainSpec, PruneModes, B256, OP_RETH_MAINNET_BELOW_BEDROCK}; use reth_provider::{HeaderSyncMode, ProviderFactory, StageCheckpointReader}; use reth_stages::{ prelude::*, @@ -61,6 +61,15 @@ pub struct 
ImportCommand { )] chain: Arc, + /// Disables execution stage. + #[arg(long, verbatim_doc_comment)] + disable_execution: bool, + + /// Import OP Mainnet chain below Bedrock. Caution! Flag must be set as env var, since the env + /// var is read by another process too, in order to make below Bedrock import work. + #[arg(long, verbatim_doc_comment, env = OP_RETH_MAINNET_BELOW_BEDROCK)] + op_mainnet_below_bedrock: bool, + #[command(flatten)] db: DatabaseArgs, @@ -74,9 +83,18 @@ pub struct ImportCommand { impl ImportCommand { /// Execute `import` command - pub async fn execute(self) -> eyre::Result<()> { + pub async fn execute(mut self) -> eyre::Result<()> { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); + if self.op_mainnet_below_bedrock { + self.disable_execution = true; + debug!(target: "reth::cli", "Importing OP mainnet below bedrock"); + } + + if self.disable_execution { + debug!(target: "reth::cli", "Execution stage disabled"); + } + // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path()); @@ -118,6 +136,7 @@ impl ImportCommand { provider_factory.static_file_provider(), PruneModes::default(), ), + self.disable_execution, ) .await?; @@ -154,6 +173,7 @@ impl ImportCommand { consensus: &Arc, file_client: Arc, static_file_producer: StaticFileProducer, + disable_execution: bool, ) -> eyre::Result<(Pipeline, impl Stream)> where DB: Database + Clone + Unpin + 'static, @@ -209,7 +229,8 @@ impl ImportCommand { .max(config.stages.account_hashing.clean_threshold) .max(config.stages.storage_hashing.clean_threshold), config.prune.map(|prune| prune.segments).unwrap_or_default(), - )), + )) + .disable_if(StageId::Execution, || disable_execution), ) .build(provider_factory, static_file_producer); diff --git a/bin/reth/src/commands/mod.rs b/bin/reth/src/commands/mod.rs index 771c2719d976b..278531f716176 100644 --- 
a/bin/reth/src/commands/mod.rs +++ b/bin/reth/src/commands/mod.rs @@ -5,6 +5,7 @@ pub mod db; pub mod debug_cmd; pub mod dump_genesis; pub mod import; + pub mod init_cmd; pub mod node; diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index ebc5fe40895a7..8f9122090304e 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -65,7 +65,8 @@ impl FileClient { let metadata = file.metadata().await?; let file_len = metadata.len(); - // read the entire file into memory + // todo: read chunks into memory. for op mainnet 1/8 th of blocks below bedrock can be + // decoded at once let mut reader = vec![]; file.read_to_end(&mut reader).await.unwrap(); @@ -76,8 +77,12 @@ impl FileClient { // use with_capacity to make sure the internal buffer contains the entire file let mut stream = FramedRead::with_capacity(&reader[..], BlockFileCodec, file_len as usize); + let mut log_interval = 0; + let mut log_interval_start_block = 0; + while let Some(block_res) = stream.next().await { let block = block_res?; + let block_number = block.header.number; let block_hash = block.header.hash_slow(); // add to the internal maps @@ -91,6 +96,17 @@ impl FileClient { withdrawals: block.withdrawals, }, ); + + if log_interval == 0 { + log_interval_start_block = block_number; + } else if log_interval % 100000 == 0 { + trace!(target: "downloaders::file", + blocks=?log_interval_start_block..=block_number, + "inserted blocks into db" + ); + log_interval_start_block = block_number + 1; + } + log_interval += 1; } trace!(blocks = headers.len(), "Initialized file client"); diff --git a/crates/node-core/src/args/utils.rs b/crates/node-core/src/args/utils.rs index 32d235242cf58..13fcd500d66a4 100644 --- a/crates/node-core/src/args/utils.rs +++ b/crates/node-core/src/args/utils.rs @@ -10,14 +10,14 @@ use std::{ }; #[cfg(feature = "optimism")] -use reth_primitives::{BASE_MAINNET, BASE_SEPOLIA, OP_SEPOLIA}; +use 
reth_primitives::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}; #[cfg(not(feature = "optimism"))] use reth_primitives::{DEV, GOERLI, HOLESKY, MAINNET, SEPOLIA}; #[cfg(feature = "optimism")] /// Chains supported by op-reth. First value should be used as the default. -pub const SUPPORTED_CHAINS: &[&str] = &["base", "base-sepolia", "optimism-sepolia"]; +pub const SUPPORTED_CHAINS: &[&str] = &["base", "base-sepolia", "optimism", "optimism-sepolia"]; #[cfg(not(feature = "optimism"))] /// Chains supported by reth. First value should be used as the default. pub const SUPPORTED_CHAINS: &[&str] = &["mainnet", "sepolia", "goerli", "holesky", "dev"]; @@ -43,11 +43,13 @@ pub fn chain_spec_value_parser(s: &str) -> eyre::Result, eyre::Er #[cfg(not(feature = "optimism"))] "dev" => DEV.clone(), #[cfg(feature = "optimism")] - "optimism_sepolia" | "optimism-sepolia" => OP_SEPOLIA.clone(), + "optimism" => OP_MAINNET.clone(), #[cfg(feature = "optimism")] - "base_sepolia" | "base-sepolia" => BASE_SEPOLIA.clone(), + "optimism_sepolia" | "optimism-sepolia" => OP_SEPOLIA.clone(), #[cfg(feature = "optimism")] "base" => BASE_MAINNET.clone(), + #[cfg(feature = "optimism")] + "base_sepolia" | "base-sepolia" => BASE_SEPOLIA.clone(), _ => { let raw = fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned()))?; serde_json::from_str(&raw)? 
@@ -78,11 +80,13 @@ pub fn genesis_value_parser(s: &str) -> eyre::Result, eyre::Error #[cfg(not(feature = "optimism"))] "dev" => DEV.clone(), #[cfg(feature = "optimism")] - "optimism_sepolia" | "optimism-sepolia" => OP_SEPOLIA.clone(), + "optimism" => OP_MAINNET.clone(), #[cfg(feature = "optimism")] - "base_sepolia" | "base-sepolia" => BASE_SEPOLIA.clone(), + "optimism_sepolia" | "optimism-sepolia" => OP_SEPOLIA.clone(), #[cfg(feature = "optimism")] "base" => BASE_MAINNET.clone(), + #[cfg(feature = "optimism")] + "base_sepolia" | "base-sepolia" => BASE_SEPOLIA.clone(), _ => { // try to read json from path first let raw = match fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned())) { diff --git a/crates/primitives/res/genesis/optimism.json b/crates/primitives/res/genesis/optimism.json new file mode 100644 index 0000000000000..2fb05781e9bcc --- /dev/null +++ b/crates/primitives/res/genesis/optimism.json @@ -0,0 +1,32 @@ +{ + "config": { + "ChainName": "optimism-mainnet", + "chainId": 10, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 3950000, + "londonBlock": 3950000, + "arrowGlacierBlock": 3950000, + "grayGlacierBlock": 3950000, + "mergeNetsplitBlock": 3950000, + "bedrockBlock": 105235063, + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true, + "optimism": { + "eip1559Elasticity": 6, + "eip1559Denominator": 50 + }, + "regolithTime": 0 + }, + "difficulty": "1", + "gasLimit": "15000000", + "extradata": "0x000000000000000000000000000000000000000000000000000000000000000000000398232e2064f896018496b4b44b3d62751f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "alloc": {} +} \ No newline at end of file diff --git a/crates/primitives/src/chain/mod.rs 
b/crates/primitives/src/chain/mod.rs index 34f5b9c1f198e..6f9673e719468 100644 --- a/crates/primitives/src/chain/mod.rs +++ b/crates/primitives/src/chain/mod.rs @@ -6,7 +6,7 @@ pub use spec::{ MAINNET, SEPOLIA, }; #[cfg(feature = "optimism")] -pub use spec::{BASE_MAINNET, BASE_SEPOLIA, OP_SEPOLIA}; +pub use spec::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}; // The chain spec module. mod spec; diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index c2491b956d250..32d3b2ad841a8 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -243,6 +243,58 @@ pub static DEV: Lazy> = Lazy::new(|| { .into() }); +/// The Optimism Mainnet spec +#[cfg(feature = "optimism")] +pub static OP_MAINNET: Lazy> = Lazy::new(|| { + ChainSpec { + chain: Chain::optimism_mainnet(), + // genesis contains empty alloc field because state at first bedrock block is imported + // manually from trusted source + genesis: serde_json::from_str(include_str!("../../res/genesis/optimism.json")) + .expect("Can't deserialize Optimism Mainnet genesis json"), + genesis_hash: Some(b256!( + "7ca38a1916c42007829c55e69d3e9a73265554b586a499015373241b8a3fa48b" + )), + fork_timestamps: ForkTimestamps::default() + .shanghai(1699981200) + .canyon(1699981200) + .cancun(1707238800) + .ecotone(1707238800), + paris_block_and_final_difficulty: Some((0, U256::from(0))), + hardforks: BTreeMap::from([ + (Hardfork::Frontier, ForkCondition::Block(0)), + (Hardfork::Homestead, ForkCondition::Block(0)), + (Hardfork::Tangerine, ForkCondition::Block(0)), + (Hardfork::SpuriousDragon, ForkCondition::Block(0)), + (Hardfork::Byzantium, ForkCondition::Block(0)), + (Hardfork::Constantinople, ForkCondition::Block(0)), + (Hardfork::Petersburg, ForkCondition::Block(0)), + (Hardfork::Istanbul, ForkCondition::Block(0)), + (Hardfork::MuirGlacier, ForkCondition::Block(0)), + (Hardfork::Berlin, ForkCondition::Block(3950000)), + (Hardfork::London, 
ForkCondition::Block(3950000)), + (Hardfork::ArrowGlacier, ForkCondition::Block(3950000)), + (Hardfork::GrayGlacier, ForkCondition::Block(3950000)), + ( + Hardfork::Paris, + ForkCondition::TTD { fork_block: Some(3950000), total_difficulty: U256::from(0) }, + ), + (Hardfork::Bedrock, ForkCondition::Block(105235063)), + (Hardfork::Regolith, ForkCondition::Timestamp(0)), + ]), + base_fee_params: BaseFeeParamsKind::Variable( + vec![ + (Hardfork::London, BaseFeeParams::optimism()), + (Hardfork::Canyon, BaseFeeParams::optimism_canyon()), + ] + .into(), + ), + prune_delete_limit: 1700, + ..Default::default() + } + .into() +}); + /// The OP Sepolia spec #[cfg(feature = "optimism")] pub static OP_SEPOLIA: Lazy> = Lazy::new(|| { @@ -469,7 +521,7 @@ impl BaseFeeParams { } } - /// Get the base fee parameters for optimism goerli (post Canyon) + /// Get the base fee parameters for optimism sepolia (post Canyon) #[cfg(feature = "optimism")] pub const fn optimism_sepolia_canyon() -> BaseFeeParams { BaseFeeParams { @@ -3176,7 +3228,7 @@ Post-merge hard forks (timestamp based): #[cfg(feature = "optimism")] #[test] - fn latest_op_mainnet_fork_id() { + fn latest_base_mainnet_fork_id() { assert_eq!( ForkId { hash: ForkHash([0x51, 0xcc, 0x98, 0xb3]), next: 0 }, BASE_MAINNET.latest_fork_id() diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 0f858c914d488..303fd1e7a505a 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -98,8 +98,9 @@ pub use transaction::{ InvalidTransactionError, Signature, Transaction, TransactionKind, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxEip1559, TxEip2930, TxEip4844, TxHashOrNumber, TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, - EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, + EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, OP_RETH_MAINNET_BELOW_BEDROCK, }; + pub use withdrawal::{Withdrawal, Withdrawals}; // Re-exports @@ -141,7 +142,7 @@ pub use c_kzg as 
kzg; #[cfg(feature = "optimism")] mod optimism { pub use crate::{ - chain::{BASE_MAINNET, BASE_SEPOLIA, OP_SEPOLIA}, + chain::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}, transaction::{TxDeposit, DEPOSIT_TX_TYPE_ID}, }; } diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 5e9890fbf988a..a08f7775f2db7 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -28,7 +28,7 @@ pub use sidecar::generate_blob_sidecar; #[cfg(feature = "c-kzg")] pub use sidecar::{BlobTransaction, BlobTransactionSidecar, BlobTransactionValidationError}; -pub use signature::Signature; +pub use signature::{Signature, OP_RETH_MAINNET_BELOW_BEDROCK}; pub use tx_type::{ TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index 221a0a6e36e8b..1f2c71b4a358f 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -14,6 +14,9 @@ const SECP256K1N_HALF: U256 = U256::from_be_bytes([ 0x5D, 0x57, 0x6E, 0x73, 0x57, 0xA4, 0x50, 0x1D, 0xDF, 0xE9, 0x2F, 0x46, 0x68, 0x1B, 0x20, 0xA0, ]); +/// Running OP Mainnet migration for chain below bedrock.] +pub const OP_RETH_MAINNET_BELOW_BEDROCK: &str = "OP_RETH_MAINNET_BELOW_BEDROCK"; + /// r, s: Values corresponding to the signature of the /// transaction and used to determine the sender of /// the transaction; formally Tr and Ts. This is expanded in Appendix F of yellow paper. 
@@ -94,9 +97,14 @@ impl Signature { let v = u64::decode(buf)?; let r = Decodable::decode(buf)?; let s = Decodable::decode(buf)?; + if v < 35 { // non-EIP-155 legacy scheme, v = 27 for even y-parity, v = 28 for odd y-parity if v != 27 && v != 28 { + #[cfg(feature = "optimism")] + if std::env::var(OP_RETH_MAINNET_BELOW_BEDROCK) == Ok(true.to_string()) && v == 0 { + return Ok((Signature { r, s, odd_y_parity: false }, None)) + } return Err(RlpError::Custom("invalid Ethereum signature (V is not 27 or 28)")) } let odd_y_parity = v == 28; From b6fc756b42a64278ae4dcf193a6951b3ee782871 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 12 Apr 2024 14:06:52 +0200 Subject: [PATCH 129/700] chore: move pipeline setup to node-builder (#7588) --- Cargo.lock | 3 +- crates/node-builder/Cargo.toml | 1 + crates/node-builder/src/builder.rs | 56 ++++---- crates/node-builder/src/lib.rs | 2 + crates/node-builder/src/setup.rs | 202 ++++++++++++++++++++++++++++ crates/node-core/Cargo.toml | 2 - crates/node-core/src/node_config.rs | 198 +-------------------------- 7 files changed, 238 insertions(+), 226 deletions(-) create mode 100644 crates/node-builder/src/setup.rs diff --git a/Cargo.lock b/Cargo.lock index 275156fabb636..e4e910fe9642f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6811,6 +6811,7 @@ dependencies = [ "reth-blockchain-tree", "reth-config", "reth-db", + "reth-downloaders", "reth-exex", "reth-interfaces", "reth-network", @@ -6861,7 +6862,6 @@ dependencies = [ "reth-consensus-common", "reth-db", "reth-discv4", - "reth-downloaders", "reth-interfaces", "reth-metrics", "reth-net-nat", @@ -6871,7 +6871,6 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-prune", - "reth-revm", "reth-rpc", "reth-rpc-api", "reth-rpc-builder", diff --git a/crates/node-builder/Cargo.toml b/crates/node-builder/Cargo.toml index 693a20ac0e1cc..944d35b49df18 100644 --- a/crates/node-builder/Cargo.toml +++ b/crates/node-builder/Cargo.toml @@ -35,6 +35,7 @@ reth-static-file.workspace = true 
reth-prune.workspace = true reth-stages.workspace = true reth-config.workspace = true +reth-downloaders.workspace = true ## async futures.workspace = true diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index 44d13becadb59..a487c93d664d2 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -711,20 +711,20 @@ where ) .build(); - let mut pipeline = config - .build_networked_pipeline( - &reth_config.stages, - client.clone(), - Arc::clone(&consensus), - provider_factory.clone(), - &executor, - sync_metrics_tx, - prune_config.clone(), - max_block, - static_file_producer, - evm_config, - ) - .await?; + let mut pipeline = crate::setup::build_networked_pipeline( + &config, + &reth_config.stages, + client.clone(), + Arc::clone(&consensus), + provider_factory.clone(), + &executor, + sync_metrics_tx, + prune_config.clone(), + max_block, + static_file_producer, + evm_config, + ) + .await?; let pipeline_events = pipeline.events(); task.set_pipeline_events(pipeline_events); @@ -733,20 +733,20 @@ where (pipeline, EitherDownloader::Left(client)) } else { - let pipeline = config - .build_networked_pipeline( - &reth_config.stages, - network_client.clone(), - Arc::clone(&consensus), - provider_factory.clone(), - &executor, - sync_metrics_tx, - prune_config.clone(), - max_block, - static_file_producer, - evm_config, - ) - .await?; + let pipeline = crate::setup::build_networked_pipeline( + &config, + &reth_config.stages, + network_client.clone(), + Arc::clone(&consensus), + provider_factory.clone(), + &executor, + sync_metrics_tx, + prune_config.clone(), + max_block, + static_file_producer, + evm_config, + ) + .await?; (pipeline, EitherDownloader::Right(network_client)) }; diff --git a/crates/node-builder/src/lib.rs b/crates/node-builder/src/lib.rs index 5dae4d96828bf..f5d7012d112e0 100644 --- a/crates/node-builder/src/lib.rs +++ b/crates/node-builder/src/lib.rs @@ -26,6 +26,8 @@ pub use handle::NodeHandle; pub 
mod rpc; +pub mod setup; + /// Support for installing the ExExs (execution extensions) in a node. pub mod exex; diff --git a/crates/node-builder/src/setup.rs b/crates/node-builder/src/setup.rs new file mode 100644 index 0000000000000..827f711cebda6 --- /dev/null +++ b/crates/node-builder/src/setup.rs @@ -0,0 +1,202 @@ +//! Helpers for setting up parts of the node. + +use crate::ConfigureEvm; +use reth_config::{config::StageConfig, PruneConfig}; +use reth_db::database::Database; +use reth_downloaders::{ + bodies::bodies::BodiesDownloaderBuilder, + headers::reverse_headers::ReverseHeadersDownloaderBuilder, +}; +use reth_interfaces::{ + consensus::Consensus, + p2p::{ + bodies::{client::BodiesClient, downloader::BodyDownloader}, + headers::{client::HeadersClient, downloader::HeaderDownloader}, + }, +}; +use reth_node_core::{ + node_config::NodeConfig, + primitives::{BlockNumber, B256}, +}; +use reth_provider::{HeaderSyncMode, ProviderFactory}; +use reth_revm::stack::{Hook, InspectorStackConfig}; +use reth_stages::{ + prelude::DefaultStages, + stages::{ + AccountHashingStage, ExecutionStage, ExecutionStageThresholds, IndexAccountHistoryStage, + IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage, + TransactionLookupStage, + }, + Pipeline, StageSet, +}; +use reth_static_file::StaticFileProducer; +use reth_tasks::TaskExecutor; +use reth_tracing::tracing::debug; +use std::sync::Arc; +use tokio::sync::watch; + +/// Constructs a [Pipeline] that's wired to the network +#[allow(clippy::too_many_arguments)] +pub async fn build_networked_pipeline( + node_config: &NodeConfig, + config: &StageConfig, + client: Client, + consensus: Arc, + provider_factory: ProviderFactory, + task_executor: &TaskExecutor, + metrics_tx: reth_stages::MetricEventsSender, + prune_config: Option, + max_block: Option, + static_file_producer: StaticFileProducer, + evm_config: EvmConfig, +) -> eyre::Result> +where + DB: Database + Unpin + Clone + 'static, + Client: HeadersClient 
+ BodiesClient + Clone + 'static, + EvmConfig: ConfigureEvm + Clone + 'static, +{ + // building network downloaders using the fetch client + let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers) + .build(client.clone(), Arc::clone(&consensus)) + .into_task_with(task_executor); + + let body_downloader = BodiesDownloaderBuilder::new(config.bodies) + .build(client, Arc::clone(&consensus), provider_factory.clone()) + .into_task_with(task_executor); + + let pipeline = build_pipeline( + node_config, + provider_factory, + config, + header_downloader, + body_downloader, + consensus, + max_block, + metrics_tx, + prune_config, + static_file_producer, + evm_config, + ) + .await?; + + Ok(pipeline) +} + +/// Builds the [Pipeline] with the given [ProviderFactory] and downloaders. +#[allow(clippy::too_many_arguments)] +pub async fn build_pipeline( + node_config: &NodeConfig, + provider_factory: ProviderFactory, + stage_config: &StageConfig, + header_downloader: H, + body_downloader: B, + consensus: Arc, + max_block: Option, + metrics_tx: reth_stages::MetricEventsSender, + prune_config: Option, + static_file_producer: StaticFileProducer, + evm_config: EvmConfig, +) -> eyre::Result> +where + DB: Database + Clone + 'static, + H: HeaderDownloader + 'static, + B: BodyDownloader + 'static, + EvmConfig: ConfigureEvm + Clone + 'static, +{ + let mut builder = Pipeline::builder(); + + if let Some(max_block) = max_block { + debug!(target: "reth::cli", max_block, "Configuring builder to use max block"); + builder = builder.with_max_block(max_block) + } + + let (tip_tx, tip_rx) = watch::channel(B256::ZERO); + let factory = reth_revm::EvmProcessorFactory::new(node_config.chain.clone(), evm_config); + + let stack_config = InspectorStackConfig { + use_printer_tracer: node_config.debug.print_inspector, + hook: if let Some(hook_block) = node_config.debug.hook_block { + Hook::Block(hook_block) + } else if let Some(tx) = node_config.debug.hook_transaction { + 
Hook::Transaction(tx) + } else if node_config.debug.hook_all { + Hook::All + } else { + Hook::None + }, + }; + + let factory = factory.with_stack_config(stack_config); + + let prune_modes = prune_config.map(|prune| prune.segments).unwrap_or_default(); + + let header_mode = if node_config.debug.continuous { + HeaderSyncMode::Continuous + } else { + HeaderSyncMode::Tip(tip_rx) + }; + let pipeline = builder + .with_tip_sender(tip_tx) + .with_metrics_tx(metrics_tx.clone()) + .add_stages( + DefaultStages::new( + provider_factory.clone(), + header_mode, + Arc::clone(&consensus), + header_downloader, + body_downloader, + factory.clone(), + stage_config.etl.clone(), + ) + .set(SenderRecoveryStage { + commit_threshold: stage_config.sender_recovery.commit_threshold, + }) + .set( + ExecutionStage::new( + factory, + ExecutionStageThresholds { + max_blocks: stage_config.execution.max_blocks, + max_changes: stage_config.execution.max_changes, + max_cumulative_gas: stage_config.execution.max_cumulative_gas, + max_duration: stage_config.execution.max_duration, + }, + stage_config + .merkle + .clean_threshold + .max(stage_config.account_hashing.clean_threshold) + .max(stage_config.storage_hashing.clean_threshold), + prune_modes.clone(), + ) + .with_metrics_tx(metrics_tx), + ) + .set(AccountHashingStage::new( + stage_config.account_hashing.clean_threshold, + stage_config.account_hashing.commit_threshold, + stage_config.etl.clone(), + )) + .set(StorageHashingStage::new( + stage_config.storage_hashing.clean_threshold, + stage_config.storage_hashing.commit_threshold, + stage_config.etl.clone(), + )) + .set(MerkleStage::new_execution(stage_config.merkle.clean_threshold)) + .set(TransactionLookupStage::new( + stage_config.transaction_lookup.chunk_size, + stage_config.etl.clone(), + prune_modes.transaction_lookup, + )) + .set(IndexAccountHistoryStage::new( + stage_config.index_account_history.commit_threshold, + prune_modes.account_history, + stage_config.etl.clone(), + )) + 
.set(IndexStorageHistoryStage::new( + stage_config.index_storage_history.commit_threshold, + prune_modes.storage_history, + stage_config.etl.clone(), + )), + ) + .build(provider_factory, static_file_producer); + + Ok(pipeline) +} diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index 4bf2591a1fba5..e4f6e3330a65e 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -34,8 +34,6 @@ reth-tasks.workspace = true reth-consensus-common.workspace = true reth-auto-seal-consensus.workspace = true reth-beacon-consensus.workspace = true -reth-downloaders.workspace = true -reth-revm.workspace = true reth-stages.workspace = true reth-prune.workspace = true reth-static-file.workspace = true diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index a92aeb2978a2f..d6d3a63b98751 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -14,47 +14,22 @@ use metrics_exporter_prometheus::PrometheusHandle; use once_cell::sync::Lazy; use reth_auto_seal_consensus::{AutoSealConsensus, MiningMode}; use reth_beacon_consensus::BeaconConsensus; -use reth_config::{ - config::{PruneConfig, StageConfig}, - Config, -}; +use reth_config::{config::PruneConfig, Config}; use reth_db::{database::Database, database_metrics::DatabaseMetrics}; -use reth_downloaders::{ - bodies::bodies::BodiesDownloaderBuilder, - headers::reverse_headers::ReverseHeadersDownloaderBuilder, -}; -use reth_interfaces::{ - consensus::Consensus, - p2p::{ - bodies::{client::BodiesClient, downloader::BodyDownloader}, - headers::{client::HeadersClient, downloader::HeaderDownloader}, - }, - RethResult, -}; +use reth_interfaces::{consensus::Consensus, p2p::headers::client::HeadersClient, RethResult}; use reth_network::{NetworkBuilder, NetworkConfig, NetworkManager}; -use reth_node_api::ConfigureEvm; use reth_primitives::{ constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, kzg::KzgSettings, stage::StageId, BlockHashOrNumber, 
BlockNumber, ChainSpec, Head, SealedHeader, TxHash, B256, MAINNET, }; use reth_provider::{ - providers::StaticFileProvider, BlockHashReader, BlockNumReader, HeaderProvider, HeaderSyncMode, + providers::StaticFileProvider, BlockHashReader, BlockNumReader, HeaderProvider, ProviderFactory, StageCheckpointReader, }; -use reth_revm::stack::{Hook, InspectorStackConfig}; -use reth_stages::{ - prelude::*, - stages::{ - AccountHashingStage, ExecutionStage, ExecutionStageThresholds, IndexAccountHistoryStage, - IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage, - TransactionLookupStage, - }, -}; -use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use secp256k1::SecretKey; use std::{net::SocketAddr, path::PathBuf, sync::Arc}; -use tokio::sync::{mpsc::Receiver, watch}; +use tokio::sync::mpsc::Receiver; use tracing::*; /// The default prometheus recorder handle. We use a global static to ensure that it is only @@ -374,54 +349,6 @@ impl NodeConfig { } } - /// Constructs a [Pipeline] that's wired to the network - #[allow(clippy::too_many_arguments)] - pub async fn build_networked_pipeline( - &self, - config: &StageConfig, - client: Client, - consensus: Arc, - provider_factory: ProviderFactory, - task_executor: &TaskExecutor, - metrics_tx: reth_stages::MetricEventsSender, - prune_config: Option, - max_block: Option, - static_file_producer: StaticFileProducer, - evm_config: EvmConfig, - ) -> eyre::Result> - where - DB: Database + Unpin + Clone + 'static, - Client: HeadersClient + BodiesClient + Clone + 'static, - EvmConfig: ConfigureEvm + Clone + 'static, - { - // building network downloaders using the fetch client - let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers) - .build(client.clone(), Arc::clone(&consensus)) - .into_task_with(task_executor); - - let body_downloader = BodiesDownloaderBuilder::new(config.bodies) - .build(client, Arc::clone(&consensus), provider_factory.clone()) - 
.into_task_with(task_executor); - - let pipeline = self - .build_pipeline( - provider_factory, - config, - header_downloader, - body_downloader, - consensus, - max_block, - self.debug.continuous, - metrics_tx, - prune_config, - static_file_producer, - evm_config, - ) - .await?; - - Ok(pipeline) - } - /// Loads 'MAINNET_KZG_TRUSTED_SETUP' pub fn kzg_settings(&self) -> eyre::Result> { Ok(Arc::clone(&MAINNET_KZG_TRUSTED_SETUP)) @@ -583,123 +510,6 @@ impl NodeConfig { }) } - /// Builds the [Pipeline] with the given [ProviderFactory] and downloaders. - #[allow(clippy::too_many_arguments)] - pub async fn build_pipeline( - &self, - provider_factory: ProviderFactory, - stage_config: &StageConfig, - header_downloader: H, - body_downloader: B, - consensus: Arc, - max_block: Option, - continuous: bool, - metrics_tx: reth_stages::MetricEventsSender, - prune_config: Option, - static_file_producer: StaticFileProducer, - evm_config: EvmConfig, - ) -> eyre::Result> - where - DB: Database + Clone + 'static, - H: HeaderDownloader + 'static, - B: BodyDownloader + 'static, - EvmConfig: ConfigureEvm + Clone + 'static, - { - let mut builder = Pipeline::builder(); - - if let Some(max_block) = max_block { - debug!(target: "reth::cli", max_block, "Configuring builder to use max block"); - builder = builder.with_max_block(max_block) - } - - let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let factory = reth_revm::EvmProcessorFactory::new(self.chain.clone(), evm_config); - - let stack_config = InspectorStackConfig { - use_printer_tracer: self.debug.print_inspector, - hook: if let Some(hook_block) = self.debug.hook_block { - Hook::Block(hook_block) - } else if let Some(tx) = self.debug.hook_transaction { - Hook::Transaction(tx) - } else if self.debug.hook_all { - Hook::All - } else { - Hook::None - }, - }; - - let factory = factory.with_stack_config(stack_config); - - let prune_modes = prune_config.map(|prune| prune.segments).unwrap_or_default(); - - let header_mode = - if continuous { 
HeaderSyncMode::Continuous } else { HeaderSyncMode::Tip(tip_rx) }; - let pipeline = builder - .with_tip_sender(tip_tx) - .with_metrics_tx(metrics_tx.clone()) - .add_stages( - DefaultStages::new( - provider_factory.clone(), - header_mode, - Arc::clone(&consensus), - header_downloader, - body_downloader, - factory.clone(), - stage_config.etl.clone(), - ) - .set(SenderRecoveryStage { - commit_threshold: stage_config.sender_recovery.commit_threshold, - }) - .set( - ExecutionStage::new( - factory, - ExecutionStageThresholds { - max_blocks: stage_config.execution.max_blocks, - max_changes: stage_config.execution.max_changes, - max_cumulative_gas: stage_config.execution.max_cumulative_gas, - max_duration: stage_config.execution.max_duration, - }, - stage_config - .merkle - .clean_threshold - .max(stage_config.account_hashing.clean_threshold) - .max(stage_config.storage_hashing.clean_threshold), - prune_modes.clone(), - ) - .with_metrics_tx(metrics_tx), - ) - .set(AccountHashingStage::new( - stage_config.account_hashing.clean_threshold, - stage_config.account_hashing.commit_threshold, - stage_config.etl.clone(), - )) - .set(StorageHashingStage::new( - stage_config.storage_hashing.clean_threshold, - stage_config.storage_hashing.commit_threshold, - stage_config.etl.clone(), - )) - .set(MerkleStage::new_execution(stage_config.merkle.clean_threshold)) - .set(TransactionLookupStage::new( - stage_config.transaction_lookup.chunk_size, - stage_config.etl.clone(), - prune_modes.transaction_lookup, - )) - .set(IndexAccountHistoryStage::new( - stage_config.index_account_history.commit_threshold, - prune_modes.account_history, - stage_config.etl.clone(), - )) - .set(IndexStorageHistoryStage::new( - stage_config.index_storage_history.commit_threshold, - prune_modes.storage_history, - stage_config.etl.clone(), - )), - ) - .build(provider_factory, static_file_producer); - - Ok(pipeline) - } - /// Change rpc port numbers based on the instance number, using the inner /// 
[RpcServerArgs::adjust_instance_ports] method. pub fn adjust_instance_ports(&mut self) { From 4fb55eacfc9792b65a76ae0fe715dc27026bfd6d Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 12 Apr 2024 08:47:59 -0400 Subject: [PATCH 130/700] feat: introduce reth db stats --checksum (#7581) --- bin/reth/src/commands/db/checksum.rs | 31 ++++++-- bin/reth/src/commands/db/stats.rs | 108 ++++++++++++++++++++++++++- 2 files changed, 129 insertions(+), 10 deletions(-) diff --git a/bin/reth/src/commands/db/checksum.rs b/bin/reth/src/commands/db/checksum.rs index 7079f4e46dae3..689b6ca5a94f8 100644 --- a/bin/reth/src/commands/db/checksum.rs +++ b/bin/reth/src/commands/db/checksum.rs @@ -5,7 +5,10 @@ use reth_db::{ cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx, DatabaseEnv, RawKey, RawTable, RawValue, TableViewer, Tables, }; -use std::{hash::Hasher, time::Instant}; +use std::{ + hash::Hasher, + time::{Duration, Instant}, +}; use tracing::{info, warn}; #[derive(Parser, Debug)] @@ -18,20 +21,21 @@ pub struct Command { impl Command { /// Execute `db checksum` command pub fn execute(self, tool: &DbTool) -> eyre::Result<()> { + warn!("This command should be run without the node running!"); self.table.view(&ChecksumViewer { tool }) } } -struct ChecksumViewer<'a, DB: Database> { +pub(crate) struct ChecksumViewer<'a, DB: Database> { tool: &'a DbTool, } -impl TableViewer<()> for ChecksumViewer<'_, DB> { - type Error = eyre::Report; - - fn view(&self) -> Result<(), Self::Error> { - warn!("This command should be run without the node running!"); +impl ChecksumViewer<'_, DB> { + pub(crate) fn new(tool: &'_ DbTool) -> ChecksumViewer<'_, DB> { + ChecksumViewer { tool } + } + pub(crate) fn get_checksum(&self) -> Result<(u64, Duration), eyre::Report> { let provider = self.tool.provider_factory.provider()?.disable_long_read_transaction_safety(); let tx = provider.tx_ref(); @@ -52,8 +56,19 @@ impl TableViewer<()> for 
ChecksumViewer<'_, DB> { hasher.write(v.raw_value()); } + let checksum = hasher.finish(); let elapsed = start_time.elapsed(); - info!("{} checksum: {:x}, took {:?}", T::NAME, hasher.finish(), elapsed); + + Ok((checksum, elapsed)) + } +} + +impl TableViewer<()> for ChecksumViewer<'_, DB> { + type Error = eyre::Report; + + fn view(&self) -> Result<(), Self::Error> { + let (checksum, elapsed) = self.get_checksum::()?; + info!("Checksum for table `{}`: {:#x} (elapsed: {:?})", T::NAME, checksum, elapsed); Ok(()) } diff --git a/bin/reth/src/commands/db/stats.rs b/bin/reth/src/commands/db/stats.rs index 6e31e31e1d454..5ffc136dd16ee 100644 --- a/bin/reth/src/commands/db/stats.rs +++ b/bin/reth/src/commands/db/stats.rs @@ -1,13 +1,24 @@ -use crate::utils::DbTool; +use std::time::Duration; + +use crate::{commands::db::checksum::ChecksumViewer, utils::DbTool}; use clap::Parser; use comfy_table::{Cell, Row, Table as ComfyTable}; use eyre::WrapErr; use human_bytes::human_bytes; use itertools::Itertools; -use reth_db::{database::Database, mdbx, static_file::iter_static_files, DatabaseEnv, Tables}; +use reth_db::{ + database::Database, mdbx, static_file::iter_static_files, AccountChangeSets, AccountsHistory, + AccountsTrie, BlockBodyIndices, BlockOmmers, BlockWithdrawals, Bytecodes, CanonicalHeaders, + DatabaseEnv, HashedAccounts, HashedStorages, HeaderNumbers, HeaderTerminalDifficulties, + Headers, PlainAccountState, PlainStorageState, PruneCheckpoints, Receipts, + StageCheckpointProgresses, StageCheckpoints, StorageChangeSets, StoragesHistory, StoragesTrie, + Tables, TransactionBlocks, TransactionHashNumbers, TransactionSenders, Transactions, + VersionHistory, +}; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_primitives::static_file::{find_fixed_range, SegmentRangeInclusive}; use reth_provider::providers::StaticFileProvider; +use tracing::info; #[derive(Parser, Debug)] /// The arguments for the `reth db stats` command @@ -15,9 +26,19 @@ pub struct Command { /// 
Show only the total size for static files. #[arg(long, default_value_t = false)] only_total_size: bool, + /// Show only the summary per static file segment. #[arg(long, default_value_t = false)] summary: bool, + + /// Show a checksum of each table in the database. + /// + /// WARNING: this option will take a long time to run, as it needs to traverse and hash the + /// entire database. + /// + /// For individual table checksums, use the `reth db checksum` command. + #[arg(long, default_value_t = false)] + checksum: bool, } impl Command { @@ -27,6 +48,12 @@ impl Command { data_dir: ChainPath, tool: &DbTool, ) -> eyre::Result<()> { + if self.checksum { + let checksum_report = self.checksum_report(tool)?; + println!("{checksum_report}"); + println!("\n"); + } + let static_files_stats_table = self.static_files_stats_table(data_dir)?; println!("{static_files_stats_table}"); @@ -285,4 +312,81 @@ impl Command { Ok(table) } + + fn checksum_report(&self, tool: &DbTool) -> eyre::Result { + let mut table = ComfyTable::new(); + table.load_preset(comfy_table::presets::ASCII_MARKDOWN); + table.set_header(vec![Cell::new("Table"), Cell::new("Checksum"), Cell::new("Elapsed")]); + + let db_tables = Tables::ALL; + let mut total_elapsed = Duration::default(); + + for db_table in db_tables { + info!("Calculating checksum for table: {}", db_table); + + let viewer = ChecksumViewer::new(tool); + let (checksum, elapsed) = match db_table { + Tables::AccountsHistory => viewer.get_checksum::().unwrap(), + Tables::AccountChangeSets => viewer.get_checksum::().unwrap(), + Tables::AccountsTrie => viewer.get_checksum::().unwrap(), + Tables::BlockBodyIndices => viewer.get_checksum::().unwrap(), + Tables::BlockOmmers => viewer.get_checksum::().unwrap(), + Tables::BlockWithdrawals => viewer.get_checksum::().unwrap(), + Tables::Bytecodes => viewer.get_checksum::().unwrap(), + Tables::CanonicalHeaders => viewer.get_checksum::().unwrap(), + Tables::HashedAccounts => viewer.get_checksum::().unwrap(), + 
Tables::HashedStorages => viewer.get_checksum::().unwrap(), + Tables::HeaderNumbers => viewer.get_checksum::().unwrap(), + Tables::HeaderTerminalDifficulties => { + viewer.get_checksum::().unwrap() + } + Tables::Headers => viewer.get_checksum::().unwrap(), + Tables::PlainAccountState => viewer.get_checksum::().unwrap(), + Tables::PlainStorageState => viewer.get_checksum::().unwrap(), + Tables::PruneCheckpoints => viewer.get_checksum::().unwrap(), + Tables::Receipts => viewer.get_checksum::().unwrap(), + Tables::StageCheckpointProgresses => { + viewer.get_checksum::().unwrap() + } + Tables::StageCheckpoints => viewer.get_checksum::().unwrap(), + Tables::StorageChangeSets => viewer.get_checksum::().unwrap(), + Tables::StoragesHistory => viewer.get_checksum::().unwrap(), + Tables::StoragesTrie => viewer.get_checksum::().unwrap(), + Tables::TransactionBlocks => viewer.get_checksum::().unwrap(), + Tables::TransactionHashNumbers => { + viewer.get_checksum::().unwrap() + } + Tables::TransactionSenders => viewer.get_checksum::().unwrap(), + Tables::Transactions => viewer.get_checksum::().unwrap(), + Tables::VersionHistory => viewer.get_checksum::().unwrap(), + }; + + // increment duration for final report + total_elapsed += elapsed; + + // add rows containing checksums to the table + let mut row = Row::new(); + row.add_cell(Cell::new(db_table)); + row.add_cell(Cell::new(format!("{:x}", checksum))); + row.add_cell(Cell::new(format!("{:?}", elapsed))); + table.add_row(row); + } + + // add a separator for the final report + let max_widths = table.column_max_content_widths(); + let mut separator = Row::new(); + for width in max_widths { + separator.add_cell(Cell::new(&"-".repeat(width as usize))); + } + table.add_row(separator); + + // add the final report + let mut row = Row::new(); + row.add_cell(Cell::new("Total elapsed")); + row.add_cell(Cell::new("")); + row.add_cell(Cell::new(format!("{:?}", total_elapsed))); + table.add_row(row); + + Ok(table) + } } From 
fe58f35027b5747a581596aee7f5cd3011a0f2ae Mon Sep 17 00:00:00 2001 From: KallyDev Date: Fri, 12 Apr 2024 21:14:20 +0800 Subject: [PATCH 131/700] docs(docker): improper docker volume path causes data loss (#7590) --- book/installation/docker.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/installation/docker.md b/book/installation/docker.md index e9411f816bf15..6ce2ae50a5b48 100644 --- a/book/installation/docker.md +++ b/book/installation/docker.md @@ -61,7 +61,7 @@ To run Reth with Docker, run: ```bash docker run \ - -v rethdata:/root/.local/share/reth/mainnet/db \ + -v rethdata:/root/.local/share/reth/mainnet \ -d \ -p 9001:9001 \ -p 30303:30303 \ From 9b5fc9fd8d9f33dfcccaced9fe201cb360de0f9f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 12 Apr 2024 15:34:40 +0200 Subject: [PATCH 132/700] chore: extract engine type defs to dedicated crate (#7589) --- Cargo.lock | 42 +++++++++++++------ Cargo.toml | 4 ++ crates/engine-primitives/Cargo.toml | 20 +++++++++ .../engine => engine-primitives/src}/error.rs | 0 .../mod.rs => engine-primitives/src/lib.rs} | 10 ++++- .../src}/payload.rs | 0 .../src}/traits.rs | 0 crates/ethereum-engine-primitives/Cargo.toml | 27 ++++++++++++ .../src/lib.rs} | 16 ++++++- .../src/payload.rs | 2 +- crates/node-api/Cargo.toml | 7 +--- crates/node-api/src/lib.rs | 11 +---- crates/node-ethereum/Cargo.toml | 5 +-- crates/node-ethereum/src/lib.rs | 8 +--- crates/payload/builder/Cargo.toml | 9 +--- crates/payload/builder/src/events.rs | 2 +- crates/payload/builder/src/lib.rs | 6 ++- crates/payload/builder/src/noop.rs | 2 +- crates/payload/builder/src/service.rs | 2 +- crates/payload/builder/src/test_utils.rs | 2 +- crates/payload/builder/src/traits.rs | 2 +- 21 files changed, 121 insertions(+), 56 deletions(-) create mode 100644 crates/engine-primitives/Cargo.toml rename crates/{node-api/src/engine => engine-primitives/src}/error.rs (100%) rename crates/{node-api/src/engine/mod.rs => engine-primitives/src/lib.rs} 
(96%) rename crates/{node-api/src/engine => engine-primitives/src}/payload.rs (100%) rename crates/{node-api/src/engine => engine-primitives/src}/traits.rs (100%) create mode 100644 crates/ethereum-engine-primitives/Cargo.toml rename crates/{node-ethereum/src/engine.rs => ethereum-engine-primitives/src/lib.rs} (70%) rename crates/{payload/builder => ethereum-engine-primitives}/src/payload.rs (99%) diff --git a/Cargo.lock b/Cargo.lock index e4e910fe9642f..6ae7d5b0a1f3b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6443,6 +6443,16 @@ dependencies = [ "typenum", ] +[[package]] +name = "reth-engine-primitives" +version = "0.2.0-beta.5" +dependencies = [ + "reth-primitives", + "reth-rpc-types", + "serde", + "thiserror", +] + [[package]] name = "reth-eth-wire" version = "0.2.0-beta.5" @@ -6500,6 +6510,21 @@ dependencies = [ "tokio-util", ] +[[package]] +name = "reth-ethereum-engine-primitives" +version = "0.2.0-beta.5" +dependencies = [ + "alloy-rlp", + "reth-engine-primitives", + "reth-primitives", + "reth-rpc-types", + "reth-rpc-types-compat", + "revm-primitives", + "serde", + "serde_json", + "sha2 0.10.8", +] + [[package]] name = "reth-ethereum-forks" version = "0.2.0-beta.5" @@ -6788,12 +6813,9 @@ name = "reth-node-api" version = "0.2.0-beta.5" dependencies = [ "reth-db", + "reth-engine-primitives", "reth-evm", - "reth-primitives", "reth-provider", - "reth-rpc-types", - "serde", - "thiserror", ] [[package]] @@ -6902,19 +6924,16 @@ dependencies = [ "futures", "reth-basic-payload-builder", "reth-db", + "reth-ethereum-engine-primitives", "reth-ethereum-payload-builder", "reth-evm-ethereum", "reth-exex", "reth-network", - "reth-node-api", "reth-node-builder", "reth-payload-builder", - "reth-primitives", "reth-provider", - "reth-rpc-types", "reth-tracing", "reth-transaction-pool", - "serde", ] [[package]] @@ -6975,21 +6994,18 @@ dependencies = [ name = "reth-payload-builder" version = "0.2.0-beta.5" dependencies = [ - "alloy-rlp", "futures-util", "metrics", + 
"reth-engine-primitives", + "reth-ethereum-engine-primitives", "reth-interfaces", "reth-metrics", - "reth-node-api", "reth-primitives", "reth-provider", "reth-rpc-types", - "reth-rpc-types-compat", "reth-transaction-pool", "revm", - "revm-primitives", "serde_json", - "sha2 0.10.8", "thiserror", "tokio", "tokio-stream", diff --git a/Cargo.toml b/Cargo.toml index fb17f01e33ed0..98b18dd6d80b3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,6 +42,8 @@ members = [ "crates/rpc/rpc-testing-util/", "crates/rpc/rpc-types/", "crates/rpc/rpc-types-compat/", + "crates/engine-primitives/", + "crates/ethereum-engine-primitives/", "crates/node-ethereum/", "crates/node-builder/", "crates/node-optimism/", @@ -206,6 +208,8 @@ reth-db = { path = "crates/storage/db" } reth-discv4 = { path = "crates/net/discv4" } reth-discv5 = { path = "crates/net/discv5" } reth-dns-discovery = { path = "crates/net/dns" } +reth-engine-primitives = { path = "crates/engine-primitives" } +reth-ethereum-engine-primitives = { path = "crates/ethereum-engine-primitives" } reth-node-builder = { path = "crates/node-builder" } reth-node-ethereum = { path = "crates/node-ethereum" } reth-node-optimism = { path = "crates/node-optimism" } diff --git a/crates/engine-primitives/Cargo.toml b/crates/engine-primitives/Cargo.toml new file mode 100644 index 0000000000000..86f2087740a7a --- /dev/null +++ b/crates/engine-primitives/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "reth-engine-primitives" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-primitives.workspace = true +reth-rpc-types.workspace = true + +# misc +serde.workspace = true +thiserror.workspace = true diff --git a/crates/node-api/src/engine/error.rs b/crates/engine-primitives/src/error.rs similarity index 100% rename from crates/node-api/src/engine/error.rs rename to 
crates/engine-primitives/src/error.rs diff --git a/crates/node-api/src/engine/mod.rs b/crates/engine-primitives/src/lib.rs similarity index 96% rename from crates/node-api/src/engine/mod.rs rename to crates/engine-primitives/src/lib.rs index 4d549be329beb..e144d0fcd9f10 100644 --- a/crates/node-api/src/engine/mod.rs +++ b/crates/engine-primitives/src/lib.rs @@ -1,4 +1,12 @@ -//! This contains the [EngineTypes] trait and implementations for ethereum mainnet types. +//! Traits, validation methods, and helper types used to abstract over engine types. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use core::fmt; use reth_primitives::ChainSpec; diff --git a/crates/node-api/src/engine/payload.rs b/crates/engine-primitives/src/payload.rs similarity index 100% rename from crates/node-api/src/engine/payload.rs rename to crates/engine-primitives/src/payload.rs diff --git a/crates/node-api/src/engine/traits.rs b/crates/engine-primitives/src/traits.rs similarity index 100% rename from crates/node-api/src/engine/traits.rs rename to crates/engine-primitives/src/traits.rs diff --git a/crates/ethereum-engine-primitives/Cargo.toml b/crates/ethereum-engine-primitives/Cargo.toml new file mode 100644 index 0000000000000..e384884136a64 --- /dev/null +++ b/crates/ethereum-engine-primitives/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "reth-ethereum-engine-primitives" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-primitives.workspace = true +reth-engine-primitives.workspace = true 
+reth-rpc-types.workspace = true +reth-rpc-types-compat.workspace = true +revm-primitives.workspace = true +alloy-rlp.workspace = true + +# misc +serde.workspace = true +sha2.workspace = true + +[dev-dependencies] +serde_json.workspace = true diff --git a/crates/node-ethereum/src/engine.rs b/crates/ethereum-engine-primitives/src/lib.rs similarity index 70% rename from crates/node-ethereum/src/engine.rs rename to crates/ethereum-engine-primitives/src/lib.rs index 1f2dc54668630..6b030a9c553d0 100644 --- a/crates/node-ethereum/src/engine.rs +++ b/crates/ethereum-engine-primitives/src/lib.rs @@ -1,8 +1,20 @@ -use reth_node_api::{ +//! Ethereum specifc + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +mod payload; +pub use payload::{EthBuiltPayload, EthPayloadBuilderAttributes}; + +use reth_engine_primitives::{ validate_version_specific_fields, EngineApiMessageVersion, EngineObjectValidationError, EngineTypes, PayloadOrAttributes, }; -use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes}; use reth_primitives::ChainSpec; use reth_rpc_types::{ engine::{ diff --git a/crates/payload/builder/src/payload.rs b/crates/ethereum-engine-primitives/src/payload.rs similarity index 99% rename from crates/payload/builder/src/payload.rs rename to crates/ethereum-engine-primitives/src/payload.rs index bb739dddc155c..6e8c820a7e4d2 100644 --- a/crates/payload/builder/src/payload.rs +++ b/crates/ethereum-engine-primitives/src/payload.rs @@ -1,7 +1,7 @@ //! Contains types required for building a payload. 
use alloy_rlp::Encodable; -use reth_node_api::{BuiltPayload, PayloadBuilderAttributes}; +use reth_engine_primitives::{BuiltPayload, PayloadBuilderAttributes}; use reth_primitives::{ constants::EIP1559_INITIAL_BASE_FEE, revm::config::revm_spec_by_timestamp_after_merge, Address, BlobTransactionSidecar, ChainSpec, Hardfork, Header, SealedBlock, Withdrawals, B256, U256, diff --git a/crates/node-api/Cargo.toml b/crates/node-api/Cargo.toml index 323f730d923d4..2d8e1aa6b369c 100644 --- a/crates/node-api/Cargo.toml +++ b/crates/node-api/Cargo.toml @@ -12,12 +12,7 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true -reth-rpc-types.workspace = true reth-evm.workspace = true reth-provider.workspace = true reth-db.workspace = true - -# misc -serde.workspace = true -thiserror.workspace = true +reth-engine-primitives.workspace = true diff --git a/crates/node-api/src/lib.rs b/crates/node-api/src/lib.rs index 2f189732b60a7..c1088f2426806 100644 --- a/crates/node-api/src/lib.rs +++ b/crates/node-api/src/lib.rs @@ -9,15 +9,8 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] /// Traits, validation methods, and helper types used to abstract over engine types. -/// -/// Notably contains the [EngineTypes] trait and implementations for ethereum mainnet types. -pub mod engine; -pub use engine::{ - validate_payload_timestamp, validate_version_specific_fields, validate_withdrawals_presence, - BuiltPayload, EngineApiMessageVersion, EngineObjectValidationError, EngineTypes, - MessageValidationKind, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, - VersionSpecificValidationError, -}; +pub use reth_engine_primitives as engine; +pub use reth_engine_primitives::*; /// Traits and helper types used to abstract over EVM methods and types. 
pub use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; diff --git a/crates/node-ethereum/Cargo.toml b/crates/node-ethereum/Cargo.toml index a8862749149ce..d5a7c6a829056 100644 --- a/crates/node-ethereum/Cargo.toml +++ b/crates/node-ethereum/Cargo.toml @@ -12,12 +12,10 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true reth-payload-builder.workspace = true +reth-ethereum-engine-primitives.workspace = true reth-basic-payload-builder.workspace = true reth-ethereum-payload-builder.workspace = true -reth-rpc-types.workspace = true -reth-node-api.workspace = true reth-node-builder.workspace = true reth-tracing.workspace = true reth-provider.workspace = true @@ -27,7 +25,6 @@ reth-evm-ethereum.workspace = true # misc eyre.workspace = true -serde.workspace = true [dev-dependencies] reth-db.workspace = true diff --git a/crates/node-ethereum/src/lib.rs b/crates/node-ethereum/src/lib.rs index e2b36800484e2..cea2e7be0d02a 100644 --- a/crates/node-ethereum/src/lib.rs +++ b/crates/node-ethereum/src/lib.rs @@ -8,14 +8,10 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -/// Exports commonly used concrete instances of the [EngineTypes](reth_node_api::EngineTypes) -/// trait. -pub mod engine; -pub use engine::EthEngineTypes; +pub use reth_ethereum_engine_primitives::EthEngineTypes; -/// Exports commonly used concrete instances of the -/// [ConfigureEvmEnv](reth_node_api::ConfigureEvmEnv) trait. 
pub mod evm; pub use evm::EthEvmConfig; + pub mod node; pub use node::EthereumNode; diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index 207fbc43fd734..aa4b785cd370e 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -17,13 +17,9 @@ reth-primitives.workspace = true reth-rpc-types.workspace = true reth-transaction-pool.workspace = true reth-interfaces.workspace = true -reth-rpc-types-compat.workspace = true reth-provider.workspace = true -reth-node-api.workspace = true - -# ethereum -alloy-rlp.workspace = true -revm-primitives.workspace = true +reth-engine-primitives.workspace = true +reth-ethereum-engine-primitives.workspace = true # async tokio = { workspace = true, features = ["sync"] } @@ -36,7 +32,6 @@ metrics.workspace = true # misc thiserror.workspace = true -sha2.workspace = true tracing.workspace = true [dev-dependencies] diff --git a/crates/payload/builder/src/events.rs b/crates/payload/builder/src/events.rs index 8b4bb97f1e2ab..66f299bbb31f4 100644 --- a/crates/payload/builder/src/events.rs +++ b/crates/payload/builder/src/events.rs @@ -1,5 +1,5 @@ use futures_util::Stream; -use reth_node_api::EngineTypes; +use reth_engine_primitives::EngineTypes; use tokio::sync::broadcast; use tokio_stream::{ wrappers::{errors::BroadcastStreamRecvError, BroadcastStream}, diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index 3455775454877..b3baf11991deb 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -105,7 +105,6 @@ pub mod database; pub mod error; mod events; mod metrics; -mod payload; mod service; mod traits; @@ -115,7 +114,10 @@ pub mod noop; pub mod test_utils; pub use events::Events; -pub use payload::{EthBuiltPayload, EthPayloadBuilderAttributes}; pub use reth_rpc_types::engine::PayloadId; pub use service::{PayloadBuilderHandle, PayloadBuilderService, PayloadStore}; pub use traits::{KeepPayloadJobAlive, PayloadJob, 
PayloadJobGenerator}; + +// re-export the Ethereum engine primitives for convenience +#[doc(inline)] +pub use reth_ethereum_engine_primitives::{EthBuiltPayload, EthPayloadBuilderAttributes}; diff --git a/crates/payload/builder/src/noop.rs b/crates/payload/builder/src/noop.rs index 5c4c0b8e57e1d..bdfadf7667004 100644 --- a/crates/payload/builder/src/noop.rs +++ b/crates/payload/builder/src/noop.rs @@ -2,7 +2,7 @@ use crate::{service::PayloadServiceCommand, PayloadBuilderHandle}; use futures_util::{ready, StreamExt}; -use reth_node_api::{EngineTypes, PayloadBuilderAttributes}; +use reth_engine_primitives::{EngineTypes, PayloadBuilderAttributes}; use std::{ future::Future, pin::Pin, diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index 10534b48d1445..81d3445b02c82 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -11,7 +11,7 @@ use crate::{ KeepPayloadJobAlive, PayloadJob, }; use futures_util::{future::FutureExt, Stream, StreamExt}; -use reth_node_api::{BuiltPayload, EngineTypes, PayloadBuilderAttributes}; +use reth_engine_primitives::{BuiltPayload, EngineTypes, PayloadBuilderAttributes}; use reth_provider::CanonStateNotification; use reth_rpc_types::engine::PayloadId; use std::{ diff --git a/crates/payload/builder/src/test_utils.rs b/crates/payload/builder/src/test_utils.rs index be5422073e7db..40237aa927661 100644 --- a/crates/payload/builder/src/test_utils.rs +++ b/crates/payload/builder/src/test_utils.rs @@ -5,7 +5,7 @@ use crate::{ EthPayloadBuilderAttributes, PayloadBuilderHandle, PayloadBuilderService, PayloadJob, PayloadJobGenerator, }; -use reth_node_api::EngineTypes; +use reth_engine_primitives::EngineTypes; use reth_primitives::{Block, U256}; use reth_provider::CanonStateNotification; use std::{ diff --git a/crates/payload/builder/src/traits.rs b/crates/payload/builder/src/traits.rs index 09bc6432576c2..cf747b0dabe42 100644 --- a/crates/payload/builder/src/traits.rs 
+++ b/crates/payload/builder/src/traits.rs @@ -1,7 +1,7 @@ //! Trait abstractions used by the payload crate. use crate::error::PayloadBuilderError; -use reth_node_api::{BuiltPayload, PayloadBuilderAttributes}; +use reth_engine_primitives::{BuiltPayload, PayloadBuilderAttributes}; use reth_provider::CanonStateNotification; use std::future::Future; From dd28a4ca730f5d213861ab23ca012896c1d19c34 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 12 Apr 2024 15:34:42 +0200 Subject: [PATCH 133/700] fix(cmd): get tip block (#7591) --- crates/net/downloaders/src/file_client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 8f9122090304e..1f09ad787b8fd 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -116,7 +116,7 @@ impl FileClient { /// Get the tip hash of the chain. pub fn tip(&self) -> Option { - self.headers.get(&(self.headers.len() as u64)).map(|h| h.hash_slow()) + self.headers.get(&((self.headers.len() - 1) as u64)).map(|h| h.hash_slow()) } /// Returns the highest block number of this client has or `None` if empty From 9a4c01fbeec0e5788fc54c2d7a0d2288250f491a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 12 Apr 2024 16:11:54 +0200 Subject: [PATCH 134/700] chore: rm node api deps (#7593) --- Cargo.lock | 5 +++-- crates/consensus/auto-seal/Cargo.toml | 3 ++- crates/consensus/auto-seal/src/lib.rs | 3 ++- crates/consensus/auto-seal/src/task.rs | 3 ++- crates/revm/Cargo.toml | 2 +- crates/revm/src/factory.rs | 2 +- crates/revm/src/optimism/processor.rs | 2 +- crates/revm/src/processor.rs | 2 +- crates/revm/src/test_utils.rs | 2 +- 9 files changed, 14 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6ae7d5b0a1f3b..ae67f44a49fe9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6121,8 +6121,9 @@ version = "0.2.0-beta.5" dependencies = [ "futures-util", 
"reth-beacon-consensus", + "reth-engine-primitives", + "reth-evm", "reth-interfaces", - "reth-node-api", "reth-primitives", "reth-provider", "reth-revm", @@ -7132,8 +7133,8 @@ name = "reth-revm" version = "0.2.0-beta.5" dependencies = [ "reth-consensus-common", + "reth-evm", "reth-interfaces", - "reth-node-api", "reth-primitives", "reth-provider", "reth-trie", diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml index 62ef0e9f12297..72a593b5a64fb 100644 --- a/crates/consensus/auto-seal/Cargo.toml +++ b/crates/consensus/auto-seal/Cargo.toml @@ -20,7 +20,8 @@ reth-provider.workspace = true reth-stages.workspace = true reth-revm.workspace = true reth-transaction-pool.workspace = true -reth-node-api.workspace = true +reth-evm.workspace = true +reth-engine-primitives.workspace = true # async futures-util.workspace = true diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index f48c5cea1360d..59efc0d48a5e9 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -16,11 +16,12 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use reth_beacon_consensus::BeaconEngineMessage; +use reth_engine_primitives::EngineTypes; +use reth_evm::ConfigureEvm; use reth_interfaces::{ consensus::{Consensus, ConsensusError}, executor::{BlockExecutionError, BlockValidationError}, }; -use reth_node_api::{ConfigureEvm, EngineTypes}; use reth_primitives::{ constants::{EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT}, eip4844::calculate_excess_blob_gas, diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index 0084583b6288f..6d7a29a330038 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -1,8 +1,9 @@ use crate::{mode::MiningMode, Storage}; use futures_util::{future::BoxFuture, FutureExt}; use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; +use 
reth_engine_primitives::EngineTypes; +use reth_evm::ConfigureEvm; use reth_interfaces::consensus::ForkchoiceState; -use reth_node_api::{ConfigureEvm, EngineTypes}; use reth_primitives::{Block, ChainSpec, IntoRecoveredTransaction, SealedBlockWithSenders}; use reth_provider::{CanonChainTracker, CanonStateNotificationSender, Chain, StateProviderFactory}; use reth_stages::PipelineEvent; diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 74818a20a622c..2bd04d27847bd 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -17,7 +17,7 @@ reth-primitives.workspace = true reth-interfaces.workspace = true reth-provider.workspace = true reth-consensus-common.workspace = true -reth-node-api.workspace = true +reth-evm.workspace = true # revm revm.workspace = true diff --git a/crates/revm/src/factory.rs b/crates/revm/src/factory.rs index bf8117e8b3a4f..61e43cc1809cc 100644 --- a/crates/revm/src/factory.rs +++ b/crates/revm/src/factory.rs @@ -3,8 +3,8 @@ use crate::{ processor::EVMProcessor, stack::{InspectorStack, InspectorStackConfig}, }; +use reth_evm::ConfigureEvm; use reth_interfaces::executor::BlockExecutionError; -use reth_node_api::ConfigureEvm; use reth_primitives::ChainSpec; use reth_provider::{ExecutorFactory, PrunableBlockExecutor, StateProvider}; use std::sync::Arc; diff --git a/crates/revm/src/optimism/processor.rs b/crates/revm/src/optimism/processor.rs index 9fdbe45c951dd..ef421d46a2d17 100644 --- a/crates/revm/src/optimism/processor.rs +++ b/crates/revm/src/optimism/processor.rs @@ -3,7 +3,7 @@ use reth_interfaces::executor::{ BlockExecutionError, BlockValidationError, OptimismBlockExecutionError, }; -use reth_node_api::ConfigureEvm; +use reth_evm::ConfigureEvm; use reth_primitives::{ proofs::calculate_receipt_root_optimism, revm_primitives::ResultAndState, BlockWithSenders, Bloom, ChainSpec, Hardfork, Receipt, ReceiptWithBloom, TxType, B256, U256, diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs index 
ec698bcd177b1..38bacd7f1d728 100644 --- a/crates/revm/src/processor.rs +++ b/crates/revm/src/processor.rs @@ -11,8 +11,8 @@ use std::{sync::Arc, time::Instant}; #[cfg(not(feature = "optimism"))] use tracing::{debug, trace}; +use reth_evm::ConfigureEvm; use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; -use reth_node_api::ConfigureEvm; #[cfg(feature = "optimism")] use reth_primitives::revm::env::fill_op_tx_env; #[cfg(not(feature = "optimism"))] diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 535d7919ffd72..cf0b8299807f9 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -1,5 +1,5 @@ +use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_interfaces::provider::ProviderResult; -use reth_node_api::{ConfigureEvm, ConfigureEvmEnv}; use reth_primitives::{ keccak256, revm::config::revm_spec, trie::AccountProof, Account, Address, BlockNumber, Bytecode, Bytes, ChainSpec, Head, Header, StorageKey, Transaction, B256, U256, From e1ebc2f06b781a651dea073fa479c0c367604ec1 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 12 Apr 2024 15:39:02 +0100 Subject: [PATCH 135/700] chore(node-api): move `FullNodeComponents` from builder (#7597) Co-authored-by: Matthias Seitz --- Cargo.lock | 5 + bin/reth/src/lib.rs | 5 + crates/node-api/Cargo.toml | 4 + crates/node-api/src/node.rs | 109 +++++++++++++ crates/node-builder/src/builder.rs | 9 +- crates/node-builder/src/components/builder.rs | 42 ++++- crates/node-builder/src/components/mod.rs | 2 - crates/node-builder/src/components/traits.rs | 144 ------------------ crates/node-builder/src/handle.rs | 3 +- crates/node-builder/src/hooks.rs | 3 +- crates/node-builder/src/node.rs | 3 +- crates/node-builder/src/rpc.rs | 2 +- crates/node-e2e-tests/tests/it/dev.rs | 5 +- crates/node-ethereum/Cargo.toml | 2 + crates/node-ethereum/tests/it/builder.rs | 3 +- crates/node-optimism/tests/it/builder.rs | 3 +- 16 files changed, 182 insertions(+), 162 
deletions(-) delete mode 100644 crates/node-builder/src/components/traits.rs diff --git a/Cargo.lock b/Cargo.lock index ae67f44a49fe9..8ce28f68565ec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6816,7 +6816,11 @@ dependencies = [ "reth-db", "reth-engine-primitives", "reth-evm", + "reth-network", + "reth-payload-builder", "reth-provider", + "reth-tasks", + "reth-transaction-pool", ] [[package]] @@ -6930,6 +6934,7 @@ dependencies = [ "reth-evm-ethereum", "reth-exex", "reth-network", + "reth-node-api", "reth-node-builder", "reth-payload-builder", "reth-provider", diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index c1538413fb502..00e2c586a20b9 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -39,6 +39,11 @@ pub mod payload { pub use reth_payload_validator::ExecutionPayloadValidator; } +/// Re-exported from `reth_node_api`. +pub mod api { + pub use reth_node_api::*; +} + /// Re-exported from `reth_node_core`. pub mod core { pub use reth_node_core::*; diff --git a/crates/node-api/Cargo.toml b/crates/node-api/Cargo.toml index 2d8e1aa6b369c..e456f1850a59b 100644 --- a/crates/node-api/Cargo.toml +++ b/crates/node-api/Cargo.toml @@ -16,3 +16,7 @@ reth-evm.workspace = true reth-provider.workspace = true reth-db.workspace = true reth-engine-primitives.workspace = true +reth-transaction-pool.workspace = true +reth-network.workspace = true +reth-payload-builder.workspace = true +reth-tasks.workspace = true diff --git a/crates/node-api/src/node.rs b/crates/node-api/src/node.rs index d1ca897cb0cef..1304d77d183fe 100644 --- a/crates/node-api/src/node.rs +++ b/crates/node-api/src/node.rs @@ -2,7 +2,11 @@ use crate::{primitives::NodePrimitives, ConfigureEvm, EngineTypes}; use reth_db::database::Database; +use reth_network::NetworkHandle; +use reth_payload_builder::PayloadBuilderHandle; use reth_provider::FullProvider; +use reth_tasks::TaskExecutor; +use reth_transaction_pool::TransactionPool; use std::marker::PhantomData; /// The type that configures the 
essential types of an ethereum like node. @@ -72,3 +76,108 @@ where type DB = DB; type Provider = Provider; } + +/// Encapsulates all types and components of the node. +pub trait FullNodeComponents: FullNodeTypes + 'static { + /// The transaction pool of the node. + type Pool: TransactionPool; + + /// Returns the transaction pool of the node. + fn pool(&self) -> &Self::Pool; + + /// Returns the provider of the node. + fn provider(&self) -> &Self::Provider; + + /// Returns the handle to the network + fn network(&self) -> &NetworkHandle; + + /// Returns the handle to the payload builder service. + fn payload_builder(&self) -> &PayloadBuilderHandle; + + /// Returns the task executor. + fn task_executor(&self) -> &TaskExecutor; +} + +/// A type that encapsulates all the components of the node. +#[derive(Debug)] +pub struct FullNodeComponentsAdapter { + /// The EVM configuration of the node. + pub evm_config: Node::Evm, + /// The transaction pool of the node. + pub pool: Pool, + /// The network handle of the node. + pub network: NetworkHandle, + /// The provider of the node. + pub provider: Node::Provider, + /// The payload builder service handle of the node. + pub payload_builder: PayloadBuilderHandle, + /// The task executor of the node. 
+ pub executor: TaskExecutor, +} + +impl FullNodeTypes for FullNodeComponentsAdapter +where + Node: FullNodeTypes, + Pool: TransactionPool + 'static, +{ + type DB = Node::DB; + type Provider = Node::Provider; +} + +impl NodeTypes for FullNodeComponentsAdapter +where + Node: FullNodeTypes, + Pool: TransactionPool + 'static, +{ + type Primitives = Node::Primitives; + type Engine = Node::Engine; + type Evm = Node::Evm; + + fn evm_config(&self) -> Self::Evm { + self.evm_config.clone() + } +} + +impl FullNodeComponents for FullNodeComponentsAdapter +where + Node: FullNodeTypes, + Pool: TransactionPool + 'static, +{ + type Pool = Pool; + + fn pool(&self) -> &Self::Pool { + &self.pool + } + + fn provider(&self) -> &Self::Provider { + &self.provider + } + + fn network(&self) -> &NetworkHandle { + &self.network + } + + fn payload_builder(&self) -> &PayloadBuilderHandle { + &self.payload_builder + } + + fn task_executor(&self) -> &TaskExecutor { + &self.executor + } +} + +impl Clone for FullNodeComponentsAdapter +where + Pool: Clone, +{ + fn clone(&self) -> Self { + Self { + evm_config: self.evm_config.clone(), + pool: self.pool.clone(), + network: self.network.clone(), + provider: self.provider.clone(), + payload_builder: self.payload_builder.clone(), + executor: self.executor.clone(), + } + } +} diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index a487c93d664d2..77f7c67afd01c 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -3,10 +3,7 @@ #![allow(clippy::type_complexity, missing_debug_implementations)] use crate::{ - components::{ - ComponentsBuilder, FullNodeComponents, FullNodeComponentsAdapter, NodeComponents, - NodeComponentsBuilder, PoolBuilder, - }, + components::{ComponentsBuilder, NodeComponents, NodeComponentsBuilder, PoolBuilder}, exex::BoxedLaunchExEx, hooks::NodeHooks, node::FullNode, @@ -33,7 +30,9 @@ use reth_db::{ use reth_exex::{ExExContext, ExExHandle, ExExManager}; use 
reth_interfaces::p2p::either::EitherDownloader; use reth_network::{NetworkBuilder, NetworkConfig, NetworkEvents, NetworkHandle}; -use reth_node_api::{FullNodeTypes, FullNodeTypesAdapter, NodeTypes}; +use reth_node_api::{ + FullNodeComponents, FullNodeComponentsAdapter, FullNodeTypes, FullNodeTypesAdapter, NodeTypes, +}; use reth_node_core::{ cli::config::{PayloadBuilderConfig, RethRpcConfig, RethTransactionPoolConfig}, dirs::{ChainPath, DataDirPath, MaybePlatformPath}, diff --git a/crates/node-builder/src/components/builder.rs b/crates/node-builder/src/components/builder.rs index f145846035fc5..6abdca96c5c0c 100644 --- a/crates/node-builder/src/components/builder.rs +++ b/crates/node-builder/src/components/builder.rs @@ -1,11 +1,10 @@ //! A generic [NodeComponentsBuilder] use crate::{ - components::{ - NetworkBuilder, NodeComponents, NodeComponentsBuilder, PayloadServiceBuilder, PoolBuilder, - }, + components::{NetworkBuilder, NodeComponents, PayloadServiceBuilder, PoolBuilder}, BuilderContext, FullNodeTypes, }; +use reth_transaction_pool::TransactionPool; use std::marker::PhantomData; /// A generic, customizable [`NodeComponentsBuilder`]. @@ -162,3 +161,40 @@ impl Default for ComponentsBuilder<(), (), (), ()> { } } } + +/// A type that configures all the customizable components of the node and knows how to build them. +/// +/// Implementors of this trait are responsible for building all the components of the node: See +/// [NodeComponents]. +/// +/// The [ComponentsBuilder] is a generic implementation of this trait that can be used to customize +/// certain components of the node using the builder pattern and defaults, e.g. Ethereum and +/// Optimism. +pub trait NodeComponentsBuilder { + /// The transaction pool to use. + type Pool: TransactionPool + Unpin + 'static; + + /// Builds the components of the node. 
+ fn build_components( + self, + context: &BuilderContext, + ) -> impl std::future::Future>> + Send; +} + +impl NodeComponentsBuilder for F +where + Node: FullNodeTypes, + F: FnOnce(&BuilderContext) -> Fut + Send, + Fut: std::future::Future>> + Send, + Pool: TransactionPool + Unpin + 'static, +{ + type Pool = Pool; + + fn build_components( + self, + ctx: &BuilderContext, + ) -> impl std::future::Future>> + Send + { + self(ctx) + } +} diff --git a/crates/node-builder/src/components/mod.rs b/crates/node-builder/src/components/mod.rs index c0d2bc8906d0c..4aa73f0fffcde 100644 --- a/crates/node-builder/src/components/mod.rs +++ b/crates/node-builder/src/components/mod.rs @@ -14,13 +14,11 @@ pub use payload::*; pub use pool::*; use reth_network::NetworkHandle; use reth_payload_builder::PayloadBuilderHandle; -pub use traits::*; mod builder; mod network; mod payload; mod pool; -mod traits; /// All the components of the node. /// diff --git a/crates/node-builder/src/components/traits.rs b/crates/node-builder/src/components/traits.rs deleted file mode 100644 index f73de22304518..0000000000000 --- a/crates/node-builder/src/components/traits.rs +++ /dev/null @@ -1,144 +0,0 @@ -//! Traits for the builder - -use crate::{components::NodeComponents, BuilderContext}; -use reth_network::NetworkHandle; -use reth_node_api::{FullNodeTypes, NodeTypes}; -use reth_payload_builder::PayloadBuilderHandle; -use reth_tasks::TaskExecutor; -use reth_transaction_pool::TransactionPool; - -/// Encapsulates all types and components of the node. -pub trait FullNodeComponents: FullNodeTypes + 'static { - /// The transaction pool of the node. - type Pool: TransactionPool; - - /// Returns the transaction pool of the node. - fn pool(&self) -> &Self::Pool; - - /// Returns the provider of the node. - fn provider(&self) -> &Self::Provider; - - /// Returns the handle to the network - fn network(&self) -> &NetworkHandle; - - /// Returns the handle to the payload builder service. 
- fn payload_builder(&self) -> &PayloadBuilderHandle; - - /// Returns the task executor. - fn task_executor(&self) -> &TaskExecutor; -} - -/// A type that encapsulates all the components of the node. -#[derive(Debug)] -pub struct FullNodeComponentsAdapter { - pub(crate) evm_config: Node::Evm, - pub(crate) pool: Pool, - pub(crate) network: NetworkHandle, - pub(crate) provider: Node::Provider, - pub(crate) payload_builder: PayloadBuilderHandle, - pub(crate) executor: TaskExecutor, -} - -impl FullNodeTypes for FullNodeComponentsAdapter -where - Node: FullNodeTypes, - Pool: TransactionPool + 'static, -{ - type DB = Node::DB; - type Provider = Node::Provider; -} - -impl NodeTypes for FullNodeComponentsAdapter -where - Node: FullNodeTypes, - Pool: TransactionPool + 'static, -{ - type Primitives = Node::Primitives; - type Engine = Node::Engine; - type Evm = Node::Evm; - - fn evm_config(&self) -> Self::Evm { - self.evm_config.clone() - } -} - -impl FullNodeComponents for FullNodeComponentsAdapter -where - Node: FullNodeTypes, - Pool: TransactionPool + 'static, -{ - type Pool = Pool; - - fn pool(&self) -> &Self::Pool { - &self.pool - } - - fn provider(&self) -> &Self::Provider { - &self.provider - } - - fn network(&self) -> &NetworkHandle { - &self.network - } - - fn payload_builder(&self) -> &PayloadBuilderHandle { - &self.payload_builder - } - - fn task_executor(&self) -> &TaskExecutor { - &self.executor - } -} - -impl Clone for FullNodeComponentsAdapter -where - Pool: Clone, -{ - fn clone(&self) -> Self { - Self { - evm_config: self.evm_config.clone(), - pool: self.pool.clone(), - network: self.network.clone(), - provider: self.provider.clone(), - payload_builder: self.payload_builder.clone(), - executor: self.executor.clone(), - } - } -} - -/// A type that configures all the customizable components of the node and knows how to build them. -/// -/// Implementors of this trait are responsible for building all the components of the node: See -/// [NodeComponents]. 
-/// -/// The [ComponentsBuilder](crate::components::builder::ComponentsBuilder) is a generic -/// implementation of this trait that can be used to customize certain components of the node using -/// the builder pattern and defaults, e.g. Ethereum and Optimism. -pub trait NodeComponentsBuilder { - /// The transaction pool to use. - type Pool: TransactionPool + Unpin + 'static; - - /// Builds the components of the node. - fn build_components( - self, - context: &BuilderContext, - ) -> impl std::future::Future>> + Send; -} - -impl NodeComponentsBuilder for F -where - Node: FullNodeTypes, - F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: std::future::Future>> + Send, - Pool: TransactionPool + Unpin + 'static, -{ - type Pool = Pool; - - fn build_components( - self, - ctx: &BuilderContext, - ) -> impl std::future::Future>> + Send - { - self(ctx) - } -} diff --git a/crates/node-builder/src/handle.rs b/crates/node-builder/src/handle.rs index c5174632ece75..cbdce0c8b59fb 100644 --- a/crates/node-builder/src/handle.rs +++ b/crates/node-builder/src/handle.rs @@ -1,4 +1,5 @@ -use crate::{components::FullNodeComponents, node::FullNode}; +use crate::node::FullNode; +use reth_node_api::FullNodeComponents; use reth_node_core::exit::NodeExitFuture; use std::fmt; diff --git a/crates/node-builder/src/hooks.rs b/crates/node-builder/src/hooks.rs index 4233e9c73f114..9d2127f5a5822 100644 --- a/crates/node-builder/src/hooks.rs +++ b/crates/node-builder/src/hooks.rs @@ -1,4 +1,5 @@ -use crate::{components::FullNodeComponents, node::FullNode}; +use crate::node::FullNode; +use reth_node_api::FullNodeComponents; use std::fmt; /// Container for all the configurable hook functions. 
diff --git a/crates/node-builder/src/node.rs b/crates/node-builder/src/node.rs index c17914002150f..766bae14fe88c 100644 --- a/crates/node-builder/src/node.rs +++ b/crates/node-builder/src/node.rs @@ -1,8 +1,9 @@ use crate::{ - components::{ComponentsBuilder, FullNodeComponents}, + components::ComponentsBuilder, rpc::{RethRpcServerHandles, RpcRegistry}, }; use reth_network::NetworkHandle; +use reth_node_api::FullNodeComponents; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, diff --git a/crates/node-builder/src/rpc.rs b/crates/node-builder/src/rpc.rs index 671d96ad7ceb2..d6e2eb0f239d6 100644 --- a/crates/node-builder/src/rpc.rs +++ b/crates/node-builder/src/rpc.rs @@ -1,8 +1,8 @@ //! Builder support for rpc components. -use crate::components::FullNodeComponents; use futures::TryFutureExt; use reth_network::NetworkHandle; +use reth_node_api::FullNodeComponents; use reth_node_core::{ cli::config::RethRpcConfig, node_config::NodeConfig, diff --git a/crates/node-e2e-tests/tests/it/dev.rs b/crates/node-e2e-tests/tests/it/dev.rs index ef579b1a71f4c..b096bda5aab0b 100644 --- a/crates/node-e2e-tests/tests/it/dev.rs +++ b/crates/node-e2e-tests/tests/it/dev.rs @@ -1,6 +1,7 @@ use futures_util::StreamExt; use reth::{ - builder::{components::FullNodeComponents, FullNode, NodeBuilder, NodeHandle}, + api::FullNodeComponents, + builder::{FullNode, NodeBuilder, NodeHandle}, providers::CanonStateSubscriptions, rpc::eth::EthTransactions, tasks::TaskManager, @@ -55,7 +56,7 @@ async fn assert_chain_advances(mut node: FullNode Arc { let custom_genesis = r#" { - + "nonce": "0x42", "timestamp": "0x0", "extraData": "0x5343", diff --git a/crates/node-ethereum/Cargo.toml b/crates/node-ethereum/Cargo.toml index d5a7c6a829056..4380e57377be1 100644 --- a/crates/node-ethereum/Cargo.toml +++ b/crates/node-ethereum/Cargo.toml @@ -29,4 +29,6 @@ eyre.workspace = true [dev-dependencies] reth-db.workspace = true reth-exex.workspace = true +reth-node-api.workspace = true 
+ futures.workspace = true diff --git a/crates/node-ethereum/tests/it/builder.rs b/crates/node-ethereum/tests/it/builder.rs index 1998d9bade28b..7cfc0d705b03b 100644 --- a/crates/node-ethereum/tests/it/builder.rs +++ b/crates/node-ethereum/tests/it/builder.rs @@ -1,7 +1,8 @@ //! Node builder setup tests. use reth_db::test_utils::create_test_rw_db; -use reth_node_builder::{components::FullNodeComponents, NodeBuilder, NodeConfig}; +use reth_node_api::FullNodeComponents; +use reth_node_builder::{NodeBuilder, NodeConfig}; use reth_node_ethereum::node::EthereumNode; #[test] diff --git a/crates/node-optimism/tests/it/builder.rs b/crates/node-optimism/tests/it/builder.rs index 01bed6b9e17e2..64f96bd2d96c1 100644 --- a/crates/node-optimism/tests/it/builder.rs +++ b/crates/node-optimism/tests/it/builder.rs @@ -1,7 +1,8 @@ //! Node builder setup tests. use reth_db::test_utils::create_test_rw_db; -use reth_node_builder::{components::FullNodeComponents, NodeBuilder, NodeConfig}; +use reth_node_api::FullNodeComponents; +use reth_node_builder::{NodeBuilder, NodeConfig}; use reth_node_optimism::node::OptimismNode; #[test] From 1edd9d1e4369a138fb3559d4cc11c6593507390f Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 12 Apr 2024 16:49:00 +0200 Subject: [PATCH 136/700] fix(test): add missing dep (#7595) --- crates/net/downloaders/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 8f1d5612b1b0e..83aadb85326f2 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -45,6 +45,7 @@ itertools.workspace = true [dev-dependencies] reth-db = { workspace = true, features = ["test-utils"] } reth-interfaces = { workspace = true, features = ["test-utils"] } +reth-provider = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true assert_matches.workspace = true From d950bce3f5a5ec3e619bcf4115cddebc892a5126 Mon Sep 17 00:00:00 2001 From: Oliver 
Nordbjerg Date: Fri, 12 Apr 2024 17:11:14 +0200 Subject: [PATCH 137/700] refactor: use async fns in minimal example (#7600) --- Cargo.toml | 13 +++++-- examples/exex/minimal/src/main.rs | 65 +++++++++++++------------------ 2 files changed, 38 insertions(+), 40 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 98b18dd6d80b3..df9416813f9d4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -260,8 +260,13 @@ reth-trie = { path = "crates/trie" } reth-trie-parallel = { path = "crates/trie-parallel" } # revm -revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } -revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } +revm = { version = "8.0.0", features = [ + "std", + "secp256k1", +], default-features = false } +revm-primitives = { version = "3.1.0", features = [ + "std", +], default-features = false } revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "21f8f3d" } # eth @@ -276,7 +281,9 @@ alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "987b alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "987b393" } alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "987b393" } alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "987b393" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "987b393", default-features = false, features = ["reqwest"] } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "987b393", default-features = false, features = [ + "reqwest", +] } alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "987b393" } alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "987b393" } alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "987b393" } diff --git a/examples/exex/minimal/src/main.rs b/examples/exex/minimal/src/main.rs index 3cc8c6b06fc62..410a4a4ea5766 100644 --- a/examples/exex/minimal/src/main.rs 
+++ b/examples/exex/minimal/src/main.rs @@ -1,55 +1,46 @@ -use std::{ - pin::Pin, - task::{ready, Context, Poll}, -}; - use futures::Future; use reth::builder::FullNodeTypes; -use reth_exex::{ExExContext, ExExEvent}; +use reth_exex::ExExContext; use reth_node_ethereum::EthereumNode; use reth_provider::CanonStateNotification; -/// A minimal example of an ExEx that simply prints out commit and reorg notifications. -struct MinimalExEx { +/// The initialization logic of the ExEx is just an async function. +/// +/// During initialization you can wait for resources you need to be up for the ExEx to function, +/// like a database connection. +async fn exex_init( ctx: ExExContext, +) -> eyre::Result>> { + Ok(exex(ctx)) } -impl Future for MinimalExEx { - type Output = eyre::Result<()>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.get_mut(); - - // Process all new chain state notifications until there are no more - while let Some(notification) = ready!(this.ctx.notifications.poll_recv(cx)) { - // Process one notification - match ¬ification { - CanonStateNotification::Commit { new } => { - println!("Received commit: {:?}", new.first().number..=new.tip().number); - } - CanonStateNotification::Reorg { old, new } => { - println!( - "Received reorg: {:?} -> {:?}", - old.first().number..=old.tip().number, - new.first().number..=new.tip().number - ); - } - }; - - // Send a finished height event, signaling the node that we don't need any blocks below - // this height anymore - this.ctx.events.send(ExExEvent::FinishedHeight(notification.tip().number))?; - } - - Poll::Pending +/// An ExEx is just a future, which means you can implement all of it in an async function! +/// +/// This ExEx just prints out whenever a state transition happens, either a new chain segment being +/// added, or a chain segment being re-orged. 
+async fn exex(mut ctx: ExExContext) -> eyre::Result<()> { + while let Some(notification) = ctx.notifications.recv().await { + match ¬ification { + CanonStateNotification::Commit { new } => { + println!("Received commit: {:?}", new.first().number..=new.tip().number); + } + CanonStateNotification::Reorg { old, new } => { + println!( + "Received reorg: {:?} -> {:?}", + old.first().number..=old.tip().number, + new.first().number..=new.tip().number + ); + } + }; } + Ok(()) } fn main() -> eyre::Result<()> { reth::cli::Cli::parse_args().run(|builder, _| async move { let handle = builder .node(EthereumNode::default()) - .install_exex("Minimal", move |ctx| async { Ok(MinimalExEx { ctx }) }) + .install_exex("Minimal", exex_init) .launch() .await?; From bc4119b3890c6c71ebe951cd915c4d99fdbe2bff Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 12 Apr 2024 17:33:05 +0200 Subject: [PATCH 138/700] fix(cli): import cmd logs (#7592) --- bin/reth/src/commands/import.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 032a22dd4e96d..578b1ff8eb670 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -123,7 +123,7 @@ impl ImportCommand { // override the tip let tip = file_client.tip().expect("file client has no tip"); - info!(target: "reth::cli", "Chain file imported"); + info!(target: "reth::cli", "Chain file read"); let (mut pipeline, events) = self .build_import_pipeline( @@ -162,7 +162,7 @@ impl ImportCommand { _ = tokio::signal::ctrl_c() => {}, } - info!(target: "reth::cli", "Finishing up"); + info!(target: "reth::cli", "Chain file imported"); Ok(()) } From d970e51c88ebb521ebb71fff88e38cda16da212d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 12 Apr 2024 17:33:51 +0200 Subject: [PATCH 139/700] fix(cli): bug fix import cmd (#7602) --- bin/reth/src/commands/import.rs | 17 ++++++++++++--- crates/net/downloaders/src/file_client.rs | 25 
++++++++++++++++++++++- 2 files changed, 38 insertions(+), 4 deletions(-) diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 578b1ff8eb670..9f6161a2cf316 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -18,7 +18,13 @@ use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, file_client::FileClient, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; -use reth_interfaces::consensus::Consensus; +use reth_interfaces::{ + consensus::Consensus, + p2p::{ + bodies::downloader::BodyDownloader, + headers::downloader::{HeaderDownloader, SyncTarget}, + }, +}; use reth_node_core::{events::node::NodeEvent, init::init_genesis}; use reth_node_ethereum::EthEvmConfig; use reth_primitives::{stage::StageId, ChainSpec, PruneModes, B256, OP_RETH_MAINNET_BELOW_BEDROCK}; @@ -183,13 +189,18 @@ impl ImportCommand { eyre::bail!("unable to import non canonical blocks"); } - let header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) + let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) .build(file_client.clone(), consensus.clone()) .into_task(); + header_downloader.update_local_head(file_client.tip_header().unwrap()); + header_downloader.update_sync_target(SyncTarget::Tip(file_client.start().unwrap())); - let body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) + let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) .build(file_client.clone(), consensus.clone(), provider_factory.clone()) .into_task(); + body_downloader + .set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap()) + .expect("failed to set download range"); let (tip_tx, tip_rx) = watch::channel(B256::ZERO); let factory = diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 1f09ad787b8fd..9ebde1b70513b 100644 --- a/crates/net/downloaders/src/file_client.rs +++ 
b/crates/net/downloaders/src/file_client.rs @@ -8,7 +8,8 @@ use reth_interfaces::p2p::{ priority::Priority, }; use reth_primitives::{ - BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, Header, HeadersDirection, PeerId, B256, + BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, Header, HeadersDirection, PeerId, + SealedHeader, B256, }; use std::{collections::HashMap, path::Path}; use thiserror::Error; @@ -119,11 +120,33 @@ impl FileClient { self.headers.get(&((self.headers.len() - 1) as u64)).map(|h| h.hash_slow()) } + /// Get the start hash of the chain. + pub fn start(&self) -> Option { + self.headers.get(&self.min_block()?).map(|h| h.hash_slow()) + } + /// Returns the highest block number of this client has or `None` if empty pub fn max_block(&self) -> Option { self.headers.keys().max().copied() } + /// Returns the lowest block number of this client has or `None` if empty + pub fn min_block(&self) -> Option { + self.headers.keys().min().copied() + } + + /// Clones and returns the highest header of this client has or `None` if empty. Seals header + /// before returning. + pub fn tip_header(&self) -> Option { + self.headers.get(&self.max_block()?).map(|h| h.clone().seal_slow()) + } + + /// Clones and returns the lowest header of this client has or `None` if empty. Seals header + /// before returning. 
+ pub fn start_header(&self) -> Option { + self.headers.get(&self.min_block()?).map(|h| h.clone().seal_slow()) + } + /// Returns true if all blocks are canonical (no gaps) pub fn has_canonical_blocks(&self) -> bool { if self.headers.is_empty() { From aa1fbfcba63a89d799faa6a1ea1cc41fdde858e7 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Fri, 12 Apr 2024 17:49:50 +0200 Subject: [PATCH 140/700] refactor(exex): use async fns in op bridge example (#7601) --- examples/exex/op-bridge/src/main.rs | 217 ++++++++++++++-------------- 1 file changed, 105 insertions(+), 112 deletions(-) diff --git a/examples/exex/op-bridge/src/main.rs b/examples/exex/op-bridge/src/main.rs index 814ffce689d97..84f2d47b134d3 100644 --- a/examples/exex/op-bridge/src/main.rs +++ b/examples/exex/op-bridge/src/main.rs @@ -1,8 +1,3 @@ -use std::{ - pin::Pin, - task::{ready, Context, Poll}, -}; - use alloy_sol_types::{sol, SolEventInterface}; use futures::Future; use reth::builder::FullNodeTypes; @@ -16,18 +11,23 @@ use rusqlite::Connection; sol!(L1StandardBridge, "l1_standard_bridge_abi.json"); use crate::L1StandardBridge::{ETHBridgeFinalized, ETHBridgeInitiated, L1StandardBridgeEvents}; -/// An example of ExEx that listens to ETH bridging events from OP Stack chains -/// and stores deposits and withdrawals in a SQLite database. -struct OPBridgeExEx { +/// Initializes the ExEx. +/// +/// Opens up a SQLite database and creates the tables (if they don't exist). +async fn init( ctx: ExExContext, - connection: Connection, + mut connection: Connection, +) -> eyre::Result>> { + create_tables(&mut connection)?; + + Ok(op_bridge_exex(ctx, connection)) } -impl OPBridgeExEx { - fn new(ctx: ExExContext, connection: Connection) -> eyre::Result { - // Create deposits and withdrawals tables - connection.execute( - r#" +/// Create SQLite tables if they do not exist. 
+fn create_tables(connection: &mut Connection) -> rusqlite::Result<()> { + // Create deposits and withdrawals tables + connection.execute( + r#" CREATE TABLE IF NOT EXISTS deposits ( id INTEGER PRIMARY KEY, block_number INTEGER NOT NULL, @@ -38,10 +38,10 @@ impl OPBridgeExEx { amount TEXT NOT NULL ); "#, - (), - )?; - connection.execute( - r#" + (), + )?; + connection.execute( + r#" CREATE TABLE IF NOT EXISTS withdrawals ( id INTEGER PRIMARY KEY, block_number INTEGER NOT NULL, @@ -52,23 +52,23 @@ impl OPBridgeExEx { amount TEXT NOT NULL ); "#, - (), - )?; + (), + )?; - // Create a bridge contract addresses table and insert known ones with their respective - // names - connection.execute( - r#" + // Create a bridge contract addresses table and insert known ones with their respective + // names + connection.execute( + r#" CREATE TABLE IF NOT EXISTS contracts ( id INTEGER PRIMARY KEY, address TEXT NOT NULL UNIQUE, name TEXT NOT NULL ); "#, - (), - )?; - connection.execute( - r#" + (), + )?; + connection.execute( + r#" INSERT OR IGNORE INTO contracts (address, name) VALUES ('0x3154Cf16ccdb4C6d922629664174b904d80F2C35', 'Base'), @@ -78,76 +78,70 @@ impl OPBridgeExEx { ('0x735aDBbE72226BD52e818E7181953f42E3b0FF21', 'Mode'), ('0x3B95bC951EE0f553ba487327278cAc44f29715E5', 'Manta'); "#, - (), - )?; + (), + )?; - info!("Initialized database tables"); + info!("Initialized database tables"); - Ok(Self { ctx, connection }) - } + Ok(()) } -impl Future for OPBridgeExEx { - type Output = eyre::Result<()>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.get_mut(); - - // Process all new chain state notifications until there are no more - while let Some(notification) = ready!(this.ctx.notifications.poll_recv(cx)) { - // If there was a reorg, delete all deposits and withdrawals that were reverted - if let Some(reverted_chain) = notification.reverted() { - let events = decode_chain_into_events(&reverted_chain); - - let mut deposits = 0; - let mut 
withdrawals = 0; - - for (_, tx, _, event) in events { - match event { - // L1 -> L2 deposit - L1StandardBridgeEvents::ETHBridgeInitiated(ETHBridgeInitiated { - .. - }) => { - let deleted = this.connection.execute( - "DELETE FROM deposits WHERE tx_hash = ?;", - (tx.hash().to_string(),), - )?; - deposits += deleted; - } - // L2 -> L1 withdrawal - L1StandardBridgeEvents::ETHBridgeFinalized(ETHBridgeFinalized { - .. - }) => { - let deleted = this.connection.execute( - "DELETE FROM withdrawals WHERE tx_hash = ?;", - (tx.hash().to_string(),), - )?; - withdrawals += deleted; - } - _ => continue, - }; - } - - info!(block_range = ?reverted_chain.range(), %deposits, %withdrawals, "Reverted chain events"); - } - - // Insert all new deposits and withdrawals - let committed_chain = notification.committed(); - let events = decode_chain_into_events(&committed_chain); +/// An example of ExEx that listens to ETH bridging events from OP Stack chains +/// and stores deposits and withdrawals in a SQLite database. +async fn op_bridge_exex( + mut ctx: ExExContext, + connection: Connection, +) -> eyre::Result<()> { + // Process all new chain state notifications + while let Some(notification) = ctx.notifications.recv().await { + if let Some(reverted_chain) = notification.reverted() { + let events = decode_chain_into_events(&reverted_chain); let mut deposits = 0; let mut withdrawals = 0; - for (block, tx, log, event) in events { + for (_, tx, _, event) in events { match event { // L1 -> L2 deposit - L1StandardBridgeEvents::ETHBridgeInitiated(ETHBridgeInitiated { - amount, - from, - to, - .. - }) => { - let inserted = this.connection.execute( + L1StandardBridgeEvents::ETHBridgeInitiated(ETHBridgeInitiated { .. }) => { + let deleted = connection.execute( + "DELETE FROM deposits WHERE tx_hash = ?;", + (tx.hash().to_string(),), + )?; + deposits += deleted; + } + // L2 -> L1 withdrawal + L1StandardBridgeEvents::ETHBridgeFinalized(ETHBridgeFinalized { .. 
}) => { + let deleted = connection.execute( + "DELETE FROM withdrawals WHERE tx_hash = ?;", + (tx.hash().to_string(),), + )?; + withdrawals += deleted; + } + _ => continue, + } + } + + info!(block_range = ?reverted_chain.range(), %deposits, %withdrawals, "Reverted chain events"); + } + + // Insert all new deposits and withdrawals + let committed_chain = notification.committed(); + let events = decode_chain_into_events(&committed_chain); + + let mut deposits = 0; + let mut withdrawals = 0; + + for (block, tx, log, event) in events { + match event { + // L1 -> L2 deposit + L1StandardBridgeEvents::ETHBridgeInitiated(ETHBridgeInitiated { + amount, + from, + to, + .. + }) => { + let inserted = connection.execute( r#" INSERT INTO deposits (block_number, tx_hash, contract_address, "from", "to", amount) VALUES (?, ?, ?, ?, ?, ?) @@ -161,16 +155,16 @@ impl Future for OPBridgeExEx { amount.to_string(), ), )?; - deposits += inserted; - } - // L2 -> L1 withdrawal - L1StandardBridgeEvents::ETHBridgeFinalized(ETHBridgeFinalized { - amount, - from, - to, - .. - }) => { - let inserted = this.connection.execute( + deposits += inserted; + } + // L2 -> L1 withdrawal + L1StandardBridgeEvents::ETHBridgeFinalized(ETHBridgeFinalized { + amount, + from, + to, + .. + }) => { + let inserted = connection.execute( r#" INSERT INTO withdrawals (block_number, tx_hash, contract_address, "from", "to", amount) VALUES (?, ?, ?, ?, ?, ?) 
@@ -184,21 +178,20 @@ impl Future for OPBridgeExEx { amount.to_string(), ), )?; - withdrawals += inserted; - } - _ => continue, - }; - } - - info!(block_range = ?committed_chain.range(), %deposits, %withdrawals, "Committed chain events"); - - // Send a finished height event, signaling the node that we don't need any blocks below - // this height anymore - this.ctx.events.send(ExExEvent::FinishedHeight(notification.tip().number))?; + withdrawals += inserted; + } + _ => continue, + }; } - Poll::Pending + info!(block_range = ?committed_chain.range(), %deposits, %withdrawals, "Committed chain events"); + + // Send a finished height event, signaling the node that we don't need any blocks below + // this height anymore + ctx.events.send(ExExEvent::FinishedHeight(notification.tip().number))?; } + + Ok(()) } /// Decode chain of blocks into a flattened list of receipt logs, and filter only @@ -232,9 +225,9 @@ fn main() -> eyre::Result<()> { reth::cli::Cli::parse_args().run(|builder, _| async move { let handle = builder .node(EthereumNode::default()) - .install_exex("OPBridge", move |ctx| async { + .install_exex("OPBridge", |ctx| async move { let connection = Connection::open("op_bridge.db")?; - OPBridgeExEx::new(ctx, connection) + init(ctx, connection).await }) .launch() .await?; From 7918759b2f7e51428fc9173faf0d1705fb1a7232 Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Fri, 12 Apr 2024 23:57:00 +0800 Subject: [PATCH 141/700] feat: Implement BlockReader::block_with_senders_range (#7402) --- .../provider/src/providers/database/mod.rs | 7 + .../src/providers/database/provider.rs | 165 ++++++++++++------ crates/storage/provider/src/providers/mod.rs | 7 + .../src/providers/static_file/manager.rs | 7 + .../storage/provider/src/test_utils/mock.rs | 7 + .../storage/provider/src/test_utils/noop.rs | 11 +- crates/storage/provider/src/traits/block.rs | 9 + 7 files changed, 159 insertions(+), 54 deletions(-) diff --git a/crates/storage/provider/src/providers/database/mod.rs 
b/crates/storage/provider/src/providers/database/mod.rs index 5d9d8ae6b25ef..a2bf883d5b177 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -331,6 +331,13 @@ impl BlockReader for ProviderFactory { fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.provider()?.block_range(range) } + + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + self.provider()?.block_with_senders_range(range) + } } impl TransactionsProvider for ProviderFactory { diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 98e3055e4577c..01c03e9564d5b 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1286,6 +1286,67 @@ impl BlockNumReader for DatabaseProvider { } } +impl DatabaseProvider { + fn process_block_range( + &self, + range: RangeInclusive, + mut assemble_block: F, + ) -> ProviderResult> + where + F: FnMut(Range, Header, Vec
, Option) -> ProviderResult, + { + if range.is_empty() { + return Ok(Vec::new()) + } + + let len = range.end().saturating_sub(*range.start()) as usize; + let mut blocks = Vec::with_capacity(len); + + let headers = self.headers_range(range)?; + let mut ommers_cursor = self.tx.cursor_read::()?; + let mut withdrawals_cursor = self.tx.cursor_read::()?; + let mut block_body_cursor = self.tx.cursor_read::()?; + + for header in headers { + // If the body indices are not found, this means that the transactions either do + // not exist in the database yet, or they do exit but are + // not indexed. If they exist but are not indexed, we don't + // have enough information to return the block anyways, so + // we skip the block. + if let Some((_, block_body_indices)) = block_body_cursor.seek_exact(header.number)? { + let tx_range = block_body_indices.tx_num_range(); + + // If we are past shanghai, then all blocks should have a withdrawal list, + // even if empty + let withdrawals = + if self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp) { + Some( + withdrawals_cursor + .seek_exact(header.number)? + .map(|(_, w)| w.withdrawals) + .unwrap_or_default(), + ) + } else { + None + }; + let ommers = + if self.chain_spec.final_paris_total_difficulty(header.number).is_some() { + Vec::new() + } else { + ommers_cursor + .seek_exact(header.number)? 
+ .map(|(_, o)| o.ommers) + .unwrap_or_default() + }; + if let Ok(b) = assemble_block(tx_range, header, ommers, withdrawals) { + blocks.push(b); + } + } + } + Ok(blocks) + } +} + impl BlockReader for DatabaseProvider { fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { if source.is_database() { @@ -1409,63 +1470,63 @@ impl BlockReader for DatabaseProvider { } fn block_range(&self, range: RangeInclusive) -> ProviderResult> { - if range.is_empty() { - return Ok(Vec::new()) - } - - let len = range.end().saturating_sub(*range.start()) as usize; - let mut blocks = Vec::with_capacity(len); + let mut tx_cursor = self.tx.cursor_read::()?; + self.process_block_range(range, |tx_range, header, ommers, withdrawals| { + let body = if tx_range.is_empty() { + Vec::new() + } else { + self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)? + .into_iter() + .map(Into::into) + .collect() + }; + Ok(Block { header, body, ommers, withdrawals }) + }) + } - let headers = self.headers_range(range)?; - let mut ommers_cursor = self.tx.cursor_read::()?; - let mut withdrawals_cursor = self.tx.cursor_read::()?; - let mut block_body_cursor = self.tx.cursor_read::()?; + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { let mut tx_cursor = self.tx.cursor_read::()?; + let mut senders_cursor = self.tx.cursor_read::()?; - for header in headers { - // If the body indices are not found, this means that the transactions either do - // not exist in the database yet, or they do exit but are - // not indexed. If they exist but are not indexed, we don't - // have enough information to return the block anyways, so - // we skip the block. - if let Some((_, block_body_indices)) = block_body_cursor.seek_exact(header.number)? { - let tx_range = block_body_indices.tx_num_range(); - let body = if tx_range.is_empty() { - Vec::new() - } else { - self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)? 
- .into_iter() - .map(Into::into) - .collect() - }; + self.process_block_range(range, |tx_range, header, ommers, withdrawals| { + let (body, senders) = if tx_range.is_empty() { + (Vec::new(), Vec::new()) + } else { + let body = self + .transactions_by_tx_range_with_cursor(tx_range.clone(), &mut tx_cursor)? + .into_iter() + .map(Into::into) + .collect::>(); + // fetch senders from the senders table + let known_senders = + senders_cursor + .walk_range(tx_range.clone())? + .collect::, _>>()?; + + let mut senders = Vec::with_capacity(body.len()); + for (tx_num, tx) in tx_range.zip(body.iter()) { + match known_senders.get(&tx_num) { + None => { + // recover the sender from the transaction if not found + let sender = tx + .recover_signer_unchecked() + .ok_or_else(|| ProviderError::SenderRecoveryError)?; + senders.push(sender); + } + Some(sender) => senders.push(*sender), + } + } - // If we are past shanghai, then all blocks should have a withdrawal list, - // even if empty - let withdrawals = - if self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp) { - Some( - withdrawals_cursor - .seek_exact(header.number)? - .map(|(_, w)| w.withdrawals) - .unwrap_or_default(), - ) - } else { - None - }; - let ommers = - if self.chain_spec.final_paris_total_difficulty(header.number).is_some() { - Vec::new() - } else { - ommers_cursor - .seek_exact(header.number)? 
- .map(|(_, o)| o.ommers) - .unwrap_or_default() - }; + (body, senders) + }; - blocks.push(Block { header, body, ommers, withdrawals }); - } - } - Ok(blocks) + Block { header, body, ommers, withdrawals } + .try_with_senders_unchecked(senders) + .map_err(|_| ProviderError::SenderRecoveryError) + }) } } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 4cd7fb6ac5952..73ea827d1d0c6 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -311,6 +311,13 @@ where fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.database.block_range(range) } + + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + self.database.block_with_senders_range(range) + } } impl TransactionsProvider for BlockchainProvider diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index e669275d9ce47..7814a709768ce 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -1103,6 +1103,13 @@ impl BlockReader for StaticFileProvider { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } + + fn block_with_senders_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult> { + Err(ProviderError::UnsupportedProvider) + } } impl WithdrawalsProvider for StaticFileProvider { diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index aa756f19513c9..db490bd37ca74 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -483,6 +483,13 @@ impl BlockReader for MockEthProvider { Ok(blocks) } + + fn block_with_senders_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult> { + Ok(vec![]) + } } impl BlockReaderIdExt for 
MockEthProvider { diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 13a2f3b4018f5..626bd535115e9 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -12,8 +12,8 @@ use reth_interfaces::provider::ProviderResult; use reth_primitives::{ stage::{StageCheckpoint, StageId}, trie::AccountProof, - Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumber, Bytecode, - ChainInfo, ChainSpec, Header, PruneCheckpoint, PruneSegment, Receipt, SealedBlock, + Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumber, BlockWithSenders, + Bytecode, ChainInfo, ChainSpec, Header, PruneCheckpoint, PruneSegment, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StorageKey, StorageValue, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, MAINNET, U256, @@ -116,6 +116,13 @@ impl BlockReader for NoopProvider { fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { Ok(vec![]) } + + fn block_with_senders_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult> { + Ok(vec![]) + } } impl BlockReaderIdExt for NoopProvider { diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index f7af38166e25d..b8ae1bdb145b9 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -133,6 +133,15 @@ pub trait BlockReader: /// /// Note: returns only available blocks fn block_range(&self, range: RangeInclusive) -> ProviderResult>; + + /// retrieves a range of blocks from the database, along with the senders of each + /// transaction in the blocks. 
+ /// + /// The `transaction_kind` parameter determines whether to return its hash + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>; } /// Trait extension for `BlockReader`, for types that implement `BlockId` conversion. From 2dcc01210c2002a5bf85f91a09721a45b6c20bb3 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 12 Apr 2024 17:34:18 +0100 Subject: [PATCH 142/700] feat(exex): add `Pool` to `ExExContext` (#7573) Co-authored-by: Matthias Seitz --- crates/exex/src/context.rs | 6 ++++-- crates/node-builder/src/builder.rs | 1 + crates/node-builder/src/exex.rs | 10 +++++----- crates/node-ethereum/tests/it/exex.rs | 10 +++++++--- examples/exex/minimal/src/main.rs | 6 +++--- examples/exex/op-bridge/src/main.rs | 6 +++--- 6 files changed, 23 insertions(+), 16 deletions(-) diff --git a/crates/exex/src/context.rs b/crates/exex/src/context.rs index 69b48f14e9347..619679e85fae7 100644 --- a/crates/exex/src/context.rs +++ b/crates/exex/src/context.rs @@ -1,4 +1,4 @@ -use reth_node_api::FullNodeTypes; +use reth_node_api::FullNodeComponents; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, @@ -12,7 +12,7 @@ use crate::ExExEvent; /// Captures the context that an ExEx has access to. #[derive(Debug)] -pub struct ExExContext { +pub struct ExExContext { /// The current head of the blockchain at launch. pub head: Head, /// The configured provider to interact with the blockchain. @@ -25,6 +25,8 @@ pub struct ExExContext { pub config: NodeConfig, /// The loaded node config pub reth_config: reth_config::Config, + /// The transaction pool of the node. + pub pool: Node::Pool, /// Channel used to send [`ExExEvent`]s to the rest of the node. 
/// /// # Important diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index 77f7c67afd01c..2c387161fb0d1 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -610,6 +610,7 @@ where data_dir: data_dir.clone(), config: config.clone(), reth_config: reth_config.clone(), + pool: transaction_pool.clone(), events, notifications, }; diff --git a/crates/node-builder/src/exex.rs b/crates/node-builder/src/exex.rs index ff2d0f84a3d18..ae9ace8cd4408 100644 --- a/crates/node-builder/src/exex.rs +++ b/crates/node-builder/src/exex.rs @@ -1,11 +1,11 @@ //! Types for launching execution extensions (ExEx). -use crate::FullNodeTypes; use futures::{future::BoxFuture, FutureExt}; use reth_exex::ExExContext; +use reth_node_api::FullNodeComponents; use std::future::Future; /// A trait for launching an ExEx. -trait LaunchExEx: Send { +trait LaunchExEx: Send { /// Launches the ExEx. /// /// The ExEx should be able to run independently and emit events on the channels provided in @@ -19,7 +19,7 @@ trait LaunchExEx: Send { type BoxExEx = BoxFuture<'static, eyre::Result<()>>; /// A version of [LaunchExEx] that returns a boxed future. Makes the trait object-safe. -pub(crate) trait BoxedLaunchExEx: Send { +pub(crate) trait BoxedLaunchExEx: Send { fn launch(self: Box, ctx: ExExContext) -> BoxFuture<'static, eyre::Result>; } @@ -30,7 +30,7 @@ pub(crate) trait BoxedLaunchExEx: Send { impl BoxedLaunchExEx for E where E: LaunchExEx + Send + 'static, - Node: FullNodeTypes, + Node: FullNodeComponents, { fn launch( self: Box, @@ -48,7 +48,7 @@ where /// resolving to an ExEx. 
impl LaunchExEx for F where - Node: FullNodeTypes, + Node: FullNodeComponents, F: FnOnce(ExExContext) -> Fut + Send, Fut: Future> + Send, E: Future> + Send, diff --git a/crates/node-ethereum/tests/it/exex.rs b/crates/node-ethereum/tests/it/exex.rs index b1f7a92f762cf..bbab6d9dc744e 100644 --- a/crates/node-ethereum/tests/it/exex.rs +++ b/crates/node-ethereum/tests/it/exex.rs @@ -1,7 +1,8 @@ use futures::future; use reth_db::test_utils::create_test_rw_db; use reth_exex::ExExContext; -use reth_node_builder::{FullNodeTypes, NodeBuilder, NodeConfig}; +use reth_node_api::FullNodeComponents; +use reth_node_builder::{NodeBuilder, NodeConfig}; use reth_node_ethereum::EthereumNode; use std::{ future::Future, @@ -9,11 +10,14 @@ use std::{ task::{Context, Poll}, }; -struct DummyExEx { +struct DummyExEx { _ctx: ExExContext, } -impl Future for DummyExEx { +impl Future for DummyExEx +where + Node: FullNodeComponents, +{ type Output = eyre::Result<()>; fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll { diff --git a/examples/exex/minimal/src/main.rs b/examples/exex/minimal/src/main.rs index 410a4a4ea5766..ff71a71aca409 100644 --- a/examples/exex/minimal/src/main.rs +++ b/examples/exex/minimal/src/main.rs @@ -1,6 +1,6 @@ use futures::Future; -use reth::builder::FullNodeTypes; use reth_exex::ExExContext; +use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; use reth_provider::CanonStateNotification; @@ -8,7 +8,7 @@ use reth_provider::CanonStateNotification; /// /// During initialization you can wait for resources you need to be up for the ExEx to function, /// like a database connection. -async fn exex_init( +async fn exex_init( ctx: ExExContext, ) -> eyre::Result>> { Ok(exex(ctx)) @@ -18,7 +18,7 @@ async fn exex_init( /// /// This ExEx just prints out whenever a state transition happens, either a new chain segment being /// added, or a chain segment being re-orged. 
-async fn exex(mut ctx: ExExContext) -> eyre::Result<()> { +async fn exex(mut ctx: ExExContext) -> eyre::Result<()> { while let Some(notification) = ctx.notifications.recv().await { match ¬ification { CanonStateNotification::Commit { new } => { diff --git a/examples/exex/op-bridge/src/main.rs b/examples/exex/op-bridge/src/main.rs index 84f2d47b134d3..6f7dd6d60f500 100644 --- a/examples/exex/op-bridge/src/main.rs +++ b/examples/exex/op-bridge/src/main.rs @@ -1,7 +1,7 @@ use alloy_sol_types::{sol, SolEventInterface}; use futures::Future; -use reth::builder::FullNodeTypes; use reth_exex::{ExExContext, ExExEvent}; +use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; use reth_primitives::{Log, SealedBlockWithSenders, TransactionSigned}; use reth_provider::Chain; @@ -14,7 +14,7 @@ use crate::L1StandardBridge::{ETHBridgeFinalized, ETHBridgeInitiated, L1Standard /// Initializes the ExEx. /// /// Opens up a SQLite database and creates the tables (if they don't exist). -async fn init( +async fn init( ctx: ExExContext, mut connection: Connection, ) -> eyre::Result>> { @@ -88,7 +88,7 @@ fn create_tables(connection: &mut Connection) -> rusqlite::Result<()> { /// An example of ExEx that listens to ETH bridging events from OP Stack chains /// and stores deposits and withdrawals in a SQLite database. 
-async fn op_bridge_exex( +async fn op_bridge_exex( mut ctx: ExExContext, connection: Connection, ) -> eyre::Result<()> { From 234d258ce75de9be410ad0b4cb316caf5c3436c7 Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Sat, 13 Apr 2024 01:05:12 +0800 Subject: [PATCH 143/700] feat: impl TryFrom for Transaction (#7551) --- Cargo.lock | 35 ++++----- Cargo.toml | 24 +++--- crates/primitives/src/transaction/mod.rs | 97 ++++++++++++++++++++++++ 3 files changed, 127 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8ce28f68565ec..55deaa145fad7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,7 +146,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=987b393#987b3936f78c067baedcf811620975d3a4a26443" +source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" dependencies = [ "alloy-eips", "alloy-primitives", @@ -178,7 +178,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=987b393#987b3936f78c067baedcf811620975d3a4a26443" +source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -196,7 +196,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=987b393#987b3936f78c067baedcf811620975d3a4a26443" +source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" dependencies = [ "alloy-primitives", "alloy-serde", @@ -218,18 +218,19 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=987b393#987b3936f78c067baedcf811620975d3a4a26443" +source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" dependencies = [ "alloy-primitives", "serde", 
"serde_json", "thiserror", + "tracing", ] [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=987b393#987b3936f78c067baedcf811620975d3a4a26443" +source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -245,7 +246,7 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=987b393#987b3936f78c067baedcf811620975d3a4a26443" +source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -287,7 +288,7 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=987b393#987b3936f78c067baedcf811620975d3a4a26443" +source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -337,7 +338,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=987b393#987b3936f78c067baedcf811620975d3a4a26443" +source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -357,7 +358,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=987b393#987b3936f78c067baedcf811620975d3a4a26443" +source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -379,7 +380,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=987b393#987b3936f78c067baedcf811620975d3a4a26443" +source = 
"git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -397,7 +398,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=987b393#987b3936f78c067baedcf811620975d3a4a26443" +source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -409,7 +410,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=987b393#987b3936f78c067baedcf811620975d3a4a26443" +source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" dependencies = [ "alloy-primitives", "serde", @@ -419,7 +420,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=987b393#987b3936f78c067baedcf811620975d3a4a26443" +source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" dependencies = [ "alloy-primitives", "async-trait", @@ -432,7 +433,7 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=987b393#987b3936f78c067baedcf811620975d3a4a26443" +source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" dependencies = [ "alloy-consensus", "alloy-network", @@ -507,7 +508,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=987b393#987b3936f78c067baedcf811620975d3a4a26443" +source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" dependencies = [ "alloy-json-rpc", "base64 0.22.0", @@ -525,7 +526,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source 
= "git+https://github.com/alloy-rs/alloy?rev=987b393#987b3936f78c067baedcf811620975d3a4a26443" +source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -7536,7 +7537,7 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=21f8f3d#21f8f3d266b05d1084e06f0c5331f2f1f4ed0905" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=413b892#413b892dd936d117c52d47ba07d195b09a7f1216" dependencies = [ "alloy-primitives", "alloy-rpc-types", diff --git a/Cargo.toml b/Cargo.toml index df9416813f9d4..cc9fbe489cd09 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -267,7 +267,7 @@ revm = { version = "8.0.0", features = [ revm-primitives = { version = "3.1.0", features = [ "std", ], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "21f8f3d" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "413b892" } # eth alloy-chains = "0.1.15" @@ -276,19 +276,19 @@ alloy-dyn-abi = "0.7.0" alloy-sol-types = "0.7.0" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "987b393" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "987b393" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "987b393" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "987b393" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "987b393" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "987b393", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } +alloy-rpc-types-engine = { git = 
"https://github.com/alloy-rs/alloy", rev = "7629f79" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "987b393" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "987b393" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "987b393" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "987b393" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "987b393" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } # misc aquamarine = "0.5" diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index a08f7775f2db7..2d3d664b2184c 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -2,6 +2,7 @@ use crate::compression::{TRANSACTION_COMPRESSOR, TRANSACTION_DECOMPRESSOR}; use crate::{keccak256, Address, BlockHashOrNumber, Bytes, TxHash, B256, U256}; +use alloy_eips::eip2718::Eip2718Error; use alloy_rlp::{ Decodable, Encodable, Error as RlpError, Header, EMPTY_LIST_CODE, EMPTY_STRING_CODE, }; @@ -10,6 +11,7 @@ use derive_more::{AsRef, Deref}; use once_cell::sync::Lazy; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use reth_codecs::{add_arbitrary_tests, derive_arbitrary, Compact}; +use reth_rpc_types::ConversionError; use 
serde::{Deserialize, Serialize}; use std::mem; @@ -612,6 +614,101 @@ impl From for Transaction { } } +impl TryFrom for Transaction { + type Error = ConversionError; + + fn try_from(tx: reth_rpc_types::Transaction) -> Result { + match tx.transaction_type { + None | Some(0) => { + // legacy + if tx.max_fee_per_gas.is_some() || tx.max_priority_fee_per_gas.is_some() { + return Err(ConversionError::Eip2718Error( + RlpError::Custom("EIP-1559 fields are present in a legacy transaction") + .into(), + )) + } + Ok(Transaction::Legacy(TxLegacy { + chain_id: tx.chain_id, + nonce: tx.nonce, + gas_price: tx.gas_price.ok_or(ConversionError::MissingGasPrice)?, + gas_limit: tx + .gas + .try_into() + .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, + to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), + value: tx.value, + input: tx.input, + })) + } + Some(1u8) => { + // eip2930 + Ok(Transaction::Eip2930(TxEip2930 { + chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, + nonce: tx.nonce, + gas_limit: tx + .gas + .try_into() + .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, + to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), + value: tx.value, + input: tx.input, + access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?.into(), + gas_price: tx.gas_price.ok_or(ConversionError::MissingGasPrice)?, + })) + } + Some(2u8) => { + // EIP-1559 + Ok(Transaction::Eip1559(TxEip1559 { + chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, + nonce: tx.nonce, + max_priority_fee_per_gas: tx + .max_priority_fee_per_gas + .ok_or(ConversionError::MissingMaxPriorityFeePerGas)?, + max_fee_per_gas: tx + .max_fee_per_gas + .ok_or(ConversionError::MissingMaxFeePerGas)?, + gas_limit: tx + .gas + .try_into() + .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, + to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), + value: tx.value, + access_list: 
tx.access_list.ok_or(ConversionError::MissingAccessList)?.into(), + input: tx.input, + })) + } + Some(3u8) => { + // EIP-4844 + Ok(Transaction::Eip4844(TxEip4844 { + chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, + nonce: tx.nonce, + max_priority_fee_per_gas: tx + .max_priority_fee_per_gas + .ok_or(ConversionError::MissingMaxPriorityFeePerGas)?, + max_fee_per_gas: tx + .max_fee_per_gas + .ok_or(ConversionError::MissingMaxFeePerGas)?, + gas_limit: tx + .gas + .try_into() + .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, + to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), + value: tx.value, + access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?.into(), + input: tx.input, + blob_versioned_hashes: tx + .blob_versioned_hashes + .ok_or(ConversionError::MissingBlobVersionedHashes)?, + max_fee_per_blob_gas: tx + .max_fee_per_blob_gas + .ok_or(ConversionError::MissingMaxFeePerBlobGas)?, + })) + } + Some(tx_type) => Err(Eip2718Error::UnexpectedType(tx_type).into()), + } + } +} + impl Compact for Transaction { // Serializes the TxType to the buffer if necessary, returning 2 bits of the type as an // identifier instead of the length. 
From 041e29347b2a01b9e971ebea988561f4dae13f30 Mon Sep 17 00:00:00 2001 From: cairo <101215230+cairoeth@users.noreply.github.com> Date: Fri, 12 Apr 2024 22:17:02 +0200 Subject: [PATCH 144/700] feat(rpc): add block timestamp to logs (#7606) --- crates/rpc/rpc/src/eth/filter.rs | 19 +++++++++++++------ crates/rpc/rpc/src/eth/logs_utils.rs | 3 ++- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 8ae7f6c55c5fe..b14c20451c7ec 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -349,11 +349,12 @@ where async fn logs_for_filter(&self, filter: Filter) -> Result, FilterError> { match filter.block_option { FilterBlockOption::AtBlockHash(block_hash) => { - // all matching logs in the block - let block_number = self + // for all matching logs in the block + // get the block header with the hash + let block = self .provider - .block_number_for_id(block_hash.into())? - .ok_or_else(|| EthApiError::UnknownBlockNumber)?; + .header_by_hash_or_number(block_hash.into())? + .ok_or(ProviderError::HeaderNotFound(block_hash.into()))?; // we also need to ensure that the receipts are available and return an error if // not, in case the block hash been reorged @@ -369,9 +370,10 @@ where &mut all_logs, &self.provider, &filter, - (block_hash, block_number).into(), + (block_hash, block.number).into(), &receipts, false, + block.timestamp, )?; Ok(all_logs) @@ -440,7 +442,10 @@ where // only one block to check and it's the current best block which we can fetch directly // Note: In case of a reorg, the best block's hash might have changed, hence we only // return early of we were able to fetch the best block's receipts - if let Some(receipts) = self.eth_cache.get_receipts(chain_info.best_hash).await? 
{ + // perf: we're fetching the best block here which is expected to be cached + if let Some((block, receipts)) = + self.eth_cache.get_block_and_receipts(chain_info.best_hash).await? + { logs_utils::append_matching_block_logs( &mut all_logs, &self.provider, @@ -448,6 +453,7 @@ where chain_info.into(), &receipts, false, + block.header.timestamp, )?; } return Ok(all_logs) @@ -487,6 +493,7 @@ where BlockNumHash::new(header.number, block_hash), &receipts, false, + header.timestamp, )?; // size check but only if range is multiple blocks, so we always return all diff --git a/crates/rpc/rpc/src/eth/logs_utils.rs b/crates/rpc/rpc/src/eth/logs_utils.rs index 6e58040f0b678..4a7a0b6ae07c4 100644 --- a/crates/rpc/rpc/src/eth/logs_utils.rs +++ b/crates/rpc/rpc/src/eth/logs_utils.rs @@ -49,6 +49,7 @@ pub(crate) fn append_matching_block_logs( block_num_hash: BlockNumHash, receipts: &[Receipt], removed: bool, + block_timestamp: u64, ) -> Result<(), FilterError> { // Tracks the index of a log in the entire block. 
let mut log_index: u64 = 0; @@ -97,7 +98,7 @@ pub(crate) fn append_matching_block_logs( transaction_index: Some(receipt_idx as u64), log_index: Some(log_index), removed, - block_timestamp: None, + block_timestamp: Some(block_timestamp), }; all_logs.push(log); } From a89add094f26431e6cf4516332aa0bb9c57f5c3e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 13 Apr 2024 08:33:21 +0200 Subject: [PATCH 145/700] chore(deps): bump alloy+evm inspectors (#7616) --- Cargo.lock | 34 +++++++++++----------- Cargo.toml | 24 +++++++-------- crates/rpc/rpc/src/eth/api/transactions.rs | 2 +- 3 files changed, 30 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 55deaa145fad7..60f43e84daa32 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,7 +146,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" +source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" dependencies = [ "alloy-eips", "alloy-primitives", @@ -178,7 +178,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" +source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -196,7 +196,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" +source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" dependencies = [ "alloy-primitives", "alloy-serde", @@ -218,7 +218,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" +source = 
"git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" dependencies = [ "alloy-primitives", "serde", @@ -230,7 +230,7 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" +source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" dependencies = [ "alloy-consensus", "alloy-eips", @@ -246,7 +246,7 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" +source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -288,7 +288,7 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" +source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -338,7 +338,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" +source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -358,7 +358,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" +source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" dependencies = [ "alloy-consensus", "alloy-eips", @@ -380,7 +380,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" +source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" dependencies = [ "alloy-consensus", "alloy-eips", @@ -398,7 +398,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" +source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -410,7 +410,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" +source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" dependencies = [ "alloy-primitives", "serde", @@ -420,7 +420,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" +source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" dependencies = [ "alloy-primitives", "async-trait", @@ -433,7 +433,7 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" +source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" dependencies = [ "alloy-consensus", "alloy-network", @@ -508,7 +508,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" +source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" dependencies = [ "alloy-json-rpc", "base64 0.22.0", 
@@ -526,7 +526,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=7629f79#7629f79e3ffb6abd0be901a06deed2ab9f695e4e" +source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -7537,7 +7537,7 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=413b892#413b892dd936d117c52d47ba07d195b09a7f1216" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=f604dc4#f604dc4d4cd1f013c8bd488b2f28356a77fa2094" dependencies = [ "alloy-primitives", "alloy-rpc-types", diff --git a/Cargo.toml b/Cargo.toml index cc9fbe489cd09..e096084155a4d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -267,7 +267,7 @@ revm = { version = "8.0.0", features = [ revm-primitives = { version = "3.1.0", features = [ "std", ], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "413b892" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "f604dc4" } # eth alloy-chains = "0.1.15" @@ -276,19 +276,19 @@ alloy-dyn-abi = "0.7.0" alloy-sol-types = "0.7.0" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } +alloy-rpc-types-trace = { git = 
"https://github.com/alloy-rs/alloy", rev = "8cb0307" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "7629f79" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } # misc aquamarine = "0.5" diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 9860191c2b151..95bf1528212dc 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -1732,7 +1732,7 @@ pub(crate) fn build_transaction_receipt_with_block_receipts( r#type: transaction.transaction.tx_type().into(), }, transaction_hash: meta.tx_hash, - transaction_index: meta.index, + transaction_index: Some(meta.index), block_hash: Some(meta.block_hash), block_number: Some(meta.block_number), from, From 3237ad2a8ca3e3810a69c3f9fd6c4e93af2f87c6 Mon Sep 17 00:00:00 2001 From: KallyDev Date: Sat, 13 Apr 2024 14:58:11 +0800 Subject: [PATCH 146/700] feat(rpc): 
add anvil and hardhat and ganache trait bindings (#7495) Co-authored-by: Matthias Seitz --- Cargo.lock | 13 ++- Cargo.toml | 1 + crates/rpc/rpc-api/src/anvil.rs | 167 ++++++++++++++++++++++++++++++ crates/rpc/rpc-api/src/ganache.rs | 75 ++++++++++++++ crates/rpc/rpc-api/src/hardhat.rs | 83 +++++++++++++++ crates/rpc/rpc-api/src/lib.rs | 6 ++ crates/rpc/rpc-types/Cargo.toml | 1 + crates/rpc/rpc-types/src/lib.rs | 4 + 8 files changed, 349 insertions(+), 1 deletion(-) create mode 100644 crates/rpc/rpc-api/src/anvil.rs create mode 100644 crates/rpc/rpc-api/src/ganache.rs create mode 100644 crates/rpc/rpc-api/src/hardhat.rs diff --git a/Cargo.lock b/Cargo.lock index 60f43e84daa32..e0c193c814a89 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -377,6 +377,16 @@ dependencies = [ "thiserror", ] +[[package]] +name = "alloy-rpc-types-anvil" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" +dependencies = [ + "alloy-primitives", + "alloy-serde", + "serde", +] + [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" @@ -3452,7 +3462,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.6", + "socket2 0.4.10", "tokio", "tower-service", "tracing", @@ -7300,6 +7310,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-rpc-types", + "alloy-rpc-types-anvil", "alloy-rpc-types-engine", "alloy-rpc-types-trace", "arbitrary", diff --git a/Cargo.toml b/Cargo.toml index e096084155a4d..7765c76920889 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -277,6 +277,7 @@ alloy-sol-types = "0.7.0" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } alloy-genesis = { git = 
"https://github.com/alloy-rs/alloy", rev = "8cb0307" } diff --git a/crates/rpc/rpc-api/src/anvil.rs b/crates/rpc/rpc-api/src/anvil.rs new file mode 100644 index 0000000000000..56416d094136f --- /dev/null +++ b/crates/rpc/rpc-api/src/anvil.rs @@ -0,0 +1,167 @@ +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; + +use reth_primitives::{Address, Bytes, B256, U256}; +use reth_rpc_types::{ + anvil::{Forking, Metadata, MineOptions, NodeInfo}, + Block, +}; + +/// Anvil rpc interface. +/// https://book.getfoundry.sh/reference/anvil/#custom-methods +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "anvil"))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "anvil"))] +pub trait AnvilApi { + /// Sends transactions impersonating specific account and contract addresses. + #[method(name = "impersonateAccount")] + async fn anvil_impersonate_account(&self, address: Address) -> RpcResult<()>; + + /// Stops impersonating an account if previously set with `anvil_impersonateAccount`. + #[method(name = "stopImpersonatingAccount")] + async fn anvil_stop_impersonating_account(&self, address: Address) -> RpcResult<()>; + + /// If set to true will make every account impersonated. + #[method(name = "autoImpersonateAccount")] + async fn anvil_auto_impersonate_account(&self, enabled: bool) -> RpcResult<()>; + + /// Returns `true` if auto mining is enabled, and `false`. + #[method(name = "getAutomine")] + async fn anvil_get_automine(&self) -> RpcResult; + + /// Mines a series of blocks. + #[method(name = "mine")] + async fn anvil_mine(&self, blocks: Option, interval: Option) -> RpcResult<()>; + + /// Enables or disables, based on the single boolean argument, the automatic mining of new + /// blocks with each new transaction submitted to the network. + #[method(name = "setAutomine")] + async fn anvil_set_automine(&self, enabled: bool) -> RpcResult<()>; + + /// Sets the mining behavior to interval with the given interval (seconds). 
+ #[method(name = "setIntervalMining")] + async fn anvil_set_interval_mining(&self, interval: u64) -> RpcResult<()>; + + /// Removes transactions from the pool. + #[method(name = "anvil_dropTransaction")] + async fn anvil_drop_transaction(&self, tx_hash: B256) -> RpcResult>; + + /// Resets the fork to a fresh forked state, and optionally update the fork config. + /// + /// If `forking` is `None` then this will disable forking entirely. + #[method(name = "reset")] + async fn anvil_reset(&self, fork: Option) -> RpcResult<()>; + + /// Sets the backend rpc url. + #[method(name = "setRpcUrl")] + async fn anvil_set_rpc_url(&self, url: String) -> RpcResult<()>; + + /// Modifies the balance of an account. + #[method(name = "setBalance")] + async fn anvil_set_balance(&self, address: Address, balance: U256) -> RpcResult<()>; + + /// Sets the code of a contract. + #[method(name = "setCode")] + async fn anvil_set_code(&self, address: Address, code: Bytes) -> RpcResult<()>; + + /// Sets the nonce of an address. + #[method(name = "setNonce")] + async fn anvil_set_nonce(&self, address: Address, nonce: U256) -> RpcResult<()>; + + /// Writes a single slot of the account's storage. + #[method(name = "setStorageAt")] + async fn anvil_set_storage_at( + &self, + address: Address, + slot: U256, + value: B256, + ) -> RpcResult; + + /// Sets the coinbase address. + #[method(name = "setCoinbase")] + async fn anvil_set_coinbase(&self, address: Address) -> RpcResult<()>; + + /// Sets the chain id. + #[method(name = "setChainId")] + async fn anvil_set_chain_id(&self, chain_id: u64) -> RpcResult<()>; + + /// Enables or disable logging. + #[method(name = "setLoggingEnabled")] + async fn anvil_set_logging_enabled(&self, enabled: bool) -> RpcResult<()>; + + /// Sets the minimum gas price for the node. + #[method(name = "setMinGasPrice")] + async fn anvil_set_min_gas_price(&self, gas_price: U256) -> RpcResult<()>; + + /// Sets the base fee of the next block. 
+ #[method(name = "setNextBlockBaseFeePerGas")] + async fn anvil_set_next_block_base_fee_per_gas(&self, base_fee: U256) -> RpcResult<()>; + + /// Sets the minimum gas price for the node. + #[method(name = "setTime")] + async fn anvil_set_time(&self, timestamp: u64) -> RpcResult; + + /// Creates a buffer that represents all state on the chain, which can be loaded to separate + /// process by calling `anvil_loadState`. + #[method(name = "dumpState")] + async fn anvil_dump_state(&self) -> RpcResult; + + /// Append chain state buffer to current chain.Will overwrite any conflicting addresses or + /// storage. + #[method(name = "loadState")] + async fn anvil_load_state(&self, state: Bytes) -> RpcResult; + + /// Retrieves the Anvil node configuration params. + #[method(name = "nodeInfo")] + async fn anvil_node_info(&self) -> RpcResult; + + /// Retrieves metadata about the Anvil instance. + #[method(name = "metadata")] + async fn anvil_metadata(&self) -> RpcResult; + + /// Snapshot the state of the blockchain at the current block. + #[method(name = "snapshot")] + async fn anvil_snapshot(&self) -> RpcResult; + + /// Revert the state of the blockchain to a previous snapshot. + /// Takes a single parameter, which is the snapshot id to revert to. + #[method(name = "revert")] + async fn anvil_revert(&self, id: U256) -> RpcResult; + + /// Jump forward in time by the given amount of time, in seconds. + #[method(name = "increaseTime")] + async fn anvil_increase_time(&self, seconds: U256) -> RpcResult; + + /// Similar to `evm_increaseTime` but takes the exact timestamp that you want in the next block. + #[method(name = "setNextBlockTimestamp")] + async fn anvil_set_next_block_timestamp(&self, seconds: u64) -> RpcResult<()>; + + /// Sets the next block gas limit. + #[method(name = "setBlockGasLimit")] + async fn anvil_set_block_gas_limit(&self, gas_limit: U256) -> RpcResult; + + /// Sets an interval for the block timestamp. 
+ #[method(name = "setBlockTimestampInterval")] + async fn anvil_set_block_timestamp_interval(&self, seconds: u64) -> RpcResult<()>; + + /// Sets an interval for the block timestamp. + #[method(name = "removeBlockTimestampInterval")] + async fn anvil_remove_block_timestamp_interval(&self) -> RpcResult; + + /// Mine blocks, instantly and return the mined blocks. + /// + /// This will mine the blocks regardless of the configured mining mode. + /// + /// **Note**: This behaves exactly as `evm_mine` but returns different output, for + /// compatibility reasons, this is a separate call since `evm_mine` is not an anvil original. + /// and `ganache` may change the `0x0` placeholder. + #[method(name = "mine_detailed")] // This method requires using `snake_case`. + async fn anvil_mine_detailed(&self, opts: Option) -> RpcResult>; + + /// Turn on call traces for transactions that are returned to the user when they execute a + /// transaction (instead of just txhash/receipt). + #[method(name = "enableTraces")] + async fn anvil_enable_traces(&self) -> RpcResult<()>; + + /// Removes all transactions for that address from the transaction pool. + #[method(name = "removePoolTransactions")] + async fn anvil_remove_pool_transactions(&self, address: Address) -> RpcResult<()>; +} diff --git a/crates/rpc/rpc-api/src/ganache.rs b/crates/rpc/rpc-api/src/ganache.rs new file mode 100644 index 0000000000000..0156f074acee7 --- /dev/null +++ b/crates/rpc/rpc-api/src/ganache.rs @@ -0,0 +1,75 @@ +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use reth_primitives::U256; +use reth_rpc_types::anvil::MineOptions; + +/// Ganache rpc interface. +/// https://github.com/trufflesuite/ganache/tree/develop/docs +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "evm"))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "evm"))] +pub trait GanacheApi { + // TODO Ganache is deprecated and this method is not implemented by Anvil and Hardhat. 
+ // #[method(name = "addAccount")] + // async fn evm_add_account(&self, address: Address, passphrase: B256) -> RpcResult; + + /// Jump forward in time by the given amount of time, in seconds. + /// + /// Returns the total time adjustment, in seconds. + #[method(name = "increaseTime")] + async fn evm_increase_time(&self, seconds: U256) -> RpcResult; + + /// Force a single block to be mined. + /// + /// Mines a block independent of whether or not mining is started or stopped. Will mine an empty + /// block if there are no available transactions to mine. + /// + /// Returns "0x0". May return additional meta-data in the future. + #[method(name = "mine")] + async fn evm_mine(&self, opts: Option) -> RpcResult; + + // TODO Ganache is deprecated and this method is not implemented by Anvil and Hardhat. + // #[method(name = "removeAccount")] + // async fn evm_remove_account(address: Address, passphrase: B256) -> RpcResult; + + /// Revert the state of the blockchain to a previous snapshot. Takes a single parameter, which + /// is the snapshot id to revert to. This deletes the given snapshot, as well as any snapshots + /// taken after (e.g.: reverting to id 0x1 will delete snapshots with ids 0x1, 0x2, etc.). + /// + /// Reutnrs `true` if a snapshot was reverted, otherwise `false`. + #[method(name = "revert")] + async fn evm_revert(&self, snapshot_id: U256) -> RpcResult; + + // TODO Ganache is deprecated and this method is not implemented by Anvil and Hardhat. + // #[method(name = "setAccountBalance")] + // async fn evm_set_account_balance(address: Address, balance: U256) -> RpcResult; + + // TODO Ganache is deprecated and this method is not implemented by Anvil and Hardhat. + // #[method(name = "setAccountCode")] + // async fn evm_set_account_code(address: Address, code: Bytes) -> RpcResult; + + // TODO Ganache is deprecated and this method is not implemented by Anvil and Hardhat. 
+ // #[method(name = "setAccountNonce")] + // async fn evm_set_account_nonce(address: Address, nonce: U256) -> RpcResult; + + // TODO Ganache is deprecated and this method is not implemented by Anvil and Hardhat. + // #[method(name = "setAccountStorageAt")] + // async fn evm_set_account_storage_at(address: Address, slot: U256, value: B256) -> + // RpcResult; + + /// Sets the internal clock time to the given timestamp. + /// + /// **Warning** This will allow you to move backwards in time, which may cause new blocks to + /// appear to be mined before old blocks. This will result in an invalid state. + /// + /// Returns the amount of seconds between the given timestamp and now. + #[method(name = "setTime")] + async fn evm_set_time(&self, timestamp: u64) -> RpcResult; + + /// Snapshot the state of the blockchain at the current block. Takes no parameters. Returns the + /// id of the snapshot that was created. A snapshot can only be reverted once. After a + /// successful evm_revert, the same snapshot id cannot be used again. Consider creating a new + /// snapshot after each evm_revert if you need to revert to the same point multiple times. + /// + /// Returns the hex-encoded identifier for this snapshot. + #[method(name = "snapshot")] + async fn evm_snapshot(&self) -> RpcResult; +} diff --git a/crates/rpc/rpc-api/src/hardhat.rs b/crates/rpc/rpc-api/src/hardhat.rs new file mode 100644 index 0000000000000..1620bdb59671a --- /dev/null +++ b/crates/rpc/rpc-api/src/hardhat.rs @@ -0,0 +1,83 @@ +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use reth_primitives::{Address, Bytes, B256, U256}; +use reth_rpc_types::anvil::{Forking, Metadata}; + +/// Hardhat rpc interface. 
+/// https://hardhat.org/hardhat-network/docs/reference#hardhat-network-methods +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "hardhat"))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "hardhat"))] +pub trait HardhatApi { + /// Removes the given transaction from the mempool, if it exists. + /// + /// Returns `true` if successful, otherwise `false`. + #[method(name = "hardhat_dropTransaction")] + async fn hardhat_drop_transaction(&self, tx_hash: B256) -> RpcResult; + + /// Allows Hardhat Network to sign transactions as the given address. + #[method(name = "impersonateAccount")] + async fn hardhat_impersonate_account(&self, address: Address) -> RpcResult<()>; + + /// Returns `true` if automatic mining is enabled, and `false` otherwise. + #[method(name = "getAutomine")] + async fn hardhat_get_automine(&self) -> RpcResult; + + /// Returns an object with metadata about the instance of the Hardhat network. + #[method(name = "metadata")] + async fn hardhat_metadata(&self) -> RpcResult; + + /// Mines a specified number of blocks at a given interval. + #[method(name = "mine")] + async fn hardhat_mine(&self, blocks: Option, interval: Option) -> RpcResult<()>; + + /// Resets back to a fresh forked state, fork from another block number or disable forking. + #[method(name = "reset")] + async fn hardhat_reset(&self, fork: Option) -> RpcResult<()>; + + /// Sets the balance for the given address. + #[method(name = "setBalance")] + async fn hardhat_set_balance(&self, address: Address, balance: U256) -> RpcResult<()>; + + /// Modifies the bytecode stored at an account's address. + #[method(name = "setCode")] + async fn hardhat_set_code(&self, address: Address, code: Bytes) -> RpcResult<()>; + + /// Sets the coinbase address to be used in new blocks. + #[method(name = "setCoinbase")] + async fn hardhat_set_coinbase(&self, address: Address) -> RpcResult<()>; + + /// Enables or disables logging. 
+ #[method(name = "setLoggingEnabled")] + async fn hardhat_set_logging_enabled(&self, enabled: bool) -> RpcResult<()>; + + /// Changes the minimum gas price accepted by the network (in wei). + #[method(name = "setMinGasPrice")] + async fn hardhat_set_min_gas_price(&self, gas_price: U256) -> RpcResult<()>; + + /// Sets the base fee of the next block. + #[method(name = "setNextBlockBaseFeePerGas")] + async fn hardhat_set_next_block_base_fee_per_gas( + &self, + base_fee_per_gas: U256, + ) -> RpcResult<()>; + + /// Sets the `PREVRANDAO` value of the next block. + #[method(name = "setPrevRandao")] + async fn hardhat_set_prev_randao(&self, prev_randao: B256) -> RpcResult<()>; + + /// Modifies an account's nonce by overwriting it. + #[method(name = "setNonce")] + async fn hardhat_set_nonce(&self, address: Address, nonce: U256) -> RpcResult<()>; + + /// Writes a single position of an account's storage. + #[method(name = "setStorageAt")] + async fn hardhat_set_storage_at( + &self, + address: Address, + slot: U256, + value: B256, + ) -> RpcResult<()>; + + /// Stops impersonating the given address. 
+ #[method(name = "stopImpersonatingAccount")] + async fn hardhat_stop_impersonating_account(&self, address: Address) -> RpcResult<()>; +} diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index ec8757d836960..f97c0b19feff4 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -15,12 +15,15 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod admin; +mod anvil; mod bundle; mod debug; mod engine; mod eth; mod eth_filter; mod eth_pubsub; +mod ganache; +mod hardhat; mod mev; mod net; mod otterscan; @@ -65,11 +68,14 @@ pub use clients::*; pub mod clients { pub use crate::{ admin::AdminApiClient, + anvil::AnvilApiClient, bundle::{EthBundleApiClient, EthCallBundleApiClient}, debug::DebugApiClient, engine::{EngineApiClient, EngineEthApiClient}, eth::EthApiClient, eth_filter::EthFilterApiClient, + ganache::GanacheApiClient, + hardhat::HardhatApiClient, mev::MevApiClient, net::NetApiClient, otterscan::OtterscanClient, diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml index dfb27e9a7fa08..5f87e9482daa7 100644 --- a/crates/rpc/rpc-types/Cargo.toml +++ b/crates/rpc/rpc-types/Cargo.toml @@ -16,6 +16,7 @@ workspace = true alloy-rlp = { workspace = true, features = ["arrayvec", "derive"] } alloy-primitives = { workspace = true, features = ["rand", "rlp", "serde"] } alloy-rpc-types = { workspace = true, features = ["jsonrpsee-types"] } +alloy-rpc-types-anvil.workspace = true alloy-rpc-types-trace.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["jsonrpsee-types"] } ethereum_ssz_derive = { version = "0.5", optional = true } diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs index 45ab62f0ca59c..964144ed65bfc 100644 --- a/crates/rpc/rpc-types/src/lib.rs +++ b/crates/rpc/rpc-types/src/lib.rs @@ -27,6 +27,10 @@ pub mod trace { //! RPC types for trace endpoints and inspectors. 
pub use alloy_rpc_types_trace::*; } + +// Anvil specific rpc types coming from alloy. +pub use alloy_rpc_types_anvil as anvil; + // Ethereum specific rpc types related to typed transaction requests and the engine API. pub use eth::{ engine, From 987e597442d2816ebc9ba20bdf9e4dc067fc547d Mon Sep 17 00:00:00 2001 From: jn Date: Sat, 13 Apr 2024 05:53:42 -0700 Subject: [PATCH 147/700] Implement compact codec for alloy eip access_list (#7554) --- Cargo.lock | 1 + .../primitives/src/transaction/access_list.rs | 27 + crates/storage/codecs/Cargo.toml | 2 + .../storage/codecs/src/alloy/access_list.rs | 99 ++ crates/storage/codecs/src/alloy/log.rs | 3 +- crates/storage/codecs/src/alloy/mod.rs | 1 + .../codecs/testdata/access_list_compact.json | 1055 +++++++++++++++++ 7 files changed, 1187 insertions(+), 1 deletion(-) create mode 100644 crates/storage/codecs/src/alloy/access_list.rs create mode 100644 crates/storage/codecs/testdata/access_list_compact.json diff --git a/Cargo.lock b/Cargo.lock index e0c193c814a89..9ca68955e9d36 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6240,6 +6240,7 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.5" dependencies = [ + "alloy-eips", "alloy-primitives", "arbitrary", "bytes", diff --git a/crates/primitives/src/transaction/access_list.rs b/crates/primitives/src/transaction/access_list.rs index a20ac67cf420c..c15646b4be5ee 100644 --- a/crates/primitives/src/transaction/access_list.rs +++ b/crates/primitives/src/transaction/access_list.rs @@ -173,3 +173,30 @@ impl From for reth_rpc_types::AccessList { ) } } + +#[cfg(test)] +mod tests { + use super::*; + + use proptest::proptest; + + proptest!( + #[test] + fn test_roundtrip_accesslist_conversion(access_list: AccessList) { + // Convert access_list to buffer and then create alloy_access_list from buffer and + // compare + let mut compacted_access_list = Vec::::new(); + let len = access_list.clone().to_compact(&mut compacted_access_list); + + let alloy_access_list = 
AccessList::from_compact(&compacted_access_list, len).0; + assert_eq!(access_list, alloy_access_list); + + // Create alloy_access_list from access_list and then convert it to buffer and compare + // compacted_alloy_access_list and compacted_access_list + let alloy_access_list = AccessList(access_list.0); + let mut compacted_alloy_access_list = Vec::::new(); + let _len = alloy_access_list.to_compact(&mut compacted_alloy_access_list); + assert_eq!(compacted_access_list, compacted_alloy_access_list); + } + ); +} diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 3c991d1411a08..b9cba081151af 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -13,10 +13,12 @@ workspace = true [dependencies] reth-codecs-derive = { path = "./derive", default-features = false } +alloy-eips.workspace = true alloy-primitives.workspace = true bytes.workspace = true [dev-dependencies] +alloy-eips = { workspace = true, features = ["arbitrary", "serde"] } alloy-primitives = { workspace = true, features = ["arbitrary", "serde"] } serde.workspace = true modular-bitfield = "0.11.2" diff --git a/crates/storage/codecs/src/alloy/access_list.rs b/crates/storage/codecs/src/alloy/access_list.rs new file mode 100644 index 0000000000000..22439f827a7bb --- /dev/null +++ b/crates/storage/codecs/src/alloy/access_list.rs @@ -0,0 +1,99 @@ +use crate::Compact; +use alloy_eips::eip2930::{AccessList, AccessListItem}; +use alloy_primitives::Address; + +/// Implement `Compact` for `AccessListItem` and `AccessList`. 
+impl Compact for AccessListItem { + fn to_compact(self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let mut buffer = bytes::BytesMut::new(); + self.address.to_compact(&mut buffer); + self.storage_keys.specialized_to_compact(&mut buffer); + let total_length = buffer.len(); + buf.put(buffer); + total_length + } + + fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { + let (address, new_buf) = Address::from_compact(buf, buf.len()); + buf = new_buf; + let (storage_keys, new_buf) = Vec::specialized_from_compact(buf, buf.len()); + buf = new_buf; + let access_list_item = AccessListItem { address, storage_keys }; + (access_list_item, buf) + } +} + +impl Compact for AccessList { + fn to_compact(self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let mut buffer = bytes::BytesMut::new(); + self.0.to_compact(&mut buffer); + let total_length = buffer.len(); + buf.put(buffer); + total_length + } + + fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { + let (access_list_items, new_buf) = Vec::from_compact(buf, buf.len()); + buf = new_buf; + let access_list = AccessList(access_list_items); + (access_list, buf) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::Bytes; + use proptest::proptest; + use serde::Deserialize; + + proptest! { + #[test] + fn test_roundtrip_compact_access_list_item(access_list_item: AccessListItem) { + let mut compacted_access_list_item = Vec::::new(); + let len = access_list_item.clone().to_compact(&mut compacted_access_list_item); + + let (decoded_access_list_item, _) = AccessListItem::from_compact(&compacted_access_list_item, len); + assert_eq!(access_list_item, decoded_access_list_item); + } + } + + proptest! 
{ + #[test] + fn test_roundtrip_compact_access_list(access_list: AccessList) { + let mut compacted_access_list = Vec::::new(); + let len = access_list.clone().to_compact(&mut compacted_access_list); + + let (decoded_access_list, _) = AccessList::from_compact(&compacted_access_list, len); + assert_eq!(access_list, decoded_access_list); + } + } + + #[derive(Deserialize)] + struct CompactAccessListTestVector { + access_list: AccessList, + encoded_bytes: Bytes, + } + + #[test] + fn test_compact_access_list_codec() { + let test_vectors: Vec = + serde_json::from_str(include_str!("../../testdata/access_list_compact.json")) + .expect("Failed to parse test vectors"); + + for test_vector in test_vectors { + let mut buf = Vec::::new(); + let len = test_vector.access_list.clone().to_compact(&mut buf); + assert_eq!(test_vector.encoded_bytes.0, buf); + + let (decoded, _) = AccessList::from_compact(&test_vector.encoded_bytes, len); + assert_eq!(test_vector.access_list, decoded); + } + } +} diff --git a/crates/storage/codecs/src/alloy/log.rs b/crates/storage/codecs/src/alloy/log.rs index 3ac2851aa3034..762a1eae0234a 100644 --- a/crates/storage/codecs/src/alloy/log.rs +++ b/crates/storage/codecs/src/alloy/log.rs @@ -77,7 +77,8 @@ mod tests { #[test] fn test_compact_log_codec() { let test_vectors: Vec = - serde_json::from_str(include_str!("../../testdata/log_compact.json")).unwrap(); + serde_json::from_str(include_str!("../../testdata/log_compact.json")) + .expect("Failed to parse test vectors"); for test_vector in test_vectors { let log_data = LogData::new_unchecked(test_vector.topics, test_vector.data); diff --git a/crates/storage/codecs/src/alloy/mod.rs b/crates/storage/codecs/src/alloy/mod.rs index aad31d7a62c0a..5785546c319e6 100644 --- a/crates/storage/codecs/src/alloy/mod.rs +++ b/crates/storage/codecs/src/alloy/mod.rs @@ -1 +1,2 @@ +mod access_list; mod log; diff --git a/crates/storage/codecs/testdata/access_list_compact.json 
b/crates/storage/codecs/testdata/access_list_compact.json new file mode 100644 index 0000000000000..4888f309b6c84 --- /dev/null +++ b/crates/storage/codecs/testdata/access_list_compact.json @@ -0,0 +1,1055 @@ +[ + { + "access_list": [], + "encoded_bytes": "0x00" + }, + { + "access_list": [ + { + "address": "0x4d221c07fcedf250e2f0dd5ed06bffc1847658db", + "storageKeys": [ + "0x2638dd977ab8e00e99343317e39bc38526dda3f37b955e07757b75e5abe1963a" + ] + }, + { + "address": "0x89d8a495c8bf636ff93d22921936f5b6d90f45e5", + "storageKeys": [ + "0xc6dc5fe61dec594cc449317a3c4203405a56b3ab32a63a1c9a7593862b6dee4b", + "0xa6f2829a9e0ae7e1f85298507777055910aeeea2a8e3ae5b642cddba912ce50f", + "0xfd3d51ce8515c534f956c54f9f39740e6840b39228d74f24227cb6912dacd7d4", + "0x23085ecb129b8b8d8f724c41008fab05d4e1ef0a5a46c8e4f3fd04a2d0b5162e", + "0x97f427fc34307bc32d064c1dd72a2cdebff69a175578b4f032c8a9488e54a3f4", + "0x04521b98cb79afd4d846e2d065581e19327fde8a5134c975593448a30af45649", + "0xd040157efbb241db690c93a64eda804dcbb11c0f7b97b4174a9ec12a3341893b", + "0x05434975204e501feb9d5b817d3cffb896f7b06a4aa68dbb9977d824238f390a", + "0xf7f3c93f1eec9ce223c9e5ded42bd7e2d82974d563197c1f7ca4ee2f2dbd1694", + "0x18d0391e08ec558d9db425dc21aa6c5d2b91aa62ea66cbe448fc13c7416c3212", + "0x4295be81d66314bde15e007f95e32dfc4100ae74863b326c036e4ab0e83cd375", + "0x55851c2baa77ff84c8cf41ef5417f689f6a11726c926514b15216c0343c05878", + "0x0309a8dc6df5109b5295e4d1cbcef38ba4be4a2e8cdb3552ff0bdfc18f41e872", + "0xcea248b6a2d26fce1a05db50f48ec280591477c789514e999c29c4108ccd6b73", + "0x9a3636c1c9cc3d14440ffeb7fe6bd7021d59858e1a47a38e7c2856656397d772", + "0x8e5938d47b031fd9b706ee403a041d5d4bea72aa2f1d0ddeb0ff3db04e917e72" + ] + }, + { + "address": "0xf6ff0ab6af64a1b5504191b7a681d293a1baa59a", + "storageKeys": [ + "0xde9e0489198d158969094d1877ac98cac440b64e209edaa59b79ac2b9ef63293", + "0x2682292d3d4484cc8531b5adf7dcf328a067049516c1dcfb0a4e29ce1fe6f356", + "0xbfcb8e6cfe92065059bd71fe976049f295a52a96ae10d16a636c7bfbfe96b6a5", + 
"0x0afac090a410f33766695856f193bcad26c5875423d14b4a702deba18f682f4c", + "0x381652ca0056dd989b2db23a5126613c18331969907072fbc2af3dd8c408d53b", + "0xfe689c484c1f7bb066349f75ff7648db9dc590570b96aa4864e230f13b6a6921", + "0xab2e8c54acf81a763c749f06b9299109d0269c81af4853a6bb6c74bfbd0e0495", + "0x0ec7dd5b851332d88a33bacec91d03073c44940b04d7707b0ea525fc7b072180", + "0x699ed29d5e8c2da83e5f4e1d3ed64cafff5d8f98240d65aed624ec5ec314b5f4", + "0xae50b75b2a66c04aa39cbfbb5483a039b63a5c0e69b0b1b689f3ec335a113add", + "0xf79ea1ca7e2d785861a40e6f45260388c3e33a254ae29d3d92625021bbb02c82", + "0x91a4617ac2e57c12491043d613966d26a794e17dfe29410efad6abcc64ed8005", + "0x53333d423b42c3d3aec2b082c950c7afd387ae9f594dd686e3bcfc803d0fd00c", + "0x67eea65c204fde533f38d94229eb752538ac5e1073b52e0d9105c41a5d418675" + ] + }, + { + "address": "0xb017dfab75745cd666c3e9b06b9aca9f5eb98639", + "storageKeys": [ + "0x1c9b4fd8558758383abb53e1ec6336610d5c122205a107ae32fd98b45b3e5f50", + "0x88def78728ea135312257a5bd0ffc46e5b1c1d4c2ba324de1a6a8feda586533b", + "0xcbc9214dfef2f4a0c36294afdf1082ee3dd7ad53e47420343097bfc1f71b23cc", + "0xeaecc195d4d1f6810019a4ed1929749b1050ff8f309690d8733fddcbba50a7a3", + "0x60b8ae6a1920ca414716df5f76ab4e5e2f1cf0eb1380da62934e0e763190fb76", + "0xbdd40ab2afc181457f54a0f57c0ed5310ab05a317e297c9c6bba2c3e7e5f7a02", + "0x44cbde6f2b04d3e2ee1d9fa2f86f576a2b166b84156a4ffb241ac93e3d3b8b97" + ] + }, + { + "address": "0x40be29fa621491603f88161da0ce4ec7cd60008b", + "storageKeys": [ + "0xc376956085b1cc24743cd04f1fc0c8569491fc87fae64590de41fc429e4de4ba" + ] + }, + { + "address": "0xf747b4f1ad98db061f15b4462354322879902fef", + "storageKeys": [ + "0xf090d8b712d7fbbd4f0b55ffe57c3a714a278834a32253eb4d71deac562e22ea", + "0xb83e30f1792f2b7f8a7d4fd2ff9aa00ea0a10b377a6793847a25217da18c4c95", + "0xe3c385c6a51c6d79bb5c005dc07ebabc222e8a19b64ef84abcece223f983da13", + "0x591ce90877de7ba5808ae3472de6fdc74eb7940e6bc43853dd1bacd663fcd3db", + "0xc78eb091ae8111e290b8c0e3d7576e0d0b6eda6864836a8bc48d4bcbeb5e7550", + 
"0x8eb42191b461459a35adfc91a17b32ac69a95574564051adda2f7bcb3b9ec87f", + "0x474fad93b36d97bebf443481185b5ba4ec3b03f4e994d960f0de6f158ed66446", + "0xb2a3036d3717fef3a94bd41aebe5e0708a0e9af89b6443c18489d2246214c618", + "0x62f1ed88f3ce40a8e10320defa170c788603bce779668f7c09dca3f72f59c6a9", + "0xa769f95f405054ef07c5f943128641b37868a397c84fff26f8774eb6d5c9607a", + "0x11a9bf2dfc41d2dd5a59281cc0d2e038cdec42d387c2fd280be2635ef1faf7d0", + "0xb33191c06c3a5148a9e674a445a1d40e442c6b20714255dbb5039096cd42699f", + "0x85400a82161a4119b58cc0c6e9edca6e7cee3972238e4227a6507bbfa67e0c4e" + ] + }, + { + "address": "0x5ee650573e7939ddf76b30348ee236a98157d3f6", + "storageKeys": [ + "0x66298c01e409468f4f0dbb7a77a64bac1821fb228caa98a9bce15791f948b837", + "0x9582da38cb9e8a327683c482c7e86ca541944762cc01305e7cd11862fcbe871c", + "0xf6300b9f1924c88692ed85e65a38a602f5fc6295730922619a1f18dfa6e42627", + "0x94b1bd2aa170f94b9970f32af4563501f9a195d4807533f05b90c4398d1591b6", + "0x98b525f055a51ce5a4221917627c3b96a6ff5741c121eb2d61a1133e0d83571b", + "0x35f67d67b72c9c8533e058952b527670095fe8824f07ea0d6e0dbad309176d92", + "0x857dfc4d5d3d13e73c396d5203975e8d5379cd209698bddff646990202973e39", + "0x73cf30bf1af72b46297e7466bfee2ff9d8621904bbc476053149d13fc812a2a2" + ] + }, + { + "address": "0x9e0b782fceffe70c89f8c63740a6ac74c5934700", + "storageKeys": [ + "0xd31fa571b6892341cceede1a69f962731aa513bafc7b5451cb648b795fa3d257", + "0x066cc6bfa2e442fafe113e1848a9738d737d703a36cd024daccdc4db2b4b70dd", + "0x6207c14dd2436e78e49b49589bbbb317a77f9477682c2aeb938fab694be47864", + "0x2ab6873a09ac6c73c26931e247e85cbad133e7baf51fe7bec2e6a1e50930c57e", + "0xfab7da434fb535915da2d107706522461b3fcaadaece2774129d422e99265108", + "0xb9be7c9056eb93896a67ce2a7dbde84bd227de695200ba8564e7a09a1ae75727", + "0x88c5290db5fd6012d911c21ff9a5a835fd1947fd835976ee44292990a9d7f386", + "0xf1986dab5921eab3bfd50fcdc6ad18ae233c7ef01c9e52927967e30c3036d0e2", + "0x92effcde94c65bead7b83319d346f0a219808ec8e246e1650ec18af75c60bc0e", + 
"0xb358efcba2d5b35215828d59ee631f5157bffc1fbbf6081eca4ed2d23c1a2445", + "0xdc8ba3d7ab3ced201a1569f8c4d71e00d020c2ca617579487860ee8dbded351e", + "0xf6e2eada035e39cf5793355f12b807835f9c2f31b79370e0d55555505507bec0", + "0xbd2252c2ec58031a18f1935a8b7120a9c1e5283368f4ae08b323b330ae9aa479" + ] + }, + { + "address": "0x7fca19be0c9c232adcecff0046b4c9409497134c", + "storageKeys": [ + "0x96568a9bc2933bd8f650abfec40226de34e71d0d4dc656c06ad3bd0262054020", + "0x5724d4d610561f4f6094a95b0934c298caddf8f11727d7c73060ff024445929c", + "0xe69d59c243271085d231330b2485e73c12dc7561f0a7023aae55a3be3fc91aa1", + "0xebe395473c4a71122defcb8dfa178f69dc10bd1d1f0eb96d4f9e476187e98a55", + "0xd64fccbbccf37f6f55bfd92ddf0d84b7f2c4b66b462776adb474454557dbc6c1", + "0xa41c92281448eb6d7af07dd8c603ac3613b5217d8cc888319a25b546e9e82b81", + "0x75775110e0ccb802c8d6bef975eb034762ddee6a58d5abeba89365acb43a0eda", + "0x4c361ca296ea618efc9bb14a278a35506e25441018de08dd8f64afea873bf05e" + ] + }, + { + "address": "0x87416faed43a04c5e290596d5e2604607cd98702", + "storageKeys": [ + "0xe3e11ca4c6b919afe8d8f49bc9b186e03396711abf987d7cd73e015c626140d9", + "0x2c2bfcca1fb1ea67d4731f0887283812a77bbbd73138efb4811eb64a61fdf617", + "0x10e1e6b7b2d142e0459ee2f2ae7046546cad54cb02bd900fa0ee6b719bc95d47", + "0xe125d39facfe152856dca666b1239f3d94eeb7c5ff2732d74f5684cc34584874", + "0xbe22b6f2320be8d843160d0718630d7a79c7c74309eca4551e5fa35f322a86e2", + "0xc274ead74d1caae775e7f727be2c4cdd12eff69b34c51f2727ca9a24a50d4cf8", + "0x3e9027b02926063ac57fae10ce1bd980edcc79d416c1a31aaaf5442a9237359b", + "0xe73e1224e183f06144a6cda4ff06524d68eda57414e029c35eb5d774f4d2f10f", + "0x5b375a1419fe19f1c622242c6d6bb145bf601bab143bfd62bdefdd3d1c137af3", + "0x7a5d4c72a5a1f227987ae4286152e1b1378863368af9e4fabd65c8220002f900", + "0xb7adc1020b3b2ad495f0c730cdc63a85275e9f351f0ac74a3994be327522d06d", + "0xbdfa2645962828c70295d2100fb5fe8d2f27b0e7f3270b629cadcb999a4a7401" + ] + }, + { + "address": "0xa2a8c38eda12e4a0b144b7b7765ac7e04e9bf318", + "storageKeys": [ + 
"0x2d9dc530cb4b9aa650d01629f2ceeb4b462637cdaac695b75ef0439363b068f1", + "0xa2e24cfdb499fb40a864e28ca1f8405bf3c3b036b25b8e27102b583c4ffa7901", + "0x8975117ea9c911d46ca8f4069a98a696cc4e455e3d1d9dd0cdecad8e9c3cf613", + "0x2bd97c132863c47a77c8186cf8898cb8c56e6975569b61508d8dd8994b3629cc", + "0xdfe8333242c0385a690852beb733b8ac71e68be82d58044f9dccbe1fae9f370c", + "0x385cb502ccea548e310e5b8c1145ecdbe6f3988e406f7af1f3d910b6355c4cae" + ] + }, + { + "address": "0x3c8008a5438297e755025e7a5093d4f79ef01b3a", + "storageKeys": [ + "0x868ecdd24f5abf256a80a5632d22ea6130305656256f07653d7086ca85bb300d", + "0x5077752df93d48467b3fd935e7a28fa2533d0701586bfa10806b1cc696532894", + "0x77213cb4736b8dd4c72c19cc27a60e98cd61076e1a82ce8724e51c98eb3fae40", + "0x0481104bc4f4e61342f83d841feb77a20777df2b0db76de686c5800ddc8af241" + ] + }, + { + "address": "0xa8d8d3a926d45f2f8ff4dbc802c5ae0f427eaca9", + "storageKeys": [ + "0x9cdebc417c6ecfd1a0ecf864db3ed6bd56fa5865fe0e12c0737c54bc57d71b09", + "0x3e3aa36fa74e80ff855917d8a5f5ef118beae2d8387bea44a1116fbe15544fe1", + "0x9abdd3f21eb8c78ab3906967ab08c69a41f4fffd2a9da28156c3d4c5a71708b4" + ] + }, + { + "address": "0x5b40f278cae65ee022466fb031af5e52c4c45dec", + "storageKeys": [ + "0xf6ab6ffe4f4b0d67f595c7102cccca88045a37728df3c2f9ca7a72863a10b4c2", + "0x278c02bf1f8cd16f6e61bf1d41575a7380107fceabfcdbf7d5ae5e1db396017f", + "0x96e196db77cf33de8ea5cd78749ee9d9a219dc3f96833d3cd2ced91bb3721f55", + "0xe2b6f28461eb66e038c5c0cb243ce253ecddb88c32c40cbfb465a7096cef69bb", + "0xbb5661293eed9ce5c483aa0ac82399ef6dc3058e214531cb529b0ccea99493f4", + "0x2f6dad10edf29c2a65339d468ba4e7990056bef4348801593353352417e3e167", + "0xbb7af0590beee112fc212990c2c5d79666e4f9fdac98158c602b28045e30d211", + "0x9cac9f242a175e4c31341904379a6c5d18c1771da6451e60c3ca9e32ac444a82", + "0xbf4981d07ecad203cb6a4ee49a7767f59e2b478318eda0f8af35f8cf4c136f71", + "0x4ab1c3d7aeb45c1c094aa60dbfa4e1218dacfc3fdc1b316d2be171dec26e0950", + "0x0fab0a28482ae076ae977dc4b2d43d5906c327346b8498b77791a249d6d51fec" + ] 
+ }, + { + "address": "0x86378d39e1e1a3c7a2e424fab3b9ceb3f3b3b967", + "storageKeys": [ + "0xf32da26ecf190162a57fbc2257fa896c97bd5c4bb53f6338ce09c77bc017fce5", + "0xb489dad7d3a237209ccd8780ff3a91c7410b91d7450342e08187ba132fd8647a", + "0x8f8a59f85f6110870577ca168426a4a98befd34d7264a579c4a7d2a914b530f5", + "0xbf1b4b743c969564b906b2629f927c78bbfcb5617d3ace5bf4d3ae09e799af4a", + "0x8f3a85aa7fd574acbe6fdb580c20d6005c395a35b51b01be8262cffb83ef9519" + ] + }, + { + "address": "0xb65895524678ee60a115d82dce614d047fbf976c", + "storageKeys": [ + "0x61bf717fe3d7f1077fc0fc0fe924c5582bdfd9d1e25d24c9c691b0b7fadc1d24", + "0x803f4fa9bb795785f30e58618566c8c4c129d96e75f17f5e77cb91cf00954c44", + "0xd8ad01919de4305a3cd8f5c3cee9f97df7ab43d521626a44a2a6190c06083e95", + "0x904b7a29329ff089bcb42d68aa278ef3545a5403929cac99863a42bdb715b2b3", + "0x403c4a2842f66a07747be35ca1bc9dc0e2ddf2d7c1697eb849ed562c77bcdf2a", + "0xe0da1d0c62191bd80af8320814e39ad1268541ea963a9aaf9ac00f0dfd89f889", + "0x87b37d79e14632aab7a48f69c0882ca45fa4f7db8c1aa5dea8651737ebbb5cc5", + "0xd90b08b31a8731667fbfaf625850de0429afede98f978b916e40a2340ea00a7e", + "0x746193f0f7f55942da2efef7f8bd93dc3c9d5fc8791a93dacc862160a7a25826", + "0x4b526d0693b47acccd25de1e77cd99f04d75b45b55243ca58a421222b229a27c", + "0x3b918b2908f9ff5751b5831ee1cfb9fc434a7017c76c93cf7313438e9eab4cec", + "0xa77e59a2e20f51c1a40156ec5c766a2c0f998deddde8821704f72e2a95e97801", + "0xbb85a4f830193f60104aa53d737b08c05dc2e1a1bc55a805edfd44ef2de967bf", + "0xba69bb885870da7326726baf87a25831df78d5a2dde28e3f3482b26639f74e6c" + ] + }, + { + "address": "0xc54e6059d14beed4843d9549ccb499be27575672", + "storageKeys": [ + "0xfe56974eebb287aeec4103931878e0014f44eba6aae3aa8e99dd4fd8198e7b5f", + "0xc9f1b73d8e1648c14f642099d82d6ee289c0e3b69b0b7939b2aff00625abba1d", + "0x3c013ac9eec9a47d9836368a9badb62417615a9f1e631669ae196d4d5d618ef1", + "0xfd191cf448b89e574116c3ab48f1acf794d34eaf5d7eea11ff1364ea3c769741", + "0x7a252dca348f88173bc17a3335b10d6d48faba19e02bfd6fb275bc4b33a645d6", + 
"0x364caebc7dc8821278535b95021eb2a43d2e7a93787fd717ad7b69e96be79f60", + "0xa8b97df19a4bc259486da0b56895256de53ba4bcbcfb0c32c1d9604dd46b2e93", + "0x8f8b69bea2c815b141cc110b0579adb69b1c6cb76526ff93f986d417514ef11d", + "0x1512e06d614de8c8800f2fb9a3d2a3c3139edeeb31664e76473647a5989518d3", + "0x134195016815e70c2c143ce2e22041aaba657100c6725e179c02f67457140b24", + "0x4a10ccef3f2c56277658817cd855a26594351a862efeb280aa9453068cdcd532", + "0x551bb650e6afcb7f9634a2d056af75884d4ea7e055db42342109e34788c2d051" + ] + }, + { + "address": "0x20d81d38799cbd942bed6695d1f22da9f9e8c746", + "storageKeys": [ + "0x959b32957311c6b5a4a81d213aa88aae2c8f1c34baee1b3f424b3b8579257a27", + "0x87058d1fe8e7812298610f7b03a4828b67876307867fea544355eacb20473f7e", + "0x984015c257a36b80e08caa023b123daa1ec1b9ebf7aededa20c3fca1d702027d", + "0x404a79312df54ed209fad239c33ea81f53d3cafb03fcb328b96777b315f2d9da", + "0x94885858e572209bf352e80b108921dd4cd9bb40708678110a16fbd9b318485d", + "0x428f9d1b50a9051a8a491983f5e025c10df66d58d8e1cfda955b4c7b4ba077f8", + "0xf6a4007f6363b22ed764d94c1b9583297445f7e8aa33abfa1ba9373e310a750a", + "0x9046a92d8f9153f5f48c7278f58c53ff973b630c3854202b5d20f3550929e053", + "0xa7b9fd2e476fd056880b1ea517e82047fb55503e62cb243c791e9d12b7cdd46c", + "0x060c55ed74a81555128ad4c77ec9ff99a720331ea79a935a29ea7398cbf75e97", + "0xe765561d1cfe3b5debe053140f567431042787e9f94d7596f394190e21e592b4", + "0x3e5de1bc7b57fbc108247418281e03b1d97f4df3368992e73bf2d4a385fd6ff1", + "0xcc3993ead511a808271d42297936f1fdebed63e854158ef2ad25368dce70f627", + "0x1039987352619ce22c193c446c56401a413a2b7d16517904335e3b897959ceb9", + "0xa916030534458ece88152ce2f7fc76df658c47ef816ebf9c211e728a26706d4f", + "0x770f4047050b7a6b46aef9292a1aea52ee786f1382af9cddc8dd24e929366ed2", + "0xaa8d5e377395f25f86f830b94c5d0d3d857c32e1ea37ad4bee269c9d07810ae8", + "0xc50c87faa6dfe24550976620821811655a15e8560b9f1933f2d2c454ab0456bb", + "0xb0578d42fd527536993c3b93f9f62acd0ea65823e4064eafc2b03584363eb1ef", + 
"0x35b8db24511c5f581b992ac24f5b899fefecdf4d347a5da86f1d2454d43fb69a" + ] + } + ], + "encoded_bytes": "0x12354d221c07fcedf250e2f0dd5ed06bffc1847658db012638dd977ab8e00e99343317e39bc38526dda3f37b955e07757b75e5abe1963a950489d8a495c8bf636ff93d22921936f5b6d90f45e510c6dc5fe61dec594cc449317a3c4203405a56b3ab32a63a1c9a7593862b6dee4ba6f2829a9e0ae7e1f85298507777055910aeeea2a8e3ae5b642cddba912ce50ffd3d51ce8515c534f956c54f9f39740e6840b39228d74f24227cb6912dacd7d423085ecb129b8b8d8f724c41008fab05d4e1ef0a5a46c8e4f3fd04a2d0b5162e97f427fc34307bc32d064c1dd72a2cdebff69a175578b4f032c8a9488e54a3f404521b98cb79afd4d846e2d065581e19327fde8a5134c975593448a30af45649d040157efbb241db690c93a64eda804dcbb11c0f7b97b4174a9ec12a3341893b05434975204e501feb9d5b817d3cffb896f7b06a4aa68dbb9977d824238f390af7f3c93f1eec9ce223c9e5ded42bd7e2d82974d563197c1f7ca4ee2f2dbd169418d0391e08ec558d9db425dc21aa6c5d2b91aa62ea66cbe448fc13c7416c32124295be81d66314bde15e007f95e32dfc4100ae74863b326c036e4ab0e83cd37555851c2baa77ff84c8cf41ef5417f689f6a11726c926514b15216c0343c058780309a8dc6df5109b5295e4d1cbcef38ba4be4a2e8cdb3552ff0bdfc18f41e872cea248b6a2d26fce1a05db50f48ec280591477c789514e999c29c4108ccd6b739a3636c1c9cc3d14440ffeb7fe6bd7021d59858e1a47a38e7c2856656397d7728e5938d47b031fd9b706ee403a041d5d4bea72aa2f1d0ddeb0ff3db04e917e72d503f6ff0ab6af64a1b5504191b7a681d293a1baa59a0ede9e0489198d158969094d1877ac98cac440b64e209edaa59b79ac2b9ef632932682292d3d4484cc8531b5adf7dcf328a067049516c1dcfb0a4e29ce1fe6f356bfcb8e6cfe92065059bd71fe976049f295a52a96ae10d16a636c7bfbfe96b6a50afac090a410f33766695856f193bcad26c5875423d14b4a702deba18f682f4c381652ca0056dd989b2db23a5126613c18331969907072fbc2af3dd8c408d53bfe689c484c1f7bb066349f75ff7648db9dc590570b96aa4864e230f13b6a6921ab2e8c54acf81a763c749f06b9299109d0269c81af4853a6bb6c74bfbd0e04950ec7dd5b851332d88a33bacec91d03073c44940b04d7707b0ea525fc7b072180699ed29d5e8c2da83e5f4e1d3ed64cafff5d8f98240d65aed624ec5ec314b5f4ae50b75b2a66c04aa39cbfbb5483a039b63a5c0e69b0b1b689f3ec335a113addf79ea1ca7e2d785861a40e6f452603
88c3e33a254ae29d3d92625021bbb02c8291a4617ac2e57c12491043d613966d26a794e17dfe29410efad6abcc64ed800553333d423b42c3d3aec2b082c950c7afd387ae9f594dd686e3bcfc803d0fd00c67eea65c204fde533f38d94229eb752538ac5e1073b52e0d9105c41a5d418675f501b017dfab75745cd666c3e9b06b9aca9f5eb98639071c9b4fd8558758383abb53e1ec6336610d5c122205a107ae32fd98b45b3e5f5088def78728ea135312257a5bd0ffc46e5b1c1d4c2ba324de1a6a8feda586533bcbc9214dfef2f4a0c36294afdf1082ee3dd7ad53e47420343097bfc1f71b23cceaecc195d4d1f6810019a4ed1929749b1050ff8f309690d8733fddcbba50a7a360b8ae6a1920ca414716df5f76ab4e5e2f1cf0eb1380da62934e0e763190fb76bdd40ab2afc181457f54a0f57c0ed5310ab05a317e297c9c6bba2c3e7e5f7a0244cbde6f2b04d3e2ee1d9fa2f86f576a2b166b84156a4ffb241ac93e3d3b8b973540be29fa621491603f88161da0ce4ec7cd60008b01c376956085b1cc24743cd04f1fc0c8569491fc87fae64590de41fc429e4de4bab503f747b4f1ad98db061f15b4462354322879902fef0df090d8b712d7fbbd4f0b55ffe57c3a714a278834a32253eb4d71deac562e22eab83e30f1792f2b7f8a7d4fd2ff9aa00ea0a10b377a6793847a25217da18c4c95e3c385c6a51c6d79bb5c005dc07ebabc222e8a19b64ef84abcece223f983da13591ce90877de7ba5808ae3472de6fdc74eb7940e6bc43853dd1bacd663fcd3dbc78eb091ae8111e290b8c0e3d7576e0d0b6eda6864836a8bc48d4bcbeb5e75508eb42191b461459a35adfc91a17b32ac69a95574564051adda2f7bcb3b9ec87f474fad93b36d97bebf443481185b5ba4ec3b03f4e994d960f0de6f158ed66446b2a3036d3717fef3a94bd41aebe5e0708a0e9af89b6443c18489d2246214c61862f1ed88f3ce40a8e10320defa170c788603bce779668f7c09dca3f72f59c6a9a769f95f405054ef07c5f943128641b37868a397c84fff26f8774eb6d5c9607a11a9bf2dfc41d2dd5a59281cc0d2e038cdec42d387c2fd280be2635ef1faf7d0b33191c06c3a5148a9e674a445a1d40e442c6b20714255dbb5039096cd42699f85400a82161a4119b58cc0c6e9edca6e7cee3972238e4227a6507bbfa67e0c4e95025ee650573e7939ddf76b30348ee236a98157d3f60866298c01e409468f4f0dbb7a77a64bac1821fb228caa98a9bce15791f948b8379582da38cb9e8a327683c482c7e86ca541944762cc01305e7cd11862fcbe871cf6300b9f1924c88692ed85e65a38a602f5fc6295730922619a1f18dfa6e4262794b1bd2aa170f94b9970f32af4563501f9a195d4807533f05b90c439
8d1591b698b525f055a51ce5a4221917627c3b96a6ff5741c121eb2d61a1133e0d83571b35f67d67b72c9c8533e058952b527670095fe8824f07ea0d6e0dbad309176d92857dfc4d5d3d13e73c396d5203975e8d5379cd209698bddff646990202973e3973cf30bf1af72b46297e7466bfee2ff9d8621904bbc476053149d13fc812a2a2b5039e0b782fceffe70c89f8c63740a6ac74c59347000dd31fa571b6892341cceede1a69f962731aa513bafc7b5451cb648b795fa3d257066cc6bfa2e442fafe113e1848a9738d737d703a36cd024daccdc4db2b4b70dd6207c14dd2436e78e49b49589bbbb317a77f9477682c2aeb938fab694be478642ab6873a09ac6c73c26931e247e85cbad133e7baf51fe7bec2e6a1e50930c57efab7da434fb535915da2d107706522461b3fcaadaece2774129d422e99265108b9be7c9056eb93896a67ce2a7dbde84bd227de695200ba8564e7a09a1ae7572788c5290db5fd6012d911c21ff9a5a835fd1947fd835976ee44292990a9d7f386f1986dab5921eab3bfd50fcdc6ad18ae233c7ef01c9e52927967e30c3036d0e292effcde94c65bead7b83319d346f0a219808ec8e246e1650ec18af75c60bc0eb358efcba2d5b35215828d59ee631f5157bffc1fbbf6081eca4ed2d23c1a2445dc8ba3d7ab3ced201a1569f8c4d71e00d020c2ca617579487860ee8dbded351ef6e2eada035e39cf5793355f12b807835f9c2f31b79370e0d55555505507bec0bd2252c2ec58031a18f1935a8b7120a9c1e5283368f4ae08b323b330ae9aa47995027fca19be0c9c232adcecff0046b4c9409497134c0896568a9bc2933bd8f650abfec40226de34e71d0d4dc656c06ad3bd02620540205724d4d610561f4f6094a95b0934c298caddf8f11727d7c73060ff024445929ce69d59c243271085d231330b2485e73c12dc7561f0a7023aae55a3be3fc91aa1ebe395473c4a71122defcb8dfa178f69dc10bd1d1f0eb96d4f9e476187e98a55d64fccbbccf37f6f55bfd92ddf0d84b7f2c4b66b462776adb474454557dbc6c1a41c92281448eb6d7af07dd8c603ac3613b5217d8cc888319a25b546e9e82b8175775110e0ccb802c8d6bef975eb034762ddee6a58d5abeba89365acb43a0eda4c361ca296ea618efc9bb14a278a35506e25441018de08dd8f64afea873bf05e950387416faed43a04c5e290596d5e2604607cd987020ce3e11ca4c6b919afe8d8f49bc9b186e03396711abf987d7cd73e015c626140d92c2bfcca1fb1ea67d4731f0887283812a77bbbd73138efb4811eb64a61fdf61710e1e6b7b2d142e0459ee2f2ae7046546cad54cb02bd900fa0ee6b719bc95d47e125d39facfe152856dca666b1239f3d94eeb7c5ff2732d74f5684cc345848
74be22b6f2320be8d843160d0718630d7a79c7c74309eca4551e5fa35f322a86e2c274ead74d1caae775e7f727be2c4cdd12eff69b34c51f2727ca9a24a50d4cf83e9027b02926063ac57fae10ce1bd980edcc79d416c1a31aaaf5442a9237359be73e1224e183f06144a6cda4ff06524d68eda57414e029c35eb5d774f4d2f10f5b375a1419fe19f1c622242c6d6bb145bf601bab143bfd62bdefdd3d1c137af37a5d4c72a5a1f227987ae4286152e1b1378863368af9e4fabd65c8220002f900b7adc1020b3b2ad495f0c730cdc63a85275e9f351f0ac74a3994be327522d06dbdfa2645962828c70295d2100fb5fe8d2f27b0e7f3270b629cadcb999a4a7401d501a2a8c38eda12e4a0b144b7b7765ac7e04e9bf318062d9dc530cb4b9aa650d01629f2ceeb4b462637cdaac695b75ef0439363b068f1a2e24cfdb499fb40a864e28ca1f8405bf3c3b036b25b8e27102b583c4ffa79018975117ea9c911d46ca8f4069a98a696cc4e455e3d1d9dd0cdecad8e9c3cf6132bd97c132863c47a77c8186cf8898cb8c56e6975569b61508d8dd8994b3629ccdfe8333242c0385a690852beb733b8ac71e68be82d58044f9dccbe1fae9f370c385cb502ccea548e310e5b8c1145ecdbe6f3988e406f7af1f3d910b6355c4cae95013c8008a5438297e755025e7a5093d4f79ef01b3a04868ecdd24f5abf256a80a5632d22ea6130305656256f07653d7086ca85bb300d5077752df93d48467b3fd935e7a28fa2533d0701586bfa10806b1cc69653289477213cb4736b8dd4c72c19cc27a60e98cd61076e1a82ce8724e51c98eb3fae400481104bc4f4e61342f83d841feb77a20777df2b0db76de686c5800ddc8af24175a8d8d3a926d45f2f8ff4dbc802c5ae0f427eaca9039cdebc417c6ecfd1a0ecf864db3ed6bd56fa5865fe0e12c0737c54bc57d71b093e3aa36fa74e80ff855917d8a5f5ef118beae2d8387bea44a1116fbe15544fe19abdd3f21eb8c78ab3906967ab08c69a41f4fffd2a9da28156c3d4c5a71708b4f5025b40f278cae65ee022466fb031af5e52c4c45dec0bf6ab6ffe4f4b0d67f595c7102cccca88045a37728df3c2f9ca7a72863a10b4c2278c02bf1f8cd16f6e61bf1d41575a7380107fceabfcdbf7d5ae5e1db396017f96e196db77cf33de8ea5cd78749ee9d9a219dc3f96833d3cd2ced91bb3721f55e2b6f28461eb66e038c5c0cb243ce253ecddb88c32c40cbfb465a7096cef69bbbb5661293eed9ce5c483aa0ac82399ef6dc3058e214531cb529b0ccea99493f42f6dad10edf29c2a65339d468ba4e7990056bef4348801593353352417e3e167bb7af0590beee112fc212990c2c5d79666e4f9fdac98158c602b28045e30d2119cac9f242a175e4c31341904
379a6c5d18c1771da6451e60c3ca9e32ac444a82bf4981d07ecad203cb6a4ee49a7767f59e2b478318eda0f8af35f8cf4c136f714ab1c3d7aeb45c1c094aa60dbfa4e1218dacfc3fdc1b316d2be171dec26e09500fab0a28482ae076ae977dc4b2d43d5906c327346b8498b77791a249d6d51fecb50186378d39e1e1a3c7a2e424fab3b9ceb3f3b3b96705f32da26ecf190162a57fbc2257fa896c97bd5c4bb53f6338ce09c77bc017fce5b489dad7d3a237209ccd8780ff3a91c7410b91d7450342e08187ba132fd8647a8f8a59f85f6110870577ca168426a4a98befd34d7264a579c4a7d2a914b530f5bf1b4b743c969564b906b2629f927c78bbfcb5617d3ace5bf4d3ae09e799af4a8f3a85aa7fd574acbe6fdb580c20d6005c395a35b51b01be8262cffb83ef9519d503b65895524678ee60a115d82dce614d047fbf976c0e61bf717fe3d7f1077fc0fc0fe924c5582bdfd9d1e25d24c9c691b0b7fadc1d24803f4fa9bb795785f30e58618566c8c4c129d96e75f17f5e77cb91cf00954c44d8ad01919de4305a3cd8f5c3cee9f97df7ab43d521626a44a2a6190c06083e95904b7a29329ff089bcb42d68aa278ef3545a5403929cac99863a42bdb715b2b3403c4a2842f66a07747be35ca1bc9dc0e2ddf2d7c1697eb849ed562c77bcdf2ae0da1d0c62191bd80af8320814e39ad1268541ea963a9aaf9ac00f0dfd89f88987b37d79e14632aab7a48f69c0882ca45fa4f7db8c1aa5dea8651737ebbb5cc5d90b08b31a8731667fbfaf625850de0429afede98f978b916e40a2340ea00a7e746193f0f7f55942da2efef7f8bd93dc3c9d5fc8791a93dacc862160a7a258264b526d0693b47acccd25de1e77cd99f04d75b45b55243ca58a421222b229a27c3b918b2908f9ff5751b5831ee1cfb9fc434a7017c76c93cf7313438e9eab4ceca77e59a2e20f51c1a40156ec5c766a2c0f998deddde8821704f72e2a95e97801bb85a4f830193f60104aa53d737b08c05dc2e1a1bc55a805edfd44ef2de967bfba69bb885870da7326726baf87a25831df78d5a2dde28e3f3482b26639f74e6c9503c54e6059d14beed4843d9549ccb499be275756720cfe56974eebb287aeec4103931878e0014f44eba6aae3aa8e99dd4fd8198e7b5fc9f1b73d8e1648c14f642099d82d6ee289c0e3b69b0b7939b2aff00625abba1d3c013ac9eec9a47d9836368a9badb62417615a9f1e631669ae196d4d5d618ef1fd191cf448b89e574116c3ab48f1acf794d34eaf5d7eea11ff1364ea3c7697417a252dca348f88173bc17a3335b10d6d48faba19e02bfd6fb275bc4b33a645d6364caebc7dc8821278535b95021eb2a43d2e7a93787fd717ad7b69e96be79f60a8b97df19a4bc259486da0b5689525
6de53ba4bcbcfb0c32c1d9604dd46b2e938f8b69bea2c815b141cc110b0579adb69b1c6cb76526ff93f986d417514ef11d1512e06d614de8c8800f2fb9a3d2a3c3139edeeb31664e76473647a5989518d3134195016815e70c2c143ce2e22041aaba657100c6725e179c02f67457140b244a10ccef3f2c56277658817cd855a26594351a862efeb280aa9453068cdcd532551bb650e6afcb7f9634a2d056af75884d4ea7e055db42342109e34788c2d051950520d81d38799cbd942bed6695d1f22da9f9e8c74614959b32957311c6b5a4a81d213aa88aae2c8f1c34baee1b3f424b3b8579257a2787058d1fe8e7812298610f7b03a4828b67876307867fea544355eacb20473f7e984015c257a36b80e08caa023b123daa1ec1b9ebf7aededa20c3fca1d702027d404a79312df54ed209fad239c33ea81f53d3cafb03fcb328b96777b315f2d9da94885858e572209bf352e80b108921dd4cd9bb40708678110a16fbd9b318485d428f9d1b50a9051a8a491983f5e025c10df66d58d8e1cfda955b4c7b4ba077f8f6a4007f6363b22ed764d94c1b9583297445f7e8aa33abfa1ba9373e310a750a9046a92d8f9153f5f48c7278f58c53ff973b630c3854202b5d20f3550929e053a7b9fd2e476fd056880b1ea517e82047fb55503e62cb243c791e9d12b7cdd46c060c55ed74a81555128ad4c77ec9ff99a720331ea79a935a29ea7398cbf75e97e765561d1cfe3b5debe053140f567431042787e9f94d7596f394190e21e592b43e5de1bc7b57fbc108247418281e03b1d97f4df3368992e73bf2d4a385fd6ff1cc3993ead511a808271d42297936f1fdebed63e854158ef2ad25368dce70f6271039987352619ce22c193c446c56401a413a2b7d16517904335e3b897959ceb9a916030534458ece88152ce2f7fc76df658c47ef816ebf9c211e728a26706d4f770f4047050b7a6b46aef9292a1aea52ee786f1382af9cddc8dd24e929366ed2aa8d5e377395f25f86f830b94c5d0d3d857c32e1ea37ad4bee269c9d07810ae8c50c87faa6dfe24550976620821811655a15e8560b9f1933f2d2c454ab0456bbb0578d42fd527536993c3b93f9f62acd0ea65823e4064eafc2b03584363eb1ef35b8db24511c5f581b992ac24f5b899fefecdf4d347a5da86f1d2454d43fb69a" + }, + { + "access_list": [ + { + "address": "0xd16672ea333e0a3f3c16849ec2e454c5376359f5", + "storageKeys": [ + "0x7e0e31340216539f7d418855a7076aefc69541f8d4a476bfd441ce1d4d2b32fb", + "0x0bcf2a67cb95803344901c75d7133eab9be683f7906ed41b6c38f2e522488043", + 
"0x5e101414916d282df4756d8ffd6ea66555753339e3174853a8d6edd9912e04c4", + "0x35bdf40620c4a838696343fa3ddb323bb9d9cccf043812ca2d82528110f350bd", + "0x4eff526cc913072c45037cc654ef624e08de226dbab3720f49e7e5a40a425454", + "0x46d35f365e41726f82165f32ab8e0f72c7bf23e5a49bf6fbf8063c159c491284", + "0xb87c0e8416f59b918c97cf7621c883ca202eaca33ffddb4ebef63e4e19a2ba72", + "0xe11a685472535a256bb213e3c817f1daa048557b1fd39a5ccdc4106c72361e9d", + "0x1966458732440bebc595abc6804099d6020dc0e9de604a9655e2a768a92aa06b", + "0xea8669b961fa9f12be2fce4cf0f9cf596c29dd206babadf2041d8b1e871b80f8", + "0x269897d2116e2446001252fb7af45005fa6109c2f29bc1ab816861e63984fe96", + "0x38aa86a670972032a2b4cf25061899c43746adcbbefd065cac424b5366505cd3", + "0x62f7d6500c7bf549d287ddec2de21dfce4bac7ccbbb5e103ff702a365838b1f8", + "0x6555fef8b30084ff128fed79cceffd37dd91293056caeb861bbe63a81eb6cc39", + "0xf350cf25300f3c66791124c7bae51d91353baf584881ade44d857bda9d23b067", + "0xd698e6e080b05c38dc33495231bfbc362ab4a4e3bb1b436c12f0e4f7856ee9bb", + "0xdef22a87e7b51c5afe9721373a003f1ecc6ca3ef1b65100f47dc0fdd9b706b90", + "0x689280dfb72e33a343ec019860cfb2c59ead73d61fd27ecd053d537a5e3be0ca", + "0x1f38fe2e982150099e90a07256f341034aa3d7d802041d1c10fe8afb00cb9b81", + "0x1636bb289aeff4e3d10764448a0b4bdcf6d05f62e863b26da96124de87ae5447" + ] + }, + { + "address": "0x38d7fd09e6137f287e1cc9a1f60fa9b0a7eed6a4", + "storageKeys": [ + "0xea8482b6d2f3c1ae98491112017d7e8d39afc0b5f19e97c86da09f9df01d340d", + "0x530a8f8103f7ffebefe3ecfa95eca3c66a6e51067bf695d356d15482eb7652e4", + "0xbeae6829b0ecad1c2cdb7eae0d57d2afa1529706f9729e9be65f187acbeed495", + "0x64b6f047e2858c9c15e6df86650c2de757b1b34cf17f896e9ad08b746c6ba016" + ] + }, + { + "address": "0x7ba9c03444f41d0dcdced0de9c44033bf8f75095", + "storageKeys": [ + "0xbf3c5b2fed79f6f13c882668d8dc169d1e83713d76c3cf36ff0b2fecc83f3b34", + "0x9610d3763c0fe8f4a27d60da6f341af183cf787b3b9c598e380af4c0d5c9d4c7", + "0x5367554f5b1de33f2eabb5f720f4a7c52b639a5552b58764bc9b40d534e2974c", + 
"0x5b24de37c6638a65aec658e37c13c75a496d18a7620c23c6219afe3d8fc1e492", + "0x0c13b39003b528d5f43e5722276ffd69ba7657e650cae52a580b1627bc6745f9", + "0xedac3b374bd271b55230e44c967410c6b3af827e1b203fa293cf1e6bada7f122", + "0xa34ce5167a218fad321f6f09d70df5930b7b0665f76c4762b871a61b3906a502" + ] + } + ], + "encoded_bytes": "0x039505d16672ea333e0a3f3c16849ec2e454c5376359f5147e0e31340216539f7d418855a7076aefc69541f8d4a476bfd441ce1d4d2b32fb0bcf2a67cb95803344901c75d7133eab9be683f7906ed41b6c38f2e5224880435e101414916d282df4756d8ffd6ea66555753339e3174853a8d6edd9912e04c435bdf40620c4a838696343fa3ddb323bb9d9cccf043812ca2d82528110f350bd4eff526cc913072c45037cc654ef624e08de226dbab3720f49e7e5a40a42545446d35f365e41726f82165f32ab8e0f72c7bf23e5a49bf6fbf8063c159c491284b87c0e8416f59b918c97cf7621c883ca202eaca33ffddb4ebef63e4e19a2ba72e11a685472535a256bb213e3c817f1daa048557b1fd39a5ccdc4106c72361e9d1966458732440bebc595abc6804099d6020dc0e9de604a9655e2a768a92aa06bea8669b961fa9f12be2fce4cf0f9cf596c29dd206babadf2041d8b1e871b80f8269897d2116e2446001252fb7af45005fa6109c2f29bc1ab816861e63984fe9638aa86a670972032a2b4cf25061899c43746adcbbefd065cac424b5366505cd362f7d6500c7bf549d287ddec2de21dfce4bac7ccbbb5e103ff702a365838b1f86555fef8b30084ff128fed79cceffd37dd91293056caeb861bbe63a81eb6cc39f350cf25300f3c66791124c7bae51d91353baf584881ade44d857bda9d23b067d698e6e080b05c38dc33495231bfbc362ab4a4e3bb1b436c12f0e4f7856ee9bbdef22a87e7b51c5afe9721373a003f1ecc6ca3ef1b65100f47dc0fdd9b706b90689280dfb72e33a343ec019860cfb2c59ead73d61fd27ecd053d537a5e3be0ca1f38fe2e982150099e90a07256f341034aa3d7d802041d1c10fe8afb00cb9b811636bb289aeff4e3d10764448a0b4bdcf6d05f62e863b26da96124de87ae5447950138d7fd09e6137f287e1cc9a1f60fa9b0a7eed6a404ea8482b6d2f3c1ae98491112017d7e8d39afc0b5f19e97c86da09f9df01d340d530a8f8103f7ffebefe3ecfa95eca3c66a6e51067bf695d356d15482eb7652e4beae6829b0ecad1c2cdb7eae0d57d2afa1529706f9729e9be65f187acbeed49564b6f047e2858c9c15e6df86650c2de757b1b34cf17f896e9ad08b746c6ba016f5017ba9c03444f41d0dcdced0de9c44033bf8f7509507bf3c
5b2fed79f6f13c882668d8dc169d1e83713d76c3cf36ff0b2fecc83f3b349610d3763c0fe8f4a27d60da6f341af183cf787b3b9c598e380af4c0d5c9d4c75367554f5b1de33f2eabb5f720f4a7c52b639a5552b58764bc9b40d534e2974c5b24de37c6638a65aec658e37c13c75a496d18a7620c23c6219afe3d8fc1e4920c13b39003b528d5f43e5722276ffd69ba7657e650cae52a580b1627bc6745f9edac3b374bd271b55230e44c967410c6b3af827e1b203fa293cf1e6bada7f122a34ce5167a218fad321f6f09d70df5930b7b0665f76c4762b871a61b3906a502" + }, + { + "access_list": [ + { + "address": "0x87c3de62f4c65310ba3b7eece046318ffbd2132b", + "storageKeys": [ + "0x87a55c2f68449b4009b3c11a2888100e91f1ea135462cce3e82b2fbe7ea4587e", + "0x753a6ba0430ec08db12585193f0c84ba0f05e45a1e42cf9283235e13832cd363", + "0x6be20d460b2d3f0e116c12bfebf57649a0bb037262ee179cb6819a32b634ca31", + "0x6b44ff7ec003716b9e71730b4402e93b5a3e353c02f70757ed09a9ab830cfce8", + "0xd311d106c19b4cf3c0e9e8fa9d23cb336675e686d2c71b6e234e8c98aabae460", + "0x6caaae4d65e889fd89930454b76889cd895857e6826f11a32dceecff16462f12", + "0x2d26e085c56dc0301ff6a782a8e4c3c3557b0ad91742925fded00179bd082f33", + "0x5a96a41f4c638bbc8823f5cfa4e6d6b16d22dceba3b5107f66b59825e4b427af", + "0xca41d2aba2126b6674beaa01a1fd014bdccb0622b3c513ae62b9acb172b0bc15", + "0x0cf9f0913d240c3cb4ef546b37eb0612852dc40555040e583811f69edb49c102", + "0xa6449ade6b1eca540396a0b2fd9346424525f7a4d70311490ebb8072c87a9dbb", + "0xfc137c562b2a9c98f0df7d590e59562c0a12c52073779b25e4ffd6e6d7afdab9", + "0x48a27ba444db932e65957eff7ba2a2599e7b733f75d81bb1755a9b7beeeb28a0", + "0x1ca4a880f4013c126213205651bf43dc4699ad59e0ca20448aba63ef120a6974", + "0xe5699c79ef7426f5ce1c8b447a906308213faa3e1a8083b0f960fc74ce0fc154", + "0x41d50aff43e0b112bc0623ffee4e55ff612719618ca8675aa22af0558c632faf", + "0x36521eac364b15393c05431d60b9229eb02e112433a5bcd5cff24cbe5915207e", + "0x8b59d4a09734a130d03ac8a2a772a700f3af5592a49056675249df60faea8663", + "0x58e0b36a4eaa7530ae53c514fa706f099d76dfe88679cf1d0cf19798363016f4", + "0x38450fc4b3598d9b7073f3501964b5fddc39725dd80d26d4c5382cdad7f00f49" + ] 
+ }, + { + "address": "0x336931c92da05a54bc90be949e0c1ff3ea5364d0", + "storageKeys": [ + "0xd3f6243cf9996c3208af5390dc2e7fa94001b7cb038a9d18a09755e3266321aa", + "0x226f0e089988a862a05ab3d2670357a3c73ccb05b434748cf306290fe39409ad", + "0x18cafdfeec53550dfeba16b259c63c0416668106579d8509447d9023061da0d3", + "0x1a5101b02ad72ac3946c2a4f47460791fab12db7fff6c07265d416c391c5dbaa", + "0xd8dadf236634b85cbd7521017cd84b9e6987da4a96593e203fec965497dad8ce", + "0x750e24a80b7c5b258782fceee146a53eb4d743d50d0a80b612986eb2737a122d", + "0x7b10043486ce84a568f13ccfc1c8507387eb92a9ef6974f20e8356c2110c7b25", + "0x8857f47e933685a27a84c00d3aa93689736a8e0131f81863e58e4ab327e66e09", + "0x2cab3cd2082b7259587ffe66d4a41e022c34af7d01128e713739938ce73ed1f9", + "0xd1ceb9a20876f3a3c04268161d4ac0f9f0c59badadf45a9e54d291de3e77f137", + "0xdff333e98733363b69571a92b2f8e8d2f024d8f377de6bc762e79e4eb4644669", + "0x53a6d4f9b5112e0b900464311c59efe4e89ddfe3f10ab7acd26a5609008f2b5b", + "0x2d9ce4ff5174c420f2d464a9199d81ebbb6becd6a86c31ab35b276eb5f54aecf", + "0x10cfd50ef0fba81914c94e813828e622d0be40551ca27b8ddf7c782f833c6f0e", + "0x7787f82f232bc396776f1091074087d9f78f71b26343bd97b180ef2e961665d5", + "0x1c11e9e83af5872cdc40a425a3a7524fc8926bfd70e1b3a7b2b93b3e02e328c4", + "0x45258e3d653556b1f4a4ddcd4ed81059f7ff7dce9ea434f4299c23074b462eca", + "0xcfe36c90cbb4dfffe2caa47f4933420ac65164791702f846d2248b85257e760c" + ] + }, + { + "address": "0x8ebf07a81653b141902aaae46777547ab4c66186", + "storageKeys": [ + "0x03c1ba22d3aaf36fd56e010ca07723f2bd0c2bb935a5c7dc22c3b31dd3f95c08", + "0x94a80fd77d8c82bece225d4347a0468a6a49815710dfb50127c7d6a904f3b7e7", + "0x9b54ce2a97c1d6c936e21b11c6fa4d6a601dc68cd32b11a20948c7271b556179", + "0xa7d89f800b37c5a21f46fe8902a6c6c8891cbf4c9e285cec819563363fae20b6", + "0x4f4981ce4d554bd4582630c47bfcf9789a0d64907cd45bc214e3b5615adfddf7", + "0xab2e410c161fb5399cb8aff9caee529d3c1007df321e98fc6ab911a6a34d05c4", + "0x907eadfda7a55ca7b862fca2cca8193991619cda46c1b6f1f6a0abd5ccc6acb5", + 
"0x40b3fdaa6dacf5101530dd1dde6ffba50a361adade3ae2797ac419a9ffc04c2c", + "0x7547fd2752763dff78d93f4cbbd3e8c62806cf05c753c3a164085af877538747", + "0xa17aa5364056ca52d517497f03ed7c40360b3f0de2850b584d038ec2ae92d950", + "0x36edad4f5962a553d7bc46b48204999f42bee1c0aa52c8e5cd26ca0221f21193", + "0x7bb34c65c8b4327b73dd327727df6a616843ecddd3d88a359eff9d8d41acc0ab", + "0xb360ef358a0019207831e8fcc12829b2688c6e4a964c8f822ca94002b63a4640" + ] + }, + { + "address": "0x0a83554657974f2f3e752de497159d1856775474", + "storageKeys": [ + "0xdceb7fb2d8ee0353a9a8ef5f8a5f920156db8e41f6e689cd1236bf74e7d795b8", + "0x3d06ee86765443efad3c61ce853af9feb23fa7f4f3db9891af0072462dfefc35", + "0x5a69c7770ad76cdd9352dca7744d2fcb30f1253499f3cb0aee2eda480c100227" + ] + }, + { + "address": "0x36317a013f4589fcb918d655d3fc034ce6ed8b01", + "storageKeys": [ + "0xe1dc4c34f68b85aed808449e126752e814b7021550690b8a6880c0f3a2a9c3a6", + "0xf9fc31bd86d58e678cf36735500e45d4b4f371c645eaedaa8e0c5d9c42991300", + "0x9e428304ec157e406b1c8713d421b927df89f9e1283c9d89f11a3d2ada09224b", + "0xa01265facf003516cb78d6382be394737783e03b842c1aba0955d498afa921b2", + "0xb4c72e24374a72e1931cf5ba5b9568882783ee0a0bb8e4e9c1dd637abf14dc09", + "0x6d0a20693ea0faaec00354898e11ffbe512dfa5197123058444df2c80c6448d8", + "0xdcdafbfd049aecd39118c2ef167ed9b76480d22b8459a1f5d23da6529891767b", + "0xc14f9e15d735b0a0dae62032ea9f12f71509558ab7131ed04d553fb62b007515", + "0x723ac770ac5e685d1430b83b249252265983fea4c7078dda4b213afcef3d3df5", + "0x6caea4b35ae2ba6c08b5295f602f22593545ae742a1e58d52fdb1fbf3a807a84", + "0x95c985943807102fd50801c09f0100e5012674922d4be3d02cc621cf70ef413c", + "0xb37e12322a239c3d8b7badc8005a3807069afb747aa8b5d55dbfd4127ffbf5d5", + "0xd558ded329a75942c5aa7f233e876dc7054f721eb537f6bbb88bbe4df9fb9431", + "0x9aac8f272f702304be6cddd69ec1951cedcc565a9ccaf9d3ad0015bfd200420f", + "0x36ab65c1b6d441ba966fa37709ba6c4324e1845b0169a6fab11c7f0d10ced651", + "0x5b42eb2452f8de8da048194c225b21ab73db5f21b2c64b43e3ee60cd746c965b", + 
"0x67bbf93f272a0ee010d845f68654b6a41f61d17e35dc8e94f41b6f42c7150c73" + ] + }, + { + "address": "0x42c638abba952e586554daec2af5c86e81812937", + "storageKeys": [ + "0xd046df9cc6f1831b3ec602669cefb435fd3fbd36d34a53158204dbfb96a3ef5a", + "0x729264bc4f0df176df2bcbc2729ff72f304a9e875ad081e0cd8ffea757ef1d3c", + "0x918dbf9f61b03f3027f2ba0833429fd611d7b238e581e2d46ee431856c9fcb46", + "0xcf5b572d3121d2020129d5bfb07d4856ebf75f502eb80ba7843e4fd082affb31", + "0xd8508e334f31f519101b03e05e80b95c5818dc4dbcc75f32806d950552a610d9", + "0x015f3c62b976ef5c4c03d1f59807de9f2bde32d6e29c02251ac3217977c8d0ba", + "0x44b59db9cccaac419f401c3e8797f237824abde7f3db639e749610ff04f13510", + "0x74c5b8f05a04bd9f3a3db1d377c769d79a6b27b31e23c63dfe6ddeada91089be", + "0x99302ad5a9018a3c38643fc81241530acfd64fbff2f0c58fc1aab4607f2595d7", + "0xa361448db8f18cb5c8790c3a10f34c0d46163916540429578028687e3cb396aa", + "0xacd920d35ed2efe151a359dee2cf211da8a0ae6567216b02252c9382ce6e0f1e", + "0x6e6c00e824891ca4f9e8615baa834385fc2debce6ee96ef819b55821147b6a77", + "0x81780c9aa506e645e5922a33da9df857accabca617e18fe0c59f92b15c3ab513", + "0xc566e93d8123f1708c6dc15ce12f1b02d0a8cde23195b282d4f423d95f3341f0", + "0xfdca6da853f082bd956e61cf3c3572fff703391cd0efdd3f81adf18b6a55471e", + "0x324c242bd392cfec4c49a64bebeb24af97be4a151b8c8ae9bf69b8a7a98763e0", + "0x4dc01507e0a397b7311baee5e915903f705f282af02bd689740b87b5c1d591b0" + ] + }, + { + "address": "0x59d9200c8aaf805c20ba3eaea4677fe3ed765f3d", + "storageKeys": [ + "0xf3743ed67dd5068496fbe6b9d60ae2a7d616968f4e27e2381559eb9cd9ae3b6b" + ] + }, + { + "address": "0x35aa5007eebe3e3361f430d04448b5699e87d02f", + "storageKeys": [ + "0x713a7826a29a3ad4a5e28e6173bd7c8d5e7584e5313772e129cd037d0d4f4a08", + "0x88843737dae9e3ef1c043a42c3344a2dd8812352f3bc29276ca78859f63aa13c", + "0x708fdc0de7dee0ea53fa5cb20509df51e2b4a705284909d17cba215a9b7fcca7", + "0x7bf12b6524daf13fb4d14ac70ca864ca6b231c6d325bd469efa836f512c64fbe", + "0x14d0139fa9e6d3869b603e5e85645307901f8192bd437c359b5c9e27d051265c", + 
"0x515ffd9b9aa46c516741efcaa96c73cfdc3f19abfc0d485a7ea268cae9e27b0c", + "0xc2c437c6f712a450f93d40e684e8abdca99c0c338cf53c1f63971eda13f5320b", + "0x77ce50c1b941f94614b45c47cabf5b2079bfc56b083a78813ab237963d9027f1", + "0xc3a211d3a9febfc8d3ab553467ef77beb34ddeb0e7571b9d7af97af68a355924", + "0xe49eabfe0c6b13e873100edbbe98ef67e7e887c6b60983a7d24e8c9a0dfae1e9", + "0x2b966a041eb801ac0e61435f21558add884e3c1d34a708b20107bc7787afda21", + "0x27546b592b0a5fdfa66ce52a457d09d84e64270e5f50cea25fc6d7c89c533952", + "0x9a8d8bc39cc7253de7a278e00383dbfad6c96d023fad2bcec93783a2a5fff551", + "0xb00f3aba282bc429f33638cd9a316b1321cb79f159d0b6e92595306d8a845169", + "0x3464c5c99c5cb39821647a5b2398ec6b6add0b51285dba083c94a89780958960", + "0x7f57fd05df1298c95cecc914588f11c05c3d831d8affcf8a19dced7f87538274", + "0x770033ad94d29987ef11d48aed8a3f6ce9550929ca664c8396aecff8d13ce9a8" + ] + }, + { + "address": "0x2b9ab1b0846c54a40666d585e1d33ab7b05cdc9d", + "storageKeys": [ + "0x5a326c57682c270234927ec48811e46a7234222b24fb123872106cff3891a2d5", + "0xcccbb0f8fc580ee60cb98fb29b99d2599c740b21b1120851305aeb54f807d09e", + "0x36f522f41816b83d84bd4931d107c87e44fefe642c6fcf8ecf4e27e7fbbfc03b", + "0xab1952f70a9f8796e1b8c17198b72abdd08d3f850451fd440a01532091535002", + "0x14f1c464245f735ec38739c20f3456a762f2acaeaf03fa12d121a52d3a77ba6c", + "0xf4290cc607db0f896821848633a093cd99e5860783d8242829c5893d6349a86f", + "0x0cd7412ed5e3835fc426e037b151ae79fdd43c30494ce4c24587e7ff188d9209", + "0x9a77da75e1ca1bc9bb754ceec0cc98229c7b1d48706c1fcb69b3e637ec642282", + "0xbec7bafb5b79fa8ca776f3e019cb4978336deae003827c6bd1142bd6c586b83d", + "0xc99b41b0341c6388f6e90586af08fa9affe7552e9b1cbfb74337f752ab07557d", + "0xb1bcfe9309dfe36747726053692a78f782fe46cae76d4bd77375f37108600d0b", + "0x31ab3d3d27baf4b88eb849e5427798117b9ee1cadd438c41811da921cb83c8c9", + "0xf5fe8f7e8d94089eb0f2b5a78d0c005af9a4760d4065744d63c06fd9c129d869" + ] + }, + { + "address": "0x371b091cd5049cd5e11aec5cdd6d3775649f74ba", + "storageKeys": [ + 
"0xbfbba0ab2008b07af29fa19a20c2d8700c56a89229fcebecd9e60e7c8909ebe8", + "0xdba4cdf355606f72966c2316cb47f3bfd4b2639b3a4ed1c2e9a9da3c4066e064", + "0x56aa3c65ae89d9c5006baeedbfa30445b3f5127d2c2f95ea185c7193dd9f8a1d", + "0x8c3d2103b677bdaa788df2e02da7674bd0bf6ae9d9e8886dafcf13898301791f", + "0x201a22903d79c16f2d614d4ca423d15e6178c8df798def33d0868b67c1b47e13", + "0x5fbe39f64feba5275db14db265e615f018f9c7023f6ced81ba7f2c5b8aeb1937", + "0x9fa0268044c73c22b9a3ac8ba1fe4596437292d7cf2d8ab879a3de2163328edf", + "0x62d24ea916ebe55a7fe676cab6bfc9af58f20ab2afefefd0753b868cec108ab9", + "0x529a148df83d567fbc85be3239429c817129c1a7b72d4006a4df2d6084a56a72", + "0x8ccb556d9b8b3cd800c3485e69e77868cabb86968fc886c64b3906beac2b009c", + "0xafbf5028254048c1baf54f307c36abe322e28eaa1848209c307a6c9bef1e0dab", + "0xf6e363e906ce8b63c483a9c5fce0662f3bb863b41dc2381eb059b6070ebe84a1", + "0x819d930565531dcd272e5786b68171e538ac74e4d566f971ccad45566dae042b", + "0xade40c8b1c762f0286213fc8e62ab2fb94e5c91e7f1f70f1ba89cd68ecf5908a", + "0x59fc68cb9c1ff6c6adcda39f82380cf1bdda81936a2436063daee9d3d2be9830" + ] + }, + { + "address": "0x1fbcbc45822256c381236c747d673dda2e860cce", + "storageKeys": [ + "0x13c8a9278cd497fd030c005f7399ea1e77354802151a10e09e10095443c619a8", + "0x37cc1e5eb7c19fc856a2d98f15fd63e061eed268353b68569f20d87235b378e8", + "0xc00094a47ac11acc1a3141b79d303bf7e7dbeda84d7990521d6d7633edabe4ac", + "0xf2b5b5ee099509ab1a54cb2d9b81d38b8f1dbf3ae1b809e8911a47192bee5a75", + "0x59209f0830c19d1354b8d07efc6b21c58d751e0257fbd3ec146f438aeeea0bcb" + ] + }, + { + "address": "0x1fdf36eb007598ca6ea6c8c65cc473d415393e61", + "storageKeys": [ + "0xfe786fe9d033de60135375ca87a602476044bfb12139aca88d9c7b9471bc6103", + "0xa7a4997f40086ed7e91e6f84831721c624f606244d4f78a57fd45702aa1fbdad", + "0xf8b2e898b8c75e731c12946fc855ce0925d0712296fc324773bcc6102044939a", + "0x74045eb1a09d6b7ec48e908667c8be9b286dd905ac9ff96b75faf467c6955f0f", + "0xaa81da92e736858eb8f6207c4825e2402ade740cb6b8a77f224ed48229b5db0d", + 
"0x6ecb3587dc81311eb55d13c064d19fdedb2d7438fba69831593660903de789e5", + "0x20493f3aafa1e02a8d09828b8c3d6e52d2ece1e7d780e6091e3c05e6bb119f80", + "0x9de4dd26b035464709d5d344153bf5f9e73b6b4bea598352bf9eb8c1e30419a2", + "0x87460ce9bf13e450e34db7ce5bb9d5fde36a77910d360fbbc38117e56229656b", + "0xaa2ab59950a5b022e504da201b8dc9739b285c8cee35a1b0b048ab049c9af6ca", + "0x92907b9aae67519dddfedec216bf2142c2d7903b41eb427b8f81a5273ee79192" + ] + }, + { + "address": "0xd0bf46cbb912dcaf63ebf54e136c404f70c7bed2", + "storageKeys": [ + "0xdee362aaec98aa9af7db3842f2499477ce41753bf989ae0f5a7343d6ceb17d5e", + "0x3050bfb5d1341d3f9ff4da1c2721fa0de0d157a220f99244e2ed65edde1d8331", + "0x281db71f2d969d6853b204e6ec1a41fa98135b0f7785a53038879d76e7fe5416", + "0x93059e272d5cd97292092448dff0f878ee5b3c03834b3c93038dab94fc11db11", + "0xcfdb5e114f8bd6bbd153558e367d6d09cfa380a3d488cb03f69e00549b7ee309", + "0x8b2c0dd90abc6b408724c1fd6c399a589413cac5717b510ff8be9b446ffc665b", + "0x7f3917c088949b4c4a1bb2d70bec4e4ab2604b3219c03e10e383fd7dd508997d", + "0x076b655ba13b447227cfc35a85905dd639f5a07295aa5097c34405805e2ed221", + "0x18c2bad0fd6251fcf136016136a4eb7b976946c82018a08445afa23fe55e094b", + "0xe509bbc4c8714d77ad471bef9bd5f2425118edf45e3b26433519ce26fdc694a1", + "0xef1fe74fdb3ad9e3ac3981e0d566271b6f0c898517415e1db8efcb5405b96e87", + "0x5d299122ce0c60b60663d500e857dd938cd85923452de6d1d6ad4333481fc6a3", + "0x7ab5c7f68b8fa1f6ca4a2e91e692170cc6e622e74177c0cc0a78f9983db0f079", + "0x481e66969bf4d7c73a8798c100d20865decfa96bc4756c91290c926942ab99dc", + "0xa085495353d7bd22292e9752d49d58c37f497a52d6295ac4fc4c80e4830eea78", + "0x2ed18853890ef0458d72ed323acaba6a3bd5a6859471df005ef74fc92a8be5a5" + ] + }, + { + "address": "0x772e7ce97bd44345dfb024fd4b2d27e54e7c7b7d", + "storageKeys": [ + "0xdb79c6264610afa809b5ff5ea46c19accfd2f14aff4d6739ab2aae527f3a6eea", + "0xd92d8e7cab6dd391f1a6d6266fb44b117473032536d26f47227dbbd81794ed1f", + "0x714cbc3299ddf6c94fa064f7669bdbb7b5449f15a3887fd58e01dd877639c825", + 
"0x63ef5371c315eb5bb90fdbfd4269742415bf521fba095cb09bbd4909b1db043b", + "0xbdfb0e06d7886e0831600496493f0e01a68087688c4110afe9e813e4a24bf4eb", + "0x29cda096f9f37e7d35a8b9bf165ab307cc5b592e98f37e3238079ae4006bf5a4", + "0xc554ec6852cfae497a73f02bb4613501fc8b1f88f15db5db5875a02e83c603e5", + "0xec5ed8bf1272f09f3a0ec34084b5dd5c6db453275588c945dad2425ba181cb90", + "0x1d1cc6cee4011a29aad6677107555df8f45430420bfba8eea73583a2e3aedfa2", + "0x117ced9ee78ad1c6583d243eeba54378fc1e310833e2a5d5ffb9ae108f4a233b", + "0xa746f816640e0c19f1c1a5bc98d4a89b9f9edbb423c2f4f348f0264554a762bf", + "0xca52aa89b20832199339b31c2c3a18ff58ed5a5b8920ebdca525132baf1020bf", + "0xada90e8366a98f1342c2dc651892e0d2e23c248914e2ee85fb36ec70a8757cf8", + "0xf28084d435211a6646a19b6a2e2ee79fc6f6956d6005ee2a1edcb71c325a143f", + "0x5d6b26131d0ea3e4d5f23ce59fcffd6f61f8a09500e741740157e1d9e4770cd1", + "0x86efd60e8d4c41c4eff97d018d9fa74bc4ae53073207a3aa7894286d5f15f6ac" + ] + }, + { + "address": "0x34ee7a3c20ecb4899c0b4159f22d9d455a7888df", + "storageKeys": [ + "0xac1c42ceaccc712293cca11bbeafbb8dae54ee11873eb4dd491506ff59e975d0", + "0x9cf5b0e47ba3738b91d5a931f994d7db644eed93f2528a65106ee2254ac9252e", + "0xb676afe00ec32afa12bebc06c9fd69d16cdeef3afb5cf20d248c83350cb5db58", + "0xe2b19662fe3be390fa19ae6bb2e4b8510f3df1f68fdf33f72670cc886fc9dded", + "0xed1784d9edb9e5156974d4aeaa14bba5e3aaa7955ec89f4a2ba90bd8b253b162", + "0x4dd29c8285c440d447246685120200012546094c9b92fab9af8c9c8380c668de", + "0xe1ebb48ea63cf5d0a4eb778b208379930255bd3a848c9e5b42b34f6483500397", + "0x6b72cf0903b7554563dc2cb7ffd53eed6af589c3899c42327d4308239a296701", + "0xcb71f5e2533a5aa8a1820d8598466cd636f9fe2d09a59c22c7c877248386dfb3", + "0x5244955ee964d09347b2c25bb5174994dc5fb9cab7f83c449d7daae1ec977839", + "0x5d66d9d75bbf1336be390dff3597ccea2e9c9aab3f2e51a322a55b21c6eaf667", + "0x316dc78383d83ec2f967a2d8ee81d40ef01bbcfe3a2af292d2119982c64bac01", + "0x20c3e26be9bbb7b280e7e47cc70a31757f1a2151da89110f961ff67f331d120e", + 
"0xd81d83a5260c00d6fdb7c5612f3cb6138994905805eaf474dff09ff114d7fe3b", + "0xefd73cd41bc6f7936d7ffca40ca8a853646126db30af866c3b09244603d978ec", + "0x95f95d102bf3e224373e0487e8e721137d48ca0beb059befdcaed3e2f07a7671" + ] + } + ], + "encoded_bytes": "0x0f950587c3de62f4c65310ba3b7eece046318ffbd2132b1487a55c2f68449b4009b3c11a2888100e91f1ea135462cce3e82b2fbe7ea4587e753a6ba0430ec08db12585193f0c84ba0f05e45a1e42cf9283235e13832cd3636be20d460b2d3f0e116c12bfebf57649a0bb037262ee179cb6819a32b634ca316b44ff7ec003716b9e71730b4402e93b5a3e353c02f70757ed09a9ab830cfce8d311d106c19b4cf3c0e9e8fa9d23cb336675e686d2c71b6e234e8c98aabae4606caaae4d65e889fd89930454b76889cd895857e6826f11a32dceecff16462f122d26e085c56dc0301ff6a782a8e4c3c3557b0ad91742925fded00179bd082f335a96a41f4c638bbc8823f5cfa4e6d6b16d22dceba3b5107f66b59825e4b427afca41d2aba2126b6674beaa01a1fd014bdccb0622b3c513ae62b9acb172b0bc150cf9f0913d240c3cb4ef546b37eb0612852dc40555040e583811f69edb49c102a6449ade6b1eca540396a0b2fd9346424525f7a4d70311490ebb8072c87a9dbbfc137c562b2a9c98f0df7d590e59562c0a12c52073779b25e4ffd6e6d7afdab948a27ba444db932e65957eff7ba2a2599e7b733f75d81bb1755a9b7beeeb28a01ca4a880f4013c126213205651bf43dc4699ad59e0ca20448aba63ef120a6974e5699c79ef7426f5ce1c8b447a906308213faa3e1a8083b0f960fc74ce0fc15441d50aff43e0b112bc0623ffee4e55ff612719618ca8675aa22af0558c632faf36521eac364b15393c05431d60b9229eb02e112433a5bcd5cff24cbe5915207e8b59d4a09734a130d03ac8a2a772a700f3af5592a49056675249df60faea866358e0b36a4eaa7530ae53c514fa706f099d76dfe88679cf1d0cf19798363016f438450fc4b3598d9b7073f3501964b5fddc39725dd80d26d4c5382cdad7f00f49d504336931c92da05a54bc90be949e0c1ff3ea5364d012d3f6243cf9996c3208af5390dc2e7fa94001b7cb038a9d18a09755e3266321aa226f0e089988a862a05ab3d2670357a3c73ccb05b434748cf306290fe39409ad18cafdfeec53550dfeba16b259c63c0416668106579d8509447d9023061da0d31a5101b02ad72ac3946c2a4f47460791fab12db7fff6c07265d416c391c5dbaad8dadf236634b85cbd7521017cd84b9e6987da4a96593e203fec965497dad8ce750e24a80b7c5b258782fceee146a53eb4d743d50d0a80b612986eb273
7a122d7b10043486ce84a568f13ccfc1c8507387eb92a9ef6974f20e8356c2110c7b258857f47e933685a27a84c00d3aa93689736a8e0131f81863e58e4ab327e66e092cab3cd2082b7259587ffe66d4a41e022c34af7d01128e713739938ce73ed1f9d1ceb9a20876f3a3c04268161d4ac0f9f0c59badadf45a9e54d291de3e77f137dff333e98733363b69571a92b2f8e8d2f024d8f377de6bc762e79e4eb464466953a6d4f9b5112e0b900464311c59efe4e89ddfe3f10ab7acd26a5609008f2b5b2d9ce4ff5174c420f2d464a9199d81ebbb6becd6a86c31ab35b276eb5f54aecf10cfd50ef0fba81914c94e813828e622d0be40551ca27b8ddf7c782f833c6f0e7787f82f232bc396776f1091074087d9f78f71b26343bd97b180ef2e961665d51c11e9e83af5872cdc40a425a3a7524fc8926bfd70e1b3a7b2b93b3e02e328c445258e3d653556b1f4a4ddcd4ed81059f7ff7dce9ea434f4299c23074b462ecacfe36c90cbb4dfffe2caa47f4933420ac65164791702f846d2248b85257e760cb5038ebf07a81653b141902aaae46777547ab4c661860d03c1ba22d3aaf36fd56e010ca07723f2bd0c2bb935a5c7dc22c3b31dd3f95c0894a80fd77d8c82bece225d4347a0468a6a49815710dfb50127c7d6a904f3b7e79b54ce2a97c1d6c936e21b11c6fa4d6a601dc68cd32b11a20948c7271b556179a7d89f800b37c5a21f46fe8902a6c6c8891cbf4c9e285cec819563363fae20b64f4981ce4d554bd4582630c47bfcf9789a0d64907cd45bc214e3b5615adfddf7ab2e410c161fb5399cb8aff9caee529d3c1007df321e98fc6ab911a6a34d05c4907eadfda7a55ca7b862fca2cca8193991619cda46c1b6f1f6a0abd5ccc6acb540b3fdaa6dacf5101530dd1dde6ffba50a361adade3ae2797ac419a9ffc04c2c7547fd2752763dff78d93f4cbbd3e8c62806cf05c753c3a164085af877538747a17aa5364056ca52d517497f03ed7c40360b3f0de2850b584d038ec2ae92d95036edad4f5962a553d7bc46b48204999f42bee1c0aa52c8e5cd26ca0221f211937bb34c65c8b4327b73dd327727df6a616843ecddd3d88a359eff9d8d41acc0abb360ef358a0019207831e8fcc12829b2688c6e4a964c8f822ca94002b63a4640750a83554657974f2f3e752de497159d185677547403dceb7fb2d8ee0353a9a8ef5f8a5f920156db8e41f6e689cd1236bf74e7d795b83d06ee86765443efad3c61ce853af9feb23fa7f4f3db9891af0072462dfefc355a69c7770ad76cdd9352dca7744d2fcb30f1253499f3cb0aee2eda480c100227b50436317a013f4589fcb918d655d3fc034ce6ed8b0111e1dc4c34f68b85aed808449e126752e814b7021550690b8a6880c0f3a2a9c3a6f9
fc31bd86d58e678cf36735500e45d4b4f371c645eaedaa8e0c5d9c429913009e428304ec157e406b1c8713d421b927df89f9e1283c9d89f11a3d2ada09224ba01265facf003516cb78d6382be394737783e03b842c1aba0955d498afa921b2b4c72e24374a72e1931cf5ba5b9568882783ee0a0bb8e4e9c1dd637abf14dc096d0a20693ea0faaec00354898e11ffbe512dfa5197123058444df2c80c6448d8dcdafbfd049aecd39118c2ef167ed9b76480d22b8459a1f5d23da6529891767bc14f9e15d735b0a0dae62032ea9f12f71509558ab7131ed04d553fb62b007515723ac770ac5e685d1430b83b249252265983fea4c7078dda4b213afcef3d3df56caea4b35ae2ba6c08b5295f602f22593545ae742a1e58d52fdb1fbf3a807a8495c985943807102fd50801c09f0100e5012674922d4be3d02cc621cf70ef413cb37e12322a239c3d8b7badc8005a3807069afb747aa8b5d55dbfd4127ffbf5d5d558ded329a75942c5aa7f233e876dc7054f721eb537f6bbb88bbe4df9fb94319aac8f272f702304be6cddd69ec1951cedcc565a9ccaf9d3ad0015bfd200420f36ab65c1b6d441ba966fa37709ba6c4324e1845b0169a6fab11c7f0d10ced6515b42eb2452f8de8da048194c225b21ab73db5f21b2c64b43e3ee60cd746c965b67bbf93f272a0ee010d845f68654b6a41f61d17e35dc8e94f41b6f42c7150c73b50442c638abba952e586554daec2af5c86e8181293711d046df9cc6f1831b3ec602669cefb435fd3fbd36d34a53158204dbfb96a3ef5a729264bc4f0df176df2bcbc2729ff72f304a9e875ad081e0cd8ffea757ef1d3c918dbf9f61b03f3027f2ba0833429fd611d7b238e581e2d46ee431856c9fcb46cf5b572d3121d2020129d5bfb07d4856ebf75f502eb80ba7843e4fd082affb31d8508e334f31f519101b03e05e80b95c5818dc4dbcc75f32806d950552a610d9015f3c62b976ef5c4c03d1f59807de9f2bde32d6e29c02251ac3217977c8d0ba44b59db9cccaac419f401c3e8797f237824abde7f3db639e749610ff04f1351074c5b8f05a04bd9f3a3db1d377c769d79a6b27b31e23c63dfe6ddeada91089be99302ad5a9018a3c38643fc81241530acfd64fbff2f0c58fc1aab4607f2595d7a361448db8f18cb5c8790c3a10f34c0d46163916540429578028687e3cb396aaacd920d35ed2efe151a359dee2cf211da8a0ae6567216b02252c9382ce6e0f1e6e6c00e824891ca4f9e8615baa834385fc2debce6ee96ef819b55821147b6a7781780c9aa506e645e5922a33da9df857accabca617e18fe0c59f92b15c3ab513c566e93d8123f1708c6dc15ce12f1b02d0a8cde23195b282d4f423d95f3341f0fdca6da853f082bd956e61cf3c3572fff703
391cd0efdd3f81adf18b6a55471e324c242bd392cfec4c49a64bebeb24af97be4a151b8c8ae9bf69b8a7a98763e04dc01507e0a397b7311baee5e915903f705f282af02bd689740b87b5c1d591b03559d9200c8aaf805c20ba3eaea4677fe3ed765f3d01f3743ed67dd5068496fbe6b9d60ae2a7d616968f4e27e2381559eb9cd9ae3b6bb50435aa5007eebe3e3361f430d04448b5699e87d02f11713a7826a29a3ad4a5e28e6173bd7c8d5e7584e5313772e129cd037d0d4f4a0888843737dae9e3ef1c043a42c3344a2dd8812352f3bc29276ca78859f63aa13c708fdc0de7dee0ea53fa5cb20509df51e2b4a705284909d17cba215a9b7fcca77bf12b6524daf13fb4d14ac70ca864ca6b231c6d325bd469efa836f512c64fbe14d0139fa9e6d3869b603e5e85645307901f8192bd437c359b5c9e27d051265c515ffd9b9aa46c516741efcaa96c73cfdc3f19abfc0d485a7ea268cae9e27b0cc2c437c6f712a450f93d40e684e8abdca99c0c338cf53c1f63971eda13f5320b77ce50c1b941f94614b45c47cabf5b2079bfc56b083a78813ab237963d9027f1c3a211d3a9febfc8d3ab553467ef77beb34ddeb0e7571b9d7af97af68a355924e49eabfe0c6b13e873100edbbe98ef67e7e887c6b60983a7d24e8c9a0dfae1e92b966a041eb801ac0e61435f21558add884e3c1d34a708b20107bc7787afda2127546b592b0a5fdfa66ce52a457d09d84e64270e5f50cea25fc6d7c89c5339529a8d8bc39cc7253de7a278e00383dbfad6c96d023fad2bcec93783a2a5fff551b00f3aba282bc429f33638cd9a316b1321cb79f159d0b6e92595306d8a8451693464c5c99c5cb39821647a5b2398ec6b6add0b51285dba083c94a897809589607f57fd05df1298c95cecc914588f11c05c3d831d8affcf8a19dced7f87538274770033ad94d29987ef11d48aed8a3f6ce9550929ca664c8396aecff8d13ce9a8b5032b9ab1b0846c54a40666d585e1d33ab7b05cdc9d0d5a326c57682c270234927ec48811e46a7234222b24fb123872106cff3891a2d5cccbb0f8fc580ee60cb98fb29b99d2599c740b21b1120851305aeb54f807d09e36f522f41816b83d84bd4931d107c87e44fefe642c6fcf8ecf4e27e7fbbfc03bab1952f70a9f8796e1b8c17198b72abdd08d3f850451fd440a0153209153500214f1c464245f735ec38739c20f3456a762f2acaeaf03fa12d121a52d3a77ba6cf4290cc607db0f896821848633a093cd99e5860783d8242829c5893d6349a86f0cd7412ed5e3835fc426e037b151ae79fdd43c30494ce4c24587e7ff188d92099a77da75e1ca1bc9bb754ceec0cc98229c7b1d48706c1fcb69b3e637ec642282bec7bafb5b79fa8ca776f3e019cb4978336deae00382
7c6bd1142bd6c586b83dc99b41b0341c6388f6e90586af08fa9affe7552e9b1cbfb74337f752ab07557db1bcfe9309dfe36747726053692a78f782fe46cae76d4bd77375f37108600d0b31ab3d3d27baf4b88eb849e5427798117b9ee1cadd438c41811da921cb83c8c9f5fe8f7e8d94089eb0f2b5a78d0c005af9a4760d4065744d63c06fd9c129d869f503371b091cd5049cd5e11aec5cdd6d3775649f74ba0fbfbba0ab2008b07af29fa19a20c2d8700c56a89229fcebecd9e60e7c8909ebe8dba4cdf355606f72966c2316cb47f3bfd4b2639b3a4ed1c2e9a9da3c4066e06456aa3c65ae89d9c5006baeedbfa30445b3f5127d2c2f95ea185c7193dd9f8a1d8c3d2103b677bdaa788df2e02da7674bd0bf6ae9d9e8886dafcf13898301791f201a22903d79c16f2d614d4ca423d15e6178c8df798def33d0868b67c1b47e135fbe39f64feba5275db14db265e615f018f9c7023f6ced81ba7f2c5b8aeb19379fa0268044c73c22b9a3ac8ba1fe4596437292d7cf2d8ab879a3de2163328edf62d24ea916ebe55a7fe676cab6bfc9af58f20ab2afefefd0753b868cec108ab9529a148df83d567fbc85be3239429c817129c1a7b72d4006a4df2d6084a56a728ccb556d9b8b3cd800c3485e69e77868cabb86968fc886c64b3906beac2b009cafbf5028254048c1baf54f307c36abe322e28eaa1848209c307a6c9bef1e0dabf6e363e906ce8b63c483a9c5fce0662f3bb863b41dc2381eb059b6070ebe84a1819d930565531dcd272e5786b68171e538ac74e4d566f971ccad45566dae042bade40c8b1c762f0286213fc8e62ab2fb94e5c91e7f1f70f1ba89cd68ecf5908a59fc68cb9c1ff6c6adcda39f82380cf1bdda81936a2436063daee9d3d2be9830b5011fbcbc45822256c381236c747d673dda2e860cce0513c8a9278cd497fd030c005f7399ea1e77354802151a10e09e10095443c619a837cc1e5eb7c19fc856a2d98f15fd63e061eed268353b68569f20d87235b378e8c00094a47ac11acc1a3141b79d303bf7e7dbeda84d7990521d6d7633edabe4acf2b5b5ee099509ab1a54cb2d9b81d38b8f1dbf3ae1b809e8911a47192bee5a7559209f0830c19d1354b8d07efc6b21c58d751e0257fbd3ec146f438aeeea0bcbf5021fdf36eb007598ca6ea6c8c65cc473d415393e610bfe786fe9d033de60135375ca87a602476044bfb12139aca88d9c7b9471bc6103a7a4997f40086ed7e91e6f84831721c624f606244d4f78a57fd45702aa1fbdadf8b2e898b8c75e731c12946fc855ce0925d0712296fc324773bcc6102044939a74045eb1a09d6b7ec48e908667c8be9b286dd905ac9ff96b75faf467c6955f0faa81da92e736858eb8f6207c4825e2402ade740cb6b8a77f22
4ed48229b5db0d6ecb3587dc81311eb55d13c064d19fdedb2d7438fba69831593660903de789e520493f3aafa1e02a8d09828b8c3d6e52d2ece1e7d780e6091e3c05e6bb119f809de4dd26b035464709d5d344153bf5f9e73b6b4bea598352bf9eb8c1e30419a287460ce9bf13e450e34db7ce5bb9d5fde36a77910d360fbbc38117e56229656baa2ab59950a5b022e504da201b8dc9739b285c8cee35a1b0b048ab049c9af6ca92907b9aae67519dddfedec216bf2142c2d7903b41eb427b8f81a5273ee791929504d0bf46cbb912dcaf63ebf54e136c404f70c7bed210dee362aaec98aa9af7db3842f2499477ce41753bf989ae0f5a7343d6ceb17d5e3050bfb5d1341d3f9ff4da1c2721fa0de0d157a220f99244e2ed65edde1d8331281db71f2d969d6853b204e6ec1a41fa98135b0f7785a53038879d76e7fe541693059e272d5cd97292092448dff0f878ee5b3c03834b3c93038dab94fc11db11cfdb5e114f8bd6bbd153558e367d6d09cfa380a3d488cb03f69e00549b7ee3098b2c0dd90abc6b408724c1fd6c399a589413cac5717b510ff8be9b446ffc665b7f3917c088949b4c4a1bb2d70bec4e4ab2604b3219c03e10e383fd7dd508997d076b655ba13b447227cfc35a85905dd639f5a07295aa5097c34405805e2ed22118c2bad0fd6251fcf136016136a4eb7b976946c82018a08445afa23fe55e094be509bbc4c8714d77ad471bef9bd5f2425118edf45e3b26433519ce26fdc694a1ef1fe74fdb3ad9e3ac3981e0d566271b6f0c898517415e1db8efcb5405b96e875d299122ce0c60b60663d500e857dd938cd85923452de6d1d6ad4333481fc6a37ab5c7f68b8fa1f6ca4a2e91e692170cc6e622e74177c0cc0a78f9983db0f079481e66969bf4d7c73a8798c100d20865decfa96bc4756c91290c926942ab99dca085495353d7bd22292e9752d49d58c37f497a52d6295ac4fc4c80e4830eea782ed18853890ef0458d72ed323acaba6a3bd5a6859471df005ef74fc92a8be5a59504772e7ce97bd44345dfb024fd4b2d27e54e7c7b7d10db79c6264610afa809b5ff5ea46c19accfd2f14aff4d6739ab2aae527f3a6eead92d8e7cab6dd391f1a6d6266fb44b117473032536d26f47227dbbd81794ed1f714cbc3299ddf6c94fa064f7669bdbb7b5449f15a3887fd58e01dd877639c82563ef5371c315eb5bb90fdbfd4269742415bf521fba095cb09bbd4909b1db043bbdfb0e06d7886e0831600496493f0e01a68087688c4110afe9e813e4a24bf4eb29cda096f9f37e7d35a8b9bf165ab307cc5b592e98f37e3238079ae4006bf5a4c554ec6852cfae497a73f02bb4613501fc8b1f88f15db5db5875a02e83c603e5ec5ed8bf1272f09f3a0ec34084b5dd5c6db453
275588c945dad2425ba181cb901d1cc6cee4011a29aad6677107555df8f45430420bfba8eea73583a2e3aedfa2117ced9ee78ad1c6583d243eeba54378fc1e310833e2a5d5ffb9ae108f4a233ba746f816640e0c19f1c1a5bc98d4a89b9f9edbb423c2f4f348f0264554a762bfca52aa89b20832199339b31c2c3a18ff58ed5a5b8920ebdca525132baf1020bfada90e8366a98f1342c2dc651892e0d2e23c248914e2ee85fb36ec70a8757cf8f28084d435211a6646a19b6a2e2ee79fc6f6956d6005ee2a1edcb71c325a143f5d6b26131d0ea3e4d5f23ce59fcffd6f61f8a09500e741740157e1d9e4770cd186efd60e8d4c41c4eff97d018d9fa74bc4ae53073207a3aa7894286d5f15f6ac950434ee7a3c20ecb4899c0b4159f22d9d455a7888df10ac1c42ceaccc712293cca11bbeafbb8dae54ee11873eb4dd491506ff59e975d09cf5b0e47ba3738b91d5a931f994d7db644eed93f2528a65106ee2254ac9252eb676afe00ec32afa12bebc06c9fd69d16cdeef3afb5cf20d248c83350cb5db58e2b19662fe3be390fa19ae6bb2e4b8510f3df1f68fdf33f72670cc886fc9ddeded1784d9edb9e5156974d4aeaa14bba5e3aaa7955ec89f4a2ba90bd8b253b1624dd29c8285c440d447246685120200012546094c9b92fab9af8c9c8380c668dee1ebb48ea63cf5d0a4eb778b208379930255bd3a848c9e5b42b34f64835003976b72cf0903b7554563dc2cb7ffd53eed6af589c3899c42327d4308239a296701cb71f5e2533a5aa8a1820d8598466cd636f9fe2d09a59c22c7c877248386dfb35244955ee964d09347b2c25bb5174994dc5fb9cab7f83c449d7daae1ec9778395d66d9d75bbf1336be390dff3597ccea2e9c9aab3f2e51a322a55b21c6eaf667316dc78383d83ec2f967a2d8ee81d40ef01bbcfe3a2af292d2119982c64bac0120c3e26be9bbb7b280e7e47cc70a31757f1a2151da89110f961ff67f331d120ed81d83a5260c00d6fdb7c5612f3cb6138994905805eaf474dff09ff114d7fe3befd73cd41bc6f7936d7ffca40ca8a853646126db30af866c3b09244603d978ec95f95d102bf3e224373e0487e8e721137d48ca0beb059befdcaed3e2f07a7671" + }, + { + "access_list": [ + { + "address": "0xde28e38f4eb984c5d7c7feaac029a24e712cfe83", + "storageKeys": [ + "0x314af2d474b5fadaa246d3f465c39e48b9b7519b7639aaa73c62679b109ced51", + "0xa3c603ee7c0c5ecaf3b272bb3852e28391f26bd86418130ccfd118653ca80ad9", + "0x976a0e7e131ea57b4daefa958b5be2395189f5ccd8855f5c071d18f142a02324", + 
"0xb419d19a92bfb1eb8e87be4f85950048e1c10e738c211bca941fd08edfc25a32", + "0x2216d601ce9c6fbcf96f8cf899475c4b53b6692464361ac7608bf7bf4d4ac68c", + "0x078f9a76a3873c1c4e6f7c40c4f7316fb52e7981dcef558bb1aa0f88cb54fb66", + "0xbdd9122c8b19620878d26ac9057654c69b45cd29af5398e871fc8ef0716b6bba", + "0xc52f7cff76b415fa2568936f7602dee4f5ef90392bc3dabc2a0fa0337aeb6756", + "0x6556a530336053cbeb0b0a50ba4dc4c28106c912986864c4c2bd2aa0be33596c", + "0x82250c7cc04d7f2e1d123f37cdac0564c83fbdfdfb1edf1966f91103692aeeb8", + "0x529ca7e8e3c2a6184660060e81e7e5d2b067c458b832fa6bf52cadd4faf5f33b", + "0x0d86a9b07a3b626f66a8e1729c6a12d526afa45bb83d8b0a8c991a11156bd041", + "0x71813fecc668c9765beafda16d724d65cc595b783d9d78cb31e8e0c8e512a7e3", + "0x4a28a27c5716dc47630c7bc370523afeb2e35c6bbf40a9302b2463e0132d40c4" + ] + }, + { + "address": "0x2ad1e781cb897a9cb5d9e639fa31809d713faa66", + "storageKeys": [ + "0x764ce477c4f8f27e6cbae68c1dc2223b6baf72cd08c960e4701e030edeb9172d", + "0x4362eb9dbb3146eb374ee1a08157d3d6b42774e0f49abaab74cad74bb6bc1056", + "0xa9be63a72e4617051f7a2e663391e901959ebe1e0c1ff55c439bb901e576d4ce", + "0xf24fe965b1825abd7555b23c2e982b5ff22bb6c364220a31d69c24510ecf9fa0", + "0x7dbe6fd2574a400dde1a1e255fbb92f719457e763800b5ea4417c8b2937e1e2a", + "0xea962f71ea9ae24d61d6ccd1933c41871e8bcc2842c043c67c93bb893c53b564", + "0xc80ea315d940df8f789b0b9320961a6ce50f9d67818a8f66557b3c92e8319b3c", + "0xe442d48bff7bcbb8c7e55ee4cf6411278a4871d771dd783d972091a5646fadc1", + "0x88960a504e059ce02a3000fc85d6866adbc1c71a9f40fd5ed13c02421def9d20", + "0x48249b85536effb858304ce550e10fabebc730f360f4c2d613b079c2f3dbf963" + ] + } + ], + "encoded_bytes": 
"0x02d503de28e38f4eb984c5d7c7feaac029a24e712cfe830e314af2d474b5fadaa246d3f465c39e48b9b7519b7639aaa73c62679b109ced51a3c603ee7c0c5ecaf3b272bb3852e28391f26bd86418130ccfd118653ca80ad9976a0e7e131ea57b4daefa958b5be2395189f5ccd8855f5c071d18f142a02324b419d19a92bfb1eb8e87be4f85950048e1c10e738c211bca941fd08edfc25a322216d601ce9c6fbcf96f8cf899475c4b53b6692464361ac7608bf7bf4d4ac68c078f9a76a3873c1c4e6f7c40c4f7316fb52e7981dcef558bb1aa0f88cb54fb66bdd9122c8b19620878d26ac9057654c69b45cd29af5398e871fc8ef0716b6bbac52f7cff76b415fa2568936f7602dee4f5ef90392bc3dabc2a0fa0337aeb67566556a530336053cbeb0b0a50ba4dc4c28106c912986864c4c2bd2aa0be33596c82250c7cc04d7f2e1d123f37cdac0564c83fbdfdfb1edf1966f91103692aeeb8529ca7e8e3c2a6184660060e81e7e5d2b067c458b832fa6bf52cadd4faf5f33b0d86a9b07a3b626f66a8e1729c6a12d526afa45bb83d8b0a8c991a11156bd04171813fecc668c9765beafda16d724d65cc595b783d9d78cb31e8e0c8e512a7e34a28a27c5716dc47630c7bc370523afeb2e35c6bbf40a9302b2463e0132d40c4d5022ad1e781cb897a9cb5d9e639fa31809d713faa660a764ce477c4f8f27e6cbae68c1dc2223b6baf72cd08c960e4701e030edeb9172d4362eb9dbb3146eb374ee1a08157d3d6b42774e0f49abaab74cad74bb6bc1056a9be63a72e4617051f7a2e663391e901959ebe1e0c1ff55c439bb901e576d4cef24fe965b1825abd7555b23c2e982b5ff22bb6c364220a31d69c24510ecf9fa07dbe6fd2574a400dde1a1e255fbb92f719457e763800b5ea4417c8b2937e1e2aea962f71ea9ae24d61d6ccd1933c41871e8bcc2842c043c67c93bb893c53b564c80ea315d940df8f789b0b9320961a6ce50f9d67818a8f66557b3c92e8319b3ce442d48bff7bcbb8c7e55ee4cf6411278a4871d771dd783d972091a5646fadc188960a504e059ce02a3000fc85d6866adbc1c71a9f40fd5ed13c02421def9d2048249b85536effb858304ce550e10fabebc730f360f4c2d613b079c2f3dbf963" + }, + { + "access_list": [ + { + "address": "0xa306b11035b593910c3c8e5405669640dacbdf69", + "storageKeys": [ + "0xbcee31d3e9e7a9cc989ace8c56cbcf412196dc49eac584d5fc4e0298666e70e6", + "0x14dd883ee3bd9089c3038f1b8620bdcdac09d86963e7dd18bdee2d5b2043b63e", + "0x14e84edd0c242ebf70f2e2ff1ee82fa560db80ed57de10a0975a43d391c2d382", + 
"0xee01a363b4fb8a8cfa94bee44912c18fac9c4b9053f8fddcd7bb642951972d82", + "0x6f278e7ad3434b53c1f6cab6bc02a09952f7b6e4ac4154de61dd1be81a17519f", + "0x9c5e478617b1f02ddd4258a55cfe57af1bceeea5268e921dd521de06228641d3", + "0x940f81e03631a30ba6a3b7b7b98c3d266fa360896ec62a4693b40120fdb4ec4a", + "0x9f801c1432139504dcd182b92b7c2fd224bdfbd35cb2e94e9583f06ff278c560", + "0x760717c5aae950143d110d011771b15e205114b7317a683cfb089ca0a9a446d8", + "0xf8b1e85086d5971afe166e054f6bcfe8241fb029d4d598d29f2d6eb08b7764b1", + "0xb2c15a20e559ea5e6e8af80ef2f28eec3b2097add62252f3c845d06b1c143f80", + "0x65cc51e302cc6ab2980e85ab43962df147e979c3575c4e860cb5d5ec89540d39", + "0xf13a088ea3682300d6d1323107c0e2d9d55548d05c968539319a402f3b2d7e71", + "0xdcc4a4817718d3dcdc42a6da74ffba6bc37171658964fbfa57fc47b6e070891c", + "0x470d9a6549d5c5dc2bd273fe4108c277c682d905fc1a38e527f5dffcfc084c0a", + "0xf5dffab055ca6e3a3a6bf4c01091342f5824e7c8f68668fd12e1354bf4574512", + "0xf631e04ade2baad469518dd3b3dc271df561b45c7ab65b2f8a7920092da93ff6", + "0x724eb04da6f88ad42a9c92f0a8178adcebdb0fa3b3b5b36a33ba0388aa8f1190" + ] + }, + { + "address": "0xb4e213664fcc44742ae67317b1e1fe3cd7d0300d", + "storageKeys": [ + "0x11aec02f61ae1fe299a656275e0542ad28c9136a72db59198191cf3fdfc9b1ab", + "0x1dbf3fd13e0ce9367fa6851f774fc78c414c7be487e3ae250ebcd4fcc49df883", + "0x202ffb4c3b7abb00b02aad0dacf0a39c4af5951b3a9cba047dc5568816b8d79f", + "0x7e76ae961d49703a068939ca5a27fdcd7ccf6a956585205400d25663731f5e91", + "0xac9728aed74eb03a73c8b134c61596d30cbd2880693212479fb44ff65486731a", + "0x64a7a63771bfd51e56b8e5ebb1164315e1215a0931de7f0e74926bdac334b2a9", + "0x62afcbcbcc4b47b6da5d0a8a84d2e9b5b3884bf648fe38babeac20325745dae2", + "0x872fee835e0048b29ea93a6798180a06614fa046f0248a87a55e4922da5ca46b", + "0xa6d26b43ef3ea4892c12497e31b6a1d2b975474ccb98e451ab984d40f27f7220", + "0xdc240997e9aa6022d71addb407bbf78587531c663d26afb7daa0170e584fadcb", + "0xd0291fdf3c0086d359d9ead6c33647df4d1d8cb50f680d7a37cdc3c7f6ce98d2", + 
"0x4109a1a38947823987b1893ff73405fbf83f6fb0b11cced4d5e517c1222874d0", + "0x3fea4a5d99cb50e226515a66cb6c05331f540675f4e32fb70e78b7e9e86fa816" + ] + }, + { + "address": "0x8e600ee4f48dfb3fc796182b14a24b6b6ca28683", + "storageKeys": [ + "0x4234e4922c3021720b600914c8d4517832d4eebefcdab828b3cb3e03f6897643", + "0xee4c6a90a7ffd81540b0fdcda2d8553eb625fc2d96416277ce995cde50f48036", + "0xb33a1fb3ef1ef3f95a92f1e4140c00e4a6aab238918c0ef27a90e1760612c601", + "0x3ddfa62be67d9b689795fa9762d7bdf65deaca4a79e83e56e23d15750f2b9a25", + "0x6afc2ab451688df7806ffc1fad3fcc24436b7600ba1a56a284afbe1bfc6f4716", + "0xbe9adb8cb48f167880519f77f668f2f069068cc44d6f017e29f20ddf65f988cc", + "0x44480eec27b7e566bedad77ce4f70b4568aee70939fdccc7ea98c6740f5ef9ed", + "0xe03687240aad5e9700d48cbaa411983035bf9e683505942991d8963abb78bc8c", + "0x69948ea57a2bb58734e49ebe273fcc6573bc79c2808ac592babecd64770e942e", + "0x7b9f714ddc423b16cdbe2073ff2f2e3af806a6190c2db507bb623cccf17e09d6", + "0xfe0ea1d739fbcfbcb33daa62a15d0a7306cedca3b357d9bbbaa0dcbb7cd5112c", + "0xc57f817ca1c285d686f1b60523f1e2063364fc6c040ee7501e37bc40d40e3a67", + "0xe7e597ae487097620e35cf9fbe96e5eff45f5d7439c86c768580b1dad0e1702d", + "0x4422b926eebb920ec58cacfebbce61cb8baf7578f868219757c611d464577af5", + "0x035c16294424929cb3424914292697584786e4ac05f247c8de7ea7f0f648b5b2", + "0xee29d73fd02feac90643de600dde07e1f08ee49d56d93e371f861b1a709da6a1", + "0x5550ecf4d6479f13e13286ffb64598c019250977687bc810c334a2dd6b2e130d" + ] + } + ], + "encoded_bytes": 
"0x03d504a306b11035b593910c3c8e5405669640dacbdf6912bcee31d3e9e7a9cc989ace8c56cbcf412196dc49eac584d5fc4e0298666e70e614dd883ee3bd9089c3038f1b8620bdcdac09d86963e7dd18bdee2d5b2043b63e14e84edd0c242ebf70f2e2ff1ee82fa560db80ed57de10a0975a43d391c2d382ee01a363b4fb8a8cfa94bee44912c18fac9c4b9053f8fddcd7bb642951972d826f278e7ad3434b53c1f6cab6bc02a09952f7b6e4ac4154de61dd1be81a17519f9c5e478617b1f02ddd4258a55cfe57af1bceeea5268e921dd521de06228641d3940f81e03631a30ba6a3b7b7b98c3d266fa360896ec62a4693b40120fdb4ec4a9f801c1432139504dcd182b92b7c2fd224bdfbd35cb2e94e9583f06ff278c560760717c5aae950143d110d011771b15e205114b7317a683cfb089ca0a9a446d8f8b1e85086d5971afe166e054f6bcfe8241fb029d4d598d29f2d6eb08b7764b1b2c15a20e559ea5e6e8af80ef2f28eec3b2097add62252f3c845d06b1c143f8065cc51e302cc6ab2980e85ab43962df147e979c3575c4e860cb5d5ec89540d39f13a088ea3682300d6d1323107c0e2d9d55548d05c968539319a402f3b2d7e71dcc4a4817718d3dcdc42a6da74ffba6bc37171658964fbfa57fc47b6e070891c470d9a6549d5c5dc2bd273fe4108c277c682d905fc1a38e527f5dffcfc084c0af5dffab055ca6e3a3a6bf4c01091342f5824e7c8f68668fd12e1354bf4574512f631e04ade2baad469518dd3b3dc271df561b45c7ab65b2f8a7920092da93ff6724eb04da6f88ad42a9c92f0a8178adcebdb0fa3b3b5b36a33ba0388aa8f1190b503b4e213664fcc44742ae67317b1e1fe3cd7d0300d0d11aec02f61ae1fe299a656275e0542ad28c9136a72db59198191cf3fdfc9b1ab1dbf3fd13e0ce9367fa6851f774fc78c414c7be487e3ae250ebcd4fcc49df883202ffb4c3b7abb00b02aad0dacf0a39c4af5951b3a9cba047dc5568816b8d79f7e76ae961d49703a068939ca5a27fdcd7ccf6a956585205400d25663731f5e91ac9728aed74eb03a73c8b134c61596d30cbd2880693212479fb44ff65486731a64a7a63771bfd51e56b8e5ebb1164315e1215a0931de7f0e74926bdac334b2a962afcbcbcc4b47b6da5d0a8a84d2e9b5b3884bf648fe38babeac20325745dae2872fee835e0048b29ea93a6798180a06614fa046f0248a87a55e4922da5ca46ba6d26b43ef3ea4892c12497e31b6a1d2b975474ccb98e451ab984d40f27f7220dc240997e9aa6022d71addb407bbf78587531c663d26afb7daa0170e584fadcbd0291fdf3c0086d359d9ead6c33647df4d1d8cb50f680d7a37cdc3c7f6ce98d24109a1a38947823987b1893ff73405fbf83f6fb0b11cced
4d5e517c1222874d03fea4a5d99cb50e226515a66cb6c05331f540675f4e32fb70e78b7e9e86fa816b5048e600ee4f48dfb3fc796182b14a24b6b6ca28683114234e4922c3021720b600914c8d4517832d4eebefcdab828b3cb3e03f6897643ee4c6a90a7ffd81540b0fdcda2d8553eb625fc2d96416277ce995cde50f48036b33a1fb3ef1ef3f95a92f1e4140c00e4a6aab238918c0ef27a90e1760612c6013ddfa62be67d9b689795fa9762d7bdf65deaca4a79e83e56e23d15750f2b9a256afc2ab451688df7806ffc1fad3fcc24436b7600ba1a56a284afbe1bfc6f4716be9adb8cb48f167880519f77f668f2f069068cc44d6f017e29f20ddf65f988cc44480eec27b7e566bedad77ce4f70b4568aee70939fdccc7ea98c6740f5ef9ede03687240aad5e9700d48cbaa411983035bf9e683505942991d8963abb78bc8c69948ea57a2bb58734e49ebe273fcc6573bc79c2808ac592babecd64770e942e7b9f714ddc423b16cdbe2073ff2f2e3af806a6190c2db507bb623cccf17e09d6fe0ea1d739fbcfbcb33daa62a15d0a7306cedca3b357d9bbbaa0dcbb7cd5112cc57f817ca1c285d686f1b60523f1e2063364fc6c040ee7501e37bc40d40e3a67e7e597ae487097620e35cf9fbe96e5eff45f5d7439c86c768580b1dad0e1702d4422b926eebb920ec58cacfebbce61cb8baf7578f868219757c611d464577af5035c16294424929cb3424914292697584786e4ac05f247c8de7ea7f0f648b5b2ee29d73fd02feac90643de600dde07e1f08ee49d56d93e371f861b1a709da6a15550ecf4d6479f13e13286ffb64598c019250977687bc810c334a2dd6b2e130d" + }, + { + "access_list": [ + { + "address": "0x4f71c3b9e1c17c8fc7d4821d4d6b6e2101c53f93", + "storageKeys": [ + "0xa56774f6c2e00d8f0a5b866017b2e195e11bf43d0f32a11ba272f0600a5c1ecf", + "0x803558be88930ae84624cbdd30397a199be63d7a29755b15985dcb03102c83dc", + "0xfd6857026bfa851e401049a2c181febd2defa1993df2ac4caf59d93019f5a76e", + "0xcd9a1481f2ff58b08c2f32969e0b7ac6f71e97abcd951a1de582b99e08dc451f", + "0xe933959444542a8f71b1f7baf7ede152eb4258922ab090131f307753bd5bb4a4", + "0x273395910a5b7fc31ec1b779545ed3a6e44d21f614da87751385f36858bc80f6", + "0xd0bb2fb880cf5061bda457d212e87fb485cdcfaa4ecde9075a0546a186f00e57", + "0x67726346ddab905c0fd7ed204ed4754d0f4e725a553b9029eccb51362d969736", + "0x84e20702114419fdc2db417f7dd21f435a489416164bccb813644f3b8f09f850", + 
"0x8f91076a3fd5841dbf8d1287e09057b94b1c8816752181d9588be84030b9d1ed", + "0x81dc85cbb693276910cd3c5ff2495b5f8a8ca1613720588d7d9f28ec03abc3c6", + "0xa6bc0bf22dcbd3307be6022172194066fe58df60df29cc52b6535ac6860d50b6" + ] + }, + { + "address": "0x1f11d3b12e4a4bf53ec6d327bf642a63c2a5eabe", + "storageKeys": [ + "0x4d445e2503908848bdc743ae3954f2502b49aaec6470d79973385cd55d6b8a98", + "0x7ba3a97f6beda31a27aa0d1814b4b69f491941753f5f7c9ecf28de622eabc56b", + "0x48f7d285742238a80bf9009b126538ab47aa788bd9b6ad4cfbb130d7e54d7514", + "0x720a9a08134f4ed76da43d5e0ad407a7467f635d490e38e1208f87b247186245", + "0xaf70d3f32d6e278e80880eafde3937f7d965decafa178e8d4639fcddd2f5027b" + ] + } + ], + "encoded_bytes": "0x0295034f71c3b9e1c17c8fc7d4821d4d6b6e2101c53f930ca56774f6c2e00d8f0a5b866017b2e195e11bf43d0f32a11ba272f0600a5c1ecf803558be88930ae84624cbdd30397a199be63d7a29755b15985dcb03102c83dcfd6857026bfa851e401049a2c181febd2defa1993df2ac4caf59d93019f5a76ecd9a1481f2ff58b08c2f32969e0b7ac6f71e97abcd951a1de582b99e08dc451fe933959444542a8f71b1f7baf7ede152eb4258922ab090131f307753bd5bb4a4273395910a5b7fc31ec1b779545ed3a6e44d21f614da87751385f36858bc80f6d0bb2fb880cf5061bda457d212e87fb485cdcfaa4ecde9075a0546a186f00e5767726346ddab905c0fd7ed204ed4754d0f4e725a553b9029eccb51362d96973684e20702114419fdc2db417f7dd21f435a489416164bccb813644f3b8f09f8508f91076a3fd5841dbf8d1287e09057b94b1c8816752181d9588be84030b9d1ed81dc85cbb693276910cd3c5ff2495b5f8a8ca1613720588d7d9f28ec03abc3c6a6bc0bf22dcbd3307be6022172194066fe58df60df29cc52b6535ac6860d50b6b5011f11d3b12e4a4bf53ec6d327bf642a63c2a5eabe054d445e2503908848bdc743ae3954f2502b49aaec6470d79973385cd55d6b8a987ba3a97f6beda31a27aa0d1814b4b69f491941753f5f7c9ecf28de622eabc56b48f7d285742238a80bf9009b126538ab47aa788bd9b6ad4cfbb130d7e54d7514720a9a08134f4ed76da43d5e0ad407a7467f635d490e38e1208f87b247186245af70d3f32d6e278e80880eafde3937f7d965decafa178e8d4639fcddd2f5027b" + }, + { + "access_list": [ + { + "address": "0xab32c4c93c6afd0583159fe6b3a66a42e8ea1ec0", + "storageKeys": [ + 
"0xdbc7783e36548259a0beab2aefb73f2f0cba05c123793b5a46e9428cf19d27a6", + "0x23916305cf9226c5fd1cfb50e68dae113d05bc65edc201d51be69d8031a34728", + "0x75350e55b169f00cddd874192f6d2180e330bb755f10a2e67c5e125ef844c1f3", + "0xd0fd9da52fa32fb7f589c3ee716615a06f3bbc5200ba4b43893080ceb55af864", + "0x655bb869e501eb984c0cece5881c94e23c9c7a08442ab858f2892678c2e092fe", + "0x34398ade276078e1ba23d556a77763ce4b05d5366c56daae0ec77f3919d55ebf", + "0x7b057fc436351970ccce3e5a814742149bd8d5a19c6386cbb5a9f06be9cde0a3", + "0xd2fcda2e7e6c312ee34fe3c81dc0756f652b7aa47b06f3bac8477affc764ead7", + "0x3913c94ed933e15696c182ba3482370fd141d5059051872be03638b1fbc38982" + ] + }, + { + "address": "0x7a24dbb264c8fc9b7a86b8c02365ab13e7522499", + "storageKeys": [ + "0x95e47b82f0c6c13df59458c324f9b7f5eb3f20529d3bf6316f393a1d728a4609", + "0x9f361d50580c9bbfa15cbe3c45e89f128a0635b6fc401ccced12fe86fbf9fa27", + "0x879e44e0a3bbf41d907a6254468082113e19a2a590d33258c6327deb5ed8a93f", + "0x747ad91ae76470eddc68ca016ceae2ea24deb0cb57b6e9521b75b66487949346", + "0xe9299cd7a2ad6d77c4417016f582c7a284b9d14d6f716305592b600e6a7ab500", + "0x0b5ae6d3821707491f0b5e46b81884fb6240cd9c41c660ee2d8e7cbcc33100ff", + "0x466aa37f685ae53bae64836ed995b4287945433a7da25751fefee86982a8456e", + "0xdcf7254a6c352e8c94a2647231cd5b2c42e71861a7b9992285f6d2e02ffdcae1", + "0xa6c2b42cec453ea728f79ee0deab98c0bac568fdd2902c9dc4d5e91a93f0a5d8" + ] + }, + { + "address": "0xb4fe1f4253a1e47e185d59bb5e8b55535fc2fd0a", + "storageKeys": [ + "0xd41e8a51448df25cb3097a4c57c26208e6f845ae9359a3edab3938ad9c351bfb", + "0xb909db1438bcd838a6901940250183689afc63c5ca5fdf1c79df8d300b069950", + "0x91926367cfd0ad349e5b6592a23b4bdbdc5755d2f1142e76190328c6164fb8de", + "0x470663e0a1d5b86ee2f61d6c446738091ea8f675f63022484d58c25b01b138b2", + "0xf0bc94a3aa4fc62e819f144c95448f1725b3f2438b490960679ede571f3cd4b5", + "0x4413f922757feaa3cee0b7dca414505a721b1c597497ab1da287a77afce10e87", + "0xa888728870f1b160743255069308816e21c805b7ac3f3ded642d027370403cd2", + 
"0x82be4c478a2d97e7a45e2328649d041e1095ac9e4ce283672dbc49435457b38e", + "0x3ec19bd4a84840de64e2d0b3b774f077e712f5ca6264f09dd433b4a4102a3be4", + "0x19852ced3356d614a230e9e2b479638bb49ab6612e158cb0fe631058f341e7e7" + ] + }, + { + "address": "0xdbd3b1f6e13493574896f4b273ecc58ccc26cc15", + "storageKeys": [ + "0x0217e96e640ad86002b8297c9cf8bcdfdde8125cdad9ac3349e2ca17ee0fe80c", + "0xb641fc891e878eeaf30b4072aed506647922839ed2ba109aad8bc637817501f1", + "0x6b28c71f73a863cba110e003d4bec128a4db513bd5034b10b860c26c3b5a6e18", + "0xd345493626136ba97a3dbb8317d686503cf37926b69ed0981e22c3a582d5bdb6", + "0x119fa173f9def20545b5475205ebd4ee195131bd11b460a2752dacfc2531f2a7", + "0x634905e403413e3aaa3646d084d1368de69f6b9ebbd92e7e22b753eb003f74d1", + "0x99f9811ddaa40a485994d7308dc1e6a9c03b2a3ed6a54652bebc102c65e285a5", + "0x5b230a5e215e9bb9787d238070f0a8449896f4b79a0c6e0f6a52549848138025", + "0xdd74dc51c34cbccc5699ca394e8a96b084dc51d6d5bc12693b6223de956784af", + "0x4d9ea52924cf0adc90699096d5eb7aa3e1c0bfcf976e0c5241f6845ef6c21850", + "0xf354832bfdefa15a92cff2fc101fad3d8d0d72eccea803d460f65674d8733a3f", + "0x3886e041280f186ddc37e6e26bcefc84d95a03d0a536413883ec19d70285ed61", + "0xde98b35f126ebaf1da04bf8471ab0924984c74eee9cbe7c03e01f5a86c7d7df1", + "0xf47048be3aabdf207db9bf96edbdf517f05ffab7e430b390cf5ff9ce843ba1cb", + "0xd92c9e7370b3dd1bbbcf104b3be0a8334c817f5abbded698ebcb354a4973968b", + "0xfcbee8aedb25e322e206b66d934beabe38a1c244958f3445c48a80d02e81bda7", + "0x497ca87891ca8b72457b5283e772609d1722098ee768abfa430befb24b1fae54", + "0xc60172415550655ce8270ba6530b3044af63b372fc40b5b5489728481d24fecb", + "0x69572af6ed9eff333d0159ac751b945b15f6ff71b665c77fc674212bcfa7bcdf" + ] + }, + { + "address": "0x5f0f19cdcb3a28d788e6ecab9e56653f6d64a57b", + "storageKeys": [ + "0x19ff8bb91f5c41274808fdaff554dd378f6492da10611a6eb9ad359c60ddd42a", + "0x3d8becf4a4e7401b5e946826e454e9aa827262a3c7e3bf131a51ce471fe38121", + "0x700a69ca9730100a200c27994f7ef632d455c72945739e8d93c2e85bd5f7ac12", + 
"0xfb879ce7a44507910c25fb41c1d28043ea408a689dd20dbd0efa6db693cc0cb4", + "0x0e9a1f47c1e69f7732998b1e5d87fca43bbcfcf84494b2f1779083f1486047fe", + "0x0e594cf465c1434329e5b74ae0d324fc38300e20e300f4af4be38b025a01abb2", + "0x49a388237e60f11571c922c212435a846112c0ad672f183233e10499e1686ec1" + ] + }, + { + "address": "0x1fe5e0114f1d40bda42b48c7ed1d25ef474e5b4c", + "storageKeys": [ + "0xcbac155eaaca62071383760715fa6643fdacf354d955c20e32497cb9cbcc3db2", + "0x4c8d03f7d6466fd9d4a9f60a7484bcda75d538367d2146d744d3ec93fcf1e522", + "0x99493a817e26abf8e4b3d86aeec7baf77eceb5931da1ed4c8479679c6df55d62", + "0x673e3583b4877182a4ee4a9a2c75825eec7b2371b36071445c7976502248bed4", + "0xa01d4fae0e40167e77d92c7e435a24aeeda318c7d0a8e7ca1addff9466686053", + "0xc80a51f7a3f74be38c106273f628ad04f286b772bf189751874135f628a37a5d", + "0x75cc5a75716fe447c7afe4f4bd236830f05db515837b3cc14b41cb441e3c7eb4", + "0x988db42be1a18979322142b3ff8d041615676c4a9d93926913f98c76ac0a940e", + "0x9c64046b4f8e114132b614c14389ba3c55c0ecdf5140888fcc678f00b8e75a2c", + "0xaf107e879e0521ee9108ca2a66781d50ddcbe5f83619e23da201cc2a35018e28", + "0x7b8b891ea5e0cbf4719eb5f9df61b5d403b556e1bb87643cf3407f0dc2e2b470", + "0xdf71d7f7e246bf28817f93470f64b8b20924aa7dc1acfd83eeceb42a2d6f6d63", + "0x8863398a432490be9fa1f64d88f4aca22e9c95aab64813b14902e74ee0bbef7e", + "0x9299af7050f4a897e8ef57867cebee216bb4a27372c1d67744e498f7773f07c3" + ] + }, + { + "address": "0xb2df0017e78b96d03d3a8563a6fee7cffd019879", + "storageKeys": [ + "0x393a7c53d33521bc3b51ecaa5a5a43aad6976642c9a04962791baea95c35b744", + "0x0e023615d7ca8df95caad8a188fc8b4a629b0f91492b61f30d33aaf666491cdb", + "0x31ce1c27da465710c02c830086fa1e85049415f245e1f2241b4c1abdc99cd217", + "0x4cf5c2a7dc76e67d4aa5cf2ac99ec1cb949f8cdd7aef8fca6064881cfddad689", + "0x63c90a744f6fe5b661842029fd735c1ed94cf29acf1ad23e891e7283c17eecbd", + "0x883e002b623e1a6a9edabc847ee14f14313e6cb45fd6d38e08f74670abec8c00", + "0x381c2e7ba34f860e040ece0d5256301d703eaf34dbd66318bb85f6639f052e54", + 
"0xe596832076e4cb5ae4af3b587f12dcfbe9e9851c147a3541ce6f37d6db0f0859", + "0x5feeff074cbf1f59b950fc77b64954981c68a497c97c7cb2304ace920ce15734", + "0xe7a3d679bed48a5179ed3f99e28e4a27da00e2b65df9b024eb00049a590c2d4f", + "0x8a08a18ce37dd333a168693732e5011f602017b82d6d58836b43f81c0b2889a4" + ] + }, + { + "address": "0x457031d941c6a83a62e00474edddfd051592b1ff", + "storageKeys": [ + "0x68a994332d5c4fec0160cd93f1c409fb7405364961fa72b24e3bf66c75b44b15", + "0x234bc1de11831e76ae84ce32bf0bc6311b7c6d1809d2a54ff861679cce2ce26d", + "0xd7c5b6c19924f0b1336b640505ee889f55a44599fa15deba06d38ae93239729e", + "0x8e91c3398b9c4b64e1cb51f345bb04ed22d379c414b8d5eac82ce805b9558c3d", + "0xa000f324b25b472b878624290dfdefa2d7d04bb026eb1d3d0332e62e15e5bc59", + "0x858053a590468abaedf964438fa02e06bce1c428c884bac0d9c55764ade883be", + "0x1716ef1c8302f958e0a621c46eea86335511aa2c2bddc449fd0e70e1cc6a8c1b", + "0x3b3785a3716b191d3f22a1bdf50784be275f1445aade374dc72dd6410697e200", + "0x93d95ba5f34c00e3eb44d7ccb202ed1ac8acc5c8c7fb45bf8d0b537fee663b37", + "0xecc888254a673150801bf766eb94a6a72cb7b4b55d6c4bbe9da1e9a7f22c0218", + "0xac03991ab0aff24f5d44baa8baf642a2dd197bedb6bc6abaf844905a1a5bc218", + "0xda8835d809d4eb4ae820a868c866992e60b39824a008d7d2d3e485551e67d610", + "0x2e8b9f973f534a016dc06ea23d6b9fef21303747b939436c457edeaaee8a1b05", + "0x901bcef6dce2e559a3915148d7929d444cc0afb8e749896a4b82bda19856f2a6", + "0xa3c0a0c3f7124629b36f9825b4ecbc62d53113c55cd141a8036d09382204ee90" + ] + }, + { + "address": "0x7c8349a37bb066e4ab086d7bd59fd0c349a47a42", + "storageKeys": [ + "0x5633d31bf79259487d0acf17b68fb2e7eb5742d68f4f56a4d68d8d2181c08afd", + "0x520e92c72e9840d50a60ffb161cd8020c5878af7bef03d94ba6771bfd9455b3a", + "0xddeff464ffea2a188e1f7eeeffbe4f0b0309fe3abdd94f824689e01ad5ecd1e6", + "0x41f932f0a5e9c3660627a3b465c08d3bdc37a434ea8293fb4f5cdedcf4f98142", + "0xffe923063982a249a37c41225f0660f91c06f79f77a29265a855526abf1d23c0", + "0xade3407dad7c46c83daa99d8dc9416a7fdc1a3a639257dc63ba263c0c109041d", + 
"0x488dddf0de1b4b37ef8bae6083ac3a52045ef3f56624dd037d16c05adc224223", + "0x8571945f6ee734f4ee2751c883fc3a62813e3e4656255a63a781aa40428f04dc", + "0x022bca78b410ad466fb2a86738d96a16e92a74fda21b78fce14bde3acbb71f16", + "0xed702597e7faa6dee5076b9ffbd2fe50d50b1af27f28eb8bd78221697b805d70", + "0x2bae5e99ac7468dd5cb622da146030023d49182f92341cea14bc5541ead233ba", + "0x98671d6e6ae7013550cb40d1e18dd1324641cdb1879551cd8c5d596126b6bad3", + "0x4e18073f66acc2cf9ad9ce61ac234f43201e0fc3677357445712a2c007a6d1a6", + "0x88f98d9cd15b1351697c4e78695da5a02113fe6463b3a3c00470a4f7d708ca48", + "0xc6e8cccb42f8aee0d37fbc56d2e06eb4236dfd5b3e52b89cbfcb06d15e31fe08" + ] + }, + { + "address": "0xf33480fc7ec63943a2026529b9897910199c32fc", + "storageKeys": [ + "0x700d7e50a7ecdba987e968e7e71ef23b6f462b807b505fd6d9361fbdc461e6dc", + "0x090cfa65e89d55fcf0bff2b85ef39fd870268c02d104fdc07a81061210e43a6f", + "0x8b1b087534d08aa595b17df538503cb59d40d5ae27ceca1609a2c11c584c1fa8", + "0xb47a01da0a804eccd1ee91ce2b62cc37e9dd3a36e476dce0e37cf16eab5afdd4", + "0x665bcf4467d177f5de80cbad2f5dc27bde0bbeec6fa685d6520fa176c3ae807b", + "0xbefa625dbae52b10ca48c542015cf4db34299c585014ee79b3f4990f15c08a04", + "0x4d311b5031ee278d905b68aec2e5eff282c097de89cd06500adbd296e6a7e908", + "0xa11bb60c6a5cf8eeaa0f362d8fa5a7d2b17eb7599f94ba68035cd0737d1eaa12", + "0x717dfaeec0584aa1587adccff03c49832049de0fe7575480567834c876e38013" + ] + }, + { + "address": "0x98541a554fbeaee16bd22100bd17a43a37d968bb", + "storageKeys": [ + "0x12ac434b6753f35b3c1ad85995b06c2553b20ff510d02005b54b5143784f4d96", + "0xf6705319d5d0999d177eba16317622d54c1fef4646392a871ea2d9b6346f2495", + "0x20a466e7293d7956f9552cc07301d7bec96f0c41e0ef7ee75aab80727e5597ba", + "0xb3cf710b9858161721efb1ec78034bcf5856160e2a6ea030945c1cf56c90adcc", + "0xfc7e83f121b0c71c71cf061fbdb1ba6d9b6f3c209e64fa67849987f9d5c37ee5", + "0x4f16f9657e5d3fc2693731a20d8c66dbbe4df6c5925fcd71c2864cd7b5bd0eb1", + "0xdd6e2a05badac2809f264f8996bc6f88e71f146823f4117bef93f81c7b27cae7", + 
"0xab750baba8b7591e151012e0c986b0f683fb7e022e36281314e7e85ccab65785", + "0x45040aeb242427bd2ce7f22b7f95d7cca765efc1457f1cfdbb9699860d992869", + "0x7585f43093ce23ff1c780ea228be270be5c46c3312334cc4aae3b8f4840a6136", + "0xea36a590c4bdc94ea383c098e8553c6868965324c1d6e704cb79256c6295ab11", + "0xab27ab768fd21c3e47be8ae2401723b476d82ee4e34d89813b6f1cc808fb8f61", + "0xa0778405e00a69773305cc30689cbe9b11dff8d2888a6763e38d417dae57d0e0", + "0xb4fc82c47936e1ba948ef0cee6626854f4798391debe1415c0997d12fcf481e1", + "0xbdc944c29788853485206a1c130f54b4be0e80709d79d5907ad0b97adc73fe06", + "0x46159b55138c62e28c3c8acd0a89185e605e5a62bd4ce4d0e02a8fe631b98dbc" + ] + } + ], + "encoded_bytes": "0x0bb502ab32c4c93c6afd0583159fe6b3a66a42e8ea1ec009dbc7783e36548259a0beab2aefb73f2f0cba05c123793b5a46e9428cf19d27a623916305cf9226c5fd1cfb50e68dae113d05bc65edc201d51be69d8031a3472875350e55b169f00cddd874192f6d2180e330bb755f10a2e67c5e125ef844c1f3d0fd9da52fa32fb7f589c3ee716615a06f3bbc5200ba4b43893080ceb55af864655bb869e501eb984c0cece5881c94e23c9c7a08442ab858f2892678c2e092fe34398ade276078e1ba23d556a77763ce4b05d5366c56daae0ec77f3919d55ebf7b057fc436351970ccce3e5a814742149bd8d5a19c6386cbb5a9f06be9cde0a3d2fcda2e7e6c312ee34fe3c81dc0756f652b7aa47b06f3bac8477affc764ead73913c94ed933e15696c182ba3482370fd141d5059051872be03638b1fbc38982b5027a24dbb264c8fc9b7a86b8c02365ab13e75224990995e47b82f0c6c13df59458c324f9b7f5eb3f20529d3bf6316f393a1d728a46099f361d50580c9bbfa15cbe3c45e89f128a0635b6fc401ccced12fe86fbf9fa27879e44e0a3bbf41d907a6254468082113e19a2a590d33258c6327deb5ed8a93f747ad91ae76470eddc68ca016ceae2ea24deb0cb57b6e9521b75b66487949346e9299cd7a2ad6d77c4417016f582c7a284b9d14d6f716305592b600e6a7ab5000b5ae6d3821707491f0b5e46b81884fb6240cd9c41c660ee2d8e7cbcc33100ff466aa37f685ae53bae64836ed995b4287945433a7da25751fefee86982a8456edcf7254a6c352e8c94a2647231cd5b2c42e71861a7b9992285f6d2e02ffdcae1a6c2b42cec453ea728f79ee0deab98c0bac568fdd2902c9dc4d5e91a93f0a5d8d502b4fe1f4253a1e47e185d59bb5e8b55535fc2fd0a0ad41e8a51448df25cb3097a4c57c2
6208e6f845ae9359a3edab3938ad9c351bfbb909db1438bcd838a6901940250183689afc63c5ca5fdf1c79df8d300b06995091926367cfd0ad349e5b6592a23b4bdbdc5755d2f1142e76190328c6164fb8de470663e0a1d5b86ee2f61d6c446738091ea8f675f63022484d58c25b01b138b2f0bc94a3aa4fc62e819f144c95448f1725b3f2438b490960679ede571f3cd4b54413f922757feaa3cee0b7dca414505a721b1c597497ab1da287a77afce10e87a888728870f1b160743255069308816e21c805b7ac3f3ded642d027370403cd282be4c478a2d97e7a45e2328649d041e1095ac9e4ce283672dbc49435457b38e3ec19bd4a84840de64e2d0b3b774f077e712f5ca6264f09dd433b4a4102a3be419852ced3356d614a230e9e2b479638bb49ab6612e158cb0fe631058f341e7e7f504dbd3b1f6e13493574896f4b273ecc58ccc26cc15130217e96e640ad86002b8297c9cf8bcdfdde8125cdad9ac3349e2ca17ee0fe80cb641fc891e878eeaf30b4072aed506647922839ed2ba109aad8bc637817501f16b28c71f73a863cba110e003d4bec128a4db513bd5034b10b860c26c3b5a6e18d345493626136ba97a3dbb8317d686503cf37926b69ed0981e22c3a582d5bdb6119fa173f9def20545b5475205ebd4ee195131bd11b460a2752dacfc2531f2a7634905e403413e3aaa3646d084d1368de69f6b9ebbd92e7e22b753eb003f74d199f9811ddaa40a485994d7308dc1e6a9c03b2a3ed6a54652bebc102c65e285a55b230a5e215e9bb9787d238070f0a8449896f4b79a0c6e0f6a52549848138025dd74dc51c34cbccc5699ca394e8a96b084dc51d6d5bc12693b6223de956784af4d9ea52924cf0adc90699096d5eb7aa3e1c0bfcf976e0c5241f6845ef6c21850f354832bfdefa15a92cff2fc101fad3d8d0d72eccea803d460f65674d8733a3f3886e041280f186ddc37e6e26bcefc84d95a03d0a536413883ec19d70285ed61de98b35f126ebaf1da04bf8471ab0924984c74eee9cbe7c03e01f5a86c7d7df1f47048be3aabdf207db9bf96edbdf517f05ffab7e430b390cf5ff9ce843ba1cbd92c9e7370b3dd1bbbcf104b3be0a8334c817f5abbded698ebcb354a4973968bfcbee8aedb25e322e206b66d934beabe38a1c244958f3445c48a80d02e81bda7497ca87891ca8b72457b5283e772609d1722098ee768abfa430befb24b1fae54c60172415550655ce8270ba6530b3044af63b372fc40b5b5489728481d24fecb69572af6ed9eff333d0159ac751b945b15f6ff71b665c77fc674212bcfa7bcdff5015f0f19cdcb3a28d788e6ecab9e56653f6d64a57b0719ff8bb91f5c41274808fdaff554dd378f6492da10611a6eb9ad359c60ddd42a3d8becf4a4e7401b
5e946826e454e9aa827262a3c7e3bf131a51ce471fe38121700a69ca9730100a200c27994f7ef632d455c72945739e8d93c2e85bd5f7ac12fb879ce7a44507910c25fb41c1d28043ea408a689dd20dbd0efa6db693cc0cb40e9a1f47c1e69f7732998b1e5d87fca43bbcfcf84494b2f1779083f1486047fe0e594cf465c1434329e5b74ae0d324fc38300e20e300f4af4be38b025a01abb249a388237e60f11571c922c212435a846112c0ad672f183233e10499e1686ec1d5031fe5e0114f1d40bda42b48c7ed1d25ef474e5b4c0ecbac155eaaca62071383760715fa6643fdacf354d955c20e32497cb9cbcc3db24c8d03f7d6466fd9d4a9f60a7484bcda75d538367d2146d744d3ec93fcf1e52299493a817e26abf8e4b3d86aeec7baf77eceb5931da1ed4c8479679c6df55d62673e3583b4877182a4ee4a9a2c75825eec7b2371b36071445c7976502248bed4a01d4fae0e40167e77d92c7e435a24aeeda318c7d0a8e7ca1addff9466686053c80a51f7a3f74be38c106273f628ad04f286b772bf189751874135f628a37a5d75cc5a75716fe447c7afe4f4bd236830f05db515837b3cc14b41cb441e3c7eb4988db42be1a18979322142b3ff8d041615676c4a9d93926913f98c76ac0a940e9c64046b4f8e114132b614c14389ba3c55c0ecdf5140888fcc678f00b8e75a2caf107e879e0521ee9108ca2a66781d50ddcbe5f83619e23da201cc2a35018e287b8b891ea5e0cbf4719eb5f9df61b5d403b556e1bb87643cf3407f0dc2e2b470df71d7f7e246bf28817f93470f64b8b20924aa7dc1acfd83eeceb42a2d6f6d638863398a432490be9fa1f64d88f4aca22e9c95aab64813b14902e74ee0bbef7e9299af7050f4a897e8ef57867cebee216bb4a27372c1d67744e498f7773f07c3f502b2df0017e78b96d03d3a8563a6fee7cffd0198790b393a7c53d33521bc3b51ecaa5a5a43aad6976642c9a04962791baea95c35b7440e023615d7ca8df95caad8a188fc8b4a629b0f91492b61f30d33aaf666491cdb31ce1c27da465710c02c830086fa1e85049415f245e1f2241b4c1abdc99cd2174cf5c2a7dc76e67d4aa5cf2ac99ec1cb949f8cdd7aef8fca6064881cfddad68963c90a744f6fe5b661842029fd735c1ed94cf29acf1ad23e891e7283c17eecbd883e002b623e1a6a9edabc847ee14f14313e6cb45fd6d38e08f74670abec8c00381c2e7ba34f860e040ece0d5256301d703eaf34dbd66318bb85f6639f052e54e596832076e4cb5ae4af3b587f12dcfbe9e9851c147a3541ce6f37d6db0f08595feeff074cbf1f59b950fc77b64954981c68a497c97c7cb2304ace920ce15734e7a3d679bed48a5179ed3f99e28e4a27da00e2b65df9b024eb00049a590c2d4f8a08
a18ce37dd333a168693732e5011f602017b82d6d58836b43f81c0b2889a4f503457031d941c6a83a62e00474edddfd051592b1ff0f68a994332d5c4fec0160cd93f1c409fb7405364961fa72b24e3bf66c75b44b15234bc1de11831e76ae84ce32bf0bc6311b7c6d1809d2a54ff861679cce2ce26dd7c5b6c19924f0b1336b640505ee889f55a44599fa15deba06d38ae93239729e8e91c3398b9c4b64e1cb51f345bb04ed22d379c414b8d5eac82ce805b9558c3da000f324b25b472b878624290dfdefa2d7d04bb026eb1d3d0332e62e15e5bc59858053a590468abaedf964438fa02e06bce1c428c884bac0d9c55764ade883be1716ef1c8302f958e0a621c46eea86335511aa2c2bddc449fd0e70e1cc6a8c1b3b3785a3716b191d3f22a1bdf50784be275f1445aade374dc72dd6410697e20093d95ba5f34c00e3eb44d7ccb202ed1ac8acc5c8c7fb45bf8d0b537fee663b37ecc888254a673150801bf766eb94a6a72cb7b4b55d6c4bbe9da1e9a7f22c0218ac03991ab0aff24f5d44baa8baf642a2dd197bedb6bc6abaf844905a1a5bc218da8835d809d4eb4ae820a868c866992e60b39824a008d7d2d3e485551e67d6102e8b9f973f534a016dc06ea23d6b9fef21303747b939436c457edeaaee8a1b05901bcef6dce2e559a3915148d7929d444cc0afb8e749896a4b82bda19856f2a6a3c0a0c3f7124629b36f9825b4ecbc62d53113c55cd141a8036d09382204ee90f5037c8349a37bb066e4ab086d7bd59fd0c349a47a420f5633d31bf79259487d0acf17b68fb2e7eb5742d68f4f56a4d68d8d2181c08afd520e92c72e9840d50a60ffb161cd8020c5878af7bef03d94ba6771bfd9455b3addeff464ffea2a188e1f7eeeffbe4f0b0309fe3abdd94f824689e01ad5ecd1e641f932f0a5e9c3660627a3b465c08d3bdc37a434ea8293fb4f5cdedcf4f98142ffe923063982a249a37c41225f0660f91c06f79f77a29265a855526abf1d23c0ade3407dad7c46c83daa99d8dc9416a7fdc1a3a639257dc63ba263c0c109041d488dddf0de1b4b37ef8bae6083ac3a52045ef3f56624dd037d16c05adc2242238571945f6ee734f4ee2751c883fc3a62813e3e4656255a63a781aa40428f04dc022bca78b410ad466fb2a86738d96a16e92a74fda21b78fce14bde3acbb71f16ed702597e7faa6dee5076b9ffbd2fe50d50b1af27f28eb8bd78221697b805d702bae5e99ac7468dd5cb622da146030023d49182f92341cea14bc5541ead233ba98671d6e6ae7013550cb40d1e18dd1324641cdb1879551cd8c5d596126b6bad34e18073f66acc2cf9ad9ce61ac234f43201e0fc3677357445712a2c007a6d1a688f98d9cd15b1351697c4e78695da5a02113fe6463b3a3c00470a4f7
d708ca48c6e8cccb42f8aee0d37fbc56d2e06eb4236dfd5b3e52b89cbfcb06d15e31fe08b502f33480fc7ec63943a2026529b9897910199c32fc09700d7e50a7ecdba987e968e7e71ef23b6f462b807b505fd6d9361fbdc461e6dc090cfa65e89d55fcf0bff2b85ef39fd870268c02d104fdc07a81061210e43a6f8b1b087534d08aa595b17df538503cb59d40d5ae27ceca1609a2c11c584c1fa8b47a01da0a804eccd1ee91ce2b62cc37e9dd3a36e476dce0e37cf16eab5afdd4665bcf4467d177f5de80cbad2f5dc27bde0bbeec6fa685d6520fa176c3ae807bbefa625dbae52b10ca48c542015cf4db34299c585014ee79b3f4990f15c08a044d311b5031ee278d905b68aec2e5eff282c097de89cd06500adbd296e6a7e908a11bb60c6a5cf8eeaa0f362d8fa5a7d2b17eb7599f94ba68035cd0737d1eaa12717dfaeec0584aa1587adccff03c49832049de0fe7575480567834c876e38013950498541a554fbeaee16bd22100bd17a43a37d968bb1012ac434b6753f35b3c1ad85995b06c2553b20ff510d02005b54b5143784f4d96f6705319d5d0999d177eba16317622d54c1fef4646392a871ea2d9b6346f249520a466e7293d7956f9552cc07301d7bec96f0c41e0ef7ee75aab80727e5597bab3cf710b9858161721efb1ec78034bcf5856160e2a6ea030945c1cf56c90adccfc7e83f121b0c71c71cf061fbdb1ba6d9b6f3c209e64fa67849987f9d5c37ee54f16f9657e5d3fc2693731a20d8c66dbbe4df6c5925fcd71c2864cd7b5bd0eb1dd6e2a05badac2809f264f8996bc6f88e71f146823f4117bef93f81c7b27cae7ab750baba8b7591e151012e0c986b0f683fb7e022e36281314e7e85ccab6578545040aeb242427bd2ce7f22b7f95d7cca765efc1457f1cfdbb9699860d9928697585f43093ce23ff1c780ea228be270be5c46c3312334cc4aae3b8f4840a6136ea36a590c4bdc94ea383c098e8553c6868965324c1d6e704cb79256c6295ab11ab27ab768fd21c3e47be8ae2401723b476d82ee4e34d89813b6f1cc808fb8f61a0778405e00a69773305cc30689cbe9b11dff8d2888a6763e38d417dae57d0e0b4fc82c47936e1ba948ef0cee6626854f4798391debe1415c0997d12fcf481e1bdc944c29788853485206a1c130f54b4be0e80709d79d5907ad0b97adc73fe0646159b55138c62e28c3c8acd0a89185e605e5a62bd4ce4d0e02a8fe631b98dbc" + }, + { + "access_list": [ + { + "address": "0x5cb95949c4446088afa8305839eb74949fbec939", + "storageKeys": [ + "0x87d8feb6a06cf94be5926953cb778d0595fe04d4176df6bf0e465c6f4ac95523", + 
"0xbd239f75c821ae2534474d5e04457147575c20071ee7fdfd6bd9763a27aed150", + "0xeef2e6b9421c401120500ae7b5446d7dc4cbc76763c28fc84ca89e72573a0427", + "0x9373f35cd264aee3da385a66adc7e624ed6fca9169591f68da5ea28990063f64", + "0xc2f3528cc49b9408f5cdca6b36061fd1f13b981ae544d4f71ba6ddcf78e2350a", + "0xbc61c372ac6811d6fadcf44e5245d7cfad626b7c69b27a3ae47eef3764af75b4", + "0x39f5f1edd5eeadfb95a97c3bbb5b4f43661d5d05a397dbfafefcadb3f9c22866", + "0x68177c5ebde82c7a2d792740267f73b3f750691455e875af942ba88ba5834f16", + "0x22ee71ebd1b365736370f500f9027f0279f79455f5337534ac03be6814fee86c", + "0xdf3a3531c3f5c0cb467d8767dcff3fdbab203662beab76574c93001557aab690", + "0x82c346a6bdf29e8c8d09c7253114336064f2ae5afad59744408c98c479e5bc9b", + "0x8b81a6df095e2f87d40874742502592ae29832806fac461d909728c6da87dcac", + "0x1222603af8430d31db8df9d3e79670c85319c8226bb6c6fbd5f21fa545eefcbc" + ] + }, + { + "address": "0x6f0a1591a530aea981fec493738415bdd4da0c37", + "storageKeys": [ + "0x9f276b8629589d0bad8126a6c10f6398b681ec6c4d4012c38ebcd10760ddba38", + "0x5f25df4b392c794bd2f408f6d57001d26d3ba4b4cc904c60dc6861b5226afc84" + ] + }, + { + "address": "0x90e54f6eecd0c93d3de598b9f10c0dec8e39b36b", + "storageKeys": [ + "0x45d9f2a8d9cda0a0b1d17efd2abac9984c2473d7e9e380eb2fee9d8c211de074", + "0x6849f6fdb6142f97d63f6cf9aea9a2523ca08c29a10aff161080357dee4a659b", + "0x4aae5a344ef24b2f7dedd4b479cae12fce1abc286f40911027d6483d0953be86", + "0xd9c9bca6bac8d745bd9dd45ed1919d60536ea832d493318d294e826d477aff43", + "0x883b7c27685caeba087c6e76ed32461655b47f94794759b3e08a32577d93845a", + "0xe058fc3ba5dc5a54fb251a477e7d9edb099518b6d754a5987a124bfd07ab76c5", + "0x68d1e0886ae572230e62c1962e4dcf40701729e38ca895a0dbfb37d99d68eed6", + "0xbf7e6b4b1bba6aab4e0b545983248a8c2400817c596a34e4ccf717fd56ddec0b", + "0xebbbd156a87aec593a1a86c32db2eea9d28e2a082929e0b287f75e6cdc53a5fb", + "0x2f18216fac51b7f01d0bb5d52858e7a5472c99aa191efacf7c73c0567a26decb", + "0xc00fcd06bbbbedd14187ab1cd90b97cf59f8910c5b8fcccf043124e71f319e24", + 
"0xcf036e5cb1e99e5b118d968b03de50caef825523a8e16430f0bae91aa2d2198f", + "0xae77d02641bbf6e1177fc800fd7b3051e8d9c6a38007b70bf7cfb986a7f21614", + "0xc1f30819374899fd0a4292d209be479079da6e393bcaa1f6e9c6eddfbabaa893" + ] + }, + { + "address": "0x33ea323e9873dfde752835f05532e8980017b1a6", + "storageKeys": [] + }, + { + "address": "0x40738949d052886dd52d72687c56886a813bcdd7", + "storageKeys": [ + "0x90f77608825f889c7de55e4c4d01cef591178a563d0e4161abacdad23a17783d" + ] + }, + { + "address": "0xc467321043e0f77ae07a0a9b07425f3d4f2810b8", + "storageKeys": [ + "0x9873d768649ae6dcadae8b5844736a79cd292b2a87ab1f7aed4e7b9b0d1ea496", + "0xe6e982a10099e7100f6474c8aa527da3d183fdcb544404d81a1cc1f8ffa56569" + ] + }, + { + "address": "0x743c57b964b0eed4e417990cf0d72b5b84af4d34", + "storageKeys": [ + "0xb81c9f3f70bb4e55039e73306d851fc871abe42985cde5e007224e735c9890d2", + "0x6fa37473c8e7f85f0fadfc8c24f6a566401b126d0764d29dc344848a28cae77b", + "0x34ba88ddb5e4e9ea3f13b96971af8300f24435e08c2ed932f33a5ea4d20f22f3", + "0x128213270454901fef81253ef26ae60984692a6b88d916973e7cf56ccb2df4c0", + "0xbf019b979168e81a6ddc3b6fa744f3acab2139e36b49a58405d87c4daaf83288", + "0x241517ecb4fa08a2063e232994bdd880872f34d78b9601bfc74396bbe2f1d01a", + "0xdb8a8265b095129ece67321d110b22152674d33582fd8c0c0429342edc8acdfb", + "0x9404529fbc71b93830879acb4d49eaf56fe09367ba51651608396748d6ab9eba", + "0x3c17e617f65e90ce715d8891bc5ab6dfec2f24dc20c4eb4aa4e3778528ea88e9", + "0x5efbfe7a705df19ade0d1b229bf632d24e559d518ca6c39d27e674fd605a2545", + "0xd1785b52e20b64fb78a0d6c6a4f8b1714318f1c6f93a182c8420f12253f89895", + "0xe54e0d2f55e2029dc067da7e77bbd860c3640a2cab0aba7999dc33287b138f5a", + "0x565a2dd931adf27a9061b86a7d3ba6eea5ef43953c6939c9d4618dd8a0d27919", + "0x4deaac8d076c7aa4c24b7613c69578dfdef47d6d1761ecddd069f18992b26eb3", + "0x7e213bc1cf69fdf6950e8d3074e3cde354f5eaca7c1fde249b0965836f040783", + "0x4771326f50fcb78deb760ccb47ed6a9f2b4ffa00dc523a533a4c993de3838378" + ] + } + ], + "encoded_bytes": 
"0x07b5035cb95949c4446088afa8305839eb74949fbec9390d87d8feb6a06cf94be5926953cb778d0595fe04d4176df6bf0e465c6f4ac95523bd239f75c821ae2534474d5e04457147575c20071ee7fdfd6bd9763a27aed150eef2e6b9421c401120500ae7b5446d7dc4cbc76763c28fc84ca89e72573a04279373f35cd264aee3da385a66adc7e624ed6fca9169591f68da5ea28990063f64c2f3528cc49b9408f5cdca6b36061fd1f13b981ae544d4f71ba6ddcf78e2350abc61c372ac6811d6fadcf44e5245d7cfad626b7c69b27a3ae47eef3764af75b439f5f1edd5eeadfb95a97c3bbb5b4f43661d5d05a397dbfafefcadb3f9c2286668177c5ebde82c7a2d792740267f73b3f750691455e875af942ba88ba5834f1622ee71ebd1b365736370f500f9027f0279f79455f5337534ac03be6814fee86cdf3a3531c3f5c0cb467d8767dcff3fdbab203662beab76574c93001557aab69082c346a6bdf29e8c8d09c7253114336064f2ae5afad59744408c98c479e5bc9b8b81a6df095e2f87d40874742502592ae29832806fac461d909728c6da87dcac1222603af8430d31db8df9d3e79670c85319c8226bb6c6fbd5f21fa545eefcbc556f0a1591a530aea981fec493738415bdd4da0c37029f276b8629589d0bad8126a6c10f6398b681ec6c4d4012c38ebcd10760ddba385f25df4b392c794bd2f408f6d57001d26d3ba4b4cc904c60dc6861b5226afc84d50390e54f6eecd0c93d3de598b9f10c0dec8e39b36b0e45d9f2a8d9cda0a0b1d17efd2abac9984c2473d7e9e380eb2fee9d8c211de0746849f6fdb6142f97d63f6cf9aea9a2523ca08c29a10aff161080357dee4a659b4aae5a344ef24b2f7dedd4b479cae12fce1abc286f40911027d6483d0953be86d9c9bca6bac8d745bd9dd45ed1919d60536ea832d493318d294e826d477aff43883b7c27685caeba087c6e76ed32461655b47f94794759b3e08a32577d93845ae058fc3ba5dc5a54fb251a477e7d9edb099518b6d754a5987a124bfd07ab76c568d1e0886ae572230e62c1962e4dcf40701729e38ca895a0dbfb37d99d68eed6bf7e6b4b1bba6aab4e0b545983248a8c2400817c596a34e4ccf717fd56ddec0bebbbd156a87aec593a1a86c32db2eea9d28e2a082929e0b287f75e6cdc53a5fb2f18216fac51b7f01d0bb5d52858e7a5472c99aa191efacf7c73c0567a26decbc00fcd06bbbbedd14187ab1cd90b97cf59f8910c5b8fcccf043124e71f319e24cf036e5cb1e99e5b118d968b03de50caef825523a8e16430f0bae91aa2d2198fae77d02641bbf6e1177fc800fd7b3051e8d9c6a38007b70bf7cfb986a7f21614c1f30819374899fd0a4292d209be479079da6e393bcaa1f6e9c6eddfbabaa893153
3ea323e9873dfde752835f05532e8980017b1a6003540738949d052886dd52d72687c56886a813bcdd70190f77608825f889c7de55e4c4d01cef591178a563d0e4161abacdad23a17783d55c467321043e0f77ae07a0a9b07425f3d4f2810b8029873d768649ae6dcadae8b5844736a79cd292b2a87ab1f7aed4e7b9b0d1ea496e6e982a10099e7100f6474c8aa527da3d183fdcb544404d81a1cc1f8ffa565699504743c57b964b0eed4e417990cf0d72b5b84af4d3410b81c9f3f70bb4e55039e73306d851fc871abe42985cde5e007224e735c9890d26fa37473c8e7f85f0fadfc8c24f6a566401b126d0764d29dc344848a28cae77b34ba88ddb5e4e9ea3f13b96971af8300f24435e08c2ed932f33a5ea4d20f22f3128213270454901fef81253ef26ae60984692a6b88d916973e7cf56ccb2df4c0bf019b979168e81a6ddc3b6fa744f3acab2139e36b49a58405d87c4daaf83288241517ecb4fa08a2063e232994bdd880872f34d78b9601bfc74396bbe2f1d01adb8a8265b095129ece67321d110b22152674d33582fd8c0c0429342edc8acdfb9404529fbc71b93830879acb4d49eaf56fe09367ba51651608396748d6ab9eba3c17e617f65e90ce715d8891bc5ab6dfec2f24dc20c4eb4aa4e3778528ea88e95efbfe7a705df19ade0d1b229bf632d24e559d518ca6c39d27e674fd605a2545d1785b52e20b64fb78a0d6c6a4f8b1714318f1c6f93a182c8420f12253f89895e54e0d2f55e2029dc067da7e77bbd860c3640a2cab0aba7999dc33287b138f5a565a2dd931adf27a9061b86a7d3ba6eea5ef43953c6939c9d4618dd8a0d279194deaac8d076c7aa4c24b7613c69578dfdef47d6d1761ecddd069f18992b26eb37e213bc1cf69fdf6950e8d3074e3cde354f5eaca7c1fde249b0965836f0407834771326f50fcb78deb760ccb47ed6a9f2b4ffa00dc523a533a4c993de3838378" + }, + { + "access_list": [ + { + "address": "0x9e6e48ae1841d6413d56b9007c36d653d8441bd9", + "storageKeys": [ + "0xc3087388dc0c94f1b1262ba213e1cb7b01b8e1dbb52a6bccb60f508a77065c68", + "0xe9b443dc979ac690401f9e55147ef16427d0bda32386ff549ef1c475fa8ca4ac", + "0x9a718e752bc70152954dca615da41d24401af30cb96fe1beb71ba49b48ea0cde", + "0x58914eb718e454d26cea4a30666ba0996576b9354b36978f74dce7fc7eeb4029", + "0xbfa5f184903f08ef9634a40ac8cb2d1f1de681c879c8476dd6d86657d26fa61c", + "0x4ecfbed47949ca2e605ca88b51de6d601b74d2129c9edc61a0fee007e42accc0", + 
"0x1379622900cffe0885265f47d2e23588dbc5dd7f43caddc2d202ff211159c7e2", + "0x5f355c35f645c14fd6ca8b90b76026d0cd23b7ee95cfdbccab07d952818c60fd", + "0xf61e8d591bf82176204398e247474069266bea69d507e8a1fdfdb5b4db26e6c4", + "0xfb2cf6eca9119f1eea79487dff99a93aed1734b6aa8e28d831026c0c05e1342b", + "0x17ed5efd4afc628624bb2951f7f93ba6406f36e41621bdd1d25a6f7cc2261b39", + "0x02a5e623f33268fa03ba662463a85596cc9953bf95c91e6d565b59b500fd6392", + "0x9c83d5ae074ee71753d19abfadeacb906c2d4bcdfadbe950bf03a190893b9663", + "0x5c35a36ec3871520b930805d70c96230b624eb744a6d7fc9cb5c2abc913272c7" + ] + }, + { + "address": "0xaa371be114e9598adbaaeb52215e414f0459b4d2", + "storageKeys": [ + "0x4bccc305834d4ababcc696644fae9734dbb5397f1ea7dd5bf634a0499d45c9de" + ] + }, + { + "address": "0xeba274cd7cd6bea3d8530d65981053631869fb51", + "storageKeys": [ + "0x4e7b65539314e148d1171209704d8f6fc644f91b374bae9d84577d2a20e64fe1", + "0x1748db7ef5635065df10aca63e929951bea19b4604b3367e2ac56419b0bfc7b1" + ] + } + ], + "encoded_bytes": "0x03d5039e6e48ae1841d6413d56b9007c36d653d8441bd90ec3087388dc0c94f1b1262ba213e1cb7b01b8e1dbb52a6bccb60f508a77065c68e9b443dc979ac690401f9e55147ef16427d0bda32386ff549ef1c475fa8ca4ac9a718e752bc70152954dca615da41d24401af30cb96fe1beb71ba49b48ea0cde58914eb718e454d26cea4a30666ba0996576b9354b36978f74dce7fc7eeb4029bfa5f184903f08ef9634a40ac8cb2d1f1de681c879c8476dd6d86657d26fa61c4ecfbed47949ca2e605ca88b51de6d601b74d2129c9edc61a0fee007e42accc01379622900cffe0885265f47d2e23588dbc5dd7f43caddc2d202ff211159c7e25f355c35f645c14fd6ca8b90b76026d0cd23b7ee95cfdbccab07d952818c60fdf61e8d591bf82176204398e247474069266bea69d507e8a1fdfdb5b4db26e6c4fb2cf6eca9119f1eea79487dff99a93aed1734b6aa8e28d831026c0c05e1342b17ed5efd4afc628624bb2951f7f93ba6406f36e41621bdd1d25a6f7cc2261b3902a5e623f33268fa03ba662463a85596cc9953bf95c91e6d565b59b500fd63929c83d5ae074ee71753d19abfadeacb906c2d4bcdfadbe950bf03a190893b96635c35a36ec3871520b930805d70c96230b624eb744a6d7fc9cb5c2abc913272c735aa371be114e9598adbaaeb52215e414f0459b4d2014bccc305
834d4ababcc696644fae9734dbb5397f1ea7dd5bf634a0499d45c9de55eba274cd7cd6bea3d8530d65981053631869fb51024e7b65539314e148d1171209704d8f6fc644f91b374bae9d84577d2a20e64fe11748db7ef5635065df10aca63e929951bea19b4604b3367e2ac56419b0bfc7b1" + } +] \ No newline at end of file From 96e3619ea9a7aada54eac442fda7be95ad56a701 Mon Sep 17 00:00:00 2001 From: Rodrigo Herrera Date: Sat, 13 Apr 2024 07:02:15 -0600 Subject: [PATCH 148/700] use alloy's BaseFeeParams (2) (#7617) Co-authored-by: Matthias Seitz --- crates/primitives/src/basefee.rs | 13 ++-- crates/primitives/src/chain/mod.rs | 4 ++ crates/primitives/src/chain/spec.rs | 92 +++++--------------------- crates/primitives/src/constants/mod.rs | 43 ++++++++++-- crates/primitives/src/header.rs | 3 +- 5 files changed, 68 insertions(+), 87 deletions(-) diff --git a/crates/primitives/src/basefee.rs b/crates/primitives/src/basefee.rs index b414bb36acc2d..442cb66409014 100644 --- a/crates/primitives/src/basefee.rs +++ b/crates/primitives/src/basefee.rs @@ -30,7 +30,7 @@ pub fn calculate_next_block_base_fee( base_fee_params: crate::BaseFeeParams, ) -> u64 { // Calculate the target gas by dividing the gas limit by the elasticity multiplier. - let gas_target = gas_limit / base_fee_params.elasticity_multiplier; + let gas_target = gas_limit / base_fee_params.elasticity_multiplier as u64; match gas_used.cmp(&gas_target) { // If the gas used in the current block is equal to the gas target, the base fee remains the @@ -45,7 +45,7 @@ pub fn calculate_next_block_base_fee( // Ensure a minimum increase of 1. 1, base_fee as u128 * (gas_used - gas_target) as u128 / - (gas_target as u128 * base_fee_params.max_change_denominator as u128), + (gas_target as u128 * base_fee_params.max_change_denominator), ) as u64) } // If the gas used in the current block is less than the gas target, calculate a new @@ -54,7 +54,7 @@ pub fn calculate_next_block_base_fee( // Calculate the decrease in base fee based on the formula defined by EIP-1559. 
base_fee.saturating_sub( (base_fee as u128 * (gas_target - gas_used) as u128 / - (gas_target as u128 * base_fee_params.max_change_denominator as u128)) + (gas_target as u128 * base_fee_params.max_change_denominator)) as u64, ) } @@ -65,6 +65,9 @@ pub fn calculate_next_block_base_fee( mod tests { use super::*; + #[cfg(feature = "optimism")] + use crate::chain::{OP_BASE_FEE_PARAMS, OP_SEPOLIA_BASE_FEE_PARAMS}; + #[test] fn calculate_base_fee_success() { let base_fee = [ @@ -124,7 +127,7 @@ mod tests { gas_used[i], gas_limit[i], base_fee[i], - crate::BaseFeeParams::optimism(), + OP_BASE_FEE_PARAMS, ) ); } @@ -157,7 +160,7 @@ mod tests { gas_used[i], gas_limit[i], base_fee[i], - crate::BaseFeeParams::optimism_sepolia(), + OP_SEPOLIA_BASE_FEE_PARAMS, ) ); } diff --git a/crates/primitives/src/chain/mod.rs b/crates/primitives/src/chain/mod.rs index 6f9673e719468..f8425f95e6631 100644 --- a/crates/primitives/src/chain/mod.rs +++ b/crates/primitives/src/chain/mod.rs @@ -8,6 +8,10 @@ pub use spec::{ #[cfg(feature = "optimism")] pub use spec::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}; +#[cfg(feature = "optimism")] +#[cfg(test)] +pub(crate) use spec::{OP_BASE_FEE_PARAMS, OP_SEPOLIA_BASE_FEE_PARAMS}; + // The chain spec module. mod spec; // The chain info module. 
diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 32d3b2ad841a8..160eddf9df5e9 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -1,8 +1,5 @@ use crate::{ - constants::{ - EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR, EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, - EIP1559_INITIAL_BASE_FEE, EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, EMPTY_WITHDRAWALS, - }, + constants::{EIP1559_INITIAL_BASE_FEE, EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, EMPTY_WITHDRAWALS}, holesky_nodes, net::{goerli_nodes, mainnet_nodes, sepolia_nodes}, proofs::state_root_ref_unhashed, @@ -18,6 +15,14 @@ use std::{ sync::Arc, }; +pub use alloy_eips::eip1559::BaseFeeParams; + +#[cfg(feature = "optimism")] +pub(crate) use crate::constants::{ + OP_BASE_FEE_PARAMS, OP_CANYON_BASE_FEE_PARAMS, OP_SEPOLIA_BASE_FEE_PARAMS, + OP_SEPOLIA_CANYON_BASE_FEE_PARAMS, +}; + /// The Ethereum mainnet spec pub static MAINNET: Lazy> = Lazy::new(|| { ChainSpec { @@ -284,8 +289,8 @@ pub static OP_MAINNET: Lazy> = Lazy::new(|| { ]), base_fee_params: BaseFeeParamsKind::Variable( vec![ - (Hardfork::London, BaseFeeParams::optimism()), - (Hardfork::Canyon, BaseFeeParams::optimism_canyon()), + (Hardfork::London, OP_BASE_FEE_PARAMS), + (Hardfork::Canyon, OP_CANYON_BASE_FEE_PARAMS), ] .into(), ), @@ -338,8 +343,8 @@ pub static OP_SEPOLIA: Lazy> = Lazy::new(|| { ]), base_fee_params: BaseFeeParamsKind::Variable( vec![ - (Hardfork::London, BaseFeeParams::optimism_sepolia()), - (Hardfork::Canyon, BaseFeeParams::optimism_sepolia_canyon()), + (Hardfork::London, OP_SEPOLIA_BASE_FEE_PARAMS), + (Hardfork::Canyon, OP_SEPOLIA_CANYON_BASE_FEE_PARAMS), ] .into(), ), @@ -392,8 +397,8 @@ pub static BASE_SEPOLIA: Lazy> = Lazy::new(|| { ]), base_fee_params: BaseFeeParamsKind::Variable( vec![ - (Hardfork::London, BaseFeeParams::optimism_sepolia()), - (Hardfork::Canyon, BaseFeeParams::optimism_sepolia_canyon()), + (Hardfork::London, OP_SEPOLIA_BASE_FEE_PARAMS), + 
(Hardfork::Canyon, OP_SEPOLIA_CANYON_BASE_FEE_PARAMS), ] .into(), ), @@ -446,8 +451,8 @@ pub static BASE_MAINNET: Lazy> = Lazy::new(|| { ]), base_fee_params: BaseFeeParamsKind::Variable( vec![ - (Hardfork::London, BaseFeeParams::optimism()), - (Hardfork::Canyon, BaseFeeParams::optimism_canyon()), + (Hardfork::London, OP_BASE_FEE_PARAMS), + (Hardfork::Canyon, OP_CANYON_BASE_FEE_PARAMS), ] .into(), ), @@ -492,69 +497,6 @@ impl From> for ForkBaseFeeParams { } } -/// BaseFeeParams contains the config parameters that control block base fee computation -#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Eq)] -pub struct BaseFeeParams { - /// The base_fee_max_change_denominator from EIP-1559 - pub max_change_denominator: u64, - /// The elasticity multiplier from EIP-1559 - pub elasticity_multiplier: u64, -} - -impl BaseFeeParams { - /// Get the base fee parameters for Ethereum mainnet - pub const fn ethereum() -> BaseFeeParams { - BaseFeeParams { - max_change_denominator: EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR, - elasticity_multiplier: EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, - } - } - - /// Get the base fee parameters for optimism sepolia - #[cfg(feature = "optimism")] - pub const fn optimism_sepolia() -> BaseFeeParams { - BaseFeeParams { - max_change_denominator: - crate::constants::OP_SEPOLIA_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR, - elasticity_multiplier: - crate::constants::OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, - } - } - - /// Get the base fee parameters for optimism sepolia (post Canyon) - #[cfg(feature = "optimism")] - pub const fn optimism_sepolia_canyon() -> BaseFeeParams { - BaseFeeParams { - max_change_denominator: - crate::constants::OP_SEPOLIA_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON, - elasticity_multiplier: - crate::constants::OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, - } - } - - /// Get the base fee parameters for optimism mainnet - #[cfg(feature = "optimism")] - pub const fn optimism() -> 
BaseFeeParams { - BaseFeeParams { - max_change_denominator: - crate::constants::OP_MAINNET_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR, - elasticity_multiplier: - crate::constants::OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, - } - } - - /// Get the base fee parameters for optimism mainnet (post Canyon) - #[cfg(feature = "optimism")] - pub const fn optimism_canyon() -> BaseFeeParams { - BaseFeeParams { - max_change_denominator: - crate::constants::OP_MAINNET_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON, - elasticity_multiplier: - crate::constants::OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, - } - } -} - /// An Ethereum chain specification. /// /// A chain specification describes: diff --git a/crates/primitives/src/constants/mod.rs b/crates/primitives/src/constants/mod.rs index 18a41168fd59f..4fc0aadfe9dc2 100644 --- a/crates/primitives/src/constants/mod.rs +++ b/crates/primitives/src/constants/mod.rs @@ -6,6 +6,9 @@ use crate::{ }; use std::time::Duration; +#[cfg(feature = "optimism")] +use crate::chain::BaseFeeParams; + /// [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) constants. pub mod eip4844; @@ -67,32 +70,60 @@ pub const MINIMUM_GAS_LIMIT: u64 = 5000; /// Base fee max change denominator for Optimism Mainnet as defined in the Optimism /// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. #[cfg(feature = "optimism")] -pub const OP_MAINNET_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u64 = 50; +pub const OP_MAINNET_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 50; /// Base fee max change denominator for Optimism Mainnet as defined in the Optimism Canyon /// hardfork. 
#[cfg(feature = "optimism")] -pub const OP_MAINNET_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON: u64 = 250; +pub const OP_MAINNET_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON: u128 = 250; /// Base fee max change denominator for Optimism Mainnet as defined in the Optimism /// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. #[cfg(feature = "optimism")] -pub const OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u64 = 6; +pub const OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 6; /// Base fee max change denominator for Optimism Sepolia as defined in the Optimism /// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. #[cfg(feature = "optimism")] -pub const OP_SEPOLIA_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u64 = 50; +pub const OP_SEPOLIA_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 50; /// Base fee max change denominator for Optimism Sepolia as defined in the Optimism Canyon /// hardfork. #[cfg(feature = "optimism")] -pub const OP_SEPOLIA_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON: u64 = 250; +pub const OP_SEPOLIA_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON: u128 = 250; /// Base fee max change denominator for Optimism Sepolia as defined in the Optimism /// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. #[cfg(feature = "optimism")] -pub const OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u64 = 10; +pub const OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 10; + +/// Get the base fee parameters for Optimism Sepolia. 
+#[cfg(feature = "optimism")] +pub const OP_SEPOLIA_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams { + max_change_denominator: OP_SEPOLIA_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR, + elasticity_multiplier: OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, +}; + +/// Get the base fee parameters for Optimism Sepolia (post Canyon). +#[cfg(feature = "optimism")] +pub const OP_SEPOLIA_CANYON_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams { + max_change_denominator: OP_SEPOLIA_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON, + elasticity_multiplier: OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, +}; + +/// Get the base fee parameters for Optimism Mainnet. +#[cfg(feature = "optimism")] +pub const OP_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams { + max_change_denominator: OP_MAINNET_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR, + elasticity_multiplier: OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, +}; + +/// Get the base fee parameters for Optimism Mainnet (post Canyon). +#[cfg(feature = "optimism")] +pub const OP_CANYON_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams { + max_change_denominator: OP_MAINNET_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON, + elasticity_multiplier: OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, +}; /// Multiplier for converting gwei to wei. pub const GWEI_TO_WEI: u64 = 1_000_000_000; diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index e436fb6734171..0516d32271b65 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -654,7 +654,8 @@ impl SealedHeader { // Determine the parent gas limit, considering elasticity multiplier on the London fork. 
let parent_gas_limit = if chain_spec.fork(Hardfork::London).transitions_at_block(self.number) { - parent.gas_limit * chain_spec.base_fee_params(self.timestamp).elasticity_multiplier + parent.gas_limit * + chain_spec.base_fee_params(self.timestamp).elasticity_multiplier as u64 } else { parent.gas_limit }; From 4d799673357d69927059854aa2231d34dc4dbe18 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Sat, 13 Apr 2024 20:25:04 +0200 Subject: [PATCH 149/700] chore: simplify DisplayBlocksChain (#7624) --- crates/net/downloaders/src/file_client.rs | 1 - crates/storage/provider/src/chain.rs | 26 +++++++---------------- 2 files changed, 8 insertions(+), 19 deletions(-) diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 9ebde1b70513b..a692bc8258d41 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -287,7 +287,6 @@ mod tests { }, test_utils::TestConsensus, }; - use reth_primitives::SealedHeader; use reth_provider::test_utils::create_test_provider_factory; use std::sync::Arc; diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index eb9ef6a4b7ec3..4114ba0970068 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -325,26 +325,16 @@ pub struct DisplayBlocksChain<'a>(pub &'a BTreeMap fmt::Display for DisplayBlocksChain<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.0.len() <= 3 { - write!(f, "[")?; - let mut iter = self.0.values().map(|block| block.num_hash()); - if let Some(block_num_hash) = iter.next() { - write!(f, "{block_num_hash:?}")?; - for block_num_hash_iter in iter { - write!(f, ", {block_num_hash_iter:?}")?; - } - } - write!(f, "]")?; + let mut list = f.debug_list(); + let mut values = self.0.values().map(|block| block.num_hash()); + if values.len() <= 3 { + list.entries(values); } else { - write!( - f, - 
"[{:?}, ..., {:?}]", - self.0.values().next().unwrap().num_hash(), - self.0.values().last().unwrap().num_hash() - )?; + list.entry(&values.next().unwrap()); + list.entry(&format_args!("...")); + list.entry(&values.next_back().unwrap()); } - - Ok(()) + list.finish() } } From 4d175cb783e8e5583c7d1ebc357bfc6412fdbbee Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Sun, 14 Apr 2024 12:53:50 +0800 Subject: [PATCH 150/700] feat: add a metric for the number of ExEx's on a node (#7632) --- crates/exex/src/manager.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 7e202b2d72a3a..f12a6e7564111 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -127,6 +127,8 @@ pub struct ExExManagerMetrics { /// /// Note that this might be slightly bigger than the maximum capacity in some cases. buffer_size: Gauge, + /// Current number of ExEx's on the node. + num_exexs: Gauge, } /// The execution extension manager. @@ -200,6 +202,7 @@ impl ExExManager { let metrics = ExExManagerMetrics::default(); metrics.max_capacity.set(max_capacity as f64); + metrics.num_exexs.set(num_exexs as f64); Self { exex_handles: handles, From aa32f7bfa34b48d5be59015fd522761475f8a2e7 Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Sun, 14 Apr 2024 13:22:23 +0800 Subject: [PATCH 151/700] fix: crash ExEx task when the ExEx future returns (#7633) Co-authored-by: Oliver Nordbjerg --- crates/node-builder/src/builder.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index 2c387161fb0d1..6e05e93d05f49 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -627,7 +627,10 @@ where // spawn it as a crit task executor.spawn_critical("exex", async move { info!(target: "reth::cli", id, "ExEx started"); - exex.await.unwrap_or_else(|_| panic!("exex {} crashed", id)) + match exex.await { + Ok(_) => 
panic!("ExEx {id} finished. ExEx's should run indefinitely"), + Err(err) => panic!("ExEx {id} crashed: {err}"), + } }); }); } From 8cdeacf388650f6bb8feebca584b5e95f1061a11 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 14 Apr 2024 12:46:09 +0000 Subject: [PATCH 152/700] chore(deps): weekly `cargo update` (#7627) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 103 ++++++++++++++++++++++++++++------------------------- 1 file changed, 55 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9ca68955e9d36..166e66290df82 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -125,9 +125,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-chains" @@ -172,7 +172,7 @@ dependencies = [ "itoa", "serde", "serde_json", - "winnow 0.6.5", + "winnow 0.6.6", ] [[package]] @@ -499,7 +499,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "715f4d09a330cc181fc7c361b5c5c2766408fa59a0bac60349dcb7baabd404cc" dependencies = [ - "winnow 0.6.5", + "winnow 0.6.6", ] [[package]] @@ -877,9 +877,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.79" +version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", @@ -1462,7 +1462,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -2549,9 +2549,9 @@ 
dependencies = [ [[package]] name = "either" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" +checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" [[package]] name = "elliptic-curve" @@ -3462,7 +3462,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2 0.5.6", "tokio", "tower-service", "tracing", @@ -3933,9 +3933,9 @@ checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "iri-string" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21859b667d66a4c1dacd9df0863b3efb65785474255face87f5bca39dd8407c0" +checksum = "81669f3b77acd397a241a988f05190b1785cb83f0287d8fb3a05f0648405d65f" dependencies = [ "memchr", "serde", @@ -3978,9 +3978,9 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.28" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab46a6e9526ddef3ae7f787c06f0f2600639ba80ea3eade3d8e670a2230f51d6" +checksum = "685a7d121ee3f65ae4fddd72b25a04bb36b6af81bc0828f7d5434c0fe60fa3a2" dependencies = [ "libc", ] @@ -4250,7 +4250,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -4926,9 +4926,9 @@ dependencies = [ [[package]] name = "num" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af" +checksum = "3135b08af27d103b0a51f2ae0f8632117b7b185ccf931445affa8df530576a41" dependencies = [ "num-bigint", "num-complex", @@ -8883,9 
+8883,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.35" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef89ece63debf11bc32d1ed8d078ac870cbeb44da02afb02a9ff135ae7ca0582" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", @@ -9091,7 +9091,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.5", + "winnow 0.6.6", ] [[package]] @@ -9801,7 +9801,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ "windows-core", - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -9810,7 +9810,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -9828,7 +9828,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -9848,17 +9848,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.4", - "windows_aarch64_msvc 0.52.4", - "windows_i686_gnu 0.52.4", - "windows_i686_msvc 0.52.4", - "windows_x86_64_gnu 0.52.4", - "windows_x86_64_gnullvm 0.52.4", - "windows_x86_64_msvc 0.52.4", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + 
"windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -9869,9 +9870,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -9881,9 +9882,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -9893,9 +9894,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -9905,9 +9912,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -9917,9 +9924,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -9929,9 +9936,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -9941,9 +9948,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winnow" @@ -9956,9 +9963,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dffa400e67ed5a4dd237983829e66475f0a4a26938c4b04c21baede6262215b8" +checksum = "f0c976aaaa0e1f90dbb21e9587cdaf1d9679a1cde8875c0d6bd83ab96a208352" dependencies = [ "memchr", ] From f6f713ddded3e71ac8fa64debd0dda7a6134938f Mon Sep 17 00:00:00 2001 From: Delweng Date: Sun, 14 Apr 2024 21:15:01 +0800 
Subject: [PATCH 153/700] chore(discv5): add comments for op bootnodes (#7634) Signed-off-by: jsvisa --- crates/net/discv5/src/config.rs | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index fb47276ed9762..eb8b409de2d9e 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -25,10 +25,24 @@ pub const OPSTACK: &[u8] = b"opstack"; const DEFAULT_SECONDS_LOOKUP_INTERVAL: u64 = 60; /// Optimism mainnet and base mainnet boot nodes. -const BOOT_NODES_OP_MAINNET_AND_BASE_MAINNET: &[&str] = &["enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305", "enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305", "enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305", "enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301", "enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301", "enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301", "enode://8a5a5006159bf079d06a04e5eceab2a1ce6e0f721875b2a9c96905336219dbe14203d38f70f3754686a6324f786c2f9852d8c0dd3adac2d080f4db35efc678c5@3.231.11.52:30301", "enode://cdadbe835308ad3557f9a1de8db411da1a260a98f8421d62da90e71da66e55e98aaa8e90aa7ce01b408a54e4bd2253d701218081ded3dbe5efbbc7b41d7cef79@54.198.153.150:30301"]; +/// Added from +const BOOT_NODES_OP_MAINNET_AND_BASE_MAINNET: &[&str] = &[ + // OP Labs + 
"enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305", + "enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305", + "enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305", + // Base + "enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301", + "enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301", + "enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301", + "enode://8a5a5006159bf079d06a04e5eceab2a1ce6e0f721875b2a9c96905336219dbe14203d38f70f3754686a6324f786c2f9852d8c0dd3adac2d080f4db35efc678c5@3.231.11.52:30301", + "enode://cdadbe835308ad3557f9a1de8db411da1a260a98f8421d62da90e71da66e55e98aaa8e90aa7ce01b408a54e4bd2253d701218081ded3dbe5efbbc7b41d7cef79@54.198.153.150:30301" +]; /// Optimism sepolia and base sepolia boot nodes. -const BOOT_NODES_OP_SEPOLIA_AND_BASE_SEPOLIA: &[&str] = &["enode://09d1a6110757b95628cc54ab6cc50a29773075ed00e3a25bd9388807c9a6c007664e88646a6fefd82baad5d8374ba555e426e8aed93f0f0c517e2eb5d929b2a2@34.65.21.188:30304?discport=30303"]; +const BOOT_NODES_OP_SEPOLIA_AND_BASE_SEPOLIA: &[&str] = &[ + "enode://09d1a6110757b95628cc54ab6cc50a29773075ed00e3a25bd9388807c9a6c007664e88646a6fefd82baad5d8374ba555e426e8aed93f0f0c517e2eb5d929b2a2@34.65.21.188:30304?discport=30303" +]; /// Builds a [`Config`]. 
#[derive(Debug, Default)] From d4ea41528a68c851b3d67c2a224cc2b225f65fef Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 14 Apr 2024 15:18:28 +0200 Subject: [PATCH 154/700] fix(cli): import cmd boundaries (#7620) --- bin/reth/src/commands/import.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 9f6161a2cf316..49fdba1eef857 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -192,8 +192,8 @@ impl ImportCommand { let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) .build(file_client.clone(), consensus.clone()) .into_task(); - header_downloader.update_local_head(file_client.tip_header().unwrap()); - header_downloader.update_sync_target(SyncTarget::Tip(file_client.start().unwrap())); + header_downloader.update_local_head(file_client.start_header().unwrap()); + header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap())); let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) .build(file_client.clone(), consensus.clone(), provider_factory.clone()) From 3e8d5c69cf67906909d41fa5b2737bd9667e46e5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 14 Apr 2024 20:07:41 +0200 Subject: [PATCH 155/700] feat: replace reth native AccessList type (#7636) --- .../primitives/src/transaction/access_list.rs | 230 ++++-------------- crates/primitives/src/transaction/mod.rs | 6 +- .../rpc-types-compat/src/transaction/typed.rs | 6 +- 3 files changed, 57 insertions(+), 185 deletions(-) diff --git a/crates/primitives/src/transaction/access_list.rs b/crates/primitives/src/transaction/access_list.rs index c15646b4be5ee..acaf132c479a6 100644 --- a/crates/primitives/src/transaction/access_list.rs +++ b/crates/primitives/src/transaction/access_list.rs @@ -1,202 +1,74 @@ -use crate::{Address, B256, U256}; -use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, 
RlpEncodableWrapper}; -use reth_codecs::{main_codec, Compact}; -use std::{ - mem, - ops::{Deref, DerefMut}, -}; +//! [EIP-2930](https://eips.ethereum.org/EIPS/eip-2930): Access List types -/// Represents a list of addresses and storage keys that a transaction plans to access. -/// -/// Accesses outside this list incur higher costs due to gas charging. -/// -/// This structure is part of [EIP-2930](https://eips.ethereum.org/EIPS/eip-2930), introducing an optional access list for Ethereum transactions. -/// -/// The access list allows pre-specifying and pre-paying for accounts and storage -/// slots, mitigating risks introduced by [EIP-2929](https://eips.ethereum.org/EIPS/e). -#[main_codec(rlp)] -#[derive(Clone, Debug, PartialEq, Eq, Hash, Default, RlpDecodable, RlpEncodable)] -#[serde(rename_all = "camelCase")] -pub struct AccessListItem { - /// Account address that would be loaded at the start of execution - pub address: Address, - /// The storage keys to be loaded at the start of execution. - /// - /// Each key is a 32-byte value representing a specific storage slot. - #[cfg_attr( - any(test, feature = "arbitrary"), - proptest( - strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=20)" - ) - )] - pub storage_keys: Vec, -} +/// Re-export from `alloy_eips`. +#[doc(inline)] +pub use alloy_eips::eip2930::{AccessList, AccessListItem}; -impl AccessListItem { - /// Calculates a heuristic for the in-memory size of the [AccessListItem]. - #[inline] - pub fn size(&self) -> usize { - mem::size_of::
() + self.storage_keys.capacity() * mem::size_of::() - } -} +#[cfg(test)] +mod tests { + use super::*; + use crate::{Address, B256}; + use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; + use proptest::proptest; + use reth_codecs::{main_codec, Compact}; -/// AccessList as defined in [EIP-2930](https://eips.ethereum.org/EIPS/eip-2930) -#[main_codec(rlp)] -#[derive(Clone, Debug, PartialEq, Eq, Hash, Default, RlpDecodableWrapper, RlpEncodableWrapper)] -pub struct AccessList( - #[cfg_attr( - any(test, feature = "arbitrary"), - proptest( - strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=20)" - ) + /// This type is kept for compatibility tests after the codec support was added to alloy-eips + /// AccessList type natively + #[main_codec(rlp)] + #[derive( + Clone, Debug, PartialEq, Eq, Hash, Default, RlpDecodableWrapper, RlpEncodableWrapper, )] - pub Vec, -); - -impl AccessList { - /// Converts the list into a vec, expected by revm - pub fn flattened(&self) -> Vec<(Address, Vec)> { - self.flatten().collect() - } - - /// Consumes the type and converts the list into a vec, expected by revm - pub fn into_flattened(self) -> Vec<(Address, Vec)> { - self.into_flatten().collect() - } - - /// Consumes the type and returns an iterator over the list's addresses and storage keys. - pub fn into_flatten(self) -> impl Iterator)> { - self.0.into_iter().map(|item| { - ( - item.address, - item.storage_keys.into_iter().map(|slot| U256::from_be_bytes(slot.0)).collect(), - ) - }) - } - - /// Returns an iterator over the list's addresses and storage keys. - pub fn flatten(&self) -> impl Iterator)> + '_ { - self.iter().map(|item| { - ( - item.address, - item.storage_keys.iter().map(|slot| U256::from_be_bytes(slot.0)).collect(), - ) - }) - } - - /// Calculates a heuristic for the in-memory size of the [AccessList]. 
- #[inline] - pub fn size(&self) -> usize { - // take into account capacity - self.iter().map(AccessListItem::size).sum::() + - self.capacity() * mem::size_of::() - } - - /// Returns the position of the given address in the access list, if present. - pub fn index_of_address(&self, address: Address) -> Option { - self.iter().position(|item| item.address == address) - } - - /// Checks if a specific storage slot within an account is present in the access list. - /// - /// Returns a tuple with flags for the presence of the account and the slot. - pub fn contains(&self, address: Address, slot: B256) -> (bool, bool) { - self.index_of_address(address) - .map_or((false, false), |idx| (true, self.contains_storage_key_at_index(slot, idx))) - } - - /// Checks if the access list contains the specified address. - pub fn contains_address(&self, address: Address) -> bool { - self.iter().any(|item| item.address == address) - } - - /// Checks if the storage keys at the given index within an account are present in the access - /// list. - pub fn contains_storage_key_at_index(&self, slot: B256, index: usize) -> bool { - self.get(index).map_or(false, |entry| { - entry.storage_keys.iter().any(|storage_key| *storage_key == slot) - }) - } + struct RethAccessList( + #[proptest( + strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=20)" + )] + Vec, + ); - /// Adds an address to the access list and returns `true` if the operation results in a change, - /// indicating that the address was not previously present. 
- pub fn add_address(&mut self, address: Address) -> bool { - !self.contains_address(address) && { - self.push(AccessListItem { address, storage_keys: Vec::new() }); - true + impl PartialEq for RethAccessList { + fn eq(&self, other: &AccessList) -> bool { + self.0.iter().zip(other.iter()).all(|(a, b)| a == b) } } -} - -impl Deref for AccessList { - type Target = Vec; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} -impl DerefMut for AccessList { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl From for AccessList { - #[inline] - fn from(value: reth_rpc_types::AccessList) -> Self { - AccessList( - value - .0 - .into_iter() - .map(|item| AccessListItem { - address: item.address, - storage_keys: item.storage_keys, - }) - .collect(), - ) + // This + #[main_codec(rlp)] + #[derive(Clone, Debug, PartialEq, Eq, Hash, Default, RlpDecodable, RlpEncodable)] + #[serde(rename_all = "camelCase")] + struct RethAccessListItem { + /// Account address that would be loaded at the start of execution + address: Address, + /// The storage keys to be loaded at the start of execution. + /// + /// Each key is a 32-byte value representing a specific storage slot. 
+ #[proptest( + strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=20)" + )] + storage_keys: Vec, } -} -impl From for reth_rpc_types::AccessList { - #[inline] - fn from(value: AccessList) -> Self { - reth_rpc_types::AccessList( - value - .0 - .into_iter() - .map(|item| reth_rpc_types::AccessListItem { - address: item.address, - storage_keys: item.storage_keys, - }) - .collect(), - ) + impl PartialEq for RethAccessListItem { + fn eq(&self, other: &AccessListItem) -> bool { + self.address == other.address && self.storage_keys == other.storage_keys + } } -} - -#[cfg(test)] -mod tests { - use super::*; - - use proptest::proptest; proptest!( #[test] - fn test_roundtrip_accesslist_conversion(access_list: AccessList) { + fn test_roundtrip_accesslist_compat(access_list: RethAccessList) { // Convert access_list to buffer and then create alloy_access_list from buffer and // compare - let mut compacted_access_list = Vec::::new(); - let len = access_list.clone().to_compact(&mut compacted_access_list); + let mut compacted_reth_access_list = Vec::::new(); + let len = access_list.clone().to_compact(&mut compacted_reth_access_list); - let alloy_access_list = AccessList::from_compact(&compacted_access_list, len).0; + // decode the compacted buffer to AccessList + let alloy_access_list = AccessList::from_compact(&compacted_reth_access_list, len).0; assert_eq!(access_list, alloy_access_list); - // Create alloy_access_list from access_list and then convert it to buffer and compare - // compacted_alloy_access_list and compacted_access_list - let alloy_access_list = AccessList(access_list.0); let mut compacted_alloy_access_list = Vec::::new(); - let _len = alloy_access_list.to_compact(&mut compacted_alloy_access_list); - assert_eq!(compacted_access_list, compacted_alloy_access_list); + let alloy_len = alloy_access_list.to_compact(&mut compacted_alloy_access_list); + assert_eq!(len, alloy_len); + assert_eq!(compacted_reth_access_list, compacted_alloy_access_list); } 
); } diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 2d3d664b2184c..7c57495fed927 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -652,7 +652,7 @@ impl TryFrom for Transaction { to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), value: tx.value, input: tx.input, - access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?.into(), + access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, gas_price: tx.gas_price.ok_or(ConversionError::MissingGasPrice)?, })) } @@ -673,7 +673,7 @@ impl TryFrom for Transaction { .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), value: tx.value, - access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?.into(), + access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, input: tx.input, })) } @@ -694,7 +694,7 @@ impl TryFrom for Transaction { .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), value: tx.value, - access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?.into(), + access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, input: tx.input, blob_versioned_hashes: tx .blob_versioned_hashes diff --git a/crates/rpc/rpc-types-compat/src/transaction/typed.rs b/crates/rpc/rpc-types-compat/src/transaction/typed.rs index a14898195ada7..cc90c626ec98f 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/typed.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/typed.rs @@ -28,7 +28,7 @@ pub fn to_primitive_transaction( to: to_primitive_transaction_kind(tx.kind), value: tx.value, input: tx.input, - access_list: tx.access_list.into(), + access_list: tx.access_list, }), TypedTransactionRequest::EIP1559(tx) => Transaction::Eip1559(TxEip1559 { chain_id: 
tx.chain_id, @@ -38,7 +38,7 @@ pub fn to_primitive_transaction( to: to_primitive_transaction_kind(tx.kind), value: tx.value, input: tx.input, - access_list: tx.access_list.into(), + access_list: tx.access_list, max_priority_fee_per_gas: tx.max_priority_fee_per_gas.to(), }), TypedTransactionRequest::EIP4844(tx) => Transaction::Eip4844(TxEip4844 { @@ -49,7 +49,7 @@ pub fn to_primitive_transaction( max_priority_fee_per_gas: tx.max_priority_fee_per_gas.to(), to: to_primitive_transaction_kind(tx.kind), value: tx.value, - access_list: tx.access_list.into(), + access_list: tx.access_list, blob_versioned_hashes: tx.blob_versioned_hashes, max_fee_per_blob_gas: tx.max_fee_per_blob_gas.to(), input: tx.input, From cfbebc159587558463750174e3eac9ac91c2eb39 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Sun, 14 Apr 2024 19:19:53 +0100 Subject: [PATCH 156/700] docs(book): update node size numbers (#7638) --- book/installation/installation.md | 20 ++++++++++---------- book/run/pruning.md | 24 +++++++----------------- 2 files changed, 17 insertions(+), 27 deletions(-) diff --git a/book/installation/installation.md b/book/installation/installation.md index dbbaa1b34e002..9ecf71cc5d8d1 100644 --- a/book/installation/installation.md +++ b/book/installation/installation.md @@ -14,12 +14,12 @@ The hardware requirements for running Reth depend on the node configuration and The most important requirement is by far the disk, whereas CPU and RAM requirements are relatively flexible. 
-| | Archive Node | Full Node | -|-----------|---------------------------------------|-------------------------------------| -| Disk | At least 2.2TB (TLC NVMe recommended) | At least 1TB (TLC NVMe recommended) | -| Memory | 8GB+ | 8GB+ | -| CPU | Higher clock speed over core count | Higher clock speeds over core count | -| Bandwidth | Stable 24Mbps+ | Stable 24Mbps+ | +| | Archive Node | Full Node | +|-----------|---------------------------------------|---------------------------------------| +| Disk | At least 2.2TB (TLC NVMe recommended) | At least 1.2TB (TLC NVMe recommended) | +| Memory | 8GB+ | 8GB+ | +| CPU | Higher clock speed over core count | Higher clock speeds over core count | +| Bandwidth | Stable 24Mbps+ | Stable 24Mbps+ | #### QLC and TLC @@ -34,14 +34,14 @@ Prior to purchasing an NVMe drive, it is advisable to research and determine whe ### Disk There are multiple types of disks to sync Reth, with varying size requirements, depending on the syncing mode. -As of October 2023 at block number 18.3M: +As of April 2024 at block number 19.6M: -* Archive Node: At least 2.2TB is required -* Full Node: At least 1TB is required +* Archive Node: At least 2.14TB is required +* Full Node: At least 1.13TB is required NVMe drives are recommended for the best performance, with SSDs being a cheaper alternative. HDDs are the cheapest option, but they will take the longest to sync, and are not recommended. -As of July 2023, syncing an Ethereum mainnet node to block 17.7M on NVMe drives takes about 50 hours, while on a GCP "Persistent SSD" it takes around 5 days. +As of February 2024, syncing an Ethereum mainnet node to block 19.3M on NVMe drives takes about 50 hours, while on a GCP "Persistent SSD" it takes around 5 days. 
> **Note** > diff --git a/book/run/pruning.md b/book/run/pruning.md index 6800b7f5fa174..4e69665510346 100644 --- a/book/run/pruning.md +++ b/book/run/pruning.md @@ -48,14 +48,14 @@ RUST_LOG=info reth node \ ## Size -All numbers are as of October 2023 at block number 18.3M for mainnet. +All numbers are as of April 2024 at block number 19.6M for mainnet. ### Archive Node Archive node occupies at least 2.14TB. You can track the growth of Reth archive node size with our -[public Grafana dashboard](https://reth.paradigm.xyz/d/2k8BXz24k/reth?orgId=1&refresh=30s&viewPanel=52). +[public Grafana dashboard](https://reth.paradigm.xyz/d/2k8BXz24x/reth?orgId=1&refresh=30s&viewPanel=52). ### Pruned Node @@ -64,15 +64,15 @@ If pruned fully, this is the total freed space you'll get, per segment: | Segment | Size | | ------------------ | ----- | -| Sender Recovery | 75GB | -| Transaction Lookup | 150GB | +| Sender Recovery | 85GB | +| Transaction Lookup | 200GB | | Receipts | 250GB | -| Account History | 240GB | -| Storage History | 700GB | +| Account History | 235GB | +| Storage History | 590GB | ### Full Node -Full node occupies at least 950GB. +Full node occupies at least 1.13TB. Essentially, the full node is the same as following configuration for the pruned node: @@ -100,16 +100,6 @@ Meaning, it prunes: is completed, so the disk space is reclaimed slowly. 
- Receipts up to the last 10064 blocks, preserving all receipts with the logs from Beacon Deposit Contract -Given the aforementioned segment sizes, we get the following full node size: - -```text -Archive Node - Receipts - AccountsHistory - StoragesHistory = Full Node -``` - -```text -2.14TB - 250GB - 240GB - 700GB = 950GB -``` - ## RPC support As it was mentioned in the [pruning configuration chapter](./config.md#the-prune-section), there are several segments which can be pruned From a2654650bac5a9d3d6ec988fff59637d5b716de6 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sun, 14 Apr 2024 23:01:49 +0200 Subject: [PATCH 157/700] Implement `From` for `BlockBody` (#7643) --- crates/primitives/src/block.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 5ccba67dda839..b22007b474d3d 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -604,6 +604,12 @@ impl BlockBody { } } +impl From for BlockBody { + fn from(block: Block) -> Self { + Self { transactions: block.body, ommers: block.ommers, withdrawals: block.withdrawals } + } +} + #[cfg(test)] mod tests { use super::{BlockNumberOrTag::*, *}; From 62eb8fd2b258439f2091f66fb2cda4166d256261 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 15 Apr 2024 11:45:05 +0200 Subject: [PATCH 158/700] chore(tree): remove `BlockchainTreeEngine::unwind` (#7647) --- crates/blockchain-tree/src/noop.rs | 4 ---- crates/blockchain-tree/src/shareable.rs | 8 -------- crates/interfaces/src/blockchain_tree/mod.rs | 3 --- crates/storage/provider/src/providers/mod.rs | 4 ---- 4 files changed, 19 deletions(-) diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index 1917a9955196b..a9fc43eb8344e 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -56,10 +56,6 @@ impl BlockchainTreeEngine for NoopBlockchainTree { fn 
make_canonical(&self, block_hash: BlockHash) -> Result { Err(BlockchainTreeError::BlockHashNotFoundInChain { block_hash }.into()) } - - fn unwind(&self, _unwind_to: BlockNumber) -> RethResult<()> { - Ok(()) - } } impl BlockchainTreeViewer for NoopBlockchainTree { diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 20c32acf9bf8d..03adfe2afa110 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -101,14 +101,6 @@ where tree.update_chains_metrics(); res } - - fn unwind(&self, unwind_to: BlockNumber) -> RethResult<()> { - trace!(target: "blockchain_tree", unwind_to, "Unwinding to block number"); - let mut tree = self.tree.write(); - let res = tree.unwind(unwind_to); - tree.update_chains_metrics(); - res - } } impl BlockchainTreeViewer for ShareableBlockchainTree diff --git a/crates/interfaces/src/blockchain_tree/mod.rs b/crates/interfaces/src/blockchain_tree/mod.rs index d79e9becd0909..f512d46be8ca8 100644 --- a/crates/interfaces/src/blockchain_tree/mod.rs +++ b/crates/interfaces/src/blockchain_tree/mod.rs @@ -97,9 +97,6 @@ pub trait BlockchainTreeEngine: BlockchainTreeViewer + Send + Sync { /// /// Returns `Ok` if the blocks were canonicalized, or if the blocks were already canonical. fn make_canonical(&self, block_hash: BlockHash) -> Result; - - /// Unwind tables and put it inside state - fn unwind(&self, unwind_to: BlockNumber) -> RethResult<()>; } /// Represents the kind of validation that should be performed when inserting a block. 
diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 73ea827d1d0c6..bc5d6a3dfb5ba 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -679,10 +679,6 @@ where fn make_canonical(&self, block_hash: BlockHash) -> Result { self.tree.make_canonical(block_hash) } - - fn unwind(&self, unwind_to: BlockNumber) -> RethResult<()> { - self.tree.unwind(unwind_to) - } } impl BlockchainTreeViewer for BlockchainProvider From ceeb0da03b5407f8628bbb8fee255c47e20a7ef6 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 15 Apr 2024 12:25:15 +0200 Subject: [PATCH 159/700] fix(op): headers above merge (#7622) --- crates/consensus/beacon-core/Cargo.toml | 5 ++++- crates/consensus/beacon-core/src/lib.rs | 23 ++++++++++++++++++----- crates/consensus/beacon/Cargo.toml | 1 + crates/primitives/src/chain/spec.rs | 19 +++++++++++++++++++ 4 files changed, 42 insertions(+), 6 deletions(-) diff --git a/crates/consensus/beacon-core/Cargo.toml b/crates/consensus/beacon-core/Cargo.toml index 01623eaf9e629..232631f73b601 100644 --- a/crates/consensus/beacon-core/Cargo.toml +++ b/crates/consensus/beacon-core/Cargo.toml @@ -14,4 +14,7 @@ workspace = true # reth reth-consensus-common.workspace = true reth-primitives.workspace = true -reth-interfaces.workspace = true \ No newline at end of file +reth-interfaces.workspace = true + +[features] +optimism = ["reth-primitives/optimism"] \ No newline at end of file diff --git a/crates/consensus/beacon-core/src/lib.rs b/crates/consensus/beacon-core/src/lib.rs index e9b4114a3b758..599e010092202 100644 --- a/crates/consensus/beacon-core/src/lib.rs +++ b/crates/consensus/beacon-core/src/lib.rs @@ -46,14 +46,27 @@ impl Consensus for BeaconConsensus { Ok(()) } + #[allow(unused_assignments)] + #[allow(unused_mut)] fn validate_header_with_total_difficulty( &self, header: &Header, total_difficulty: U256, ) -> Result<(), ConsensusError> { - if 
self.chain_spec.fork(Hardfork::Paris).active_at_ttd(total_difficulty, header.difficulty) + let mut is_post_merge = self + .chain_spec + .fork(Hardfork::Paris) + .active_at_ttd(total_difficulty, header.difficulty); + + #[cfg(feature = "optimism")] { - if !header.is_zero_difficulty() { + // If OP-Stack then bedrock activation number determines when TTD (eth Merge) has been + // reached. + is_post_merge = self.chain_spec.is_bedrock_active_at_block(header.number); + } + + if is_post_merge { + if !self.chain_spec.is_optimism() && !header.is_zero_difficulty() { return Err(ConsensusError::TheMergeDifficultyIsNotZero) } @@ -94,10 +107,10 @@ impl Consensus for BeaconConsensus { }) } - // Goerli exception: + // Goerli and early OP exception: // * If the network is goerli pre-merge, ignore the extradata check, since we do not - // support clique. - if self.chain_spec.chain != Chain::goerli() { + // support clique. Same goes for OP blocks below Bedrock. + if self.chain_spec.chain != Chain::goerli() && !self.chain_spec.is_optimism() { validate_header_extradata(header)?; } } diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 52389143c4041..2c86a4d414e12 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -65,4 +65,5 @@ optimism = [ "reth-interfaces/optimism", "reth-provider/optimism", "reth-blockchain-tree/optimism", + "reth-beacon-consensus-core/optimism" ] diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 160eddf9df5e9..f8b599f8dc794 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -574,6 +574,12 @@ impl ChainSpec { self.chain.is_optimism() } + /// Returns `true` if this chain is Optimism mainnet. + #[inline] + pub fn is_optimism_mainnet(&self) -> bool { + self.chain == Chain::optimism_mainnet() + } + /// Get the genesis block specification. 
/// /// To get the header for the genesis block, use [`Self::genesis_header`] instead. @@ -779,6 +785,13 @@ impl ChainSpec { self.fork(Hardfork::Homestead).active_at_block(block_number) } + /// Convenience method to check if [Hardfork::Bedrock] is active at a given block number. + #[cfg(feature = "optimism")] + #[inline] + pub fn is_bedrock_active_at_block(&self, block_number: u64) -> bool { + self.fork(Hardfork::Bedrock).active_at_block(block_number) + } + /// Creates a [`ForkFilter`] for the block described by [Head]. pub fn fork_filter(&self, head: Head) -> ForkFilter { let forks = self.forks_iter().filter_map(|(_, condition)| { @@ -3176,4 +3189,10 @@ Post-merge hard forks (timestamp based): BASE_MAINNET.latest_fork_id() ) } + + #[cfg(feature = "optimism")] + #[test] + fn is_bedrock_active() { + assert!(!OP_MAINNET.is_bedrock_active_at_block(1)) + } } From 58cd4c5ed87c0f61eafea1e4da71072806bbcfba Mon Sep 17 00:00:00 2001 From: yjh Date: Mon, 15 Apr 2024 19:30:16 +0800 Subject: [PATCH 160/700] chore: update `decode_enveloped` docs (#7645) --- crates/primitives/src/transaction/mod.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 7c57495fed927..abc810b9e2681 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1491,9 +1491,9 @@ impl TransactionSigned { /// Decodes the "raw" format of transaction (similar to `eth_sendRawTransaction`). /// - /// This should be used for any RPC method that accepts a raw transaction, **excluding** raw - /// EIP-4844 transactions in `eth_sendRawTransaction`. Currently, this includes: - /// * `eth_sendRawTransaction` for non-EIP-4844 transactions. + /// This should be used for any RPC method that accepts a raw transaction. + /// Currently, this includes: + /// * `eth_sendRawTransaction`. /// * All versions of `engine_newPayload`, in the `transactions` field. 
/// /// A raw transaction is either a legacy transaction or EIP-2718 typed transaction. @@ -1503,9 +1503,6 @@ impl TransactionSigned { /// /// For EIP-2718 typed transactions, the format is encoded as the type of the transaction /// followed by the rlp of the transaction: `type || rlp(tx-data)`. - /// - /// To decode EIP-4844 transactions from `eth_sendRawTransaction`, use - /// [PooledTransactionsElement::decode_enveloped]. pub fn decode_enveloped(data: &mut &[u8]) -> alloy_rlp::Result { if data.is_empty() { return Err(RlpError::InputTooShort) From 288b7fb37b908c352287c29050fe3fba22315bf3 Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Mon, 15 Apr 2024 19:45:03 +0800 Subject: [PATCH 161/700] ci: automatically set pr labels upon open (#7613) --- .github/scripts/label_pr.js | 35 ++++++++++++++++++++++++++++++++++ .github/workflows/label-pr.yml | 24 +++++++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100644 .github/scripts/label_pr.js create mode 100644 .github/workflows/label-pr.yml diff --git a/.github/scripts/label_pr.js b/.github/scripts/label_pr.js new file mode 100644 index 0000000000000..9a6810ef34910 --- /dev/null +++ b/.github/scripts/label_pr.js @@ -0,0 +1,35 @@ +module.exports = async ({ github, context }) => { + try { + const prNumber = context.payload.pull_request.number; + const prBody = context.payload.pull_request.body; + const repo = context.repo; + + const repoUrl = context.payload.repository.html_url; + const pattern = new RegExp(`(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved) ${repoUrl}/issues/(?\\d+)`, 'i') + + const re = prBody.match(pattern); + const issueNumber = re.groups?.issue_number; + + if (!issueNumber) { + console.log("No issue reference found in PR description."); + return; + } + + const issue = await github.rest.issues.get({ + ...repo, + issue_number: issueNumber, + }); + + const issueLabels = issue.data.labels.map(label => label.name); + if (issueLabels.length > 0) { + await 
github.rest.issues.setLabels({ + ...repo, + issue_number: prNumber, + labels: issueLabels, + }); + } + } catch (err) { + console.error(`Failed to label PR`); + console.error(err); + } +} \ No newline at end of file diff --git a/.github/workflows/label-pr.yml b/.github/workflows/label-pr.yml new file mode 100644 index 0000000000000..e52721b9cc832 --- /dev/null +++ b/.github/workflows/label-pr.yml @@ -0,0 +1,24 @@ +name: Label PRs + +on: + pull_request: + types: [opened] + +jobs: + label_prs: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Label PRs + uses: actions/github-script@v7 + with: + script: | + const label_pr = require('./.github/scripts/label_pr.js') + await label_pr({github, context}) + From 8f1f386f5202c1099c002974d3b08bed79d73ce9 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 15 Apr 2024 13:46:34 +0200 Subject: [PATCH 162/700] feat(op): chunked chain import (#7574) Co-authored-by: Atris --- Cargo.lock | 1 + bin/reth/src/commands/import.rs | 107 +++++++----- crates/net/downloaders/Cargo.toml | 1 + crates/net/downloaders/src/file_client.rs | 201 ++++++++++++++++++++-- crates/net/downloaders/src/file_codec.rs | 5 +- 5 files changed, 255 insertions(+), 60 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 166e66290df82..5a71369a251fa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6408,6 +6408,7 @@ dependencies = [ "itertools 0.12.1", "metrics", "pin-project", + "rand 0.8.5", "rayon", "reth-config", "reth-db", diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 49fdba1eef857..74e694388acaa 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -15,7 +15,8 @@ use reth_beacon_consensus::BeaconConsensus; use reth_config::Config; use reth_db::{database::Database, init_db}; use reth_downloaders::{ - bodies::bodies::BodiesDownloaderBuilder, file_client::FileClient, + 
bodies::bodies::BodiesDownloaderBuilder, + file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_interfaces::{ @@ -76,6 +77,10 @@ pub struct ImportCommand { #[arg(long, verbatim_doc_comment, env = OP_RETH_MAINNET_BELOW_BEDROCK)] op_mainnet_below_bedrock: bool, + /// Chunk byte length. + #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] + chunk_len: Option, + #[command(flatten)] db: DatabaseArgs, @@ -101,6 +106,10 @@ impl ImportCommand { debug!(target: "reth::cli", "Execution stage disabled"); } + debug!(target: "reth::cli", + chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), "Chunking chain import" + ); + // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path()); @@ -123,49 +132,55 @@ impl ImportCommand { let consensus = Arc::new(BeaconConsensus::new(self.chain.clone())); info!(target: "reth::cli", "Consensus engine initialized"); - // create a new FileClient - info!(target: "reth::cli", "Importing chain file"); - let file_client = Arc::new(FileClient::new(&self.path).await?); - - // override the tip - let tip = file_client.tip().expect("file client has no tip"); - info!(target: "reth::cli", "Chain file read"); - - let (mut pipeline, events) = self - .build_import_pipeline( - config, - provider_factory.clone(), - &consensus, - file_client, - StaticFileProducer::new( + // open file + let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?; + + while let Some(file_client) = reader.next_chunk().await? 
{ + // create a new FileClient from chunk read from file + info!(target: "reth::cli", + "Importing chain file chunk" + ); + + // override the tip + let tip = file_client.tip().expect("file client has no tip"); + info!(target: "reth::cli", "Chain file chunk read"); + + let (mut pipeline, events) = self + .build_import_pipeline( + &config, provider_factory.clone(), - provider_factory.static_file_provider(), - PruneModes::default(), - ), - self.disable_execution, - ) - .await?; - - // override the tip - pipeline.set_tip(tip); - debug!(target: "reth::cli", ?tip, "Tip manually set"); - - let provider = provider_factory.provider()?; - - let latest_block_number = - provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); - tokio::spawn(reth_node_core::events::node::handle_events( - None, - latest_block_number, - events, - db.clone(), - )); - - // Run pipeline - info!(target: "reth::cli", "Starting sync pipeline"); - tokio::select! { - res = pipeline.run() => res?, - _ = tokio::signal::ctrl_c() => {}, + &consensus, + Arc::new(file_client), + StaticFileProducer::new( + provider_factory.clone(), + provider_factory.static_file_provider(), + PruneModes::default(), + ), + self.disable_execution, + ) + .await?; + + // override the tip + pipeline.set_tip(tip); + debug!(target: "reth::cli", ?tip, "Tip manually set"); + + let provider = provider_factory.provider()?; + + let latest_block_number = + provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); + tokio::spawn(reth_node_core::events::node::handle_events( + None, + latest_block_number, + events, + db.clone(), + )); + + // Run pipeline + info!(target: "reth::cli", "Starting sync pipeline"); + tokio::select! 
{ + res = pipeline.run() => res?, + _ = tokio::signal::ctrl_c() => {}, + } } info!(target: "reth::cli", "Chain file imported"); @@ -174,7 +189,7 @@ impl ImportCommand { async fn build_import_pipeline( &self, - config: Config, + config: &Config, provider_factory: ProviderFactory, consensus: &Arc, file_client: Arc, @@ -220,7 +235,7 @@ impl ImportCommand { header_downloader, body_downloader, factory.clone(), - config.stages.etl, + config.stages.etl.clone(), ) .set(SenderRecoveryStage { commit_threshold: config.stages.sender_recovery.commit_threshold, @@ -239,7 +254,7 @@ impl ImportCommand { .clean_threshold .max(config.stages.account_hashing.clean_threshold) .max(config.stages.storage_hashing.clean_threshold), - config.prune.map(|prune| prune.segments).unwrap_or_default(), + config.prune.clone().map(|prune| prune.segments).unwrap_or_default(), )) .disable_if(StageId::Execution, || disable_execution), ) diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 83aadb85326f2..7ae6db8e69521 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -52,6 +52,7 @@ assert_matches.workspace = true tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } alloy-rlp.workspace = true itertools.workspace = true +rand.workspace = true tempfile.workspace = true diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index a692bc8258d41..ce830383f0d37 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -8,8 +8,8 @@ use reth_interfaces::p2p::{ priority::Priority, }; use reth_primitives::{ - BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, Header, HeadersDirection, PeerId, - SealedHeader, B256, + BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, BytesMut, Header, HeadersDirection, + PeerId, SealedHeader, B256, }; use std::{collections::HashMap, path::Path}; use thiserror::Error; @@ -18,6 +18,9 @@ use 
tokio_stream::StreamExt; use tokio_util::codec::FramedRead; use tracing::{trace, warn}; +/// Byte length of chunk to read from chain file. +pub const DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE: u64 = 1_000_000_000; + /// Front-end API for fetching chain data from a file. /// /// Blocks are assumed to be written one after another in a file, as rlp bytes. @@ -49,8 +52,8 @@ pub enum FileClientError { Io(#[from] std::io::Error), /// An error occurred when decoding blocks, headers, or rlp headers from the file. - #[error(transparent)] - Rlp(#[from] alloy_rlp::Error), + #[error("{0}")] + Rlp(alloy_rlp::Error, Vec), } impl FileClient { @@ -66,23 +69,41 @@ impl FileClient { let metadata = file.metadata().await?; let file_len = metadata.len(); - // todo: read chunks into memory. for op mainnet 1/8 th of blocks below bedrock can be - // decoded at once let mut reader = vec![]; file.read_to_end(&mut reader).await.unwrap(); + Ok(Self::from_reader(&reader[..], file_len).await?.0) + } + + /// Initialize the [`FileClient`] from bytes that have been read from file. 
+ pub(crate) async fn from_reader( + reader: B, + num_bytes: u64, + ) -> Result<(Self, Vec), FileClientError> + where + B: AsyncReadExt + Unpin, + { let mut headers = HashMap::new(); let mut hash_to_number = HashMap::new(); let mut bodies = HashMap::new(); // use with_capacity to make sure the internal buffer contains the entire file - let mut stream = FramedRead::with_capacity(&reader[..], BlockFileCodec, file_len as usize); + let mut stream = FramedRead::with_capacity(reader, BlockFileCodec, num_bytes as usize); + + let mut remaining_bytes = vec![]; let mut log_interval = 0; let mut log_interval_start_block = 0; while let Some(block_res) = stream.next().await { - let block = block_res?; + let block = match block_res { + Ok(block) => block, + Err(FileClientError::Rlp(_err, bytes)) => { + remaining_bytes = bytes; + break + } + Err(err) => return Err(err), + }; let block_number = block.header.number; let block_hash = block.header.hash_slow(); @@ -99,11 +120,15 @@ impl FileClient { ); if log_interval == 0 { + trace!(target: "downloaders::file", + block_number, + "read first block" + ); log_interval_start_block = block_number; - } else if log_interval % 100000 == 0 { + } else if log_interval % 100_000 == 0 { trace!(target: "downloaders::file", blocks=?log_interval_start_block..=block_number, - "inserted blocks into db" + "read blocks from file" ); log_interval_start_block = block_number + 1; } @@ -112,12 +137,12 @@ impl FileClient { trace!(blocks = headers.len(), "Initialized file client"); - Ok(Self { headers, hash_to_number, bodies }) + Ok((Self { headers, hash_to_number, bodies }, remaining_bytes)) } /// Get the tip hash of the chain. pub fn tip(&self) -> Option { - self.headers.get(&((self.headers.len() - 1) as u64)).map(|h| h.hash_slow()) + self.headers.get(&self.max_block()?).map(|h| h.hash_slow()) } /// Get the start hash of the chain. @@ -267,6 +292,96 @@ impl DownloadClient for FileClient { } } +/// Chunks file into several [`FileClient`]s. 
+#[derive(Debug)] +pub struct ChunkedFileReader { + /// File to read from. + file: File, + /// Current file length. + file_len: u64, + /// Bytes that have been read. + chunk: Vec, + /// Max bytes per chunk. + chunk_byte_len: u64, +} + +impl ChunkedFileReader { + /// Opens the file to import from given path. Returns a new instance. If no chunk byte length + /// is passed, chunks have [`DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE`] (one static file). + pub async fn new>( + path: P, + chunk_byte_len: Option, + ) -> Result { + let file = File::open(path).await?; + let chunk_byte_len = chunk_byte_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE); + + Self::from_file(file, chunk_byte_len).await + } + + /// Opens the file to import from given path. Returns a new instance. + pub async fn from_file(file: File, chunk_byte_len: u64) -> Result { + // get file len from metadata before reading + let metadata = file.metadata().await?; + let file_len = metadata.len(); + + Ok(Self { file, file_len, chunk: vec![], chunk_byte_len }) + } + + /// Calculates the number of bytes to read from the chain file. Returns a tuple of the chunk + /// length and the remaining file length. + fn chunk_len(&self) -> u64 { + let Self { chunk_byte_len, file_len, .. } = *self; + let file_len = file_len + self.chunk.len() as u64; + + if chunk_byte_len > file_len { + // last chunk + file_len + } else { + chunk_byte_len + } + } + + /// Read next chunk from file. Returns [`FileClient`] containing decoded chunk. 
+ pub async fn next_chunk(&mut self) -> Result, FileClientError> { + if self.file_len == 0 && self.chunk.is_empty() { + // eof + return Ok(None) + } + + let chunk_len = self.chunk_len(); + let old_bytes_len = self.chunk.len() as u64; + + // calculate reserved space in chunk + let new_bytes_len = chunk_len - old_bytes_len; + + // read new bytes from file + let mut reader = BytesMut::with_capacity(new_bytes_len as usize); + self.file.read_buf(&mut reader).await?; + + // update remaining file length + self.file_len -= new_bytes_len; + + trace!(target: "downloaders::file", + max_chunk_byte_len=self.chunk_byte_len, + prev_read_bytes_len=self.chunk.len(), + new_bytes_len, + remaining_file_byte_len=self.file_len, + "new bytes were read from file" + ); + + // read new bytes from file into chunk + self.chunk.extend_from_slice(&reader[..]); + + // make new file client from chunk + let (file_client, bytes) = FileClient::from_reader(&self.chunk[..], chunk_len).await?; + + // save left over bytes + self.chunk = bytes; + + Ok(Some(file_client)) + } +} + #[cfg(test)] mod tests { use super::*; @@ -280,6 +395,7 @@ mod tests { }; use assert_matches::assert_matches; use futures_util::stream::StreamExt; + use rand::Rng; use reth_interfaces::{ p2p::{ bodies::downloader::BodyDownloader, @@ -288,7 +404,7 @@ mod tests { test_utils::TestConsensus, }; use reth_provider::test_utils::create_test_provider_factory; - use std::sync::Arc; + use std::{mem, sync::Arc}; #[tokio::test] async fn streams_bodies_from_buffer() { @@ -350,6 +466,8 @@ mod tests { #[tokio::test] async fn test_download_headers_from_file() { + reth_tracing::init_test_tracing(); + // Generate some random blocks let (file, headers, _) = generate_bodies_file(0..=19).await; // now try to read them back @@ -395,4 +513,61 @@ mod tests { Some(Ok(res)) => assert_eq!(res, zip_blocks(headers.iter(), &mut bodies)) ); } + + #[tokio::test] + async fn test_chunk_download_headers_from_file() { + reth_tracing::init_test_tracing(); + + // rig 
+ + const MAX_BYTE_SIZE_HEADER: usize = 720; + + // Generate some random blocks + let (file, headers, bodies) = generate_bodies_file(0..=14).await; + // now try to read them back in chunks. + for header in &headers { + assert_eq!(720, mem::size_of_val(header)) + } + + // calculate min for chunk byte length range + let mut bodies_sizes = bodies.values().map(|body| body.size()).collect::>(); + bodies_sizes.sort(); + let max_block_size = MAX_BYTE_SIZE_HEADER + bodies_sizes.last().unwrap(); + let chunk_byte_len = rand::thread_rng().gen_range(max_block_size..=max_block_size + 10_000); + + trace!(target: "downloaders::file::test", chunk_byte_len); + + // init reader + let mut reader = ChunkedFileReader::from_file(file, chunk_byte_len as u64).await.unwrap(); + + let mut downloaded_headers: Vec = vec![]; + + let mut local_header = headers.first().unwrap().clone(); + + // test + + while let Some(client) = reader.next_chunk().await.unwrap() { + let sync_target = client.tip_header().unwrap(); + let sync_target_hash = sync_target.hash(); + + // construct headers downloader and use first header + let mut header_downloader = ReverseHeadersDownloaderBuilder::default() + .build(Arc::clone(&Arc::new(client)), Arc::new(TestConsensus::default())); + header_downloader.update_local_head(local_header.clone()); + header_downloader.update_sync_target(SyncTarget::Tip(sync_target_hash)); + + // get headers first + let mut downloaded_headers_chunk = header_downloader.next().await.unwrap().unwrap(); + + // export new local header to outer scope + local_header = sync_target; + + // reverse to make sure it's in the right order before comparing + downloaded_headers_chunk.reverse(); + downloaded_headers.extend_from_slice(&downloaded_headers_chunk); + } + + // the first header is not included in the response + assert_eq!(headers[1..], downloaded_headers); + } } diff --git a/crates/net/downloaders/src/file_codec.rs b/crates/net/downloaders/src/file_codec.rs index b73b15e88430e..156f3316c431c 100644 
--- a/crates/net/downloaders/src/file_codec.rs +++ b/crates/net/downloaders/src/file_codec.rs @@ -31,9 +31,12 @@ impl Decoder for BlockFileCodec { if src.is_empty() { return Ok(None) } + let buf_slice = &mut src.as_ref(); - let body = Block::decode(buf_slice)?; + let body = + Block::decode(buf_slice).map_err(|err| FileClientError::Rlp(err, src.to_vec()))?; src.advance(src.len() - buf_slice.len()); + Ok(Some(body)) } } From bc485d939e6f32383fa16d50a556d1ad9fe0e2c6 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Mon, 15 Apr 2024 14:02:30 +0200 Subject: [PATCH 163/700] refactor: remove unused codecs (#7639) --- crates/storage/codecs/Cargo.toml | 6 +-- crates/storage/codecs/README.md | 11 ----- crates/storage/codecs/derive/Cargo.toml | 5 -- crates/storage/codecs/derive/src/lib.rs | 64 ------------------------- 4 files changed, 1 insertion(+), 85 deletions(-) diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index b9cba081151af..f585accf69cf9 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -30,10 +30,6 @@ proptest.workspace = true proptest-derive.workspace = true [features] -default = ["compact", "std"] +default = ["std"] std = ["alloy-primitives/std", "bytes/std"] -compact = ["reth-codecs-derive/compact"] -scale = ["reth-codecs-derive/scale"] -postcard = ["reth-codecs-derive/postcard"] -no_codec = ["reth-codecs-derive/no_codec"] optimism = ["reth-codecs-derive/optimism"] diff --git a/crates/storage/codecs/README.md b/crates/storage/codecs/README.md index bb542160d8c7c..69a4dd3d5bd67 100644 --- a/crates/storage/codecs/README.md +++ b/crates/storage/codecs/README.md @@ -6,14 +6,3 @@ Examples: - [`Header` struct](../../primitives/src/header.rs) - [DB usage](../db/src/kv/codecs/scale.rs) - -### Features - -Feature defines what is the main codec used by `#[main_codec]`. However it is still possible to define them directly: `#[use_scale]`, `#[use_postcat]`, `#[no_codec]`. 
- -```rust -default = ["scale"] -scale = ["reth-codecs-derive/scale"] -postcard = ["reth-codecs-derive/postcard"] -no_codec = ["reth-codecs-derive/no_codec"] -``` diff --git a/crates/storage/codecs/derive/Cargo.toml b/crates/storage/codecs/derive/Cargo.toml index 7be08c175654a..2b7c5311df3aa 100644 --- a/crates/storage/codecs/derive/Cargo.toml +++ b/crates/storage/codecs/derive/Cargo.toml @@ -25,9 +25,4 @@ similar-asserts.workspace = true syn = { workspace = true, features = ["full", "extra-traits"] } [features] -default = ["compact"] -compact = [] -scale = [] -postcard = [] -no_codec = [] optimism = [] diff --git a/crates/storage/codecs/derive/src/lib.rs b/crates/storage/codecs/derive/src/lib.rs index 9af83148be68f..32e793637c8aa 100644 --- a/crates/storage/codecs/derive/src/lib.rs +++ b/crates/storage/codecs/derive/src/lib.rs @@ -39,64 +39,6 @@ pub fn derive_zstd(input: TokenStream) -> TokenStream { #[rustfmt::skip] #[allow(unreachable_code)] pub fn main_codec(args: TokenStream, input: TokenStream) -> TokenStream { - #[cfg(feature = "compact")] - return use_compact(args, input); - - #[cfg(feature = "scale")] - return use_scale(args, input); - - #[cfg(feature = "postcard")] - return use_postcard(args, input); - - #[cfg(feature = "no_codec")] - return no_codec(args, input); - - // no features - no_codec(args, input) -} - -#[proc_macro_attribute] -pub fn use_scale(_args: TokenStream, input: TokenStream) -> TokenStream { - let mut ast = parse_macro_input!(input as DeriveInput); - let compactable_types = ["u8", "u16", "u32", "i32", "i64", "u64", "f32", "f64"]; - - if let syn::Data::Struct(ref mut data) = &mut ast.data { - if let syn::Fields::Named(fields) = &mut data.fields { - for field in fields.named.iter_mut() { - if let syn::Type::Path(ref path) = field.ty { - if !path.path.segments.is_empty() { - let _type = format!("{}", path.path.segments[0].ident); - if compactable_types.contains(&_type.as_str()) { - field.attrs.push(syn::parse_quote! 
{ - #[codec(compact)] - }); - } - } - } - } - } - } - - quote! { - #[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, serde::Serialize, serde::Deserialize)] - #ast - } - .into() -} - -#[proc_macro_attribute] -pub fn use_postcard(_args: TokenStream, input: TokenStream) -> TokenStream { - let ast = parse_macro_input!(input as DeriveInput); - - quote! { - #[derive(serde::Serialize, serde::Deserialize)] - #ast - } - .into() -} - -#[proc_macro_attribute] -pub fn use_compact(args: TokenStream, input: TokenStream) -> TokenStream { let ast = parse_macro_input!(input as DeriveInput); let with_zstd = args.clone().into_iter().any(|tk| tk.to_string() == "zstd"); @@ -176,9 +118,3 @@ pub fn add_arbitrary_tests(args: TokenStream, input: TokenStream) -> TokenStream } .into() } - -#[proc_macro_attribute] -pub fn no_codec(_args: TokenStream, input: TokenStream) -> TokenStream { - let ast = parse_macro_input!(input as DeriveInput); - quote! { #ast }.into() -} From f387f7bd925098a12739c078e2c5191c415db5b2 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 15 Apr 2024 14:06:27 +0100 Subject: [PATCH 164/700] refactor(exex): make finished height an enum (#7652) --- crates/exex/src/manager.rs | 52 +++++++++++++++++++------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index f12a6e7564111..8fe6d48f6ceb5 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -168,14 +168,7 @@ pub struct ExExManager { is_ready: watch::Sender, /// The finished height of all ExEx's. - /// - /// This is the lowest common denominator between all ExEx's. If an ExEx has not emitted a - /// `FinishedHeight` event, it will be `None`. - /// - /// This block is used to (amongst other things) determine what blocks are safe to prune. - /// - /// The number is inclusive, i.e. all blocks `<= finished_height` are safe to prune. 
- finished_height: watch::Sender>, + finished_height: watch::Sender, /// A handle to the ExEx manager. handle: ExExManagerHandle, @@ -196,7 +189,11 @@ impl ExExManager { let (handle_tx, handle_rx) = mpsc::unbounded_channel(); let (is_ready_tx, is_ready_rx) = watch::channel(true); - let (finished_height_tx, finished_height_rx) = watch::channel(None); + let (finished_height_tx, finished_height_rx) = watch::channel(if num_exexs == 0 { + FinishedHeight::NoExExs + } else { + FinishedHeight::NotReady + }); let current_capacity = Arc::new(AtomicUsize::new(max_capacity)); @@ -329,7 +326,7 @@ impl Future for ExExManager { } }); if let Ok(finished_height) = finished_height { - let _ = self.finished_height.send(Some(finished_height)); + let _ = self.finished_height.send(FinishedHeight::Height(finished_height)); } Poll::Pending @@ -354,14 +351,7 @@ pub struct ExExManagerHandle { /// The current capacity of the manager's internal notification buffer. current_capacity: Arc, /// The finished height of all ExEx's. - /// - /// This is the lowest common denominator between all ExEx's. If an ExEx has not emitted a - /// `FinishedHeight` event, it will be `None`. - /// - /// This block is used to (amongst other things) determine what blocks are safe to prune. - /// - /// The number is inclusive, i.e. all blocks `<= finished_height` are safe to prune. - finished_height: watch::Receiver>, + finished_height: watch::Receiver, } impl ExExManagerHandle { @@ -406,14 +396,7 @@ impl ExExManagerHandle { } /// The finished height of all ExEx's. - /// - /// This is the lowest common denominator between all ExEx's. If an ExEx has not emitted a - /// `FinishedHeight` event, it will be `None`. - /// - /// This block is used to (amongst other things) determine what blocks are safe to prune. - /// - /// The number is inclusive, i.e. all blocks `<= finished_height` are safe to prune. 
- pub fn finished_height(&mut self) -> Option { + pub fn finished_height(&mut self) -> FinishedHeight { *self.finished_height.borrow_and_update() } @@ -450,6 +433,23 @@ impl Clone for ExExManagerHandle { } } +/// The finished height of all ExEx's. +#[derive(Debug, Clone, Copy)] +pub enum FinishedHeight { + /// No ExEx's are installed, so there is no finished height. + NoExExs, + /// Not all ExExs emitted a `FinishedHeight` event yet. + NotReady, + /// The finished height of all ExEx's. + /// + /// This is the lowest common denominator between all ExEx's. + /// + /// This block is used to (amongst other things) determine what blocks are safe to prune. + /// + /// The number is inclusive, i.e. all blocks `<= finished_height` are safe to prune. + Height(BlockNumber), +} + #[cfg(test)] mod tests { #[tokio::test] From 855988994b809eb40672450357052633caa38da0 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 15 Apr 2024 20:51:57 +0200 Subject: [PATCH 165/700] feat(op): import bodies (#7659) Co-authored-by: DaniPopes <57450786+DaniPopes@users.noreply.github.com> --- crates/primitives/src/transaction/mod.rs | 38 ++++++++++++++----- .../primitives/src/transaction/signature.rs | 6 +++ 2 files changed, 34 insertions(+), 10 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index abc810b9e2681..9e7c5d8e9d33d 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -956,8 +956,8 @@ impl TransactionSignedNoHash { // Optimism's Deposit transaction does not have a signature. Directly return the // `from` address. #[cfg(feature = "optimism")] - if let Transaction::Deposit(TxDeposit { from, .. 
}) = self.transaction { - return Some(from) + if let Some(address) = get_deposit_or_null_address(&self.transaction, &self.signature) { + return Some(address) } let signature_hash = self.signature_hash(); @@ -976,11 +976,9 @@ impl TransactionSignedNoHash { buffer.clear(); self.transaction.encode_without_signature(buffer); - // Optimism's Deposit transaction does not have a signature. Directly return the - // `from` address. #[cfg(feature = "optimism")] - if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { - return Some(from) + if let Some(address) = get_deposit_or_null_address(&self.transaction, &self.signature) { + return Some(address) } self.signature.recover_signer_unchecked(keccak256(buffer)) @@ -1189,8 +1187,8 @@ impl TransactionSigned { // Optimism's Deposit transaction does not have a signature. Directly return the // `from` address. #[cfg(feature = "optimism")] - if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { - return Some(from) + if let Some(address) = get_deposit_or_null_address(&self.transaction, &self.signature) { + return Some(address) } let signature_hash = self.signature_hash(); self.signature.recover_signer(signature_hash) @@ -1205,8 +1203,8 @@ impl TransactionSigned { // Optimism's Deposit transaction does not have a signature. Directly return the // `from` address. #[cfg(feature = "optimism")] - if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { - return Some(from) + if let Some(address) = get_deposit_or_null_address(&self.transaction, &self.signature) { + return Some(address) } let signature_hash = self.signature_hash(); self.signature.recover_signer_unchecked(signature_hash) @@ -1781,6 +1779,26 @@ impl IntoRecoveredTransaction for TransactionSignedEcRecovered { } } +#[cfg(feature = "optimism")] +fn get_deposit_or_null_address( + transaction: &Transaction, + signature: &Signature, +) -> Option
{ + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. + if let Transaction::Deposit(TxDeposit { from, .. }) = transaction { + return Some(*from) + } + // OP blocks below bedrock include transactions sent from the null address + if std::env::var_os(OP_RETH_MAINNET_BELOW_BEDROCK).as_deref() == Some("true".as_ref()) && + *signature == Signature::optimism_deposit_tx_signature() + { + return Some(Address::default()) + } + + None +} + #[cfg(test)] mod tests { use crate::{ diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index 1f2c71b4a358f..a6bc8905acbb0 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -85,6 +85,12 @@ impl Signature { // EIP-155: v = {0, 1} + CHAIN_ID * 2 + 35 self.odd_y_parity as u64 + chain_id * 2 + 35 } else { + #[cfg(feature = "optimism")] + if std::env::var_os(OP_RETH_MAINNET_BELOW_BEDROCK).as_deref() == Some("true".as_ref()) && + *self == Self::optimism_deposit_tx_signature() + { + return 0 + } self.odd_y_parity as u64 + 27 } } From 6f210f1366aba16a22f47a75729dccad48601a13 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 15 Apr 2024 20:24:28 +0100 Subject: [PATCH 166/700] docs(discv5): additional info regarding requested enrs on bootstrap (#7655) --- crates/net/discv5/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 8a027d180cc96..e3f7f93acb6e9 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -285,6 +285,7 @@ impl Discv5 { } } + // If a session is established, the ENR is added straight away to discv5 kbuckets Ok(_ = join_all(enr_requests).await) } From c59c41701dfa812c16a97e7b2cbaebe44a9ebf01 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 15 Apr 2024 21:30:35 
+0200 Subject: [PATCH 167/700] Convert `Header` and `Block` rpc to primitives (#7660) --- Cargo.lock | 36 +++++++++++------------ Cargo.toml | 26 ++++++++--------- crates/primitives/src/block.rs | 45 ++++++++++++++++++++++++++++- crates/primitives/src/header.rs | 45 +++++++++++++++++++++++++++++ crates/primitives/src/withdrawal.rs | 17 +++++++++++ crates/rpc/rpc/src/txpool.rs | 11 +++---- 6 files changed, 141 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5a71369a251fa..6f84c29c88df5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,7 +146,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" +source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" dependencies = [ "alloy-eips", "alloy-primitives", @@ -178,7 +178,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" +source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -196,7 +196,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" +source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" dependencies = [ "alloy-primitives", "alloy-serde", @@ -218,7 +218,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" +source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" dependencies = [ "alloy-primitives", "serde", @@ -230,7 +230,7 @@ dependencies = [ [[package]] name = 
"alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" +source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" dependencies = [ "alloy-consensus", "alloy-eips", @@ -246,7 +246,7 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" +source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -288,7 +288,7 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" +source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -338,7 +338,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" +source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -358,7 +358,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" +source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" dependencies = [ "alloy-consensus", "alloy-eips", @@ -380,7 +380,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" +source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" 
dependencies = [ "alloy-primitives", "alloy-serde", @@ -390,7 +390,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" +source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" dependencies = [ "alloy-consensus", "alloy-eips", @@ -408,7 +408,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" +source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -420,7 +420,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" +source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" dependencies = [ "alloy-primitives", "serde", @@ -430,7 +430,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" +source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" dependencies = [ "alloy-primitives", "async-trait", @@ -443,7 +443,7 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" +source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" dependencies = [ "alloy-consensus", "alloy-network", @@ -518,7 +518,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" +source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" dependencies = [ "alloy-json-rpc", "base64 0.22.0", @@ -536,7 +536,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=8cb0307#8cb0307b9bdb6cef9058d2d1a2219c8d212a7421" +source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -7550,7 +7550,7 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=f604dc4#f604dc4d4cd1f013c8bd488b2f28356a77fa2094" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=67f9968#67f9968fe56e5968ada322d084a98dd6a405ccdb" dependencies = [ "alloy-primitives", "alloy-rpc-types", diff --git a/Cargo.toml b/Cargo.toml index 7765c76920889..6ed1ab9adce9a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -267,7 +267,7 @@ revm = { version = "8.0.0", features = [ revm-primitives = { version = "3.1.0", features = [ "std", ], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "f604dc4" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "67f9968" } # eth alloy-chains = "0.1.15" @@ -276,20 +276,20 @@ alloy-dyn-abi = "0.7.0" alloy-sol-types = "0.7.0" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = 
"8cb0307" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "8cb0307" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } # misc aquamarine = "0.5" diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index b22007b474d3d..2b43e32514d98 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,11 +1,12 @@ use crate::{ - Address, Bytes, GotExpected, Header, SealedHeader, TransactionSigned, + 
Address, Bytes, GotExpected, Header, SealedHeader, Signature, TransactionSigned, TransactionSignedEcRecovered, Withdrawals, B256, }; use alloy_rlp::{RlpDecodable, RlpEncodable}; #[cfg(any(test, feature = "arbitrary"))] use proptest::prelude::{any, prop_compose}; use reth_codecs::derive_arbitrary; +use reth_rpc_types::ConversionError; use serde::{Deserialize, Serialize}; use std::ops::Deref; @@ -147,6 +148,48 @@ impl Deref for Block { } } +impl TryFrom for Block { + type Error = ConversionError; + + fn try_from(block: reth_rpc_types::Block) -> Result { + let body = { + let transactions: Result, ConversionError> = match block + .transactions + { + reth_rpc_types::BlockTransactions::Full(transactions) => transactions + .into_iter() + .map(|tx| { + let signature = tx.signature.ok_or(ConversionError::MissingSignature)?; + Ok(TransactionSigned::from_transaction_and_signature( + tx.try_into()?, + Signature { + r: signature.r, + s: signature.s, + odd_y_parity: signature + .y_parity + .unwrap_or(reth_rpc_types::Parity(false)) + .0, + }, + )) + }) + .collect(), + reth_rpc_types::BlockTransactions::Hashes(_) | + reth_rpc_types::BlockTransactions::Uncle => { + return Err(ConversionError::MissingFullTransactions); + } + }; + transactions? + }; + + Ok(Self { + header: block.header.try_into()?, + body, + ommers: Default::default(), + withdrawals: block.withdrawals.map(Into::into), + }) + } +} + /// Sealed block with senders recovered from transactions. 
#[derive(Debug, Clone, PartialEq, Eq, Default)] pub struct BlockWithSenders { diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index 0516d32271b65..f12dc3d590e86 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -16,6 +16,7 @@ use bytes::BufMut; #[cfg(any(test, feature = "arbitrary"))] use proptest::prelude::*; use reth_codecs::{add_arbitrary_tests, derive_arbitrary, main_codec, Compact}; +use reth_rpc_types::ConversionError; use serde::{Deserialize, Serialize}; use std::{mem, ops::Deref}; @@ -485,6 +486,50 @@ impl Decodable for Header { } } +impl TryFrom for Header { + type Error = ConversionError; + + fn try_from(header: reth_rpc_types::Header) -> Result { + Ok(Self { + base_fee_per_gas: header + .base_fee_per_gas + .map(|base_fee_per_gas| { + base_fee_per_gas.try_into().map_err(ConversionError::BaseFeePerGasConversion) + }) + .transpose()?, + beneficiary: header.miner, + blob_gas_used: header + .blob_gas_used + .map(|blob_gas_used| { + blob_gas_used.try_into().map_err(ConversionError::BlobGasUsedConversion) + }) + .transpose()?, + difficulty: header.difficulty, + excess_blob_gas: header + .excess_blob_gas + .map(|excess_blob_gas| { + excess_blob_gas.try_into().map_err(ConversionError::ExcessBlobGasConversion) + }) + .transpose()?, + extra_data: header.extra_data, + gas_limit: header.gas_limit.try_into().map_err(ConversionError::GasLimitConversion)?, + gas_used: header.gas_used.try_into().map_err(ConversionError::GasUsedConversion)?, + logs_bloom: header.logs_bloom, + mix_hash: header.mix_hash.unwrap_or_default(), + nonce: u64::from_be_bytes(header.nonce.unwrap_or_default().0), + number: header.number.ok_or(ConversionError::MissingBlockNumber)?, + ommers_hash: header.uncles_hash, + parent_beacon_block_root: header.parent_beacon_block_root, + parent_hash: header.parent_hash, + receipts_root: header.receipts_root, + state_root: header.state_root, + timestamp: header.timestamp, + transactions_root: 
header.transactions_root, + withdrawals_root: header.withdrawals_root, + }) + } +} + /// Errors that can occur during header sanity checks. #[derive(thiserror::Error, Debug, PartialEq, Eq, Clone)] pub enum HeaderValidationError { diff --git a/crates/primitives/src/withdrawal.rs b/crates/primitives/src/withdrawal.rs index f2eba55864833..730fb291c0430 100644 --- a/crates/primitives/src/withdrawal.rs +++ b/crates/primitives/src/withdrawal.rs @@ -36,6 +36,17 @@ impl Withdrawal { } } +impl From for Withdrawal { + fn from(withdrawal: reth_rpc_types::Withdrawal) -> Self { + Self { + index: withdrawal.index, + validator_index: withdrawal.index, + address: withdrawal.address, + amount: withdrawal.amount, + } + } +} + /// Represents a collection of Withdrawals. #[main_codec] #[derive(Debug, Clone, PartialEq, Eq, Default, Hash, RlpEncodableWrapper, RlpDecodableWrapper)] @@ -104,6 +115,12 @@ impl DerefMut for Withdrawals { } } +impl From> for Withdrawals { + fn from(withdrawals: Vec) -> Self { + Self(withdrawals.into_iter().map(Into::into).collect()) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index f9def07b1b97e..9fb61c3916339 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -1,6 +1,6 @@ use async_trait::async_trait; use jsonrpsee::core::RpcResult as Result; -use reth_primitives::{Address, U256, U64}; +use reth_primitives::Address; use reth_rpc_api::TxPoolApiServer; use reth_rpc_types::{ txpool::{TxpoolContent, TxpoolContentFrom, TxpoolInspect, TxpoolInspectSummary, TxpoolStatus}, @@ -69,10 +69,7 @@ where async fn txpool_status(&self) -> Result { trace!(target: "rpc::eth", "Serving txpool_status"); let all = self.pool.all_transactions(); - Ok(TxpoolStatus { - pending: U64::from(all.pending.len()), - queued: U64::from(all.queued.len()), - }) + Ok(TxpoolStatus { pending: all.pending.len() as u64, queued: all.queued.len() as u64 }) } /// Returns a summary of all the 
transactions currently pending for inclusion in the next @@ -97,8 +94,8 @@ where TxpoolInspectSummary { to: tx.to(), value: tx.value(), - gas: U256::from(tx.gas_limit()), - gas_price: U256::from(tx.transaction.max_fee_per_gas()), + gas: tx.gas_limit() as u128, + gas_price: tx.transaction.max_fee_per_gas(), }, ); } From fc4bb53f9dec81189724ef952ab833923faa3139 Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Tue, 16 Apr 2024 05:31:06 +0800 Subject: [PATCH 168/700] fix: check if regexp match is null in label-pr script (#7654) Co-authored-by: Oliver Nordbjerg --- .github/scripts/label_pr.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/scripts/label_pr.js b/.github/scripts/label_pr.js index 9a6810ef34910..85206c0e66e66 100644 --- a/.github/scripts/label_pr.js +++ b/.github/scripts/label_pr.js @@ -8,7 +8,7 @@ module.exports = async ({ github, context }) => { const pattern = new RegExp(`(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved) ${repoUrl}/issues/(?\\d+)`, 'i') const re = prBody.match(pattern); - const issueNumber = re.groups?.issue_number; + const issueNumber = re?.groups?.issue_number; if (!issueNumber) { console.log("No issue reference found in PR description."); From d4a8ef92d7856a689ffc3243c6144dd7a3edeff5 Mon Sep 17 00:00:00 2001 From: Rupam Dey <117000803+rupam-04@users.noreply.github.com> Date: Tue, 16 Apr 2024 03:28:54 +0530 Subject: [PATCH 169/700] refactor: replace ```calculate_next_block_base_fee``` with alloy's builtin function (#7641) Co-authored-by: Matthias Seitz --- crates/primitives/src/basefee.rs | 279 +++++++++------------- crates/primitives/src/header.rs | 12 +- crates/rpc/rpc/src/eth/api/fee_history.rs | 12 +- crates/rpc/rpc/src/eth/api/fees.rs | 14 +- crates/rpc/rpc/src/eth/api/server.rs | 43 ++-- 5 files changed, 153 insertions(+), 207 deletions(-) diff --git a/crates/primitives/src/basefee.rs b/crates/primitives/src/basefee.rs index 442cb66409014..fb5ca7571f546 100644 --- 
a/crates/primitives/src/basefee.rs +++ b/crates/primitives/src/basefee.rs @@ -1,168 +1,111 @@ -//! Helpers for working with EIP-1559 base fee - -/// Calculate the base fee for the next block based on the EIP-1559 specification. -/// -/// This function calculates the base fee for the next block according to the rules defined in the -/// EIP-1559. EIP-1559 introduces a new transaction pricing mechanism that includes a -/// fixed-per-block network fee that is burned and dynamically adjusts block sizes to handle -/// transient congestion. -/// -/// For each block, the base fee per gas is determined by the gas used in the parent block and the -/// target gas (the block gas limit divided by the elasticity multiplier). The algorithm increases -/// the base fee when blocks are congested and decreases it when they are under the target gas -/// usage. The base fee per gas is always burned. -/// -/// Parameters: -/// - `gas_used`: The gas used in the current block. -/// - `gas_limit`: The gas limit of the current block. -/// - `base_fee`: The current base fee per gas. -/// - `base_fee_params`: Base fee parameters such as elasticity multiplier and max change -/// denominator. -/// -/// Returns: -/// The calculated base fee for the next block as a `u64`. -/// -/// For more information, refer to the [EIP-1559 spec](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md). -pub fn calculate_next_block_base_fee( - gas_used: u64, - gas_limit: u64, - base_fee: u64, - base_fee_params: crate::BaseFeeParams, -) -> u64 { - // Calculate the target gas by dividing the gas limit by the elasticity multiplier. - let gas_target = gas_limit / base_fee_params.elasticity_multiplier as u64; - - match gas_used.cmp(&gas_target) { - // If the gas used in the current block is equal to the gas target, the base fee remains the - // same (no increase). 
- std::cmp::Ordering::Equal => base_fee, - // If the gas used in the current block is greater than the gas target, calculate a new - // increased base fee. - std::cmp::Ordering::Greater => { - // Calculate the increase in base fee based on the formula defined by EIP-1559. - base_fee + - (std::cmp::max( - // Ensure a minimum increase of 1. - 1, - base_fee as u128 * (gas_used - gas_target) as u128 / - (gas_target as u128 * base_fee_params.max_change_denominator), - ) as u64) - } - // If the gas used in the current block is less than the gas target, calculate a new - // decreased base fee. - std::cmp::Ordering::Less => { - // Calculate the decrease in base fee based on the formula defined by EIP-1559. - base_fee.saturating_sub( - (base_fee as u128 * (gas_target - gas_used) as u128 / - (gas_target as u128 * base_fee_params.max_change_denominator)) - as u64, - ) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[cfg(feature = "optimism")] - use crate::chain::{OP_BASE_FEE_PARAMS, OP_SEPOLIA_BASE_FEE_PARAMS}; - - #[test] - fn calculate_base_fee_success() { - let base_fee = [ - 1000000000, 1000000000, 1000000000, 1072671875, 1059263476, 1049238967, 1049238967, 0, - 1, 2, - ]; - let gas_used = [ - 10000000, 10000000, 10000000, 9000000, 10001000, 0, 10000000, 10000000, 10000000, - 10000000, - ]; - let gas_limit = [ - 10000000, 12000000, 14000000, 10000000, 14000000, 2000000, 18000000, 18000000, - 18000000, 18000000, - ]; - let next_base_fee = [ - 1125000000, 1083333333, 1053571428, 1179939062, 1116028649, 918084097, 1063811730, 1, - 2, 3, - ]; - - for i in 0..base_fee.len() { - assert_eq!( - next_base_fee[i], - calculate_next_block_base_fee( - gas_used[i], - gas_limit[i], - base_fee[i], - crate::BaseFeeParams::ethereum(), - ) - ); - } - } - - #[cfg(feature = "optimism")] - #[test] - fn calculate_optimism_base_fee_success() { - let base_fee = [ - 1000000000, 1000000000, 1000000000, 1072671875, 1059263476, 1049238967, 1049238967, 0, - 1, 2, - ]; - let gas_used = [ - 
10000000, 10000000, 10000000, 9000000, 10001000, 0, 10000000, 10000000, 10000000, - 10000000, - ]; - let gas_limit = [ - 10000000, 12000000, 14000000, 10000000, 14000000, 2000000, 18000000, 18000000, - 18000000, 18000000, - ]; - let next_base_fee = [ - 1100000048, 1080000000, 1065714297, 1167067046, 1128881311, 1028254188, 1098203452, 1, - 2, 3, - ]; - - for i in 0..base_fee.len() { - assert_eq!( - next_base_fee[i], - calculate_next_block_base_fee( - gas_used[i], - gas_limit[i], - base_fee[i], - OP_BASE_FEE_PARAMS, - ) - ); - } - } - - #[cfg(feature = "optimism")] - #[test] - fn calculate_optimism_sepolia_base_fee_success() { - let base_fee = [ - 1000000000, 1000000000, 1000000000, 1072671875, 1059263476, 1049238967, 1049238967, 0, - 1, 2, - ]; - let gas_used = [ - 10000000, 10000000, 10000000, 9000000, 10001000, 0, 10000000, 10000000, 10000000, - 10000000, - ]; - let gas_limit = [ - 10000000, 12000000, 14000000, 10000000, 14000000, 2000000, 18000000, 18000000, - 18000000, 18000000, - ]; - let next_base_fee = [ - 1180000000, 1146666666, 1122857142, 1244299375, 1189416692, 1028254188, 1144836295, 1, - 2, 3, - ]; - - for i in 0..base_fee.len() { - assert_eq!( - next_base_fee[i], - calculate_next_block_base_fee( - gas_used[i], - gas_limit[i], - base_fee[i], - OP_SEPOLIA_BASE_FEE_PARAMS, - ) - ); - } - } -} +//! 
Helpers for working with EIP-1559 base fee + +// re-export +#[doc(inline)] +pub use alloy_eips::eip1559::calc_next_block_base_fee; + +#[cfg(test)] +mod tests { + use super::*; + + #[cfg(feature = "optimism")] + use crate::chain::{OP_BASE_FEE_PARAMS, OP_SEPOLIA_BASE_FEE_PARAMS}; + + #[test] + fn calculate_base_fee_success() { + let base_fee = [ + 1000000000, 1000000000, 1000000000, 1072671875, 1059263476, 1049238967, 1049238967, 0, + 1, 2, + ]; + let gas_used = [ + 10000000, 10000000, 10000000, 9000000, 10001000, 0, 10000000, 10000000, 10000000, + 10000000, + ]; + let gas_limit = [ + 10000000, 12000000, 14000000, 10000000, 14000000, 2000000, 18000000, 18000000, + 18000000, 18000000, + ]; + let next_base_fee = [ + 1125000000, 1083333333, 1053571428, 1179939062, 1116028649, 918084097, 1063811730, 1, + 2, 3, + ]; + + for i in 0..base_fee.len() { + assert_eq!( + next_base_fee[i], + calc_next_block_base_fee( + gas_used[i] as u128, + gas_limit[i] as u128, + base_fee[i] as u128, + crate::BaseFeeParams::ethereum(), + ) as u64 + ); + } + } + + #[cfg(feature = "optimism")] + #[test] + fn calculate_optimism_base_fee_success() { + let base_fee = [ + 1000000000, 1000000000, 1000000000, 1072671875, 1059263476, 1049238967, 1049238967, 0, + 1, 2, + ]; + let gas_used = [ + 10000000, 10000000, 10000000, 9000000, 10001000, 0, 10000000, 10000000, 10000000, + 10000000, + ]; + let gas_limit = [ + 10000000, 12000000, 14000000, 10000000, 14000000, 2000000, 18000000, 18000000, + 18000000, 18000000, + ]; + let next_base_fee = [ + 1100000048, 1080000000, 1065714297, 1167067046, 1128881311, 1028254188, 1098203452, 1, + 2, 3, + ]; + + for i in 0..base_fee.len() { + assert_eq!( + next_base_fee[i], + calc_next_block_base_fee( + gas_used[i] as u128, + gas_limit[i] as u128, + base_fee[i] as u128, + OP_BASE_FEE_PARAMS, + ) as u64 + ); + } + } + + #[cfg(feature = "optimism")] + #[test] + fn calculate_optimism_sepolia_base_fee_success() { + let base_fee = [ + 1000000000, 1000000000, 1000000000, 
1072671875, 1059263476, 1049238967, 1049238967, 0, + 1, 2, + ]; + let gas_used = [ + 10000000, 10000000, 10000000, 9000000, 10001000, 0, 10000000, 10000000, 10000000, + 10000000, + ]; + let gas_limit = [ + 10000000, 12000000, 14000000, 10000000, 14000000, 2000000, 18000000, 18000000, + 18000000, 18000000, + ]; + let next_base_fee = [ + 1180000000, 1146666666, 1122857142, 1244299375, 1189416692, 1028254188, 1144836295, 1, + 2, 3, + ]; + + for i in 0..base_fee.len() { + assert_eq!( + next_base_fee[i], + calc_next_block_base_fee( + gas_used[i] as u128, + gas_limit[i] as u128, + base_fee[i] as u128, + OP_SEPOLIA_BASE_FEE_PARAMS, + ) as u64 + ); + } + } +} diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index f12dc3d590e86..3846679dc2598 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -1,7 +1,7 @@ #[cfg(any(test, feature = "arbitrary"))] use crate::block::{generate_valid_header, valid_header_strategy}; use crate::{ - basefee::calculate_next_block_base_fee, + basefee::calc_next_block_base_fee, constants, constants::{ ALLOWED_FUTURE_BLOCK_TIME_SECONDS, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH, @@ -247,12 +247,12 @@ impl Header { /// /// Returns a `None` if no base fee is set, no EIP-1559 support pub fn next_block_base_fee(&self, base_fee_params: BaseFeeParams) -> Option { - Some(calculate_next_block_base_fee( - self.gas_used, - self.gas_limit, - self.base_fee_per_gas?, + Some(calc_next_block_base_fee( + self.gas_used as u128, + self.gas_limit as u128, + self.base_fee_per_gas? as u128, base_fee_params, - )) + ) as u64) } /// Calculate excess blob gas for the next block according to the EIP-4844 spec. 
diff --git a/crates/rpc/rpc/src/eth/api/fee_history.rs b/crates/rpc/rpc/src/eth/api/fee_history.rs index 1d62a4aa1941f..4029dc7f91bf9 100644 --- a/crates/rpc/rpc/src/eth/api/fee_history.rs +++ b/crates/rpc/rpc/src/eth/api/fee_history.rs @@ -7,7 +7,7 @@ use futures::{ }; use metrics::atomics::AtomicU64; use reth_primitives::{ - basefee::calculate_next_block_base_fee, + basefee::calc_next_block_base_fee, eip4844::{calc_blob_gasprice, calculate_excess_blob_gas}, ChainSpec, Receipt, SealedBlock, TransactionSigned, B256, }; @@ -370,12 +370,12 @@ impl FeeHistoryEntry { /// Returns the base fee for the next block according to the EIP-1559 spec. pub fn next_block_base_fee(&self, chain_spec: &ChainSpec) -> u64 { - calculate_next_block_base_fee( - self.gas_used, - self.gas_limit, - self.base_fee_per_gas, + calc_next_block_base_fee( + self.gas_used as u128, + self.gas_limit as u128, + self.base_fee_per_gas as u128, chain_spec.base_fee_params(self.timestamp), - ) + ) as u64 } /// Returns the blob fee for the next block according to the EIP-4844 spec. diff --git a/crates/rpc/rpc/src/eth/api/fees.rs b/crates/rpc/rpc/src/eth/api/fees.rs index 9b7445a2f16aa..d93b83d8909ce 100644 --- a/crates/rpc/rpc/src/eth/api/fees.rs +++ b/crates/rpc/rpc/src/eth/api/fees.rs @@ -9,7 +9,7 @@ use crate::{ }; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_primitives::{basefee::calculate_next_block_base_fee, BlockNumberOrTag, U256}; +use reth_primitives::{BlockNumberOrTag, U256}; use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_rpc_types::FeeHistory; use reth_transaction_pool::TransactionPool; @@ -187,12 +187,12 @@ where // // The unwrap is safe since we checked earlier that we got at least 1 header. 
let last_header = headers.last().expect("is present"); - base_fee_per_gas.push(calculate_next_block_base_fee( - last_header.gas_used, - last_header.gas_limit, - last_header.base_fee_per_gas.unwrap_or_default(), - self.provider().chain_spec().base_fee_params(last_header.timestamp), - ) as u128); + base_fee_per_gas.push( + self.provider().chain_spec().base_fee_params(last_header.timestamp).next_block_base_fee( + last_header.gas_used as u128, + last_header.gas_limit as u128, + last_header.base_fee_per_gas.unwrap_or_default() as u128, + )); // Same goes for the `base_fee_per_blob_gas`: // > "[..] includes the next block after the newest of the returned range, because this value can be derived from the newest block. diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index d51c2bf945daa..6be1a88af81e6 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -1,16 +1,10 @@ //! Implementation of the [`jsonrpsee`] generated [`reth_rpc_api::EthApiServer`] trait //! Handles RPC requests for the `eth_` namespace. 
-use super::EthApiSpec; -use crate::{ - eth::{ - api::{EthApi, EthTransactions}, - error::EthApiError, - revm_utils::EvmOverrides, - }, - result::{internal_rpc_err, ToRpcResult}, -}; use jsonrpsee::core::RpcResult as Result; +use serde_json::Value; +use tracing::trace; + use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; use reth_primitives::{ @@ -28,8 +22,17 @@ use reth_rpc_types::{ StateContext, SyncStatus, TransactionRequest, Work, }; use reth_transaction_pool::TransactionPool; -use serde_json::Value; -use tracing::trace; + +use crate::{ + eth::{ + api::{EthApi, EthTransactions}, + error::EthApiError, + revm_utils::EvmOverrides, + }, + result::{internal_rpc_err, ToRpcResult}, +}; + +use super::EthApiSpec; #[async_trait::async_trait] impl EthApiServer for EthApi @@ -435,6 +438,8 @@ where #[cfg(test)] mod tests { + use jsonrpsee::types::error::INVALID_PARAMS_CODE; + use crate::{ eth::{ cache::EthStateCache, gas_oracle::GasPriceOracle, FeeHistoryCache, @@ -442,13 +447,12 @@ mod tests { }, EthApi, }; - use jsonrpsee::types::error::INVALID_PARAMS_CODE; use reth_evm_ethereum::EthEvmConfig; use reth_interfaces::test_utils::{generators, generators::Rng}; use reth_network_api::noop::NoopNetwork; use reth_primitives::{ - basefee::calculate_next_block_base_fee, constants::ETHEREUM_BLOCK_GAS_LIMIT, BaseFeeParams, - Block, BlockNumberOrTag, Header, TransactionSigned, B256, + constants::ETHEREUM_BLOCK_GAS_LIMIT, BaseFeeParams, Block, BlockNumberOrTag, Header, + TransactionSigned, B256, }; use reth_provider::{ test_utils::{MockEthProvider, NoopProvider}, @@ -565,12 +569,11 @@ mod tests { // Add final base fee (for the next block outside of the request) let last_header = last_header.unwrap(); - base_fees_per_gas.push(calculate_next_block_base_fee( - last_header.gas_used, - last_header.gas_limit, - last_header.base_fee_per_gas.unwrap_or_default(), - BaseFeeParams::ethereum(), - ) as u128); + base_fees_per_gas.push(BaseFeeParams::ethereum().next_block_base_fee( + 
last_header.gas_used as u128, + last_header.gas_limit as u128, + last_header.base_fee_per_gas.unwrap_or_default() as u128, + )); let eth_api = build_test_eth_api(mock_provider); From 711de3b08d62857d2c3959468eb327c443f776ca Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 16 Apr 2024 01:05:37 +0200 Subject: [PATCH 170/700] feat(discv5): add tracing (#7663) --- crates/net/discv5/src/lib.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index e3f7f93acb6e9..cee4eaf7c26f4 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -310,13 +310,16 @@ impl Discv5 { discv5.with_kbuckets(|kbuckets| kbuckets.read().iter_ref().count()), ); + // make sure node is connected to each subtree in the network by target + // selection (ref kademlia) + let target = get_lookup_target(log2_distance, local_node_id); + trace!(target: "net::discv5", + target=format!("{:#?}", target), lookup_interval=format!("{:#?}", lookup_interval), "starting periodic lookup query" ); - // make sure node is connected to each subtree in the network by target - // selection (ref kademlia) - let target = get_lookup_target(log2_distance, local_node_id); + if log2_distance < MAX_LOG2_DISTANCE { // try to populate bucket one step further away log2_distance += 1 @@ -331,6 +334,7 @@ impl Discv5 { "periodic lookup query failed" ), Ok(peers) => trace!(target: "net::discv5", + target=format!("{:#?}", target), lookup_interval=format!("{:#?}", lookup_interval), peers_count=peers.len(), peers=format!("[{:#}]", peers.iter() From 516e8368754637fb26a36a2935d3b7c67778ce80 Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Tue, 16 Apr 2024 17:58:24 +0800 Subject: [PATCH 171/700] feat: add rpc trait bindings for optimism specific endpoints (#7621) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + crates/rpc/rpc-api/Cargo.toml | 1 + crates/rpc/rpc-api/src/lib.rs | 1 + crates/rpc/rpc-api/src/optimism.rs | 365 
+++++++++++++++++++++++++++++ 4 files changed, 368 insertions(+) create mode 100644 crates/rpc/rpc-api/src/optimism.rs diff --git a/Cargo.lock b/Cargo.lock index 6f84c29c88df5..72cb77b76c369 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7222,6 +7222,7 @@ dependencies = [ "reth-node-api", "reth-primitives", "reth-rpc-types", + "serde", "serde_json", ] diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index 4c1618d0c5b0a..d8bf076288dec 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -20,6 +20,7 @@ reth-node-api.workspace = true # misc jsonrpsee = { workspace = true, features = ["server", "macros"] } serde_json.workspace = true +serde = { workspace = true, features = ["derive"] } [features] client = ["jsonrpsee/client", "jsonrpsee/async-client"] diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index f97c0b19feff4..82af34a86d730 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -26,6 +26,7 @@ mod ganache; mod hardhat; mod mev; mod net; +mod optimism; mod otterscan; mod reth; mod rpc; diff --git a/crates/rpc/rpc-api/src/optimism.rs b/crates/rpc/rpc-api/src/optimism.rs new file mode 100644 index 0000000000000..80d60415d4737 --- /dev/null +++ b/crates/rpc/rpc-api/src/optimism.rs @@ -0,0 +1,365 @@ +#![allow(missing_docs)] +#![allow(unreachable_pub)] +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use reth_primitives::{Address, BlockNumber, ChainId, B256}; +use reth_rpc_types::BlockNumberOrTag; +use serde::{Deserialize, Serialize}; +use std::{collections::HashMap, net::IpAddr}; + +/// todo: move to reth_rpc_types + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct BlockId { + pub hash: B256, + pub number: BlockNumber, +} + +// https://github.com/ethereum-optimism/optimism/blob/develop/op-service/eth/id.go#L33 +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct 
L2BlockRef { + pub hash: B256, + pub number: BlockNumber, + pub parent_hash: B256, + pub timestamp: u64, + pub l1origin: BlockId, + pub sequence_number: u64, +} + +// https://github.com/ethereum-optimism/optimism/blob/develop/op-service/eth/id.go#L52 +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct L1BlockRef { + pub hash: B256, + pub number: BlockNumber, + pub parent_hash: B256, + pub timestamp: u64, +} + +// https://github.com/ethereum-optimism/optimism/blob/develop/op-service/eth/sync_status.go#L5 +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct SyncStatus { + pub current_l1: L1BlockRef, + pub current_l1_finalized: L1BlockRef, + pub head_l1: L1BlockRef, + pub safe_l1: L1BlockRef, + pub finalized_l1: L1BlockRef, + pub unsafe_l2: L2BlockRef, + pub safe_l2: L2BlockRef, + pub finalized_l2: L2BlockRef, + pub pending_safe_l2: L2BlockRef, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct OutputResponse { + pub version: B256, + pub output_root: B256, + pub block_ref: L2BlockRef, + pub withdrawal_storage_root: B256, + pub state_root: B256, + pub sync_status: SyncStatus, +} + +// https://github.com/ethereum-optimism/optimism/blob/c7ad0ebae5dca3bf8aa6f219367a95c15a15ae41/op-service/eth/types.go#L371 +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct SystemConfig { + pub batcher_addr: Address, + pub overhead: B256, + pub scalar: B256, + pub gas_limit: u64, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct Genesis { + pub l1: BlockId, + pub l2: BlockId, + pub l2_time: u64, + pub system_config: SystemConfig, +} + +// https://github.com/ethereum-optimism/optimism/blob/develop/op-node/rollup/types.go#L53 +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct RollupConfig { + pub genesis: Genesis, + pub 
block_time: u64, + pub max_sequencer_drift: u64, + pub seq_window_size: u64, + pub channel_timeout: u64, + /// todo use u128 to represent *big.Int? + #[serde(default, skip_serializing_if = "Option::is_none")] + pub l1_chain_id: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub l2_chain_id: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub regolith_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub canyon_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub delta_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ecotone_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub fjord_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub interop_time: Option, + pub batch_inbox_address: Address, + pub deposit_contract_address: Address, + pub l1_system_config_address: Address, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub protocol_versions_address: Option
, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub da_challenge_address: Option
, + pub da_challenge_window: u64, + pub da_resolve_window: u64, + pub use_plasma: bool, +} + +// https://github.com/ethereum-optimism/optimism/blob/8dd17a7b114a7c25505cd2e15ce4e3d0f7e3f7c1/op-node/p2p/store/iface.go#L13 +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TopicScores { + pub time_in_mesh: f64, + pub first_message_deliveries: f64, + pub mesh_message_deliveries: f64, + pub invalid_message_deliveries: f64, +} + +// https://github.com/ethereum-optimism/optimism/blob/8dd17a7b114a7c25505cd2e15ce4e3d0f7e3f7c1/op-node/p2p/store/iface.go#L20C6-L20C18 +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GossipScores { + pub total: f64, + pub blocks: TopicScores, + #[serde(rename = "IPColocationFactor")] + pub ip_colocation_factor: f64, + pub behavioral_penalty: f64, +} + +// https://github.com/ethereum-optimism/optimism/blob/8dd17a7b114a7c25505cd2e15ce4e3d0f7e3f7c1/op-node/p2p/store/iface.go#L31C1-L35C2 +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ReqRespScores { + pub valid_responses: f64, + pub error_responses: f64, + pub rejected_payloads: f64, +} + +// https://github.com/ethereum-optimism/optimism/blob/8dd17a7b114a7c25505cd2e15ce4e3d0f7e3f7c1/op-node/p2p/store/iface.go#L81 +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct PeerScores { + pub gossip: GossipScores, + pub req_resp: ReqRespScores, +} + +// https://github.com/ethereum-optimism/optimism/blob/develop/op-node/p2p/rpc_api.go#L15 +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct PeerInfo { + #[serde(rename = "peerID")] + pub peer_id: String, + #[serde(rename = "nodeID")] + pub node_id: String, + pub user_agent: String, + pub protocol_version: String, + #[serde(rename = "ENR")] + pub enr: String, + pub addresses: Vec, + pub protocols: Option>, + /// 0: "NotConnected", 
1: "Connected", + /// 2: "CanConnect" (gracefully disconnected) + /// 3: "CannotConnect" (tried but failed) + pub connectedness: u8, + /// 0: "Unknown", 1: "Inbound" (if the peer contacted us) + /// 2: "Outbound" (if we connected to them) + pub direction: u8, + pub protected: bool, + #[serde(rename = "chainID")] + pub chain_id: ChainId, + /// nanosecond + pub latency: u64, + pub gossip_blocks: bool, + #[serde(rename = "scores")] + pub peer_scores: PeerScores, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct PeerDump { + pub total_connected: u32, + pub peers: HashMap, + pub banned_peers: Vec, + #[serde(rename = "bannedIPS")] + pub banned_ips: Vec, + // todo: should be IPNet + pub banned_subnets: Vec, +} + +// https://github.com/ethereum-optimism/optimism/blob/develop/op-node/p2p/rpc_server.go#L203 +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct PeerStats { + pub connected: u32, + pub table: u32, + #[serde(rename = "blocksTopic")] + pub blocks_topic: u32, + #[serde(rename = "blocksTopicV2")] + pub blocks_topic_v2: u32, + #[serde(rename = "blocksTopicV3")] + pub blocks_topic_v3: u32, + pub banned: u32, + pub known: u32, +} + +/// Optimism specified rpc interface. +/// https://docs.optimism.io/builders/node-operators/json-rpc +/// https://github.com/ethereum-optimism/optimism/blob/8dd17a7b114a7c25505cd2e15ce4e3d0f7e3f7c1/op-node/node/api.go#L114 +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "optimism"))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "optimism"))] +pub trait OptimismApi { + /// Get the output root at a specific block. + #[method(name = "outputAtBlock")] + async fn optimism_output_at_block( + &self, + block_number: BlockNumberOrTag, + ) -> RpcResult; + + /// Get the synchronization status. + #[method(name = "syncStatus")] + async fn optimism_sync_status(&self) -> RpcResult; + + /// Get the rollup configuration parameters. 
+ #[method(name = "rollupConfig")] + async fn optimism_rollup_config(&self) -> RpcResult; + + /// Get the software version. + #[method(name = "version")] + async fn optimism_version(&self) -> RpcResult; +} + +/// The opp2p namespace handles peer interactions. +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "opp2p"))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "opp2p"))] +pub trait OpP2PApi { + /// Returns information of node + #[method(name = "self")] + async fn opp2p_self(&self) -> RpcResult; + + #[method(name = "peers")] + async fn opp2p_peers(&self) -> RpcResult; + + #[method(name = "peerStats")] + async fn opp2p_peer_stats(&self) -> RpcResult; + + #[method(name = "discoveryTable")] + async fn opp2p_discovery_table(&self) -> RpcResult>; + + #[method(name = "blockPeer")] + async fn opp2p_block_peer(&self, peer: String) -> RpcResult<()>; + + #[method(name = "listBlockedPeers")] + async fn opp2p_list_blocked_peers(&self) -> RpcResult>; + + #[method(name = "blocAddr")] + async fn opp2p_block_addr(&self, ip: IpAddr) -> RpcResult<()>; + + #[method(name = "unblockAddr")] + async fn opp2p_unblock_addr(&self, ip: IpAddr) -> RpcResult<()>; + + #[method(name = "listBlockedAddrs")] + async fn opp2p_list_blocked_addrs(&self) -> RpcResult>; + + /// todo: should be IPNet? + #[method(name = "blockSubnet")] + async fn opp2p_block_subnet(&self, subnet: String) -> RpcResult<()>; + + /// todo: should be IPNet? + #[method(name = "unblockSubnet")] + async fn opp2p_unblock_subnet(&self, subnet: String) -> RpcResult<()>; + + /// todo: should be IPNet? 
+ #[method(name = "listBlockedSubnets")] + async fn opp2p_list_blocked_subnets(&self) -> RpcResult>; + + #[method(name = "protectPeer")] + async fn opp2p_protect_peer(&self, peer: String) -> RpcResult<()>; + + #[method(name = "unprotectPeer")] + async fn opp2p_unprotect_peer(&self, peer: String) -> RpcResult<()>; + + #[method(name = "connectPeer")] + async fn opp2p_connect_peer(&self, peer: String) -> RpcResult<()>; + + #[method(name = "disconnectPeer")] + async fn opp2p_disconnect_peer(&self, peer: String) -> RpcResult<()>; +} + +/// The admin namespace endpoints +/// https://github.com/ethereum-optimism/optimism/blob/c7ad0ebae5dca3bf8aa6f219367a95c15a15ae41/op-node/node/api.go#L28-L36 +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "admin"))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "admin"))] +pub trait OpAdminApi { + #[method(name = "resetDerivationPipeline")] + async fn admin_reset_derivation_pipeline(&self) -> RpcResult<()>; + + #[method(name = "startSequencer")] + async fn admin_start_sequencer(&self, block_hash: B256) -> RpcResult<()>; + + #[method(name = "stopSequencer")] + async fn admin_stop_sequencer(&self) -> RpcResult; + + #[method(name = "sequencerActive")] + async fn admin_sequencer_active(&self) -> RpcResult; +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + fn test_helper<'a, T>(json_str: &'a str) + where + T: Serialize + Deserialize<'a>, + { + let deserialize = serde_json::from_str::(json_str).unwrap(); + assert_eq!(json!(json_str), json!(serde_json::to_string(&deserialize).unwrap())); + } + + #[test] + fn test_output_response() { + let output_response_json = 
r#"{"version":"0x0000000000000000000000000000000000000000000000000000000000000000","outputRoot":"0xf1119e7d0fef8c54ab799be80fc61f503cea4e5c0aa1cf7ac104ef3a104f3bd1","blockRef":{"hash":"0x6d39c46aabc847f5f2664a22bbc5f65a57286603095a9ebc946d1ed19ef4925c","number":118818299,"parentHash":"0x8a0876a165da864c223d30e444b1c003fb59920c88dfb12157c0f83826e0f8ed","timestamp":1713235375,"l1origin":{"hash":"0x807da416f5aaa26fa228e0cf53e76fab783b56d7996c717663335b40e0b28824","number":19665136},"sequenceNumber":4},"withdrawalStorageRoot":"0x5c9a29a8ad2ecf97fb4bdea74c715fd2c13fa87d4861414478bc4579601c3585","stateRoot":"0x16849c0a93d00bb2d7ceacda11a1478854d2bbb0a377b4d6793b67a3f05eb6fe","syncStatus":{"current_l1":{"hash":"0x2f0f186d0fece338aa563f5dfc49a73cba5607445ff87aca833fd1d6833c5e05","number":19661406,"parentHash":"0x2c7c564d2960c8035fa6962ebf071668fdcdf8ca004bca5adfd04166ce32aacc","timestamp":1713190115},"current_l1_finalized":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"head_l1":{"hash":"0xf98493dcc3d82fe9af339c0a81b0f96172a56764f9abcff464c740e0cb3ccee7","number":19665175,"parentHash":"0xfbab86e5b807916c7ddfa395db794cdf4162128b9770eb8eb829679d81d74328","timestamp":1713235763},"safe_l1":{"hash":"0xfb8f07e551eb65c3282aaefe9a4954c15672e0077b2a5a1db18fcd2126cbc922","number":19665115,"parentHash":"0xfc0d62788fb9cda1cacb54a0e53ca398289436a6b68d1ba69db2942500b4ce5f","timestamp":1713235031},"finalized_l1":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"unsafe_l2":{"hash":"0x3540517a260316758a4872f7626e8b9e009968b6d8cfa9c11bfd3a03e7656bd5","number":118818499,"parentHash":"0x09f30550e6d6f217691e185bf1a2b4665b83f43fc8dbcc68c0bfd513e6805590","timestamp":1713235775,"l1origin":{"hash":
"0x036003c1c6561123a2f6573b7a34e9598bd023199e259d91765ee2c8677d9c07","number":19665170},"sequenceNumber":0},"safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1},"finalized_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1},"pending_safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1}}}"#; + test_helper::(output_response_json); + } + + #[test] + fn serialize_sync_status() { + let sync_status_json = 
r#"{"current_l1":{"hash":"0x2f0f186d0fece338aa563f5dfc49a73cba5607445ff87aca833fd1d6833c5e05","number":19661406,"parentHash":"0x2c7c564d2960c8035fa6962ebf071668fdcdf8ca004bca5adfd04166ce32aacc","timestamp":1713190115},"current_l1_finalized":{"hash":"0x4d769506bbfe27051715225af5ec4189f6bbd235b6d32db809dd8f5a03737b03","number":19665052,"parentHash":"0xc6324687f2baf8cc48eebd15df3a461b2b2838b5f5b16615531fc31788edb8c4","timestamp":1713234263},"head_l1":{"hash":"0xfc5ab77c6c08662a3b4d85b8c86010b7aecfc2c0369e4458f80357530db8e919","number":19665141,"parentHash":"0x099792a293002b987f3507524b28614f399b2b5ed607788520963c251844113c","timestamp":1713235355},"safe_l1":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"finalized_l1":{"hash":"0x4d769506bbfe27051715225af5ec4189f6bbd235b6d32db809dd8f5a03737b03","number":19665052,"parentHash":"0xc6324687f2baf8cc48eebd15df3a461b2b2838b5f5b16615531fc31788edb8c4","timestamp":1713234263},"unsafe_l2":{"hash":"0x6d39c46aabc847f5f2664a22bbc5f65a57286603095a9ebc946d1ed19ef4925c","number":118818299,"parentHash":"0x8a0876a165da864c223d30e444b1c003fb59920c88dfb12157c0f83826e0f8ed","timestamp":1713235375,"l1origin":{"hash":"0x807da416f5aaa26fa228e0cf53e76fab783b56d7996c717663335b40e0b28824","number":19665136},"sequenceNumber":4},"safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1},"finalized_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6
ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1},"pending_safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1}}"#; + test_helper::(sync_status_json); + } + + #[test] + fn test_rollup_config() { + let rollup_config_json = r#"{"genesis":{"l1":{"hash":"0x438335a20d98863a4c0c97999eb2481921ccd28553eac6f913af7c12aec04108","number":17422590},"l2":{"hash":"0xdbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3","number":105235063},"l2_time":1686068903,"system_config":{"batcherAddr":"0x6887246668a3b87f54deb3b94ba47a6f63f32985","overhead":"0x00000000000000000000000000000000000000000000000000000000000000bc","scalar":"0x00000000000000000000000000000000000000000000000000000000000a6fe0","gasLimit":30000000}},"block_time":2,"max_sequencer_drift":600,"seq_window_size":3600,"channel_timeout":300,"l1_chain_id":1,"l2_chain_id":10,"regolith_time":0,"canyon_time":1704992401,"delta_time":1708560000,"ecotone_time":1710374401,"batch_inbox_address":"0xff00000000000000000000000000000000000010","deposit_contract_address":"0xbeb5fc579115071764c7423a4f12edde41f106ed","l1_system_config_address":"0x229047fed2591dbec1ef1118d64f7af3db9eb290","protocol_versions_address":"0x8062abc286f5e7d9428a0ccb9abd71e50d93b935","da_challenge_address":"0x0000000000000000000000000000000000000000","da_challenge_window":0,"da_resolve_window":0,"use_plasma":false}"#; + test_helper::(rollup_config_json); + } + + #[test] + fn test_peer_info() { + let peer_info_json = 
r#"{"peerID":"16Uiu2HAm2y6DXp6THWHCyquczNUh8gVAm4spo6hjP3Ns1dGRiAdE","nodeID":"75a52a90fe5f972171fefce2399ca5a73191c654e7c7ddfdd71edf4fca6697f0","userAgent":"","protocolVersion":"","ENR":"enr:-J-4QFOtI_hDBa_kilrQcg4iTJt9VMAuDLCbgAAKMa--WfxoPml1xDYxypUG7IsWga83FOlvr78LG3oH8CfzRzUmsDyGAYvKqIZ2gmlkgnY0gmlwhGxAaceHb3BzdGFja4Xc76gFAIlzZWNwMjU2azGhAnAON-FvpiWY2iG_LXJDYosknGyikaajPDd1cQARsVnBg3RjcIIkBoN1ZHCC0Vs","addresses":["/ip4/127.0.0.1/tcp/9222/p2p/16Uiu2HAm2y6DXp6THWHCyquczNUh8gVAm4spo6hjP3Ns1dGRiAdE","/ip4/192.168.1.71/tcp/9222/p2p/16Uiu2HAm2y6DXp6THWHCyquczNUh8gVAm4spo6hjP3Ns1dGRiAdE","/ip4/108.64.105.199/tcp/9222/p2p/16Uiu2HAm2y6DXp6THWHCyquczNUh8gVAm4spo6hjP3Ns1dGRiAdE"],"protocols":null,"connectedness":0,"direction":0,"protected":false,"chainID":0,"latency":0,"gossipBlocks":true,"scores":{"gossip":{"total":0.0,"blocks":{"timeInMesh":0.0,"firstMessageDeliveries":0.0,"meshMessageDeliveries":0.0,"invalidMessageDeliveries":0.0},"IPColocationFactor":0.0,"behavioralPenalty":0.0},"reqResp":{"validResponses":0.0,"errorResponses":0.0,"rejectedPayloads":0.0}}}"#; + test_helper::(peer_info_json); + } + + #[test] + fn test_peer_dump() { + let peer_dump_json = 
r#"{"totalConnected":20,"peers":{"16Uiu2HAkvNYscHu4V1uj6fVWkwrAMCRsqXDSq4mUbhpGq4LttYsC":{"peerID":"16Uiu2HAkvNYscHu4V1uj6fVWkwrAMCRsqXDSq4mUbhpGq4LttYsC","nodeID":"d693c5b58424016c0c38ec5539c272c754cb6b8007b322e0ecf16a4ee13f96fb","userAgent":"optimism","protocolVersion":"","ENR":"","addresses":["/ip4/20.249.62.215/tcp/9222/p2p/16Uiu2HAkvNYscHu4V1uj6fVWkwrAMCRsqXDSq4mUbhpGq4LttYsC"],"protocols":["/ipfs/ping/1.0.0","/meshsub/1.0.0","/meshsub/1.1.0","/opstack/req/payload_by_number/11155420/0","/floodsub/1.0.0","/ipfs/id/1.0.0","/ipfs/id/push/1.0.0"],"connectedness":1,"direction":1,"protected":false,"chainID":0,"latency":0,"gossipBlocks":true,"scores":{"gossip":{"total":-5.04,"blocks":{"timeInMesh":0.0,"firstMessageDeliveries":0.0,"meshMessageDeliveries":0.0,"invalidMessageDeliveries":0.0},"IPColocationFactor":0.0,"behavioralPenalty":0.0},"reqResp":{"validResponses":0.0,"errorResponses":0.0,"rejectedPayloads":0.0}}}},"bannedPeers":[],"bannedIPS":[],"bannedSubnets":[]}"#; + test_helper::(peer_dump_json); + } + + #[test] + fn test_peer_stats() { + let peer_stats_json = r#"{"connected":20,"table":94,"blocksTopic":20,"blocksTopicV2":18,"blocksTopicV3":20,"banned":0,"known":71}"#; + test_helper::(peer_stats_json); + } +} From 33b195af333832255f992bbbb62ba2e07fb5bab1 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 16 Apr 2024 12:09:19 +0200 Subject: [PATCH 172/700] fix(tree): overlapping parent block hashes (#7669) --- crates/blockchain-tree/src/blockchain_tree.rs | 95 ++++++++++++++++++- .../storage/provider/src/test_utils/blocks.rs | 4 +- 2 files changed, 94 insertions(+), 5 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index a05667209bade..d66258d1f44a4 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -28,7 +28,7 @@ use reth_provider::{ }; use reth_stages::{MetricEvent, MetricEventsSender}; use std::{ - collections::{BTreeMap, 
HashSet}, + collections::{btree_map::Entry, BTreeMap, HashSet}, sync::Arc, }; use tracing::{debug, error, info, instrument, trace, warn}; @@ -495,19 +495,30 @@ where } /// Get all block hashes from a sidechain that are not part of the canonical chain. - /// /// This is a one time operation per block. /// /// # Note /// /// This is not cached in order to save memory. fn all_chain_hashes(&self, chain_id: BlockChainId) -> BTreeMap { - // find chain and iterate over it, let mut chain_id = chain_id; let mut hashes = BTreeMap::new(); loop { let Some(chain) = self.state.chains.get(&chain_id) else { return hashes }; - hashes.extend(chain.blocks().values().map(|b| (b.number, b.hash()))); + + // The parent chains might contain blocks with overlapping numbers or numbers greater + // than original chain tip. Insert the block hash only if it's not present + // for the given block number and the block number does not exceed the + // original chain tip. + let latest_block_number = hashes + .last_key_value() + .map(|(number, _)| *number) + .unwrap_or_else(|| chain.tip().number); + for block in chain.blocks().values().filter(|b| b.number <= latest_block_number) { + if let Entry::Vacant(e) = hashes.entry(block.number) { + e.insert(block.hash()); + } + } let fork_block = chain.fork_block(); if let Some(next_chain_id) = self.block_indices().get_blocks_chain_id(&fork_block.hash) @@ -1590,6 +1601,82 @@ mod tests { ); } + #[test] + fn sidechain_block_hashes() { + let data = BlockChainTestData::default_from_number(11); + let (block1, exec1) = data.blocks[0].clone(); + let (block2, exec2) = data.blocks[1].clone(); + let (block3, exec3) = data.blocks[2].clone(); + let (block4, exec4) = data.blocks[3].clone(); + let genesis = data.genesis; + + // test pops execution results from vector, so order is from last to first. + let externals = + setup_externals(vec![exec3.clone(), exec2.clone(), exec4, exec3, exec2, exec1]); + + // last finalized block would be number 9. 
+ setup_genesis(&externals.provider_factory, genesis); + + // make tree + let config = BlockchainTreeConfig::new(1, 2, 3, 2); + let mut tree = BlockchainTree::new(externals, config, None).expect("failed to create tree"); + // genesis block 10 is already canonical + tree.make_canonical(B256::ZERO).unwrap(); + + // make genesis block 10 as finalized + tree.finalize_block(10); + + assert_eq!( + tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + assert_eq!( + tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + assert_eq!( + tree.insert_block(block3.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + assert_eq!( + tree.insert_block(block4, BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + let mut block2a = block2; + let block2a_hash = B256::new([0x34; 32]); + block2a.set_hash(block2a_hash); + + assert_eq!( + tree.insert_block(block2a.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) + ); + + let mut block3a = block3; + let block3a_hash = B256::new([0x35; 32]); + block3a.set_hash(block3a_hash); + block3a.set_parent_hash(block2a.hash()); + + assert_eq!( + tree.insert_block(block3a.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) /* TODO: this is incorrect, figure out why */ + ); + + let block3a_chain_id = + tree.state.block_indices.get_blocks_chain_id(&block3a.hash()).unwrap(); + assert_eq!( + tree.all_chain_hashes(block3a_chain_id), + BTreeMap::from([ + (block1.number, block1.hash()), + (block2a.number, block2a.hash()), + 
(block3a.number, block3a.hash()), + ]) + ); + } + #[test] fn cached_trie_updates() { let data = BlockChainTestData::default_from_number(11); diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index be6c61471616a..5c2c1e969ea7e 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -266,7 +266,9 @@ fn block3( .clone() .map(|slot| (U256::from(slot), (U256::ZERO, U256::from(slot)))), ), - ); + ) + .revert_account_info(number, address, Some(None)) + .revert_storage(number, address, Vec::new()); } let bundle = BundleStateWithReceipts::new( bundle_state_builder.build(), From b67f7f09038317896805cf706f9cfeed8bc9f3b6 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 16 Apr 2024 12:30:45 +0200 Subject: [PATCH 173/700] safer `TryFrom` for `Transaction` (#7667) --- crates/primitives/src/transaction/mod.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 9e7c5d8e9d33d..b2ae3dbe2fbd3 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -618,8 +618,12 @@ impl TryFrom for Transaction { type Error = ConversionError; fn try_from(tx: reth_rpc_types::Transaction) -> Result { - match tx.transaction_type { - None | Some(0) => { + match tx.transaction_type.map(TryInto::try_into).transpose().map_err(|_| { + ConversionError::Eip2718Error(Eip2718Error::UnexpectedType( + tx.transaction_type.unwrap(), + )) + })? 
{ + None | Some(TxType::Legacy) => { // legacy if tx.max_fee_per_gas.is_some() || tx.max_priority_fee_per_gas.is_some() { return Err(ConversionError::Eip2718Error( @@ -640,7 +644,7 @@ impl TryFrom for Transaction { input: tx.input, })) } - Some(1u8) => { + Some(TxType::Eip2930) => { // eip2930 Ok(Transaction::Eip2930(TxEip2930 { chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, @@ -656,7 +660,7 @@ impl TryFrom for Transaction { gas_price: tx.gas_price.ok_or(ConversionError::MissingGasPrice)?, })) } - Some(2u8) => { + Some(TxType::Eip1559) => { // EIP-1559 Ok(Transaction::Eip1559(TxEip1559 { chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, @@ -677,7 +681,7 @@ impl TryFrom for Transaction { input: tx.input, })) } - Some(3u8) => { + Some(TxType::Eip4844) => { // EIP-4844 Ok(Transaction::Eip4844(TxEip4844 { chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, @@ -704,7 +708,8 @@ impl TryFrom for Transaction { .ok_or(ConversionError::MissingMaxFeePerBlobGas)?, })) } - Some(tx_type) => Err(Eip2718Error::UnexpectedType(tx_type).into()), + #[cfg(feature = "optimism")] + Some(TxType::Deposit) => todo!(), } } } From 8105291afa917b7cebe65c8a90ae1f4125436af3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5vard=20Anda=20Estensen?= Date: Tue, 16 Apr 2024 12:41:19 +0200 Subject: [PATCH 174/700] chore: convert FromRecoveredTransaction into TryFromRecoveredTransaction (#7497) Co-authored-by: Matthias Seitz --- crates/primitives/src/lib.rs | 10 ++--- crates/primitives/src/transaction/error.rs | 12 ++++++ crates/primitives/src/transaction/mod.rs | 30 ++++++++++--- crates/primitives/src/transaction/tx_type.rs | 6 +++ crates/transaction-pool/src/maintain.rs | 15 ++++--- crates/transaction-pool/src/test_utils/gen.rs | 16 ++++--- .../transaction-pool/src/test_utils/mock.rs | 34 +++++++++------ crates/transaction-pool/src/traits.rs | 43 ++++++++++++++----- 8 files changed, 118 insertions(+), 48 deletions(-) diff --git a/crates/primitives/src/lib.rs 
b/crates/primitives/src/lib.rs index 303fd1e7a505a..c57bffed50488 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -94,11 +94,11 @@ pub use transaction::{ pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, - AccessList, AccessListItem, FromRecoveredTransaction, IntoRecoveredTransaction, - InvalidTransactionError, Signature, Transaction, TransactionKind, TransactionMeta, - TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxEip1559, TxEip2930, - TxEip4844, TxHashOrNumber, TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, - EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, OP_RETH_MAINNET_BELOW_BEDROCK, + AccessList, AccessListItem, IntoRecoveredTransaction, InvalidTransactionError, Signature, + Transaction, TransactionKind, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, + TransactionSignedNoHash, TryFromRecoveredTransaction, TxEip1559, TxEip2930, TxEip4844, + TxHashOrNumber, TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, + LEGACY_TX_TYPE_ID, OP_RETH_MAINNET_BELOW_BEDROCK, }; pub use withdrawal::{Withdrawal, Withdrawals}; diff --git a/crates/primitives/src/transaction/error.rs b/crates/primitives/src/transaction/error.rs index cf979f022d6e3..2b17fa7181a85 100644 --- a/crates/primitives/src/transaction/error.rs +++ b/crates/primitives/src/transaction/error.rs @@ -63,3 +63,15 @@ pub enum TransactionConversionError { #[error("Transaction is not supported for p2p")] UnsupportedForP2P, } + +/// Represents error variants than can happen when trying to convert a +/// [`TransactionSignedEcRecovered`](crate::TransactionSignedEcRecovered) transaction. +#[derive(Debug, Clone, Eq, PartialEq, thiserror::Error)] +pub enum TryFromRecoveredTransactionError { + /// Thrown if the transaction type is unsupported. 
+ #[error("Unsupported transaction type: {0}")] + UnsupportedTransactionType(u8), + /// This error variant is used when a blob sidecar is missing. + #[error("Blob sidecar missing for an EIP-4844 transaction")] + BlobSidecarMissing, +} diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index b2ae3dbe2fbd3..c72c70627ad2a 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -20,7 +20,9 @@ pub use eip1559::TxEip1559; pub use eip2930::TxEip2930; pub use eip4844::TxEip4844; -pub use error::{InvalidTransactionError, TransactionConversionError}; +pub use error::{ + InvalidTransactionError, TransactionConversionError, TryFromRecoveredTransactionError, +}; pub use legacy::TxLegacy; pub use meta::TransactionMeta; #[cfg(feature = "c-kzg")] @@ -1744,16 +1746,30 @@ impl Decodable for TransactionSignedEcRecovered { /// /// This is a conversion trait that'll ensure transactions received via P2P can be converted to the /// transaction type that the transaction pool uses. -pub trait FromRecoveredTransaction { +pub trait TryFromRecoveredTransaction { + /// The error type returned by the transaction. + type Error; /// Converts to this type from the given [`TransactionSignedEcRecovered`]. 
- fn from_recovered_transaction(tx: TransactionSignedEcRecovered) -> Self; + fn try_from_recovered_transaction( + tx: TransactionSignedEcRecovered, + ) -> Result + where + Self: Sized; } // Noop conversion -impl FromRecoveredTransaction for TransactionSignedEcRecovered { +impl TryFromRecoveredTransaction for TransactionSignedEcRecovered { + type Error = TryFromRecoveredTransactionError; + #[inline] - fn from_recovered_transaction(tx: TransactionSignedEcRecovered) -> Self { - tx + fn try_from_recovered_transaction( + tx: TransactionSignedEcRecovered, + ) -> Result { + if tx.is_eip4844() { + Err(TryFromRecoveredTransactionError::BlobSidecarMissing) + } else { + Ok(tx) + } } } @@ -1768,7 +1784,7 @@ pub trait FromRecoveredPooledTransaction { fn from_recovered_pooled_transaction(tx: PooledTransactionsElementEcRecovered) -> Self; } -/// The inverse of [`FromRecoveredTransaction`] that ensure the transaction can be sent over the +/// The inverse of [TryFromRecoveredTransaction] that ensure the transaction can be sent over the /// network pub trait IntoRecoveredTransaction { /// Converts to this type into a [`TransactionSignedEcRecovered`]. 
diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 8bb6c6956e718..84a099cb76605 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -169,6 +169,12 @@ impl Compact for TxType { } } +impl PartialEq for TxType { + fn eq(&self, other: &u8) -> bool { + *self as u8 == *other + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 49d9d029516ac..e2bdd44c22f68 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -13,8 +13,8 @@ use futures_util::{ }; use reth_primitives::{ fs::FsPathError, Address, BlockHash, BlockNumber, BlockNumberOrTag, - FromRecoveredPooledTransaction, FromRecoveredTransaction, IntoRecoveredTransaction, - PooledTransactionsElementEcRecovered, TransactionSigned, + FromRecoveredPooledTransaction, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, + TransactionSigned, TryFromRecoveredTransaction, }; use reth_provider::{ BlockReaderIdExt, BundleStateWithReceipts, CanonStateNotification, ChainSpecProvider, @@ -333,9 +333,9 @@ pub async fn maintain_transaction_pool(

::Transaction::from_recovered_pooled_transaction, ) } else { - Some(

::Transaction::from_recovered_transaction( +

::Transaction::try_from_recovered_transaction( tx, - )) + ).ok() } }) .collect::>(); @@ -584,8 +584,13 @@ where let pool_transactions = txs_signed .into_iter() - .filter_map(|tx| tx.try_ecrecovered().map(::from_recovered_transaction)) + .filter_map(|tx| tx.try_ecrecovered()) + .filter_map(|tx| { + // Filter out errors +

::Transaction::try_from_recovered_transaction(tx).ok() + }) .collect::>(); + let outcome = pool.add_transactions(crate::TransactionOrigin::Local, pool_transactions).await; info!(target: "txpool", txs_file =?file_path, num_txs=%outcome.len(), "Successfully reinserted local transactions from file"); diff --git a/crates/transaction-pool/src/test_utils/gen.rs b/crates/transaction-pool/src/test_utils/gen.rs index 614b4b00dfa17..52a3127c79b41 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -1,9 +1,9 @@ use crate::EthPooledTransaction; use rand::Rng; use reth_primitives::{ - constants::MIN_PROTOCOL_BASE_FEE, sign_message, AccessList, Address, Bytes, - FromRecoveredTransaction, Transaction, TransactionKind, TransactionSigned, TxEip1559, - TxEip4844, TxLegacy, B256, MAINNET, U256, + constants::MIN_PROTOCOL_BASE_FEE, sign_message, AccessList, Address, Bytes, Transaction, + TransactionKind, TransactionSigned, TryFromRecoveredTransaction, TxEip1559, TxEip4844, + TxLegacy, B256, MAINNET, U256, }; /// A generator for transactions for testing purposes. @@ -98,15 +98,17 @@ impl TransactionGenerator { /// Generates and returns a pooled EIP-1559 transaction with a random signer. pub fn gen_eip1559_pooled(&mut self) -> EthPooledTransaction { - EthPooledTransaction::from_recovered_transaction( + EthPooledTransaction::try_from_recovered_transaction( self.gen_eip1559().into_ecrecovered().unwrap(), ) + .unwrap() } + /// Generates and returns a pooled EIP-4844 transaction with a random signer. 
pub fn gen_eip4844_pooled(&mut self) -> EthPooledTransaction { - EthPooledTransaction::from_recovered_transaction( - self.gen_eip4844().into_ecrecovered().unwrap(), - ) + let tx = self.gen_eip4844().into_ecrecovered().unwrap(); + let encoded_length = tx.length_without_header(); + EthPooledTransaction::new(tx, encoded_length) } } diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index f590764583f7c..d250b6c10bd31 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -13,9 +13,10 @@ use rand::{ }; use reth_primitives::{ constants::{eip4844::DATA_GAS_PER_BLOB, MIN_PROTOCOL_BASE_FEE}, + transaction::TryFromRecoveredTransactionError, AccessList, Address, BlobTransactionSidecar, Bytes, FromRecoveredPooledTransaction, - FromRecoveredTransaction, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, - Signature, Transaction, TransactionKind, TransactionSigned, TransactionSignedEcRecovered, + IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, Signature, Transaction, + TransactionKind, TransactionSigned, TransactionSignedEcRecovered, TryFromRecoveredTransaction, TxEip1559, TxEip2930, TxEip4844, TxHash, TxLegacy, TxType, B256, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, U256, }; @@ -720,8 +721,12 @@ impl PoolTransaction for MockTransaction { } } -impl FromRecoveredTransaction for MockTransaction { - fn from_recovered_transaction(tx: TransactionSignedEcRecovered) -> Self { +impl TryFromRecoveredTransaction for MockTransaction { + type Error = TryFromRecoveredTransactionError; + + fn try_from_recovered_transaction( + tx: TransactionSignedEcRecovered, + ) -> Result { let sender = tx.signer(); let transaction = tx.into_signed(); let hash = transaction.hash(); @@ -737,7 +742,7 @@ impl FromRecoveredTransaction for MockTransaction { to, value, input, - }) => MockTransaction::Legacy { + }) => 
Ok(MockTransaction::Legacy { hash, sender, nonce, @@ -747,7 +752,7 @@ impl FromRecoveredTransaction for MockTransaction { value, input, size, - }, + }), Transaction::Eip1559(TxEip1559 { chain_id: _, nonce, @@ -758,7 +763,7 @@ impl FromRecoveredTransaction for MockTransaction { value, input, access_list, - }) => MockTransaction::Eip1559 { + }) => Ok(MockTransaction::Eip1559 { hash, sender, nonce, @@ -770,7 +775,7 @@ impl FromRecoveredTransaction for MockTransaction { input, accesslist: access_list, size, - }, + }), Transaction::Eip4844(TxEip4844 { chain_id: _, nonce, @@ -783,7 +788,7 @@ impl FromRecoveredTransaction for MockTransaction { access_list, blob_versioned_hashes: _, max_fee_per_blob_gas, - }) => MockTransaction::Eip4844 { + }) => Ok(MockTransaction::Eip4844 { hash, sender, nonce, @@ -797,7 +802,7 @@ impl FromRecoveredTransaction for MockTransaction { accesslist: access_list, sidecar: BlobTransactionSidecar::default(), size, - }, + }), Transaction::Eip2930(TxEip2930 { chain_id: _, nonce, @@ -807,7 +812,7 @@ impl FromRecoveredTransaction for MockTransaction { value, input, access_list, - }) => MockTransaction::Eip2930 { + }) => Ok(MockTransaction::Eip2930 { hash, sender, nonce, @@ -818,7 +823,7 @@ impl FromRecoveredTransaction for MockTransaction { input, accesslist: access_list, size, - }, + }), _ => unreachable!("Invalid transaction type"), } } @@ -826,7 +831,10 @@ impl FromRecoveredTransaction for MockTransaction { impl FromRecoveredPooledTransaction for MockTransaction { fn from_recovered_pooled_transaction(tx: PooledTransactionsElementEcRecovered) -> Self { - FromRecoveredTransaction::from_recovered_transaction(tx.into_ecrecovered_transaction()) + TryFromRecoveredTransaction::try_from_recovered_transaction( + tx.into_ecrecovered_transaction(), + ) + .expect("Failed to convert from PooledTransactionsElementEcRecovered to MockTransaction") } } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 
bde36334c196b..459c0bf10015d 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -10,11 +10,12 @@ use crate::{ use futures_util::{ready, Stream}; use reth_eth_wire::HandleMempoolData; use reth_primitives::{ - kzg::KzgSettings, AccessList, Address, BlobTransactionSidecar, BlobTransactionValidationError, - FromRecoveredPooledTransaction, FromRecoveredTransaction, IntoRecoveredTransaction, PeerId, - PooledTransactionsElement, PooledTransactionsElementEcRecovered, SealedBlock, Transaction, - TransactionKind, TransactionSignedEcRecovered, TxEip4844, TxHash, B256, EIP1559_TX_TYPE_ID, - EIP4844_TX_TYPE_ID, U256, + kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, AccessList, Address, + BlobTransactionSidecar, BlobTransactionValidationError, FromRecoveredPooledTransaction, + IntoRecoveredTransaction, PeerId, PooledTransactionsElement, + PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionKind, + TransactionSignedEcRecovered, TryFromRecoveredTransaction, TxEip4844, TxHash, B256, + EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, U256, }; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -756,7 +757,7 @@ pub trait PoolTransaction: + Send + Sync + FromRecoveredPooledTransaction - + FromRecoveredTransaction + + TryFromRecoveredTransaction + IntoRecoveredTransaction { /// Hash of the transaction. @@ -1111,12 +1112,32 @@ impl EthPoolTransaction for EthPooledTransaction { } } -impl FromRecoveredTransaction for EthPooledTransaction { - fn from_recovered_transaction(tx: TransactionSignedEcRecovered) -> Self { - // CAUTION: this should not be done for EIP-4844 transactions, as the blob sidecar is - // missing. 
+impl TryFromRecoveredTransaction for EthPooledTransaction { + type Error = TryFromRecoveredTransactionError; + + fn try_from_recovered_transaction( + tx: TransactionSignedEcRecovered, + ) -> Result { + // ensure we can handle the transaction type and its format + match tx.tx_type() as u8 { + 0..=EIP1559_TX_TYPE_ID => { + // supported + } + EIP4844_TX_TYPE_ID => { + // doesn't have a blob sidecar + return Err(TryFromRecoveredTransactionError::BlobSidecarMissing); + } + unsupported => { + // unsupported transaction type + return Err(TryFromRecoveredTransactionError::UnsupportedTransactionType( + unsupported, + )); + } + }; + let encoded_length = tx.length_without_header(); - EthPooledTransaction::new(tx, encoded_length) + let transaction = EthPooledTransaction::new(tx, encoded_length); + Ok(transaction) } } From 7f6a2b4cb555847d567378868150510fd0731b28 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 16 Apr 2024 12:45:27 +0200 Subject: [PATCH 175/700] refactor(exex, primitives): move finished exex height to primitives (#7670) --- crates/exex/src/manager.rs | 31 +++++++------------------------ crates/primitives/src/exex/mod.rs | 18 ++++++++++++++++++ crates/primitives/src/lib.rs | 2 ++ 3 files changed, 27 insertions(+), 24 deletions(-) create mode 100644 crates/primitives/src/exex/mod.rs diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 8fe6d48f6ceb5..32c4d8e26d219 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -13,7 +13,7 @@ use crate::ExExEvent; use futures::StreamExt; use metrics::Gauge; use reth_metrics::{metrics::Counter, Metrics}; -use reth_primitives::BlockNumber; +use reth_primitives::{BlockNumber, FinishedExExHeight}; use reth_provider::CanonStateNotification; use reth_tracing::tracing::debug; use tokio::sync::{ @@ -168,7 +168,7 @@ pub struct ExExManager { is_ready: watch::Sender, /// The finished height of all ExEx's. 
- finished_height: watch::Sender, + finished_height: watch::Sender, /// A handle to the ExEx manager. handle: ExExManagerHandle, @@ -190,9 +190,9 @@ impl ExExManager { let (handle_tx, handle_rx) = mpsc::unbounded_channel(); let (is_ready_tx, is_ready_rx) = watch::channel(true); let (finished_height_tx, finished_height_rx) = watch::channel(if num_exexs == 0 { - FinishedHeight::NoExExs + FinishedExExHeight::NoExExs } else { - FinishedHeight::NotReady + FinishedExExHeight::NotReady }); let current_capacity = Arc::new(AtomicUsize::new(max_capacity)); @@ -326,7 +326,7 @@ impl Future for ExExManager { } }); if let Ok(finished_height) = finished_height { - let _ = self.finished_height.send(FinishedHeight::Height(finished_height)); + let _ = self.finished_height.send(FinishedExExHeight::Height(finished_height)); } Poll::Pending @@ -351,7 +351,7 @@ pub struct ExExManagerHandle { /// The current capacity of the manager's internal notification buffer. current_capacity: Arc, /// The finished height of all ExEx's. - finished_height: watch::Receiver, + finished_height: watch::Receiver, } impl ExExManagerHandle { @@ -396,7 +396,7 @@ impl ExExManagerHandle { } /// The finished height of all ExEx's. - pub fn finished_height(&mut self) -> FinishedHeight { + pub fn finished_height(&mut self) -> FinishedExExHeight { *self.finished_height.borrow_and_update() } @@ -433,23 +433,6 @@ impl Clone for ExExManagerHandle { } } -/// The finished height of all ExEx's. -#[derive(Debug, Clone, Copy)] -pub enum FinishedHeight { - /// No ExEx's are installed, so there is no finished height. - NoExExs, - /// Not all ExExs emitted a `FinishedHeight` event yet. - NotReady, - /// The finished height of all ExEx's. - /// - /// This is the lowest common denominator between all ExEx's. - /// - /// This block is used to (amongst other things) determine what blocks are safe to prune. - /// - /// The number is inclusive, i.e. all blocks `<= finished_height` are safe to prune. 
- Height(BlockNumber), -} - #[cfg(test)] mod tests { #[tokio::test] diff --git a/crates/primitives/src/exex/mod.rs b/crates/primitives/src/exex/mod.rs new file mode 100644 index 0000000000000..9fc2ace66dd1f --- /dev/null +++ b/crates/primitives/src/exex/mod.rs @@ -0,0 +1,18 @@ +use crate::BlockNumber; + +/// The finished height of all ExEx's. +#[derive(Debug, Clone, Copy)] +pub enum FinishedExExHeight { + /// No ExEx's are installed, so there is no finished height. + NoExExs, + /// Not all ExExs emitted a `FinishedHeight` event yet. + NotReady, + /// The finished height of all ExEx's. + /// + /// This is the lowest common denominator between all ExEx's. + /// + /// This block is used to (amongst other things) determine what blocks are safe to prune. + /// + /// The number is inclusive, i.e. all blocks `<= finished_height` are safe to prune. + Height(BlockNumber), +} diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index c57bffed50488..9c44738902a90 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -26,6 +26,7 @@ mod compression; pub mod constants; pub mod eip4844; mod error; +mod exex; pub mod fs; pub mod genesis; mod header; @@ -66,6 +67,7 @@ pub use constants::{ KECCAK_EMPTY, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, }; pub use error::{GotExpected, GotExpectedBoxed}; +pub use exex::FinishedExExHeight; pub use genesis::{ChainConfig, Genesis, GenesisAccount}; pub use header::{Header, HeaderValidationError, HeadersDirection, SealedHeader}; pub use integer_list::IntegerList; From af2dce3e651c80a5ea3c1462543097b66119df0b Mon Sep 17 00:00:00 2001 From: Luca Provini Date: Tue, 16 Apr 2024 12:50:17 +0200 Subject: [PATCH 176/700] P2P Sync e2e (#7529) Co-authored-by: Matthias Seitz --- Cargo.lock | 172 +++++++------- crates/node-e2e-tests/Cargo.toml | 6 + .../node-e2e-tests/src/chain_spec_builder.rs | 31 --- crates/node-e2e-tests/src/engine_api.rs | 66 ++++++ crates/node-e2e-tests/src/lib.rs | 18 +- 
crates/node-e2e-tests/src/network.rs | 44 ++++ crates/node-e2e-tests/src/node.rs | 125 +++++++++++ crates/node-e2e-tests/src/payload.rs | 85 +++++++ crates/node-e2e-tests/src/test_suite.rs | 41 ---- crates/node-e2e-tests/src/wallet.rs | 18 +- crates/node-e2e-tests/tests/it/eth.rs | 210 ++++++------------ crates/node-e2e-tests/tests/it/main.rs | 1 + crates/node-e2e-tests/tests/it/p2p.rs | 76 +++++++ crates/payload/builder/src/events.rs | 5 +- 14 files changed, 587 insertions(+), 311 deletions(-) delete mode 100644 crates/node-e2e-tests/src/chain_spec_builder.rs create mode 100644 crates/node-e2e-tests/src/engine_api.rs create mode 100644 crates/node-e2e-tests/src/network.rs create mode 100644 crates/node-e2e-tests/src/node.rs create mode 100644 crates/node-e2e-tests/src/payload.rs delete mode 100644 crates/node-e2e-tests/src/test_suite.rs create mode 100644 crates/node-e2e-tests/tests/it/p2p.rs diff --git a/Cargo.lock b/Cargo.lock index 72cb77b76c369..3b209fd4b3f9d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -332,7 +332,7 @@ checksum = "1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -471,7 +471,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", "syn-solidity", "tiny-keccak", ] @@ -489,7 +489,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.58", + "syn 2.0.57", "syn-solidity", ] @@ -652,7 +652,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -825,9 +825,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.8" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07dbbf24db18d609b1462965249abdf49129ccad073ec257da372adc83259c60" +checksum = "a116f46a969224200a0a97f29cfd4c50e7534e4b4826bd23ea2c3c533039c82c" dependencies = [ "brotli", "flate2", @@ -872,7 +872,7 @@ 
checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -883,7 +883,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -915,7 +915,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -1043,7 +1043,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.58", + "syn 2.0.57", "which", ] @@ -1240,7 +1240,7 @@ checksum = "6be9c93793b60dac381af475b98634d4b451e28336e72218cad9a20176218dbc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", "synstructure", ] @@ -1280,9 +1280,9 @@ dependencies = [ [[package]] name = "brotli" -version = "4.0.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "125740193d7fee5cc63ab9e16c2fdc4e07c74ba755cc53b327d6ea029e9fc569" +checksum = "d640d25bc63c50fb1f0b545ffd80207d2e10a4c965530809b40ba3386825c391" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1291,9 +1291,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "3.0.0" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65622a320492e09b5e0ac436b14c54ff68199bac392d0e89a6832c4518eea525" +checksum = "4e2e4afe60d7dd600fdd3de8d0f08c2b7ec039712e3b6137ff98b7004e82de4f" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1349,7 +1349,7 @@ checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -1541,7 +1541,7 @@ dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.11.1", + "strsim 0.11.0", ] [[package]] @@ -1553,7 +1553,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", 
"quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -1630,13 +1630,13 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "comfy-table" -version = "7.1.1" +version = "7.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b34115915337defe99b2aff5c2ce6771e5fbc4079f4b506301f5cf394c8452f7" +checksum = "7c64043d6c7b7a4c58e39e7efccfdea7b93d885a795d0c054a69dbbf4dd52686" dependencies = [ "crossterm", - "strum 0.26.2", - "strum_macros 0.26.2", + "strum 0.25.0", + "strum_macros 0.25.3", "unicode-width", ] @@ -1990,7 +1990,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -2147,7 +2147,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -2180,7 +2180,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core 0.20.8", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -2249,9 +2249,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.9" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" dependencies = [ "const-oid", "zeroize", @@ -2286,7 +2286,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -2437,7 +2437,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -2634,7 +2634,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -2647,7 +2647,7 @@ dependencies = [ 
"num-traits", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -2658,7 +2658,7 @@ checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -2970,7 +2970,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -3169,9 +3169,9 @@ dependencies = [ [[package]] name = "half" -version = "2.4.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +checksum = "b5eceaaeec696539ddaf7b333340f1af35a5aa87ae3e4f3ead0532f72affab2e" dependencies = [ "cfg-if", "crunchy", @@ -3462,7 +3462,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.6", + "socket2 0.4.10", "tokio", "tower-service", "tracing", @@ -3561,7 +3561,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -3711,7 +3711,7 @@ checksum = "d2abdd3a62551e8337af119c5899e600ca0c88ec8f23a46c60ba216c803dcf1a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -4092,7 +4092,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -4388,12 +4388,13 @@ dependencies = [ [[package]] name = "libredox" -version = "0.1.3" +version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" dependencies = [ "bitflags 2.5.0", "libc", + "redox_syscall 0.4.1", ] [[package]] @@ -4638,7 +4639,7 @@ checksum = "38b4faf00617defe497754acde3024865bc143d44a86799b24e191ecff91354f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", 
+ "syn 2.0.57", ] [[package]] @@ -4778,7 +4779,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -4893,15 +4894,20 @@ dependencies = [ "alloy-signer-wallet", "eyre", "futures-util", + "jsonrpsee", "rand 0.8.5", "reth", + "reth-db", "reth-node-core", "reth-node-ethereum", + "reth-payload-builder", "reth-primitives", + "reth-rpc", "reth-tracing", "secp256k1 0.27.0", "serde_json", "tokio", + "tokio-stream", ] [[package]] @@ -5045,7 +5051,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -5283,9 +5289,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.9" +version = "2.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" +checksum = "56f8023d0fb78c8e03784ea1c7f3fa36e68a723138990b8d5a47d916b651e7a8" dependencies = [ "memchr", "thiserror", @@ -5335,7 +5341,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -5364,7 +5370,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -5544,7 +5550,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d3928fb5db768cb86f891ff014f0144589297e3c6a1aba6ed7cecfdace270c7" dependencies = [ "proc-macro2", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -5902,9 +5908,9 @@ dependencies = [ [[package]] name = "redox_users" -version = "0.4.5" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" +checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" dependencies = [ "getrandom 0.2.14", "libredox", @@ -5957,9 +5963,9 @@ 
checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "regress" -version = "0.9.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eae2a1ebfecc58aff952ef8ccd364329abe627762f5bf09ff42eb9d98522479" +checksum = "d06f9a1f7cd8473611ba1a480cf35f9c5cffc2954336ba90a982fdb7e7d7f51e" dependencies = [ "hashbrown 0.14.3", "memchr", @@ -6261,7 +6267,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -6705,7 +6711,7 @@ dependencies = [ "quote", "regex", "serial_test", - "syn 2.0.58", + "syn 2.0.57", "trybuild", ] @@ -7923,9 +7929,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.15" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "rusty-fork" @@ -8142,7 +8148,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -8216,7 +8222,7 @@ dependencies = [ "darling 0.20.8", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -8241,7 +8247,7 @@ checksum = "b93fb4adc70021ac1b47f7d45e8cc4169baaa7ea58483bc5b721d19a26202212" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -8551,9 +8557,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "strsim" -version = "0.11.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" +checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" [[package]] name = "strum" @@ -8583,7 +8589,7 @@ dependencies = [ 
"proc-macro2", "quote", "rustversion", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -8596,7 +8602,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -8664,9 +8670,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.58" +version = "2.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" +checksum = "11a6ae1e52eb25aab8f3fb9fca13be982a373b8f1157ca14b897a825ba4a2d35" dependencies = [ "proc-macro2", "quote", @@ -8682,7 +8688,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -8699,7 +8705,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -8791,7 +8797,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -8830,7 +8836,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -8988,7 +8994,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -9201,7 +9207,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -9664,7 +9670,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", "wasm-bindgen-shared", ] @@ -9698,7 +9704,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9761,9 +9767,9 @@ dependencies = [ [[package]] name = "widestring" 
-version = "1.1.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" +checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" [[package]] name = "winapi" @@ -10024,9 +10030,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.20" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193" +checksum = "0fcb9cbac069e033553e8bb871be2fbdffcab578eb25bd0f7c508cedc6dcd75a" [[package]] name = "xmltree" @@ -10057,7 +10063,7 @@ checksum = "9e6936f0cce458098a201c245a11bef556c6a0181129c7034d10d76d1ec3a2b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", "synstructure", ] @@ -10078,7 +10084,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -10098,7 +10104,7 @@ checksum = "e6a647510471d372f2e6c2e6b7219e44d8c574d24fdc11c610a61455782f18c3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", "synstructure", ] @@ -10119,7 +10125,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] @@ -10141,7 +10147,7 @@ checksum = "7b4e5997cbf58990550ef1f0e5124a05e47e1ebd33a84af25739be6031a62c20" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.57", ] [[package]] diff --git a/crates/node-e2e-tests/Cargo.toml b/crates/node-e2e-tests/Cargo.toml index ff84a89432f7e..d3811f5f5cf1d 100644 --- a/crates/node-e2e-tests/Cargo.toml +++ b/crates/node-e2e-tests/Cargo.toml @@ -12,10 +12,16 @@ reth-node-core.workspace = true reth-primitives.workspace = true reth-node-ethereum.workspace = true reth-tracing.workspace = true +reth-db.workspace = true 
+reth-rpc.workspace = true +reth-payload-builder = { workspace = true, features = ["test-utils"] } + +jsonrpsee.workspace = true futures-util.workspace = true eyre.workspace = true tokio.workspace = true +tokio-stream.workspace = true serde_json.workspace = true rand.workspace = true secp256k1.workspace = true diff --git a/crates/node-e2e-tests/src/chain_spec_builder.rs b/crates/node-e2e-tests/src/chain_spec_builder.rs deleted file mode 100644 index d0adc624cbbc9..0000000000000 --- a/crates/node-e2e-tests/src/chain_spec_builder.rs +++ /dev/null @@ -1,31 +0,0 @@ -use std::sync::Arc; - -use reth_primitives::{ChainSpec, Genesis}; - -/// Helper struct to configure the chain spec as needed for e2e tests -#[must_use = "call `build` to construct the chainspec"] -pub struct ChainSpecBuilder { - chain_spec: ChainSpec, -} - -impl ChainSpecBuilder { - /// Creates a new chain spec builder with the static genesis.json - pub fn new() -> Self { - let genesis: Genesis = - serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); - - Self { chain_spec: genesis.into() } - } - - /// Overrides the genesis block with the given one - #[allow(unused)] - pub fn with_genesis(mut self, genesis: Genesis) -> Self { - self.chain_spec.genesis = genesis; - self - } - - /// Builds the chain spec - pub fn build(self) -> Arc { - Arc::new(self.chain_spec) - } -} diff --git a/crates/node-e2e-tests/src/engine_api.rs b/crates/node-e2e-tests/src/engine_api.rs new file mode 100644 index 0000000000000..06c9afa311ca6 --- /dev/null +++ b/crates/node-e2e-tests/src/engine_api.rs @@ -0,0 +1,66 @@ +use jsonrpsee::http_client::HttpClient; +use reth::{ + providers::CanonStateNotificationStream, + rpc::{ + api::EngineApiClient, + types::engine::{ExecutionPayloadEnvelopeV3, ForkchoiceState}, + }, +}; +use reth_node_ethereum::EthEngineTypes; +use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes, PayloadId}; +use reth_primitives::B256; + +/// Helper for engine api operations +pub 
struct EngineApiHelper { + pub canonical_stream: CanonStateNotificationStream, + pub engine_api_client: HttpClient, +} + +impl EngineApiHelper { + /// Retrieves a v3 payload from the engine api + pub async fn get_payload_v3( + &self, + payload_id: PayloadId, + ) -> eyre::Result { + Ok(EngineApiClient::::get_payload_v3(&self.engine_api_client, payload_id) + .await?) + } + + /// Submits a payload to the engine api + pub async fn submit_payload( + &self, + payload: EthBuiltPayload, + eth_attr: EthPayloadBuilderAttributes, + ) -> eyre::Result { + // setup payload for submission + let envelope_v3 = ExecutionPayloadEnvelopeV3::from(payload); + let payload_v3 = envelope_v3.execution_payload; + + // submit payload to engine api + let submission = EngineApiClient::::new_payload_v3( + &self.engine_api_client, + payload_v3, + vec![], + eth_attr.parent_beacon_block_root.unwrap(), + ) + .await?; + assert!(submission.is_valid()); + Ok(submission.latest_valid_hash.unwrap()) + } + + /// Sends forkchoice update to the engine api + pub async fn update_forkchoice(&self, hash: B256) -> eyre::Result<()> { + EngineApiClient::::fork_choice_updated_v2( + &self.engine_api_client, + ForkchoiceState { + head_block_hash: hash, + safe_block_hash: hash, + finalized_block_hash: hash, + }, + None, + ) + .await?; + + Ok(()) + } +} diff --git a/crates/node-e2e-tests/src/lib.rs b/crates/node-e2e-tests/src/lib.rs index 7448d336452b7..2799c4fe3fc7a 100644 --- a/crates/node-e2e-tests/src/lib.rs +++ b/crates/node-e2e-tests/src/lib.rs @@ -1,6 +1,14 @@ -/// Helper type to configure the chain spec as needed for e2e tests -mod chain_spec_builder; -/// Test suite for e2e tests -pub mod test_suite; +/// Wrapper type to create test nodes +pub mod node; + /// Helper type to yield accounts from mnemonic -mod wallet; +pub mod wallet; + +/// Helper for payload operations +mod payload; + +/// Helper for network operations +mod network; + +/// Helper for engine api operations +mod engine_api; diff --git 
a/crates/node-e2e-tests/src/network.rs b/crates/node-e2e-tests/src/network.rs new file mode 100644 index 0000000000000..341b0d7d0aea9 --- /dev/null +++ b/crates/node-e2e-tests/src/network.rs @@ -0,0 +1,44 @@ +use futures_util::StreamExt; +use reth::network::{NetworkEvent, NetworkEvents, NetworkHandle, PeersInfo}; +use reth_primitives::NodeRecord; +use reth_tracing::tracing::info; +use tokio_stream::wrappers::UnboundedReceiverStream; + +/// Helper for network operations +pub struct NetworkHelper { + network_events: UnboundedReceiverStream, + network: NetworkHandle, +} + +impl NetworkHelper { + /// Creates a new network helper + pub fn new(network: NetworkHandle) -> Self { + let network_events = network.event_listener(); + Self { network_events, network } + } + + /// Adds a peer to the network node via network handle + pub async fn add_peer(&mut self, node_record: NodeRecord) { + self.network.peers_handle().add_peer(node_record.id, node_record.tcp_addr()); + + match self.network_events.next().await { + Some(NetworkEvent::PeerAdded(_)) => (), + _ => panic!("Expected a peer added event"), + } + } + + /// Returns the network node record + pub fn record(&self) -> NodeRecord { + self.network.local_node_record() + } + + /// Expects a session to be established + pub async fn expect_session(&mut self) { + match self.network_events.next().await { + Some(NetworkEvent::SessionEstablished { remote_addr, .. 
}) => { + info!(?remote_addr, "Session established") + } + _ => panic!("Expected session established event"), + } + } +} diff --git a/crates/node-e2e-tests/src/node.rs b/crates/node-e2e-tests/src/node.rs new file mode 100644 index 0000000000000..f4e94b4ae47c9 --- /dev/null +++ b/crates/node-e2e-tests/src/node.rs @@ -0,0 +1,125 @@ +use crate::{engine_api::EngineApiHelper, network::NetworkHelper, payload::PayloadHelper}; +use alloy_rpc_types::BlockNumberOrTag; +use eyre::Ok; +use reth::{ + api::FullNodeComponents, + builder::FullNode, + providers::{BlockReaderIdExt, CanonStateSubscriptions}, + rpc::{ + eth::{error::EthResult, EthTransactions}, + types::engine::PayloadAttributes, + }, +}; + +use reth_node_ethereum::EthEngineTypes; +use reth_payload_builder::EthPayloadBuilderAttributes; +use reth_primitives::{Address, Bytes, B256}; + +use std::time::{SystemTime, UNIX_EPOCH}; +use tokio_stream::StreamExt; + +/// A helper struct to handle node actions +pub struct NodeHelper +where + Node: FullNodeComponents, +{ + pub inner: FullNode, + payload: PayloadHelper, + pub network: NetworkHelper, + pub engine_api: EngineApiHelper, +} + +impl NodeHelper +where + Node: FullNodeComponents, +{ + /// Creates a new test node + pub async fn new(node: FullNode) -> eyre::Result { + let builder = node.payload_builder.clone(); + + Ok(Self { + inner: node.clone(), + network: NetworkHelper::new(node.network.clone()), + payload: PayloadHelper::new(builder).await?, + engine_api: EngineApiHelper { + engine_api_client: node.auth_server_handle().http_client(), + canonical_stream: node.provider.canonical_state_stream(), + }, + }) + } + + /// Advances the node forward + pub async fn advance(&mut self, raw_tx: Bytes) -> eyre::Result<(B256, B256)> { + // push tx into pool via RPC server + let tx_hash = self.inject_tx(raw_tx).await?; + + // trigger new payload building draining the pool + let eth_attr = self.payload.new_payload().await.unwrap(); + + // first event is the payload attributes + 
self.payload.expect_attr_event(eth_attr.clone()).await?; + + // wait for the payload builder to have finished building + self.payload.wait_for_built_payload(eth_attr.payload_id()).await; + + // trigger resolve payload via engine api + self.engine_api.get_payload_v3(eth_attr.payload_id()).await?; + + // ensure we're also receiving the built payload as event + let payload = self.payload.expect_built_payload().await?; + + // submit payload via engine api + let block_hash = self.engine_api.submit_payload(payload, eth_attr.clone()).await?; + + // trigger forkchoice update via engine api to commit the block to the blockchain + self.engine_api.update_forkchoice(block_hash).await?; + + // assert the block has been committed to the blockchain + self.assert_new_block(tx_hash, block_hash).await?; + Ok((block_hash, tx_hash)) + } + + /// Injects a raw transaction into the node tx pool via RPC server + async fn inject_tx(&mut self, raw_tx: Bytes) -> EthResult { + let eth_api = self.inner.rpc_registry.eth_api(); + eth_api.send_raw_transaction(raw_tx).await + } + + /// Asserts that a new block has been added to the blockchain + /// and the tx has been included in the block + pub async fn assert_new_block( + &mut self, + tip_tx_hash: B256, + block_hash: B256, + ) -> eyre::Result<()> { + // get head block from notifications stream and verify the tx has been pushed to the + // pool is actually present in the canonical block + let head = self.engine_api.canonical_stream.next().await.unwrap(); + let tx = head.tip().transactions().next(); + assert_eq!(tx.unwrap().hash().as_slice(), tip_tx_hash.as_slice()); + + // wait for the block to commit + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + + // make sure the block hash we submitted via FCU engine api is the new latest block + // using an RPC call + let latest_block = + self.inner.provider.block_by_number_or_tag(BlockNumberOrTag::Latest)?.unwrap(); + assert_eq!(latest_block.hash_slow(), block_hash); + Ok(()) + } +} + +/// 
Helper function to create a new eth payload attributes +pub fn eth_payload_attributes() -> EthPayloadBuilderAttributes { + let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(); + + let attributes = PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + EthPayloadBuilderAttributes::new(B256::ZERO, attributes) +} diff --git a/crates/node-e2e-tests/src/payload.rs b/crates/node-e2e-tests/src/payload.rs new file mode 100644 index 0000000000000..a23f7225f19f4 --- /dev/null +++ b/crates/node-e2e-tests/src/payload.rs @@ -0,0 +1,85 @@ +use std::time::{SystemTime, UNIX_EPOCH}; + +use futures_util::StreamExt; +use reth::{ + api::{EngineTypes, PayloadBuilderAttributes}, + rpc::types::engine::PayloadAttributes, +}; +use reth_node_ethereum::EthEngineTypes; +use reth_payload_builder::{ + EthBuiltPayload, EthPayloadBuilderAttributes, Events, PayloadBuilderHandle, PayloadId, +}; +use reth_primitives::{Address, B256}; +use tokio_stream::wrappers::BroadcastStream; + +/// Helper for payload operations +pub struct PayloadHelper { + pub payload_event_stream: BroadcastStream>, + payload_builder: PayloadBuilderHandle, +} + +impl PayloadHelper { + /// Creates a new payload helper + pub async fn new(payload_builder: PayloadBuilderHandle) -> eyre::Result { + let payload_events = payload_builder.subscribe().await?; + let payload_event_stream = payload_events.into_stream(); + Ok(Self { payload_event_stream, payload_builder }) + } + + /// Creates a new payload job from static attributes + pub async fn new_payload(&self) -> eyre::Result { + let attributes = eth_payload_attributes(); + self.payload_builder.new_payload(attributes.clone()).await.unwrap(); + Ok(attributes) + } + + /// Asserts that the next event is a payload attributes event + pub async fn expect_attr_event( + &mut self, + attrs: EthPayloadBuilderAttributes, + ) -> 
eyre::Result<()> { + let first_event = self.payload_event_stream.next().await.unwrap()?; + if let reth::payload::Events::Attributes(attr) = first_event { + assert_eq!(attrs.timestamp, attr.timestamp()); + } else { + panic!("Expect first event as payload attributes.") + } + Ok(()) + } + + /// Wait until the best built payload is ready + pub async fn wait_for_built_payload(&self, payload_id: PayloadId) { + loop { + let payload = self.payload_builder.best_payload(payload_id).await.unwrap().unwrap(); + if payload.block().body.is_empty() { + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + continue; + } + break; + } + } + + /// Expects the next event to be a built payload event or panics + pub async fn expect_built_payload(&mut self) -> eyre::Result { + let second_event = self.payload_event_stream.next().await.unwrap()?; + if let reth::payload::Events::BuiltPayload(payload) = second_event { + Ok(payload) + } else { + panic!("Expect a built payload event."); + } + } +} + +/// Helper function to create a new eth payload attributes +fn eth_payload_attributes() -> EthPayloadBuilderAttributes { + let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(); + + let attributes = PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + EthPayloadBuilderAttributes::new(B256::ZERO, attributes) +} diff --git a/crates/node-e2e-tests/src/test_suite.rs b/crates/node-e2e-tests/src/test_suite.rs deleted file mode 100644 index c656e3d6cb2e4..0000000000000 --- a/crates/node-e2e-tests/src/test_suite.rs +++ /dev/null @@ -1,41 +0,0 @@ -use crate::{chain_spec_builder::ChainSpecBuilder, wallet::Wallet}; -use alloy_network::eip2718::Encodable2718; -use reth_primitives::{Bytes, ChainSpec, B256}; -use std::sync::Arc; - -/// Mnemonic used to derive the test accounts -const TEST_MNEMONIC: &str = "test test test test test test test test 
test test test junk"; - -/// Helper struct to customize the chain spec during e2e tests -pub struct TestSuite { - wallet: Wallet, -} - -impl Default for TestSuite { - fn default() -> Self { - Self::new() - } -} - -impl TestSuite { - /// Creates a new e2e test suite with a test account prefunded with 10_000 ETH from genesis - /// allocations and the eth mainnet latest chainspec. - pub fn new() -> Self { - let wallet = Wallet::new(TEST_MNEMONIC); - Self { wallet } - } - - /// Creates a signed transfer tx and returns its hash and raw bytes - pub async fn transfer_tx(&self) -> (B256, Bytes) { - let tx = self.wallet.transfer_tx().await; - (tx.trie_hash(), tx.encoded_2718().into()) - } - - /// Chain spec for e2e eth tests - /// - /// Includes 20 prefunded accounts with 10_000 ETH each derived from mnemonic "test test test - /// test test test test test test test test junk". - pub fn chain_spec(&self) -> Arc { - ChainSpecBuilder::new().build() - } -} diff --git a/crates/node-e2e-tests/src/wallet.rs b/crates/node-e2e-tests/src/wallet.rs index 80800e27e8318..2351d0a0b1a2b 100644 --- a/crates/node-e2e-tests/src/wallet.rs +++ b/crates/node-e2e-tests/src/wallet.rs @@ -1,9 +1,7 @@ -use alloy_consensus::TxEnvelope; -use alloy_network::{EthereumSigner, TransactionBuilder}; +use alloy_network::{eip2718::Encodable2718, EthereumSigner, TransactionBuilder}; use alloy_rpc_types::TransactionRequest; use alloy_signer_wallet::{coins_bip39::English, LocalWallet, MnemonicBuilder}; -use reth_primitives::{Address, U256}; - +use reth_primitives::{Address, Bytes, U256}; /// One of the accounts of the genesis allocations. 
pub struct Wallet { inner: LocalWallet, @@ -17,7 +15,7 @@ impl Wallet { } /// Creates a static transfer and signs it - pub(crate) async fn transfer_tx(&self) -> TxEnvelope { + pub async fn transfer_tx(&self) -> Bytes { let tx = TransactionRequest { nonce: Some(0), value: Some(U256::from(100)), @@ -28,6 +26,14 @@ impl Wallet { ..Default::default() }; let signer = EthereumSigner::from(self.inner.clone()); - tx.build(&signer).await.unwrap() + tx.build(&signer).await.unwrap().encoded_2718().into() + } +} + +const TEST_MNEMONIC: &str = "test test test test test test test test test test test junk"; + +impl Default for Wallet { + fn default() -> Self { + Wallet::new(TEST_MNEMONIC) } } diff --git a/crates/node-e2e-tests/tests/it/eth.rs b/crates/node-e2e-tests/tests/it/eth.rs index 5a0bae95be319..5686c6e18e5a7 100644 --- a/crates/node-e2e-tests/tests/it/eth.rs +++ b/crates/node-e2e-tests/tests/it/eth.rs @@ -1,117 +1,50 @@ -use futures_util::StreamExt; -use node_e2e_tests::test_suite::TestSuite; +use node_e2e_tests::{node::NodeHelper, wallet::Wallet}; use reth::{ - builder::{NodeBuilder, NodeHandle}, - payload::EthPayloadBuilderAttributes, - providers::{BlockReaderIdExt, CanonStateSubscriptions}, - rpc::{ - api::EngineApiClient, - eth::EthTransactions, - types::engine::{ExecutionPayloadEnvelopeV3, ForkchoiceState, PayloadAttributes}, - }, + self, + args::RpcServerArgs, + builder::{NodeBuilder, NodeConfig, NodeHandle}, tasks::TaskManager, }; -use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::EthereumNode; -use reth_primitives::{Address, BlockNumberOrTag, B256}; -use std::time::{SystemTime, UNIX_EPOCH}; +use reth_primitives::{ChainSpecBuilder, Genesis, MAINNET}; +use std::sync::Arc; #[tokio::test] async fn can_run_eth_node() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let tasks = TaskManager::current(); - let test_suite = TestSuite::new(); + + let exec = TaskManager::current(); + let exec = exec.executor(); + + // Chain 
spec with test allocs + let genesis: Genesis = serde_json::from_str(include_str!("../../assets/genesis.json")).unwrap(); + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(genesis) + .cancun_activated() + .build(), + ); // Node setup let node_config = NodeConfig::test() - .with_chain(test_suite.chain_spec()) + .with_chain(chain_spec) + .with_unused_ports() .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); - let NodeHandle { mut node, node_exit_future: _ } = NodeBuilder::new(node_config) - .testing_node(tasks.executor()) + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) + .testing_node(exec) .node(EthereumNode::default()) .launch() .await?; + let mut node = NodeHelper::new(node).await?; - // setup engine api events and payload service events - let mut notifications = node.provider.canonical_state_stream(); - let payload_events = node.payload_builder.subscribe().await?; - let mut payload_event_stream = payload_events.into_stream(); - - // push tx into pool via RPC server - let eth_api = node.rpc_registry.eth_api(); - let (expected_hash, raw_tx) = test_suite.transfer_tx().await; - - eth_api.send_raw_transaction(raw_tx).await?; - - // trigger new payload building draining the pool - let eth_attr = eth_payload_attributes(); - let payload_id = node.payload_builder.new_payload(eth_attr.clone()).await?; - - // first event is the payload attributes - let first_event = payload_event_stream.next().await.unwrap()?; - if let reth::payload::Events::Attributes(attr) = first_event { - assert_eq!(eth_attr.timestamp, attr.timestamp); - } else { - panic!("Expect first event as payload attributes.") - } - - // wait until an actual payload is built before we resolve it via engine api - loop { - let payload = node.payload_builder.best_payload(payload_id).await.unwrap().unwrap(); - if payload.block().body.is_empty() { - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - continue; - } - 
break; - } - - let client = node.engine_http_client(); - // trigger resolve payload via engine api - let _ = client.get_payload_v3(payload_id).await?; - - // ensure we're also receiving the built payload as event - let second_event = payload_event_stream.next().await.unwrap()?; - if let reth::payload::Events::BuiltPayload(payload) = second_event { - // setup payload for submission - let envelope_v3 = ExecutionPayloadEnvelopeV3::from(payload); - let payload_v3 = envelope_v3.execution_payload; - - // submit payload to engine api - let submission = client - .new_payload_v3(payload_v3, vec![], eth_attr.parent_beacon_block_root.unwrap()) - .await?; - assert!(submission.is_valid()); - - // get latest valid hash from blockchain tree - let hash = submission.latest_valid_hash.unwrap(); - - // trigger forkchoice update via engine api to commit the block to the blockchain - let fcu = client - .fork_choice_updated_v2( - ForkchoiceState { - head_block_hash: hash, - safe_block_hash: hash, - finalized_block_hash: hash, - }, - None, - ) - .await?; - assert!(fcu.is_valid()); - - // get head block from notifications stream and verify the tx has been pushed to the pool - // is actually present in the canonical block - let head = notifications.next().await.unwrap(); - let tx = head.tip().transactions().next(); - assert_eq!(tx.unwrap().hash().as_slice(), expected_hash.as_slice()); - - // make sure the block hash we submitted via FCU engine api is the new latest block using an - // RPC call - let latest_block = node.provider.block_by_number_or_tag(BlockNumberOrTag::Latest)?.unwrap(); - assert_eq!(latest_block.hash_slow(), hash); - } else { - panic!("Expect a built payload event."); - } + // Configure wallet from test mnemonic and create dummy transfer tx + let wallet = Wallet::default(); + let raw_tx = wallet.transfer_tx().await; + + // make the node advance + node.advance(raw_tx).await?; Ok(()) } @@ -120,42 +53,37 @@ async fn can_run_eth_node() -> eyre::Result<()> { #[cfg(unix)] async 
fn can_run_eth_node_with_auth_engine_api_over_ipc() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let tasks = TaskManager::current(); - let test_suite = TestSuite::new(); + let exec = TaskManager::current(); + let exec = exec.executor(); + + // Chain spec with test allocs + let genesis: Genesis = serde_json::from_str(include_str!("../../assets/genesis.json")).unwrap(); + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(genesis) + .cancun_activated() + .build(), + ); // Node setup let node_config = NodeConfig::test() - .with_chain(test_suite.chain_spec()) + .with_chain(chain_spec) .with_rpc(RpcServerArgs::default().with_unused_ports().with_http().with_auth_ipc()); - let NodeHandle { mut node, node_exit_future: _ } = NodeBuilder::new(node_config) - .testing_node(tasks.executor()) + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) + .testing_node(exec) .node(EthereumNode::default()) .launch() .await?; + let mut node = NodeHelper::new(node).await?; - // setup engine api events and payload service events - let _notifications = node.provider.canonical_state_stream(); - let payload_events = node.payload_builder.subscribe().await?; - let mut payload_event_stream = payload_events.into_stream(); - - // push tx into pool via RPC server - let eth_api = node.rpc_registry.eth_api(); - let (_expected_hash, raw_tx) = test_suite.transfer_tx().await; + // Configure wallet from test mnemonic and create dummy transfer tx + let wallet = Wallet::default(); + let raw_tx = wallet.transfer_tx().await; - eth_api.send_raw_transaction(raw_tx).await?; - - // trigger new payload building draining the pool - let eth_attr = eth_payload_attributes(); - let _payload_id = node.payload_builder.new_payload(eth_attr.clone()).await?; - - // first event is the payload attributes - let first_event = payload_event_stream.next().await.unwrap()?; - if let reth::payload::Events::Attributes(attr) = first_event { - 
assert_eq!(eth_attr.timestamp, attr.timestamp); - } else { - panic!("Expect first event as payload attributes.") - } + // make the node advance + node.advance(raw_tx).await?; Ok(()) } @@ -164,32 +92,32 @@ async fn can_run_eth_node_with_auth_engine_api_over_ipc() -> eyre::Result<()> { #[cfg(unix)] async fn test_failed_run_eth_node_with_no_auth_engine_api_over_ipc_opts() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let tasks = TaskManager::current(); - let test_suite = TestSuite::new(); + let exec = TaskManager::current(); + let exec = exec.executor(); + + // Chain spec with test allocs + let genesis: Genesis = serde_json::from_str(include_str!("../../assets/genesis.json")).unwrap(); + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(genesis) + .cancun_activated() + .build(), + ); // Node setup - let node_config = NodeConfig::test().with_chain(test_suite.chain_spec()); - + let node_config = NodeConfig::test().with_chain(chain_spec); let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) - .testing_node(tasks.executor()) + .testing_node(exec) .node(EthereumNode::default()) .launch() .await?; - let client = node.engine_ipc_client().await; + let node = NodeHelper::new(node).await?; + + // Ensure that the engine api client is not available + let client = node.inner.engine_ipc_client().await; assert!(client.is_none(), "ipc auth should be disabled by default"); Ok(()) } -fn eth_payload_attributes() -> EthPayloadBuilderAttributes { - let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(); - - let attributes = PayloadAttributes { - timestamp, - prev_randao: B256::ZERO, - suggested_fee_recipient: Address::ZERO, - withdrawals: Some(vec![]), - parent_beacon_block_root: Some(B256::ZERO), - }; - EthPayloadBuilderAttributes::new(B256::ZERO, attributes) -} diff --git a/crates/node-e2e-tests/tests/it/main.rs b/crates/node-e2e-tests/tests/it/main.rs index 
9152307772f8c..ba13034645c2c 100644 --- a/crates/node-e2e-tests/tests/it/main.rs +++ b/crates/node-e2e-tests/tests/it/main.rs @@ -1,4 +1,5 @@ mod dev; mod eth; +mod p2p; fn main() {} diff --git a/crates/node-e2e-tests/tests/it/p2p.rs b/crates/node-e2e-tests/tests/it/p2p.rs new file mode 100644 index 0000000000000..d0a2716f59658 --- /dev/null +++ b/crates/node-e2e-tests/tests/it/p2p.rs @@ -0,0 +1,76 @@ +use std::sync::Arc; + +use node_e2e_tests::{node::NodeHelper, wallet::Wallet}; +use reth::{ + args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, + builder::{NodeBuilder, NodeConfig, NodeHandle}, + tasks::TaskManager, +}; +use reth_node_ethereum::EthereumNode; +use reth_primitives::{ChainSpecBuilder, Genesis, MAINNET}; + +#[tokio::test] +async fn can_sync() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let genesis: Genesis = serde_json::from_str(include_str!("../../assets/genesis.json")).unwrap(); + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(genesis) + .cancun_activated() + .build(), + ); + + let network_config = NetworkArgs { + discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, + ..NetworkArgs::default() + }; + + let node_config = NodeConfig::test() + .with_chain(chain_spec) + .with_network(network_config) + .with_unused_ports() + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node(exec.clone()) + .node(EthereumNode::default()) + .launch() + .await?; + + let mut first_node = NodeHelper::new(node.clone()).await?; + + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) + .testing_node(exec) + .node(EthereumNode::default()) + .launch() + .await?; + + let mut second_node = NodeHelper::new(node).await?; + + let wallet = Wallet::default(); + let raw_tx = 
wallet.transfer_tx().await; + + // Make them peer + first_node.network.add_peer(second_node.network.record()).await; + second_node.network.add_peer(first_node.network.record()).await; + + // Make sure they establish a new session + first_node.network.expect_session().await; + second_node.network.expect_session().await; + + // Make the first node advance + let (block_hash, tx_hash) = first_node.advance(raw_tx.clone()).await?; + + // only send forkchoice update to second node + second_node.engine_api.update_forkchoice(block_hash).await?; + + // expect second node advanced via p2p gossip + second_node.assert_new_block(tx_hash, block_hash).await?; + + Ok(()) +} diff --git a/crates/payload/builder/src/events.rs b/crates/payload/builder/src/events.rs index 66f299bbb31f4..4df81030fca89 100644 --- a/crates/payload/builder/src/events.rs +++ b/crates/payload/builder/src/events.rs @@ -1,4 +1,3 @@ -use futures_util::Stream; use reth_engine_primitives::EngineTypes; use tokio::sync::broadcast; use tokio_stream::{ @@ -26,9 +25,7 @@ pub struct PayloadEvents { impl PayloadEvents { // Convert this receiver into a stream of PayloadEvents. - pub fn into_stream( - self, - ) -> impl Stream, BroadcastStreamRecvError>> { + pub fn into_stream(self) -> BroadcastStream> { BroadcastStream::new(self.receiver) } /// Asynchronously receives the next payload event. 
From cd5be8e08455d4ae52e4d8e94194f291f0ee671c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 16 Apr 2024 13:21:50 +0200 Subject: [PATCH 177/700] chore: extract evm processor batch handling (#7671) --- crates/revm/src/batch.rs | 156 ++++++++++++++++++++++++++ crates/revm/src/lib.rs | 2 + crates/revm/src/optimism/processor.rs | 18 ++- crates/revm/src/processor.rs | 137 +++++----------------- 4 files changed, 192 insertions(+), 121 deletions(-) create mode 100644 crates/revm/src/batch.rs diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs new file mode 100644 index 0000000000000..14538f57d0e7b --- /dev/null +++ b/crates/revm/src/batch.rs @@ -0,0 +1,156 @@ +//! Helper for handling execution of multiple blocks. + +use crate::{precompile::Address, primitives::alloy_primitives::BlockNumber}; +use reth_interfaces::executor::BlockExecutionError; +use reth_primitives::{ + PruneMode, PruneModes, PruneSegmentError, Receipt, Receipts, MINIMUM_PRUNING_DISTANCE, +}; +use revm::db::states::bundle_state::BundleRetention; + +/// Takes care of: +/// - recording receipts during execution of multiple blocks. +/// - pruning receipts according to the pruning configuration. +/// - batch range if known +#[derive(Debug, Default)] +pub struct BlockBatchRecord { + /// Pruning configuration. + prune_modes: PruneModes, + /// The collection of receipts. + /// Outer vector stores receipts for each block sequentially. + /// The inner vector stores receipts ordered by transaction number. + /// + /// If receipt is None it means it is pruned. + receipts: Receipts, + /// Memoized address pruning filter. + /// Empty implies that there is going to be addresses to include in the filter in a future + /// block. None means there isn't any kind of configuration. + pruning_address_filter: Option<(u64, Vec

)>, + /// First block will be initialized to `None` + /// and be set to the block number of first block executed. + first_block: Option, + /// The maximum known block. + tip: Option, +} + +impl BlockBatchRecord { + /// Create a new receipts recorder with the given pruning configuration. + pub fn new(prune_modes: PruneModes) -> Self { + Self { prune_modes, ..Default::default() } + } + + /// Set prune modes. + pub fn set_prune_modes(&mut self, prune_modes: PruneModes) { + self.prune_modes = prune_modes; + } + + /// Set the first block number of the batch. + pub fn set_first_block(&mut self, first_block: BlockNumber) { + self.first_block = Some(first_block); + } + + /// Returns the first block of the batch if known. + pub const fn first_block(&self) -> Option { + self.first_block + } + + /// Set tip - highest known block number. + pub fn set_tip(&mut self, tip: BlockNumber) { + self.tip = Some(tip); + } + + /// Returns the tip of the batch if known. + pub const fn tip(&self) -> Option { + self.tip + } + + /// Returns the recorded receipts. + pub fn receipts(&self) -> &Receipts { + &self.receipts + } + + /// Returns all recorded receipts. + pub fn take_receipts(&mut self) -> Receipts { + std::mem::take(&mut self.receipts) + } + + /// Returns the [BundleRetention] for the given block based on the configured prune modes. + pub fn bundle_retention(&self, block_number: BlockNumber) -> BundleRetention { + if self.tip.map_or(true, |tip| { + !self + .prune_modes + .account_history + .map_or(false, |mode| mode.should_prune(block_number, tip)) && + !self + .prune_modes + .storage_history + .map_or(false, |mode| mode.should_prune(block_number, tip)) + }) { + BundleRetention::Reverts + } else { + BundleRetention::PlainState + } + } + + /// Save receipts to the executor. + pub fn save_receipts(&mut self, receipts: Vec) -> Result<(), BlockExecutionError> { + let mut receipts = receipts.into_iter().map(Some).collect(); + // Prune receipts if necessary. 
+ self.prune_receipts(&mut receipts)?; + // Save receipts. + self.receipts.push(receipts); + Ok(()) + } + + /// Prune receipts according to the pruning configuration. + fn prune_receipts( + &mut self, + receipts: &mut Vec>, + ) -> Result<(), PruneSegmentError> { + let (first_block, tip) = match self.first_block.zip(self.tip) { + Some((block, tip)) => (block, tip), + _ => return Ok(()), + }; + + let block_number = first_block + self.receipts.len() as u64; + + // Block receipts should not be retained + if self.prune_modes.receipts == Some(PruneMode::Full) || + // [`PruneSegment::Receipts`] takes priority over [`PruneSegment::ContractLogs`] + self.prune_modes.receipts.map_or(false, |mode| mode.should_prune(block_number, tip)) + { + receipts.clear(); + return Ok(()) + } + + // All receipts from the last 128 blocks are required for blockchain tree, even with + // [`PruneSegment::ContractLogs`]. + let prunable_receipts = + PruneMode::Distance(MINIMUM_PRUNING_DISTANCE).should_prune(block_number, tip); + if !prunable_receipts { + return Ok(()) + } + + let contract_log_pruner = self.prune_modes.receipts_log_filter.group_by_block(tip, None)?; + + if !contract_log_pruner.is_empty() { + let (prev_block, filter) = self.pruning_address_filter.get_or_insert((0, Vec::new())); + for (_, addresses) in contract_log_pruner.range(*prev_block..=block_number) { + filter.extend(addresses.iter().copied()); + } + } + + for receipt in receipts.iter_mut() { + let inner_receipt = receipt.as_ref().expect("receipts have not been pruned"); + + // If there is an address_filter, and it does not contain any of the + // contract addresses, then remove this receipts + if let Some((_, filter)) = &self.pruning_address_filter { + if !inner_receipt.logs.iter().any(|log| filter.contains(&log.address)) { + receipt.take(); + } + } + } + + Ok(()) + } +} diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index a4ad940284111..05f60cfba8eaa 100644 --- a/crates/revm/src/lib.rs +++ 
b/crates/revm/src/lib.rs @@ -14,6 +14,8 @@ pub mod database; /// revm implementation of reth block and transaction executors. mod factory; +pub mod batch; + /// new revm account state executor pub mod processor; diff --git a/crates/revm/src/optimism/processor.rs b/crates/revm/src/optimism/processor.rs index ef421d46a2d17..78940c8b50667 100644 --- a/crates/revm/src/optimism/processor.rs +++ b/crates/revm/src/optimism/processor.rs @@ -1,9 +1,8 @@ use crate::processor::{compare_receipts_root_and_logs_bloom, EVMProcessor}; +use reth_evm::ConfigureEvm; use reth_interfaces::executor::{ BlockExecutionError, BlockValidationError, OptimismBlockExecutionError, }; - -use reth_evm::ConfigureEvm; use reth_primitives::{ proofs::calculate_receipt_root_optimism, revm_primitives::ResultAndState, BlockWithSenders, Bloom, ChainSpec, Hardfork, Receipt, ReceiptWithBloom, TxType, B256, U256, @@ -72,7 +71,7 @@ where self.stats.receipt_root_duration += time.elapsed(); } - self.save_receipts(receipts) + self.batch_record.save_receipts(receipts) } fn execute_transactions( @@ -186,11 +185,10 @@ where } fn take_output_state(&mut self) -> BundleStateWithReceipts { - let receipts = std::mem::take(&mut self.receipts); BundleStateWithReceipts::new( self.evm.context.evm.db.take_bundle(), - receipts, - self.first_block.unwrap_or_default(), + self.batch_record.take_receipts(), + self.batch_record.first_block().unwrap_or_default(), ) } @@ -314,8 +312,8 @@ mod tests { ) .unwrap(); - let tx_receipt = executor.receipts[0][0].as_ref().unwrap(); - let deposit_receipt = executor.receipts[0][1].as_ref().unwrap(); + let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); + let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); // deposit_receipt_version is not present in pre canyon transactions assert!(deposit_receipt.deposit_receipt_version.is_none()); @@ -388,8 +386,8 @@ mod tests { ) .expect("Executing a block while canyon is active should not fail"); - let tx_receipt = 
executor.receipts[0][0].as_ref().unwrap(); - let deposit_receipt = executor.receipts[0][1].as_ref().unwrap(); + let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); + let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); // deposit_receipt_version is set to 1 for post canyon deposit transactions assert_eq!(deposit_receipt.deposit_receipt_version, Some(1)); diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs index 38bacd7f1d728..51ebfa66a9fa1 100644 --- a/crates/revm/src/processor.rs +++ b/crates/revm/src/processor.rs @@ -1,7 +1,7 @@ #[cfg(not(feature = "optimism"))] use revm::DatabaseCommit; use revm::{ - db::{states::bundle_state::BundleRetention, StateDBBox}, + db::StateDBBox, inspector_handle_register, interpreter::Host, primitives::{CfgEnvWithHandlerCfg, ResultAndState}, @@ -19,8 +19,7 @@ use reth_primitives::revm::env::fill_op_tx_env; use reth_primitives::revm::env::fill_tx_env; use reth_primitives::{ Address, Block, BlockNumber, BlockWithSenders, Bloom, ChainSpec, GotExpected, Hardfork, Header, - PruneMode, PruneModes, PruneSegmentError, Receipt, ReceiptWithBloom, Receipts, - TransactionSigned, Withdrawals, B256, MINIMUM_PRUNING_DISTANCE, U256, + PruneModes, Receipt, ReceiptWithBloom, Receipts, TransactionSigned, Withdrawals, B256, U256, }; #[cfg(not(feature = "optimism"))] use reth_provider::BundleStateWithReceipts; @@ -29,6 +28,7 @@ use reth_provider::{ }; use crate::{ + batch::BlockBatchRecord, database::StateProviderDatabase, eth_dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, stack::{InspectorStack, InspectorStackConfig}, @@ -56,23 +56,8 @@ pub struct EVMProcessor<'a, EvmConfig> { pub(crate) chain_spec: Arc, /// revm instance that contains database and env environment. pub(crate) evm: Evm<'a, InspectorStack, StateDBBox<'a, ProviderError>>, - /// The collection of receipts. - /// Outer vector stores receipts for each block sequentially. 
- /// The inner vector stores receipts ordered by transaction number. - /// - /// If receipt is None it means it is pruned. - pub(crate) receipts: Receipts, - /// First block will be initialized to `None` - /// and be set to the block number of first block executed. - pub(crate) first_block: Option, - /// The maximum known block. - tip: Option, - /// Pruning configuration. - prune_modes: PruneModes, - /// Memoized address pruning filter. - /// Empty implies that there is going to be addresses to include in the filter in a future - /// block. None means there isn't any kind of configuration. - pruning_address_filter: Option<(u64, Vec
)>, + /// Keeps track of the recorded receipts and pruning configuration. + pub(crate) batch_record: BlockBatchRecord, /// Execution stats pub(crate) stats: BlockExecutorStats, /// The type that is able to configure the EVM environment. @@ -113,11 +98,7 @@ where EVMProcessor { chain_spec, evm, - receipts: Receipts::new(), - first_block: None, - tip: None, - prune_modes: PruneModes::none(), - pruning_address_filter: None, + batch_record: BlockBatchRecord::default(), stats: BlockExecutorStats::default(), _evm_config: evm_config, } @@ -130,7 +111,17 @@ where /// Configure the executor with the given block. pub fn set_first_block(&mut self, num: BlockNumber) { - self.first_block = Some(num); + self.batch_record.set_first_block(num); + } + + /// Saves the receipts to the batch record. + pub fn save_receipts(&mut self, receipts: Vec) -> Result<(), BlockExecutionError> { + self.batch_record.save_receipts(receipts) + } + + /// Returns the recorded receipts. + pub fn receipts(&self) -> &Receipts { + self.batch_record.receipts() } /// Returns a reference to the database @@ -288,92 +279,16 @@ where self.stats.apply_post_execution_state_changes_duration += time.elapsed(); let time = Instant::now(); - let retention = if self.tip.map_or(true, |tip| { - !self - .prune_modes - .account_history - .map_or(false, |mode| mode.should_prune(block.number, tip)) && - !self - .prune_modes - .storage_history - .map_or(false, |mode| mode.should_prune(block.number, tip)) - }) { - BundleRetention::Reverts - } else { - BundleRetention::PlainState - }; + let retention = self.batch_record.bundle_retention(block.number); self.db_mut().merge_transitions(retention); self.stats.merge_transitions_duration += time.elapsed(); - if self.first_block.is_none() { - self.first_block = Some(block.number); + if self.batch_record.first_block().is_none() { + self.batch_record.set_first_block(block.number); } Ok(receipts) } - - /// Save receipts to the executor. 
- pub fn save_receipts(&mut self, receipts: Vec) -> Result<(), BlockExecutionError> { - let mut receipts = receipts.into_iter().map(Option::Some).collect(); - // Prune receipts if necessary. - self.prune_receipts(&mut receipts)?; - // Save receipts. - self.receipts.push(receipts); - Ok(()) - } - - /// Prune receipts according to the pruning configuration. - fn prune_receipts( - &mut self, - receipts: &mut Vec>, - ) -> Result<(), PruneSegmentError> { - let (first_block, tip) = match self.first_block.zip(self.tip) { - Some((block, tip)) => (block, tip), - _ => return Ok(()), - }; - - let block_number = first_block + self.receipts.len() as u64; - - // Block receipts should not be retained - if self.prune_modes.receipts == Some(PruneMode::Full) || - // [`PruneSegment::Receipts`] takes priority over [`PruneSegment::ContractLogs`] - self.prune_modes.receipts.map_or(false, |mode| mode.should_prune(block_number, tip)) - { - receipts.clear(); - return Ok(()) - } - - // All receipts from the last 128 blocks are required for blockchain tree, even with - // [`PruneSegment::ContractLogs`]. 
- let prunable_receipts = - PruneMode::Distance(MINIMUM_PRUNING_DISTANCE).should_prune(block_number, tip); - if !prunable_receipts { - return Ok(()) - } - - let contract_log_pruner = self.prune_modes.receipts_log_filter.group_by_block(tip, None)?; - - if !contract_log_pruner.is_empty() { - let (prev_block, filter) = self.pruning_address_filter.get_or_insert((0, Vec::new())); - for (_, addresses) in contract_log_pruner.range(*prev_block..=block_number) { - filter.extend(addresses.iter().copied()); - } - } - - for receipt in receipts.iter_mut() { - let inner_receipt = receipt.as_ref().expect("receipts have not been pruned"); - - // If there is an address_filter, and it does not contain any of the - // contract addresses, then remove this receipts - if let Some((_, filter)) = &self.pruning_address_filter { - if !inner_receipt.logs.iter().any(|log| filter.contains(&log.address)) { - receipt.take(); - } - } - } - - Ok(()) - } } /// Default Ethereum implementation of the [BlockExecutor] trait for the [EVMProcessor]. 
@@ -407,7 +322,8 @@ where self.stats.receipt_root_duration += time.elapsed(); } - self.save_receipts(receipts) + self.batch_record.save_receipts(receipts)?; + Ok(()) } fn execute_transactions( @@ -470,11 +386,10 @@ where fn take_output_state(&mut self) -> BundleStateWithReceipts { self.stats.log_debug(); - let receipts = std::mem::take(&mut self.receipts); BundleStateWithReceipts::new( self.evm.context.evm.db.take_bundle(), - receipts, - self.first_block.unwrap_or_default(), + self.batch_record.take_receipts(), + self.batch_record.first_block().unwrap_or_default(), ) } @@ -488,11 +403,11 @@ where EvmConfig: ConfigureEvm, { fn set_tip(&mut self, tip: BlockNumber) { - self.tip = Some(tip); + self.batch_record.set_tip(tip); } fn set_prune_modes(&mut self, prune_modes: PruneModes) { - self.prune_modes = prune_modes; + self.batch_record.set_prune_modes(prune_modes); } } From 0aae8c4210a40a7503826d0b3ae745198c531adc Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 16 Apr 2024 13:59:00 +0200 Subject: [PATCH 178/700] chore: move BlockExecutorStats (#7672) --- crates/revm/src/batch.rs | 33 +++++++++++++++++++ crates/revm/src/processor.rs | 6 ++-- .../storage/provider/src/traits/executor.rs | 33 ------------------- crates/storage/provider/src/traits/mod.rs | 2 +- 4 files changed, 36 insertions(+), 38 deletions(-) diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index 14538f57d0e7b..544a74a5c0968 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -6,6 +6,8 @@ use reth_primitives::{ PruneMode, PruneModes, PruneSegmentError, Receipt, Receipts, MINIMUM_PRUNING_DISTANCE, }; use revm::db::states::bundle_state::BundleRetention; +use std::time::Duration; +use tracing::debug; /// Takes care of: /// - recording receipts during execution of multiple blocks. @@ -154,3 +156,34 @@ impl BlockBatchRecord { Ok(()) } } + +/// Block execution statistics. Contains duration of each step of block execution. 
+#[derive(Clone, Debug, Default)] +pub struct BlockExecutorStats { + /// Execution duration. + pub execution_duration: Duration, + /// Time needed to apply output of revm execution to revm cached state. + pub apply_state_duration: Duration, + /// Time needed to apply post execution state changes. + pub apply_post_execution_state_changes_duration: Duration, + /// Time needed to merge transitions and create reverts. + /// It this time transitions are applies to revm bundle state. + pub merge_transitions_duration: Duration, + /// Time needed to calculate receipt roots. + pub receipt_root_duration: Duration, +} + +impl BlockExecutorStats { + /// Log duration to debug level log. + pub fn log_debug(&self) { + debug!( + target: "evm", + evm_transact = ?self.execution_duration, + apply_state = ?self.apply_state_duration, + apply_post_state = ?self.apply_post_execution_state_changes_duration, + merge_transitions = ?self.merge_transitions_duration, + receipt_root = ?self.receipt_root_duration, + "Execution time" + ); + } +} diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs index 51ebfa66a9fa1..fbed5eae072b8 100644 --- a/crates/revm/src/processor.rs +++ b/crates/revm/src/processor.rs @@ -23,12 +23,10 @@ use reth_primitives::{ }; #[cfg(not(feature = "optimism"))] use reth_provider::BundleStateWithReceipts; -use reth_provider::{ - BlockExecutor, BlockExecutorStats, ProviderError, PrunableBlockExecutor, StateProvider, -}; +use reth_provider::{BlockExecutor, ProviderError, PrunableBlockExecutor, StateProvider}; use crate::{ - batch::BlockBatchRecord, + batch::{BlockBatchRecord, BlockExecutorStats}, database::StateProviderDatabase, eth_dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, stack::{InspectorStack, InspectorStackConfig}, diff --git a/crates/storage/provider/src/traits/executor.rs b/crates/storage/provider/src/traits/executor.rs index d36913da4d57c..bddfeb03eaf76 100644 --- a/crates/storage/provider/src/traits/executor.rs +++ 
b/crates/storage/provider/src/traits/executor.rs @@ -3,8 +3,6 @@ use crate::{bundle_state::BundleStateWithReceipts, StateProvider}; use reth_interfaces::executor::BlockExecutionError; use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt, U256}; -use std::time::Duration; -use tracing::debug; /// Executor factory that would create the EVM with particular state provider. /// @@ -70,34 +68,3 @@ pub trait PrunableBlockExecutor: BlockExecutor { /// Set prune modes. fn set_prune_modes(&mut self, prune_modes: PruneModes); } - -/// Block execution statistics. Contains duration of each step of block execution. -#[derive(Clone, Debug, Default)] -pub struct BlockExecutorStats { - /// Execution duration. - pub execution_duration: Duration, - /// Time needed to apply output of revm execution to revm cached state. - pub apply_state_duration: Duration, - /// Time needed to apply post execution state changes. - pub apply_post_execution_state_changes_duration: Duration, - /// Time needed to merge transitions and create reverts. - /// It this time transitions are applies to revm bundle state. - pub merge_transitions_duration: Duration, - /// Time needed to calculate receipt roots. - pub receipt_root_duration: Duration, -} - -impl BlockExecutorStats { - /// Log duration to debug level log. 
- pub fn log_debug(&self) { - debug!( - target: "evm", - evm_transact = ?self.execution_duration, - apply_state = ?self.apply_state_duration, - apply_post_state = ?self.apply_post_execution_state_changes_duration, - merge_transitions = ?self.merge_transitions_duration, - receipt_root = ?self.receipt_root_duration, - "Execution time" - ); - } -} diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 7b64d09d556b2..c9623cb0c21a8 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -49,7 +49,7 @@ mod withdrawals; pub use withdrawals::WithdrawalsProvider; mod executor; -pub use executor::{BlockExecutor, BlockExecutorStats, ExecutorFactory, PrunableBlockExecutor}; +pub use executor::{BlockExecutor, ExecutorFactory, PrunableBlockExecutor}; mod chain; pub use chain::{ From 5c2542778d433031857e3aaceb30418d4e1f08cd Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 16 Apr 2024 14:14:30 +0200 Subject: [PATCH 179/700] feat(discv5): add enr to tracing (#7664) --- crates/net/discv5/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index cee4eaf7c26f4..7618c511a3dc5 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -402,6 +402,7 @@ impl Discv5 { Err(err) => { trace!(target: "net::discovery::discv5", %err, + ?enr, "discovered peer is unreachable" ); From f8fb334e353c02e8fdd84e4d975025bcb8192e26 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 16 Apr 2024 10:01:19 -0400 Subject: [PATCH 180/700] chore: include block num and hash in bctree debug logs (#7665) --- crates/blockchain-tree/src/blockchain_tree.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index d66258d1f44a4..a278263e8cd8a 100644 --- 
a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -430,8 +430,8 @@ where chain_id: BlockChainId, block_validation_kind: BlockValidationKind, ) -> Result { - debug!(target: "blockchain_tree", "Inserting block into side chain"); let block_num_hash = block.num_hash(); + debug!(target: "blockchain_tree", ?block_num_hash, ?chain_id, "Inserting block into side chain"); // Create a new sidechain by forking the given chain, or append the block if the parent // block is the top of the given chain. let block_hashes = self.all_chain_hashes(chain_id); @@ -458,9 +458,9 @@ where BlockAttachment::HistoricalFork }; - debug!(target: "blockchain_tree", "Appending block to side chain"); let block_hash = block.hash(); let block_number = block.number; + debug!(target: "blockchain_tree", ?block_hash, ?block_number, "Appending block to side chain"); parent_chain.append_block( block, block_hashes, From 90b07427b932a80bba78519789634124db33c4e7 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 16 Apr 2024 16:10:40 +0200 Subject: [PATCH 181/700] chore(tree): `CanonicalError::is_block_hash_not_found` (#7675) --- crates/consensus/beacon/src/engine/mod.rs | 14 ++------------ crates/interfaces/src/blockchain_tree/error.rs | 9 +++++++++ 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index f0a2de8677ae6..e24c63312e477 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1434,12 +1434,7 @@ where if let Err((hash, error)) = self.try_make_sync_target_canonical(downloaded_num_hash) { - if !matches!( - error, - CanonicalError::BlockchainTree( - BlockchainTreeError::BlockHashNotFoundInChain { .. 
} - ) - ) { + if !error.is_block_hash_not_found() { if error.is_fatal() { error!(target: "consensus::engine", %error, "Encountered fatal error while making sync target canonical: {:?}, {:?}", error, hash); } else { @@ -1572,12 +1567,7 @@ where // if we failed to make the FCU's head canonical, because we don't have that // block yet, then we can try to make the inserted block canonical if we know // it's part of the canonical chain: if it's the safe or the finalized block - if matches!( - err, - CanonicalError::BlockchainTree( - BlockchainTreeError::BlockHashNotFoundInChain { .. } - ) - ) { + if err.is_block_hash_not_found() { // if the inserted block is the currently targeted `finalized` or `safe` // block, we will attempt to make them canonical, // because they are also part of the canonical chain and diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index 4f4261ab6ca8e..e08211a4fe603 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -74,6 +74,15 @@ impl CanonicalError { pub fn is_fatal(&self) -> bool { matches!(self, Self::CanonicalCommit(_) | Self::CanonicalRevert(_)) } + + /// Returns `true` if the underlying error matches + /// [BlockchainTreeError::BlockHashNotFoundInChain]. + pub fn is_block_hash_not_found(&self) -> bool { + matches!( + self, + CanonicalError::BlockchainTree(BlockchainTreeError::BlockHashNotFoundInChain { .. }) + ) + } } /// Error thrown when inserting a block failed because the block is considered invalid. 
From 49ceeaf0cc060c500ce7f887e6043ffba6090eb8 Mon Sep 17 00:00:00 2001 From: jn Date: Tue, 16 Apr 2024 07:36:06 -0700 Subject: [PATCH 182/700] refactor: Drop reth_primitives::Log (#7651) Co-authored-by: Matthias Seitz --- .../interfaces/src/test_utils/generators.rs | 10 +- crates/net/eth-wire-types/src/receipts.rs | 22 ++-- crates/primitives/src/log.rs | 84 +++++++------- crates/primitives/src/proofs.rs | 104 ++++++++++-------- crates/primitives/src/receipt.rs | 59 +++++----- crates/primitives/src/revm/compat.rs | 12 +- crates/rpc/rpc-types-compat/src/log.rs | 8 +- crates/rpc/rpc/src/eth/api/transactions.rs | 2 +- crates/rpc/rpc/src/eth/logs_utils.rs | 6 +- .../storage/provider/src/test_utils/blocks.rs | 57 +++++----- examples/db-access.rs | 2 +- examples/exex/op-bridge/src/main.rs | 2 +- 12 files changed, 183 insertions(+), 185 deletions(-) diff --git a/crates/interfaces/src/test_utils/generators.rs b/crates/interfaces/src/test_utils/generators.rs index fc201758effc2..b3cd847b248af 100644 --- a/crates/interfaces/src/test_utils/generators.rs +++ b/crates/interfaces/src/test_utils/generators.rs @@ -374,11 +374,11 @@ pub fn random_receipt( pub fn random_log(rng: &mut R, address: Option
, topics_count: Option) -> Log { let data_byte_count = rng.gen::() as usize; let topics_count = topics_count.unwrap_or_else(|| rng.gen()) as usize; - Log { - address: address.unwrap_or_else(|| rng.gen()), - topics: std::iter::repeat_with(|| rng.gen()).take(topics_count).collect(), - data: std::iter::repeat_with(|| rng.gen()).take(data_byte_count).collect::>().into(), - } + Log::new_unchecked( + address.unwrap_or_else(|| rng.gen()), + std::iter::repeat_with(|| rng.gen()).take(topics_count).collect(), + std::iter::repeat_with(|| rng.gen()).take(data_byte_count).collect::>().into(), + ) } #[cfg(test)] diff --git a/crates/net/eth-wire-types/src/receipts.rs b/crates/net/eth-wire-types/src/receipts.rs index 3f260de0ef1fe..87a0e10deac54 100644 --- a/crates/net/eth-wire-types/src/receipts.rs +++ b/crates/net/eth-wire-types/src/receipts.rs @@ -91,7 +91,7 @@ mod tests { message: GetReceipts(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), hex!("00000000000000000000000000000000000000000000000000000000feedbeef").into(), - ]) + ]), } ); } @@ -109,14 +109,14 @@ mod tests { tx_type: TxType::Legacy, cumulative_gas_used: 0x1u64, logs: vec![ - Log { - address: hex!("0000000000000000000000000000000000000011").into(), - topics: vec![ + Log::new_unchecked( + hex!("0000000000000000000000000000000000000011").into(), + vec![ hex!("000000000000000000000000000000000000000000000000000000000000dead").into(), hex!("000000000000000000000000000000000000000000000000000000000000beef").into(), ], - data: hex!("0100ff")[..].into(), - }, + hex!("0100ff")[..].into(), + ), ], success: false, #[cfg(feature = "optimism")] @@ -148,14 +148,14 @@ mod tests { tx_type: TxType::Legacy, cumulative_gas_used: 0x1u64, logs: vec![ - Log { - address: hex!("0000000000000000000000000000000000000011").into(), - topics: vec![ + Log::new_unchecked( + hex!("0000000000000000000000000000000000000011").into(), + vec![ 
hex!("000000000000000000000000000000000000000000000000000000000000dead").into(), hex!("000000000000000000000000000000000000000000000000000000000000beef").into(), ], - data: hex!("0100ff")[..].into(), - }, + hex!("0100ff")[..].into(), + ), ], success: false, #[cfg(feature = "optimism")] diff --git a/crates/primitives/src/log.rs b/crates/primitives/src/log.rs index 8e7ae5fbf0a7a..79227d4f9bd8e 100644 --- a/crates/primitives/src/log.rs +++ b/crates/primitives/src/log.rs @@ -1,41 +1,7 @@ -use crate::{Address, Bloom, Bytes, B256}; -use alloy_primitives::Log as AlloyLog; -use alloy_rlp::{RlpDecodable, RlpEncodable}; -use reth_codecs::{main_codec, Compact}; +use crate::Bloom; -/// Ethereum Log -#[main_codec(rlp)] -#[derive(Clone, Debug, PartialEq, Eq, RlpDecodable, RlpEncodable, Default)] -pub struct Log { - /// Contract that emitted this log. - pub address: Address, - /// Topics of the log. The number of logs depend on what `LOG` opcode is used. - #[cfg_attr( - any(test, feature = "arbitrary"), - proptest( - strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=5)" - ) - )] - pub topics: Vec, - /// Arbitrary length data. - pub data: Bytes, -} - -impl From for Log { - fn from(mut log: AlloyLog) -> Self { - Self { - address: log.address, - topics: std::mem::take(log.data.topics_mut_unchecked()), - data: log.data.data, - } - } -} - -impl From for AlloyLog { - fn from(log: Log) -> AlloyLog { - AlloyLog::new_unchecked(log.address, log.topics, log.data) - } -} +/// Re-export `Log` from `alloy_primitives`. +pub use alloy_primitives::Log; /// Calculate receipt logs bloom. 
pub fn logs_bloom<'a, It>(logs: It) -> Bloom @@ -45,7 +11,7 @@ where let mut bloom = Bloom::ZERO; for log in logs { bloom.m3_2048(log.address.as_slice()); - for topic in &log.topics { + for topic in log.topics() { bloom.m3_2048(topic.as_slice()); } } @@ -54,8 +20,45 @@ where #[cfg(test)] mod tests { - use super::*; + use alloy_primitives::{Address, Bytes, Log as AlloyLog, B256}; + use alloy_rlp::{RlpDecodable, RlpEncodable}; use proptest::proptest; + use reth_codecs::{main_codec, Compact}; + + /// This type is kept for compatibility tests after the codec support was added to + /// alloy-primitives Log type natively + #[main_codec(rlp)] + #[derive(Clone, Debug, PartialEq, Eq, RlpDecodable, RlpEncodable, Default)] + struct Log { + /// Contract that emitted this log. + address: Address, + /// Topics of the log. The number of logs depend on what `LOG` opcode is used. + #[cfg_attr( + any(test, feature = "arbitrary"), + proptest( + strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=5)" + ) + )] + topics: Vec, + /// Arbitrary length data. + data: Bytes, + } + + impl From for Log { + fn from(mut log: AlloyLog) -> Self { + Self { + address: log.address, + topics: std::mem::take(log.data.topics_mut_unchecked()), + data: log.data.data, + } + } + } + + impl From for AlloyLog { + fn from(log: Log) -> AlloyLog { + AlloyLog::new_unchecked(log.address, log.topics, log.data) + } + } proptest! 
{ #[test] @@ -70,7 +73,8 @@ mod tests { // Create alloy_log from log and then convert it to buffer and compare compacted_alloy_log and compacted_log let alloy_log = AlloyLog::new_unchecked(log.address, log.topics, log.data); let mut compacted_alloy_log = Vec::::new(); - let _len = alloy_log.to_compact(&mut compacted_alloy_log); + let alloy_len = alloy_log.to_compact(&mut compacted_alloy_log); + assert_eq!(len, alloy_len); assert_eq!(compacted_log, compacted_alloy_log); } } diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index b99f2689ce40d..d08fc10a63c86 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -264,7 +264,7 @@ mod tests { bloom, constants::EMPTY_ROOT_HASH, hex_literal::hex, Block, GenesisAccount, Log, TxType, GOERLI, HOLESKY, MAINNET, SEPOLIA, }; - use alloy_primitives::b256; + use alloy_primitives::{b256, LogData}; use alloy_rlp::Decodable; use std::collections::HashMap; @@ -338,32 +338,36 @@ mod tests { logs: vec![ Log { address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(), - topics: vec![ - b256!("c3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"), - b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), - b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), - b256!("0000000000000000000000000000000000000000000000000000000000000000"), - ], - data: Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001")), + data: LogData::new_unchecked( + vec![ + b256!("c3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"), + b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), + b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), + b256!("0000000000000000000000000000000000000000000000000000000000000000"), + ], + 
Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001")) + ) }, Log { address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(), - topics: vec![ - b256!("c3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"), - b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), - b256!("0000000000000000000000000000000000000000000000000000000000000000"), - b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), - ], - data: Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000001")), + data: LogData::new_unchecked( + vec![ + b256!("c3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"), + b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), + b256!("0000000000000000000000000000000000000000000000000000000000000000"), + b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), + ], + Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000001")) + ) }, Log { address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(), - topics: vec![ + data: LogData::new_unchecked( + vec![ b256!("0eb774bb9698a73583fe07b6972cf2dcc08d1d97581a22861f45feb86b395820"), b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), - ], - data: Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000003")), + ], Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000003"))) }, ], #[cfg(feature = "optimism")] @@ -382,32 +386,32 @@ mod tests { logs: vec![ Log { address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(), - topics: vec![ + data: 
LogData::new_unchecked(vec![ b256!("c3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"), b256!("0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"), b256!("0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"), b256!("0000000000000000000000000000000000000000000000000000000000000000"), ], - data: Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001")), + Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001"))) }, Log { address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(), - topics: vec![ + data: LogData::new_unchecked(vec![ b256!("c3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"), b256!("0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"), b256!("0000000000000000000000000000000000000000000000000000000000000000"), b256!("0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"), ], - data: Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000001")), + Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000001"))) }, Log { address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(), - topics: vec![ + data: LogData::new_unchecked(vec![ b256!("0eb774bb9698a73583fe07b6972cf2dcc08d1d97581a22861f45feb86b395820"), b256!("0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"), b256!("0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"), ], - data: Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000003")), + Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000003"))) }, ], #[cfg(feature = 
"optimism")] @@ -426,62 +430,62 @@ mod tests { logs: vec![ Log { address: hex!("4200000000000000000000000000000000000006").into(), - topics: vec![ + data: LogData::new_unchecked( vec![ b256!("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"), b256!("000000000000000000000000c3feb4ef4c2a5af77add15c95bd98f6b43640cc8"), b256!("0000000000000000000000002992607c1614484fe6d865088e5c048f0650afd4"), ], - data: Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000018de76816d8000")), + Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000018de76816d8000"))) }, Log { address: hex!("cf8e7e6b26f407dee615fc4db18bf829e7aa8c09").into(), - topics: vec![ + data: LogData::new_unchecked( vec![ b256!("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"), b256!("0000000000000000000000002992607c1614484fe6d865088e5c048f0650afd4"), b256!("0000000000000000000000008dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09"), ], - data: Bytes::from_static(&hex!("000000000000000000000000000000000000000000000002d24d8e9ac1aa79e2")), + Bytes::from_static(&hex!("000000000000000000000000000000000000000000000002d24d8e9ac1aa79e2"))) }, Log { address: hex!("2992607c1614484fe6d865088e5c048f0650afd4").into(), - topics: vec![ + data: LogData::new_unchecked( vec![ b256!("1c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1"), ], - data: Bytes::from_static(&hex!("000000000000000000000000000000000000000000000009bd50642785c15736000000000000000000000000000000000000000000011bb7ac324f724a29bbbf")), + Bytes::from_static(&hex!("000000000000000000000000000000000000000000000009bd50642785c15736000000000000000000000000000000000000000000011bb7ac324f724a29bbbf"))) }, Log { address: hex!("2992607c1614484fe6d865088e5c048f0650afd4").into(), - topics: vec![ + data: LogData::new_unchecked( vec![ b256!("d78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822"), b256!("00000000000000000000000029843613c7211d014f5dd5718cf32bcd314914cb"), 
b256!("0000000000000000000000008dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09"), ], - data: Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000018de76816d800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002d24d8e9ac1aa79e2")), + Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000018de76816d800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002d24d8e9ac1aa79e2"))) }, Log { address: hex!("6d0f8d488b669aa9ba2d0f0b7b75a88bf5051cd3").into(), - topics: vec![ + data: LogData::new_unchecked( vec![ b256!("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"), b256!("0000000000000000000000008dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09"), b256!("000000000000000000000000c3feb4ef4c2a5af77add15c95bd98f6b43640cc8"), ], - data: Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000014bc73062aea8093")), + Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000014bc73062aea8093"))) }, Log { address: hex!("8dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09").into(), - topics: vec![ + data: LogData::new_unchecked( vec![ b256!("1c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1"), ], - data: Bytes::from_static(&hex!("00000000000000000000000000000000000000000000002f122cfadc1ca82a35000000000000000000000000000000000000000000000665879dc0609945d6d1")), + Bytes::from_static(&hex!("00000000000000000000000000000000000000000000002f122cfadc1ca82a35000000000000000000000000000000000000000000000665879dc0609945d6d1"))) }, Log { address: hex!("8dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09").into(), - topics: vec![ + data: LogData::new_unchecked( vec![ b256!("d78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822"), 
b256!("00000000000000000000000029843613c7211d014f5dd5718cf32bcd314914cb"), b256!("000000000000000000000000c3feb4ef4c2a5af77add15c95bd98f6b43640cc8"), ], - data: Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002d24d8e9ac1aa79e200000000000000000000000000000000000000000000000014bc73062aea80930000000000000000000000000000000000000000000000000000000000000000")), + Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002d24d8e9ac1aa79e200000000000000000000000000000000000000000000000014bc73062aea80930000000000000000000000000000000000000000000000000000000000000000"))) }, ], #[cfg(feature = "optimism")] @@ -500,32 +504,32 @@ mod tests { logs: vec![ Log { address: hex!("ac6564f3718837caadd42eed742d75c12b90a052").into(), - topics: vec![ + data: LogData::new_unchecked( vec![ b256!("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"), b256!("0000000000000000000000000000000000000000000000000000000000000000"), b256!("000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e"), b256!("000000000000000000000000000000000000000000000000000000000011a1d3"), ], - data: Default::default(), + Default::default()) }, Log { address: hex!("ac6564f3718837caadd42eed742d75c12b90a052").into(), - topics: vec![ + data: LogData::new_unchecked( vec![ b256!("9d89e36eadf856db0ad9ffb5a569e07f95634dddd9501141ecf04820484ad0dc"), b256!("000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e"), b256!("000000000000000000000000000000000000000000000000000000000011a1d3"), ], - data: Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000037697066733a2f2f516d515141646b33736538396b47716577395256567a316b68643548375562476d4d4a485a62566f386a6d346f4a2f30000000000000000000")), + 
Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000037697066733a2f2f516d515141646b33736538396b47716577395256567a316b68643548375562476d4d4a485a62566f386a6d346f4a2f30000000000000000000"))) }, - Log { + Log { address: hex!("ac6564f3718837caadd42eed742d75c12b90a052").into(), - topics: vec![ + data: LogData::new_unchecked( vec![ b256!("110d160a1bedeea919a88fbc4b2a9fb61b7e664084391b6ca2740db66fef80fe"), b256!("00000000000000000000000084d47f6eea8f8d87910448325519d1bb45c2972a"), b256!("000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e"), b256!("000000000000000000000000000000000000000000000000000000000011a1d3"), ], - data: Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007717500762343034303661353035646234633961386163316433306335633332303265370000000000000000000000000000000000000000000000000000000000000037697066733a2f2f516d515141646b33736538396b47716577395256567a316b68643548375562476d4d4a485a62566f386a6d346f4a2f30000000000000000000")), + 
Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007717500762343034303661353035646234633961386163316433306335633332303265370000000000000000000000000000000000000000000000000000000000000037697066733a2f2f516d515141646b33736538396b47716577395256567a316b68643548375562476d4d4a485a62566f386a6d346f4a2f30000000000000000000"))) }, ], #[cfg(feature = "optimism")] @@ -544,7 +548,10 @@ mod tests { #[cfg(feature = "optimism")] #[test] fn check_receipt_root_optimism() { - let logs = vec![Log { address: Address::ZERO, topics: vec![], data: Default::default() }]; + let logs = vec![Log { + address: Address::ZERO, + data: LogData::new_unchecked(vec![], Default::default()), + }]; let bloom = bloom!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); let receipt = ReceiptWithBloom { receipt: Receipt { @@ -565,7 +572,10 @@ mod tests { #[cfg(not(feature = "optimism"))] #[test] fn check_receipt_root_optimism() { - let logs = vec![Log { 
address: Address::ZERO, topics: vec![], data: Default::default() }]; + let logs = vec![Log { + address: Address::ZERO, + data: LogData::new_unchecked(vec![], Default::default()), + }]; let bloom = bloom!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); let receipt = ReceiptWithBloom { receipt: Receipt { diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 6b158770f55cb..2a25b2de81f15 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -1,6 +1,7 @@ #[cfg(feature = "zstd-codec")] use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; -use crate::{logs_bloom, Bloom, Bytes, Log, PruneSegmentError, TxType, B256}; +use crate::{logs_bloom, Bloom, Bytes, PruneSegmentError, TxType, B256}; +use alloy_primitives::Log; use alloy_rlp::{length_of_length, Decodable, Encodable}; use bytes::{Buf, BufMut}; #[cfg(any(test, feature = "arbitrary"))] @@ -118,13 +119,15 @@ impl Receipts { /// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used). 
pub fn gas_spent_by_tx(&self) -> Result, PruneSegmentError> { - let Some(block_r) = self.last() else { return Ok(vec![]) }; + let Some(block_r) = self.last() else { + return Ok(vec![]); + }; let mut out = Vec::with_capacity(block_r.len()); for (id, tx_r) in block_r.iter().enumerate() { if let Some(receipt) = tx_r.as_ref() { out.push((id as u64, receipt.cumulative_gas_used)); } else { - return Err(PruneSegmentError::ReceiptsPruned) + return Err(PruneSegmentError::ReceiptsPruned); } } Ok(out) @@ -308,7 +311,7 @@ impl ReceiptWithBloom { let b = &mut &**buf; let rlp_head = alloy_rlp::Header::decode(b)?; if !rlp_head.list { - return Err(alloy_rlp::Error::UnexpectedString) + return Err(alloy_rlp::Error::UnexpectedString); } let started_len = b.len(); @@ -353,7 +356,7 @@ impl ReceiptWithBloom { return Err(alloy_rlp::Error::ListLengthMismatch { expected: rlp_head.payload_length, got: consumed, - }) + }); } *buf = *b; Ok(this) @@ -506,7 +509,7 @@ impl<'a> ReceiptWithBloomEncoder<'a> { fn encode_inner(&self, out: &mut dyn BufMut, with_header: bool) { if matches!(self.receipt.tx_type, TxType::Legacy) { self.encode_fields(out); - return + return; } let mut payload = Vec::new(); @@ -578,14 +581,14 @@ mod tests { receipt: Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 0x1u64, - logs: vec![Log { - address: address!("0000000000000000000000000000000000000011"), - topics: vec![ + logs: vec![Log::new_unchecked( + address!("0000000000000000000000000000000000000011"), + vec![ b256!("000000000000000000000000000000000000000000000000000000000000dead"), b256!("000000000000000000000000000000000000000000000000000000000000beef"), ], - data: bytes!("0100ff"), - }], + bytes!("0100ff"), + )], success: false, #[cfg(feature = "optimism")] deposit_nonce: None, @@ -612,14 +615,14 @@ mod tests { receipt: Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 0x1u64, - logs: vec![Log { - address: address!("0000000000000000000000000000000000000011"), - topics: vec![ + logs: 
vec![Log::new_unchecked( + address!("0000000000000000000000000000000000000011"), + vec![ b256!("000000000000000000000000000000000000000000000000000000000000dead"), b256!("000000000000000000000000000000000000000000000000000000000000beef"), ], - data: bytes!("0100ff"), - }], + bytes!("0100ff"), + )], success: false, #[cfg(feature = "optimism")] deposit_nonce: None, @@ -692,20 +695,16 @@ mod tests { success: true, tx_type: TxType::Legacy, logs: vec![ - Log { - address: address!("4bf56695415f725e43c3e04354b604bcfb6dfb6e"), - topics: vec![b256!( - "c69dc3d7ebff79e41f525be431d5cd3cc08f80eaf0f7819054a726eeb7086eb9" - )], - data: Bytes::from(vec![1; 0xffffff]), - }, - Log { - address: address!("faca325c86bf9c2d5b413cd7b90b209be92229c2"), - topics: vec![b256!( - "8cca58667b1e9ffa004720ac99a3d61a138181963b294d270d91c53d36402ae2" - )], - data: Bytes::from(vec![1; 0xffffff]), - }, + Log::new_unchecked( + address!("4bf56695415f725e43c3e04354b604bcfb6dfb6e"), + vec![b256!("c69dc3d7ebff79e41f525be431d5cd3cc08f80eaf0f7819054a726eeb7086eb9")], + Bytes::from(vec![1; 0xffffff]), + ), + Log::new_unchecked( + address!("faca325c86bf9c2d5b413cd7b90b209be92229c2"), + vec![b256!("8cca58667b1e9ffa004720ac99a3d61a138181963b294d270d91c53d36402ae2")], + Bytes::from(vec![1; 0xffffff]), + ), ], #[cfg(feature = "optimism")] deposit_nonce: None, diff --git a/crates/primitives/src/revm/compat.rs b/crates/primitives/src/revm/compat.rs index 32713f72efbbf..6c9474f7cf283 100644 --- a/crates/primitives/src/revm/compat.rs +++ b/crates/primitives/src/revm/compat.rs @@ -1,19 +1,9 @@ -use crate::{ - revm_primitives::{AccountInfo, Log}, - Account, Address, Log as RethLog, TransactionKind, KECCAK_EMPTY, U256, -}; +use crate::{revm_primitives::AccountInfo, Account, Address, TransactionKind, KECCAK_EMPTY, U256}; use revm::{ interpreter::gas::validate_initial_tx_gas, primitives::{MergeSpec, ShanghaiSpec}, }; -/// Check equality between Revm and Reth `Log`s. 
-pub fn is_log_equal(revm_log: &Log, reth_log: &RethLog) -> bool { - revm_log.address == reth_log.address && - revm_log.data.data == reth_log.data && - revm_log.topics() == reth_log.topics -} - /// Converts a Revm [`AccountInfo`] into a Reth [`Account`]. /// /// Sets `bytecode_hash` to `None` if `code_hash` is [`KECCAK_EMPTY`]. diff --git a/crates/rpc/rpc-types-compat/src/log.rs b/crates/rpc/rpc-types-compat/src/log.rs index 15261cbf9d052..2b6d33c428acd 100644 --- a/crates/rpc/rpc-types-compat/src/log.rs +++ b/crates/rpc/rpc-types-compat/src/log.rs @@ -4,7 +4,7 @@ #[inline] pub fn from_primitive_log(log: reth_primitives::Log) -> reth_rpc_types::Log { reth_rpc_types::Log { - inner: log.into(), + inner: log, block_hash: None, block_number: None, block_timestamp: None, @@ -14,9 +14,3 @@ pub fn from_primitive_log(log: reth_primitives::Log) -> reth_rpc_types::Log { removed: false, } } - -/// Converts from a [reth_rpc_types::Log] to a [reth_primitives::Log] -#[inline] -pub fn to_primitive_log(log: reth_rpc_types::Log) -> reth_primitives::Log { - log.inner.into() -} diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 95bf1528212dc..43a75b68ba1d8 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -1707,7 +1707,7 @@ pub(crate) fn build_transaction_receipt_with_block_receipts( let mut logs = Vec::with_capacity(receipt.logs.len()); for (tx_log_idx, log) in receipt.logs.into_iter().enumerate() { let rpclog = Log { - inner: log.into(), + inner: log, block_hash: Some(meta.block_hash), block_number: Some(meta.block_number), block_timestamp: Some(meta.timestamp), diff --git a/crates/rpc/rpc/src/eth/logs_utils.rs b/crates/rpc/rpc/src/eth/logs_utils.rs index 4a7a0b6ae07c4..5785912ac994a 100644 --- a/crates/rpc/rpc/src/eth/logs_utils.rs +++ b/crates/rpc/rpc/src/eth/logs_utils.rs @@ -22,7 +22,7 @@ where for log in receipt.logs.iter() { if log_matches_filter(block_num_hash, 
log, filter) { let log = Log { - inner: log.clone().into(), + inner: log.clone(), block_hash: Some(block_num_hash.hash), block_number: Some(block_num_hash.number), transaction_hash: Some(tx_hash), @@ -90,7 +90,7 @@ pub(crate) fn append_matching_block_logs( } let log = Log { - inner: log.clone().into(), + inner: log.clone(), block_hash: Some(block_num_hash.hash), block_number: Some(block_num_hash.number), transaction_hash, @@ -118,7 +118,7 @@ pub(crate) fn log_matches_filter( (!params.filter_block_range(block.number) || !params.filter_block_hash(block.hash) || !params.filter_address(&log.address) || - !params.filter_topics(&log.topics)) + !params.filter_topics(log.topics())) { return false } diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 5c2c1e969ea7e..39b6d3535b482 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -1,14 +1,15 @@ //! Dummy blocks and data for tests use crate::{BundleStateWithReceipts, DatabaseProviderRW}; +use alloy_primitives::Log; use alloy_rlp::Decodable; use reth_db::{database::Database, models::StoredBlockBodyIndices, tables}; use reth_primitives::{ - b256, + alloy_primitives, b256, hex_literal::hex, proofs::{state_root_unhashed, storage_root_unhashed}, revm::compat::into_reth_acc, - Address, BlockNumber, Bytes, Header, Log, Receipt, Receipts, SealedBlock, - SealedBlockWithSenders, TxType, Withdrawal, Withdrawals, B256, U256, + Address, BlockNumber, Bytes, Header, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, + TxType, Withdrawal, Withdrawals, B256, U256, }; use revm::{ db::BundleState, @@ -150,11 +151,11 @@ fn block1(number: BlockNumber) -> (SealedBlockWithSenders, BundleStateWithReceip tx_type: TxType::Eip2930, success: true, cumulative_gas_used: 300, - logs: vec![Log { - address: Address::new([0x60; 20]), - topics: vec![B256::with_last_byte(1), B256::with_last_byte(2)], - data: 
Bytes::default(), - }], + logs: vec![Log::new_unchecked( + Address::new([0x60; 20]), + vec![B256::with_last_byte(1), B256::with_last_byte(2)], + Bytes::default(), + )], #[cfg(feature = "optimism")] deposit_nonce: None, #[cfg(feature = "optimism")] @@ -208,11 +209,11 @@ fn block2( tx_type: TxType::Eip1559, success: false, cumulative_gas_used: 400, - logs: vec![Log { - address: Address::new([0x61; 20]), - topics: vec![B256::with_last_byte(3), B256::with_last_byte(4)], - data: Bytes::default(), - }], + logs: vec![Log::new_unchecked( + Address::new([0x61; 20]), + vec![B256::with_last_byte(3), B256::with_last_byte(4)], + Bytes::default(), + )], #[cfg(feature = "optimism")] deposit_nonce: None, #[cfg(feature = "optimism")] @@ -276,11 +277,11 @@ fn block3( tx_type: TxType::Eip1559, success: true, cumulative_gas_used: 400, - logs: vec![Log { - address: Address::new([0x61; 20]), - topics: vec![B256::with_last_byte(3), B256::with_last_byte(4)], - data: Bytes::default(), - }], + logs: vec![Log::new_unchecked( + Address::new([0x61; 20]), + vec![B256::with_last_byte(3), B256::with_last_byte(4)], + Bytes::default(), + )], #[cfg(feature = "optimism")] deposit_nonce: None, #[cfg(feature = "optimism")] @@ -365,11 +366,11 @@ fn block4( tx_type: TxType::Eip1559, success: true, cumulative_gas_used: 400, - logs: vec![Log { - address: Address::new([0x61; 20]), - topics: vec![B256::with_last_byte(3), B256::with_last_byte(4)], - data: Bytes::default(), - }], + logs: vec![Log::new_unchecked( + Address::new([0x61; 20]), + vec![B256::with_last_byte(3), B256::with_last_byte(4)], + Bytes::default(), + )], #[cfg(feature = "optimism")] deposit_nonce: None, #[cfg(feature = "optimism")] @@ -449,11 +450,11 @@ fn block5( tx_type: TxType::Eip1559, success: true, cumulative_gas_used: 400, - logs: vec![Log { - address: Address::new([0x61; 20]), - topics: vec![B256::with_last_byte(3), B256::with_last_byte(4)], - data: Bytes::default(), - }], + logs: vec![Log::new_unchecked( + Address::new([0x61; 20]), + 
vec![B256::with_last_byte(3), B256::with_last_byte(4)], + Bytes::default(), + )], #[cfg(feature = "optimism")] deposit_nonce: None, #[cfg(feature = "optimism")] diff --git a/examples/db-access.rs b/examples/db-access.rs index 41d462204a41a..c076b76dcf246 100644 --- a/examples/db-access.rs +++ b/examples/db-access.rs @@ -201,7 +201,7 @@ fn receipts_provider_example Date: Tue, 16 Apr 2024 16:43:13 +0200 Subject: [PATCH 183/700] chore(engine): `BeaconConsensusEngine::current_engine_hook_context` (#7676) --- .../beacon/src/engine/hooks/controller.rs | 18 +++++----- .../consensus/beacon/src/engine/hooks/mod.rs | 4 +-- .../beacon/src/engine/hooks/prune.rs | 4 +-- .../beacon/src/engine/hooks/static_file.rs | 4 +-- crates/consensus/beacon/src/engine/mod.rs | 34 +++++++++---------- 5 files changed, 31 insertions(+), 33 deletions(-) diff --git a/crates/consensus/beacon/src/engine/hooks/controller.rs b/crates/consensus/beacon/src/engine/hooks/controller.rs index 33ae74c83e9b1..48343d4804f5e 100644 --- a/crates/consensus/beacon/src/engine/hooks/controller.rs +++ b/crates/consensus/beacon/src/engine/hooks/controller.rs @@ -1,5 +1,5 @@ use crate::hooks::{ - EngineContext, EngineHook, EngineHookDBAccessLevel, EngineHookError, EngineHookEvent, + EngineHook, EngineHookContext, EngineHookDBAccessLevel, EngineHookError, EngineHookEvent, EngineHooks, }; use std::{ @@ -49,7 +49,7 @@ impl EngineHooksController { pub(crate) fn poll_active_db_write_hook( &mut self, cx: &mut Context<'_>, - args: EngineContext, + args: EngineHookContext, ) -> Poll> { let Some(mut hook) = self.active_db_write_hook.take() else { return Poll::Pending }; @@ -98,7 +98,7 @@ impl EngineHooksController { pub(crate) fn poll_next_hook( &mut self, cx: &mut Context<'_>, - args: EngineContext, + args: EngineHookContext, db_write_active: bool, ) -> Poll> { let Some(mut hook) = self.hooks.pop_front() else { return Poll::Pending }; @@ -127,7 +127,7 @@ impl EngineHooksController { &mut self, cx: &mut Context<'_>, hook: &mut 
Box, - args: EngineContext, + args: EngineHookContext, db_write_active: bool, ) -> Poll> { // Hook with DB write access level is not allowed to run due to already running hook with DB @@ -166,7 +166,7 @@ impl EngineHooksController { #[cfg(test)] mod tests { use crate::hooks::{ - EngineContext, EngineHook, EngineHookDBAccessLevel, EngineHookEvent, EngineHooks, + EngineHook, EngineHookContext, EngineHookDBAccessLevel, EngineHookEvent, EngineHooks, EngineHooksController, }; use futures::poll; @@ -212,7 +212,7 @@ mod tests { fn poll( &mut self, _cx: &mut Context<'_>, - _ctx: EngineContext, + _ctx: EngineHookContext, ) -> Poll> { self.results.pop_front().map_or(Poll::Pending, Poll::Ready) } @@ -226,7 +226,7 @@ mod tests { async fn poll_active_db_write_hook() { let mut controller = EngineHooksController::new(EngineHooks::new()); - let context = EngineContext { tip_block_number: 2, finalized_block_number: Some(1) }; + let context = EngineHookContext { tip_block_number: 2, finalized_block_number: Some(1) }; // No currently running hook with DB write access is set let result = poll!(poll_fn(|cx| controller.poll_active_db_write_hook(cx, context))); @@ -277,7 +277,7 @@ mod tests { #[tokio::test] async fn poll_next_hook_db_write_active() { - let context = EngineContext { tip_block_number: 2, finalized_block_number: Some(1) }; + let context = EngineHookContext { tip_block_number: 2, finalized_block_number: Some(1) }; let mut hook_rw = TestHook::new_rw("read-write"); hook_rw.add_result(Ok(EngineHookEvent::Started)); @@ -311,7 +311,7 @@ mod tests { #[tokio::test] async fn poll_next_hook_db_write_inactive() { - let context = EngineContext { tip_block_number: 2, finalized_block_number: Some(1) }; + let context = EngineHookContext { tip_block_number: 2, finalized_block_number: Some(1) }; let hook_rw_1_name = "read-write-1"; let mut hook_rw_1 = TestHook::new_rw(hook_rw_1_name); diff --git a/crates/consensus/beacon/src/engine/hooks/mod.rs 
b/crates/consensus/beacon/src/engine/hooks/mod.rs index ce149717ada0c..3e78e484817b8 100644 --- a/crates/consensus/beacon/src/engine/hooks/mod.rs +++ b/crates/consensus/beacon/src/engine/hooks/mod.rs @@ -48,7 +48,7 @@ pub trait EngineHook: Send + Sync + 'static { fn poll( &mut self, cx: &mut Context<'_>, - ctx: EngineContext, + ctx: EngineHookContext, ) -> Poll>; /// Returns [db access level][`EngineHookDBAccessLevel`] the hook needs. @@ -57,7 +57,7 @@ pub trait EngineHook: Send + Sync + 'static { /// Engine context passed to the [hook polling function][`EngineHook::poll`]. #[derive(Copy, Clone, Debug)] -pub struct EngineContext { +pub struct EngineHookContext { /// Tip block number. pub tip_block_number: BlockNumber, /// Finalized block number, if known. diff --git a/crates/consensus/beacon/src/engine/hooks/prune.rs b/crates/consensus/beacon/src/engine/hooks/prune.rs index c8d6d74a5ba2f..a9bb4f05bd427 100644 --- a/crates/consensus/beacon/src/engine/hooks/prune.rs +++ b/crates/consensus/beacon/src/engine/hooks/prune.rs @@ -1,7 +1,7 @@ //! Prune hook for the engine implementation. use crate::{ - engine::hooks::{EngineContext, EngineHook, EngineHookError, EngineHookEvent}, + engine::hooks::{EngineHook, EngineHookContext, EngineHookError, EngineHookEvent}, hooks::EngineHookDBAccessLevel, }; use futures::FutureExt; @@ -121,7 +121,7 @@ impl EngineHook for PruneHook { fn poll( &mut self, cx: &mut Context<'_>, - ctx: EngineContext, + ctx: EngineHookContext, ) -> Poll> { // Try to spawn a pruner match self.try_spawn_pruner(ctx.tip_block_number) { diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index 5b77ec39d638d..2cff68e1d26c3 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -1,7 +1,7 @@ //! StaticFile hook for the engine implementation. 
use crate::{ - engine::hooks::{EngineContext, EngineHook, EngineHookError, EngineHookEvent}, + engine::hooks::{EngineHook, EngineHookContext, EngineHookError, EngineHookEvent}, hooks::EngineHookDBAccessLevel, }; use futures::FutureExt; @@ -135,7 +135,7 @@ impl EngineHook for StaticFileHook { fn poll( &mut self, cx: &mut Context<'_>, - ctx: EngineContext, + ctx: EngineHookContext, ) -> Poll> { let Some(finalized_block_number) = ctx.finalized_block_number else { trace!(target: "consensus::engine::hooks::static_file", ?ctx, "Finalized block number is not available"); diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index e24c63312e477..7b2a689af02b4 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -4,7 +4,7 @@ use crate::{ message::OnForkChoiceUpdated, metrics::EngineMetrics, }, - hooks::{EngineContext, EngineHooksController}, + hooks::{EngineHookContext, EngineHooksController}, sync::{EngineSyncController, EngineSyncEvent}, }; use futures::{Future, StreamExt}; @@ -326,6 +326,17 @@ where Ok((this, handle)) } + /// Returns current [EngineHookContext] that's used for polling engine hooks. + fn current_engine_hook_context(&self) -> RethResult { + Ok(EngineHookContext { + tip_block_number: self.blockchain.canonical_tip().number, + finalized_block_number: self + .blockchain + .finalized_block_number() + .map_err(RethError::Provider)?, + }) + } + /// Called to resolve chain forks and ensure that the Execution layer is working with the latest /// valid chain. /// @@ -1831,16 +1842,9 @@ where loop { // Poll a running hook with db write access first, as we will not be able to process // any engine messages until it's finished. 
- if let Poll::Ready(result) = this.hooks.poll_active_db_write_hook( - cx, - EngineContext { - tip_block_number: this.blockchain.canonical_tip().number, - finalized_block_number: this - .blockchain - .finalized_block_number() - .map_err(RethError::Provider)?, - }, - )? { + if let Poll::Ready(result) = + this.hooks.poll_active_db_write_hook(cx, this.current_engine_hook_context()?)? + { this.on_hook_result(result)?; continue } @@ -1906,13 +1910,7 @@ where if !this.forkchoice_state_tracker.is_latest_invalid() { if let Poll::Ready(result) = this.hooks.poll_next_hook( cx, - EngineContext { - tip_block_number: this.blockchain.canonical_tip().number, - finalized_block_number: this - .blockchain - .finalized_block_number() - .map_err(RethError::Provider)?, - }, + this.current_engine_hook_context()?, this.sync.is_pipeline_active(), )? { this.on_hook_result(result)?; From 493f41a12607a1d3d11d11692d6e30dea0b03e5f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 16 Apr 2024 17:03:01 +0200 Subject: [PATCH 184/700] feat: add missing hardfork convenience fns (#7678) --- crates/primitives/src/chain/spec.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index f8b599f8dc794..20da8b7be574b 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -779,6 +779,18 @@ impl ChainSpec { .unwrap_or_else(|| self.is_fork_active_at_timestamp(Hardfork::Cancun, timestamp)) } + /// Convenience method to check if [Hardfork::Byzantium] is active at a given block number. + #[inline] + pub fn is_byzantium_active_at_block(&self, block_number: u64) -> bool { + self.fork(Hardfork::Byzantium).active_at_block(block_number) + } + + /// Convenience method to check if [Hardfork::SpuriousDragon] is active at a given block number. 
+ #[inline] + pub fn is_spurious_dragon_active_at_block(&self, block_number: u64) -> bool { + self.fork(Hardfork::SpuriousDragon).active_at_block(block_number) + } + /// Convenience method to check if [Hardfork::Homestead] is active at a given block number. #[inline] pub fn is_homestead_active_at_block(&self, block_number: u64) -> bool { From 566480b472e2630047fab96d0c42dda761a18850 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 16 Apr 2024 16:54:31 +0100 Subject: [PATCH 185/700] chore: move optimism bootnodes to `reth-primitives` (#7657) --- crates/net/discv5/src/config.rs | 31 +--------------- crates/net/network/src/config.rs | 13 +------ crates/primitives/src/chain/spec.rs | 17 +++++++-- crates/primitives/src/lib.rs | 4 ++ crates/primitives/src/net.rs | 57 +++++++++++++++++++++++++++++ 5 files changed, 78 insertions(+), 44 deletions(-) diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index eb8b409de2d9e..8f517f422e06f 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -24,26 +24,6 @@ pub const OPSTACK: &[u8] = b"opstack"; /// Default is 60 seconds. const DEFAULT_SECONDS_LOOKUP_INTERVAL: u64 = 60; -/// Optimism mainnet and base mainnet boot nodes. 
-/// Added from -const BOOT_NODES_OP_MAINNET_AND_BASE_MAINNET: &[&str] = &[ - // OP Labs - "enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305", - "enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305", - "enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305", - // Base - "enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301", - "enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301", - "enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301", - "enode://8a5a5006159bf079d06a04e5eceab2a1ce6e0f721875b2a9c96905336219dbe14203d38f70f3754686a6324f786c2f9852d8c0dd3adac2d080f4db35efc678c5@3.231.11.52:30301", - "enode://cdadbe835308ad3557f9a1de8db411da1a260a98f8421d62da90e71da66e55e98aaa8e90aa7ce01b408a54e4bd2253d701218081ded3dbe5efbbc7b41d7cef79@54.198.153.150:30301" -]; - -/// Optimism sepolia and base sepolia boot nodes. -const BOOT_NODES_OP_SEPOLIA_AND_BASE_SEPOLIA: &[&str] = &[ - "enode://09d1a6110757b95628cc54ab6cc50a29773075ed00e3a25bd9388807c9a6c007664e88646a6fefd82baad5d8374ba555e426e8aed93f0f0c517e2eb5d929b2a2@34.65.21.188:30304?discport=30303" -]; - /// Builds a [`Config`]. #[derive(Debug, Default)] pub struct ConfigBuilder { @@ -137,16 +117,6 @@ impl ConfigBuilder { self } - /// Add optimism mainnet boot nodes. 
- pub fn add_optimism_mainnet_boot_nodes(self) -> Self { - self.add_serialized_unsigned_boot_nodes(BOOT_NODES_OP_MAINNET_AND_BASE_MAINNET) - } - - /// Add optimism sepolia boot nodes. - pub fn add_optimism_sepolia_boot_nodes(self) -> Self { - self.add_serialized_unsigned_boot_nodes(BOOT_NODES_OP_SEPOLIA_AND_BASE_SEPOLIA) - } - /// Set [`ForkId`], and key used to identify it, to set in local [`Enr`](discv5::enr::Enr). pub fn fork(mut self, key: &'static [u8], value: ForkId) -> Self { self.fork = Some((key, value)); @@ -301,6 +271,7 @@ mod test { use super::*; const MULTI_ADDRESSES: &str = "/ip4/184.72.129.189/udp/30301/p2p/16Uiu2HAmSG2hdLwyQHQmG4bcJBgD64xnW63WMTLcrNq6KoZREfGb,/ip4/3.231.11.52/udp/30301/p2p/16Uiu2HAmMy4V8bi3XP7KDfSLQcLACSvTLroRRwEsTyFUKo8NCkkp,/ip4/54.198.153.150/udp/30301/p2p/16Uiu2HAmSVsb7MbRf1jg3Dvd6a3n5YNqKQwn1fqHCFgnbqCsFZKe,/ip4/3.220.145.177/udp/30301/p2p/16Uiu2HAm74pBDGdQ84XCZK27GRQbGFFwQ7RsSqsPwcGmCR3Cwn3B,/ip4/3.231.138.188/udp/30301/p2p/16Uiu2HAmMnTiJwgFtSVGV14ZNpwAvS1LUoF4pWWeNtURuV6C3zYB"; + const BOOT_NODES_OP_MAINNET_AND_BASE_MAINNET: &[&str] = &["enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305", "enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305", "enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305", "enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301", "enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301", 
"enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301", "enode://8a5a5006159bf079d06a04e5eceab2a1ce6e0f721875b2a9c96905336219dbe14203d38f70f3754686a6324f786c2f9852d8c0dd3adac2d080f4db35efc678c5@3.231.11.52:30301", "enode://cdadbe835308ad3557f9a1de8db411da1a260a98f8421d62da90e71da66e55e98aaa8e90aa7ce01b408a54e4bd2253d701218081ded3dbe5efbbc7b41d7cef79@54.198.153.150:30301"]; #[test] fn parse_boot_nodes() { diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index e47a6a65ab394..c5a1e12188311 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -13,8 +13,7 @@ use reth_discv5::config::OPSTACK; use reth_dns_discovery::DnsDiscoveryConfig; use reth_eth_wire::{HelloMessage, HelloMessageWithProtocols, Status}; use reth_primitives::{ - mainnet_nodes, pk2id, sepolia_nodes, Chain, ChainSpec, ForkFilter, Head, NamedChain, - NodeRecord, PeerId, MAINNET, + mainnet_nodes, pk2id, sepolia_nodes, ChainSpec, ForkFilter, Head, NodeRecord, PeerId, MAINNET, }; use reth_provider::{BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; @@ -132,14 +131,6 @@ impl NetworkConfig { builder = builder.fork(OPSTACK, fork_id) } - if chain == Chain::optimism_mainnet() || chain == Chain::base_mainnet() { - builder = builder.add_optimism_mainnet_boot_nodes() - } else if chain == Chain::from_named(NamedChain::OptimismSepolia) || - chain == Chain::from_named(NamedChain::BaseSepolia) - { - builder = builder.add_optimism_sepolia_boot_nodes() - } - self.set_discovery_v5(f(builder)) } @@ -578,7 +569,7 @@ mod tests { use super::*; use rand::thread_rng; use reth_dns_discovery::tree::LinkEntry; - use reth_primitives::ForkHash; + use reth_primitives::{Chain, ForkHash}; use reth_provider::test_utils::NoopProvider; use std::collections::BTreeMap; diff --git a/crates/primitives/src/chain/spec.rs 
b/crates/primitives/src/chain/spec.rs index 20da8b7be574b..356f871906268 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -18,9 +18,12 @@ use std::{ pub use alloy_eips::eip1559::BaseFeeParams; #[cfg(feature = "optimism")] -pub(crate) use crate::constants::{ - OP_BASE_FEE_PARAMS, OP_CANYON_BASE_FEE_PARAMS, OP_SEPOLIA_BASE_FEE_PARAMS, - OP_SEPOLIA_CANYON_BASE_FEE_PARAMS, +pub(crate) use crate::{ + constants::{ + OP_BASE_FEE_PARAMS, OP_CANYON_BASE_FEE_PARAMS, OP_SEPOLIA_BASE_FEE_PARAMS, + OP_SEPOLIA_CANYON_BASE_FEE_PARAMS, + }, + net::{base_nodes, base_testnet_nodes, op_nodes, op_testnet_nodes}, }; /// The Ethereum mainnet spec @@ -938,6 +941,14 @@ impl ChainSpec { C::Goerli => Some(goerli_nodes()), C::Sepolia => Some(sepolia_nodes()), C::Holesky => Some(holesky_nodes()), + #[cfg(feature = "optimism")] + C::Base => Some(base_nodes()), + #[cfg(feature = "optimism")] + C::Optimism => Some(op_nodes()), + #[cfg(feature = "optimism")] + C::BaseGoerli | C::BaseSepolia => Some(base_testnet_nodes()), + #[cfg(feature = "optimism")] + C::OptimismSepolia | C::OptimismGoerli | C::OptimismKovan => Some(op_testnet_nodes()), _ => None, } } diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 9c44738902a90..c14f719647283 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -145,6 +145,10 @@ pub use c_kzg as kzg; mod optimism { pub use crate::{ chain::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}, + net::{ + base_nodes, base_testnet_nodes, op_nodes, op_testnet_nodes, BASE_BOOTNODES, + BASE_TESTNET_BOOTNODES, OP_BOOTNODES, OP_TESTNET_BOOTNODES, + }, transaction::{TxDeposit, DEPOSIT_TX_TYPE_ID}, }; } diff --git a/crates/primitives/src/net.rs b/crates/primitives/src/net.rs index 7e582d0812e0a..48307d1b766e5 100644 --- a/crates/primitives/src/net.rs +++ b/crates/primitives/src/net.rs @@ -41,6 +41,39 @@ pub static HOLESKY_BOOTNODES : [&str; 2] = [ 
"enode://a3435a0155a3e837c02f5e7f5662a2f1fbc25b48e4dc232016e1c51b544cb5b4510ef633ea3278c0e970fa8ad8141e2d4d0f9f95456c537ff05fdf9b31c15072@178.128.136.233:30303", ]; +#[cfg(feature = "optimism")] +/// OP Mainnet BOOTNODES +pub static OP_BOOTNODES: [&str; 3] = [ + "enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305", + "enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305", + "enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305", +]; + +#[cfg(feature = "optimism")] +/// OP TESTNET BOOTNODES +pub static OP_TESTNET_BOOTNODES: [&str; 3] = [ + "enode://2bd2e657bb3c8efffb8ff6db9071d9eb7be70d7c6d7d980ff80fc93b2629675c5f750bc0a5ef27cd788c2e491b8795a7e9a4a6e72178c14acc6753c0e5d77ae4@34.65.205.244:30305", + "enode://db8e1cab24624cc62fc35dbb9e481b88a9ef0116114cd6e41034c55b5b4f18755983819252333509bd8e25f6b12aadd6465710cd2e956558faf17672cce7551f@34.65.173.88:30305", + "enode://bfda2e0110cfd0f4c9f7aa5bf5ec66e6bd18f71a2db028d36b8bf8b0d6fdb03125c1606a6017b31311d96a36f5ef7e1ad11604d7a166745e6075a715dfa67f8a@34.65.229.245:30305", +]; + +#[cfg(feature = "optimism")] +/// Base BOOTNODES +pub static BASE_BOOTNODES: [&str; 5] = [ + "enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301", + "enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301", + "enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301", + 
"enode://8a5a5006159bf079d06a04e5eceab2a1ce6e0f721875b2a9c96905336219dbe14203d38f70f3754686a6324f786c2f9852d8c0dd3adac2d080f4db35efc678c5@3.231.11.52:30301", + "enode://cdadbe835308ad3557f9a1de8db411da1a260a98f8421d62da90e71da66e55e98aaa8e90aa7ce01b408a54e4bd2253d701218081ded3dbe5efbbc7b41d7cef79@54.198.153.150:30301" +]; + +#[cfg(feature = "optimism")] +/// Base Testnet BOOTNODES +pub static BASE_TESTNET_BOOTNODES: [&str; 2] = [ + "enode://548f715f3fc388a7c917ba644a2f16270f1ede48a5d88a4d14ea287cc916068363f3092e39936f1a3e7885198bef0e5af951f1d7b1041ce8ba4010917777e71f@18.210.176.114:30301", + "enode://6f10052847a966a725c9f4adf6716f9141155b99a0fb487fea3f51498f4c2a2cb8d534e680ee678f9447db85b93ff7c74562762c3714783a7233ac448603b25f@107.21.251.55:30301", +]; + /// Returns parsed mainnet nodes pub fn mainnet_nodes() -> Vec { parse_nodes(&MAINNET_BOOTNODES[..]) @@ -61,6 +94,30 @@ pub fn holesky_nodes() -> Vec { parse_nodes(&HOLESKY_BOOTNODES[..]) } +#[cfg(feature = "optimism")] +/// Returns parsed op-stack mainnet nodes +pub fn op_nodes() -> Vec { + parse_nodes(&OP_BOOTNODES[..]) +} + +#[cfg(feature = "optimism")] +/// Returns parsed op-stack testnet nodes +pub fn op_testnet_nodes() -> Vec { + parse_nodes(&OP_TESTNET_BOOTNODES[..]) +} + +#[cfg(feature = "optimism")] +/// Returns parsed op-stack mainnet nodes +pub fn base_nodes() -> Vec { + parse_nodes(&BASE_BOOTNODES[..]) +} + +#[cfg(feature = "optimism")] +/// Returns parsed op-stack testnet nodes +pub fn base_testnet_nodes() -> Vec { + parse_nodes(&BASE_TESTNET_BOOTNODES[..]) +} + /// Parses all the nodes pub fn parse_nodes(nodes: impl IntoIterator>) -> Vec { nodes.into_iter().map(|s| s.as_ref().parse().unwrap()).collect() From fb3f6f15597a8c7fabf4ce74ace287ad1b3b275d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 16 Apr 2024 18:20:42 +0200 Subject: [PATCH 186/700] chore: flatten node-api dep (#7682) --- Cargo.lock | 17 ++++++++++------- crates/consensus/beacon/Cargo.toml | 2 +- 
crates/consensus/beacon/src/engine/handle.rs | 2 +- crates/consensus/beacon/src/engine/message.rs | 2 +- crates/consensus/beacon/src/engine/mod.rs | 2 +- crates/node-core/Cargo.toml | 3 ++- crates/node-core/src/args/rpc_server_args.rs | 3 ++- crates/node-core/src/engine_api_store.rs | 2 +- crates/payload/basic/Cargo.toml | 2 +- crates/payload/basic/src/lib.rs | 2 +- crates/payload/optimism/Cargo.toml | 3 ++- crates/payload/optimism/src/builder.rs | 2 +- crates/payload/optimism/src/payload.rs | 2 +- crates/rpc/rpc-api/Cargo.toml | 2 +- crates/rpc/rpc-api/src/engine.rs | 2 +- crates/rpc/rpc-builder/Cargo.toml | 3 ++- crates/rpc/rpc-builder/src/auth.rs | 3 ++- crates/rpc/rpc-builder/src/lib.rs | 10 ++++++---- crates/rpc/rpc-engine-api/Cargo.toml | 2 +- crates/rpc/rpc-engine-api/src/engine_api.rs | 4 ++-- crates/rpc/rpc-engine-api/src/error.rs | 2 +- 21 files changed, 41 insertions(+), 31 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3b209fd4b3f9d..62e25f7dace80 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6159,9 +6159,9 @@ dependencies = [ "futures-core", "futures-util", "metrics", + "reth-engine-primitives", "reth-interfaces", "reth-metrics", - "reth-node-api", "reth-payload-builder", "reth-primitives", "reth-provider", @@ -6185,9 +6185,9 @@ dependencies = [ "reth-config", "reth-db", "reth-downloaders", + "reth-engine-primitives", "reth-interfaces", "reth-metrics", - "reth-node-api", "reth-node-ethereum", "reth-payload-builder", "reth-payload-validator", @@ -6908,12 +6908,13 @@ dependencies = [ "reth-consensus-common", "reth-db", "reth-discv4", + "reth-engine-primitives", + "reth-evm", "reth-interfaces", "reth-metrics", "reth-net-nat", "reth-network", "reth-network-api", - "reth-node-api", "reth-primitives", "reth-provider", "reth-prune", @@ -7001,7 +7002,8 @@ version = "0.2.0-beta.5" dependencies = [ "alloy-rlp", "reth-basic-payload-builder", - "reth-node-api", + "reth-engine-primitives", + "reth-evm", "reth-payload-builder", "reth-primitives", 
"reth-provider", @@ -7225,7 +7227,7 @@ name = "reth-rpc-api" version = "0.2.0-beta.5" dependencies = [ "jsonrpsee", - "reth-node-api", + "reth-engine-primitives", "reth-primitives", "reth-rpc-types", "serde", @@ -7255,11 +7257,12 @@ dependencies = [ "metrics", "pin-project", "reth-beacon-consensus", + "reth-engine-primitives", + "reth-evm", "reth-interfaces", "reth-ipc", "reth-metrics", "reth-network-api", - "reth-node-api", "reth-node-ethereum", "reth-payload-builder", "reth-primitives", @@ -7293,9 +7296,9 @@ dependencies = [ "jsonrpsee-types", "metrics", "reth-beacon-consensus", + "reth-engine-primitives", "reth-interfaces", "reth-metrics", - "reth-node-api", "reth-node-ethereum", "reth-node-optimism", "reth-payload-builder", diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 2c86a4d414e12..60279a65a3529 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -25,7 +25,7 @@ reth-payload-validator.workspace = true reth-prune.workspace = true reth-static-file.workspace = true reth-tokio-util.workspace = true -reth-node-api.workspace = true +reth-engine-primitives.workspace = true # async tokio = { workspace = true, features = ["sync"] } diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index 56864e7b84b93..121a8fac0703b 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -5,8 +5,8 @@ use crate::{ BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, }; use futures::TryFutureExt; +use reth_engine_primitives::EngineTypes; use reth_interfaces::RethResult; -use reth_node_api::EngineTypes; use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index 5e18267229fd6..9b4324e5ac46d 100644 --- 
a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -3,8 +3,8 @@ use crate::{ BeaconConsensusEngineEvent, }; use futures::{future::Either, FutureExt}; +use reth_engine_primitives::EngineTypes; use reth_interfaces::{consensus::ForkchoiceState, RethResult}; -use reth_node_api::EngineTypes; use reth_payload_builder::error::PayloadBuilderError; use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceUpdateError, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 7b2a689af02b4..3f4bd2ed1df16 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -9,6 +9,7 @@ use crate::{ }; use futures::{Future, StreamExt}; use reth_db::database::Database; +use reth_engine_primitives::{EngineTypes, PayloadAttributes, PayloadBuilderAttributes}; use reth_interfaces::{ blockchain_tree::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, @@ -20,7 +21,6 @@ use reth_interfaces::{ sync::{NetworkSyncUpdater, SyncState}, RethError, RethResult, }; -use reth_node_api::{EngineTypes, PayloadAttributes, PayloadBuilderAttributes}; use reth_payload_builder::PayloadBuilderHandle; use reth_primitives::{ constants::EPOCH_SLOTS, stage::StageId, BlockNumHash, BlockNumber, Head, Header, SealedBlock, diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index e4f6e3330a65e..763cd00c6bf97 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -29,7 +29,8 @@ reth-config.workspace = true reth-discv4.workspace = true reth-net-nat.workspace = true reth-network-api.workspace = true -reth-node-api.workspace = true +reth-evm.workspace = true +reth-engine-primitives.workspace = true reth-tasks.workspace = true reth-consensus-common.workspace = true reth-auto-seal-consensus.workspace = true diff --git 
a/crates/node-core/src/args/rpc_server_args.rs b/crates/node-core/src/args/rpc_server_args.rs index 5e8e1b48c979e..e9a21616a540f 100644 --- a/crates/node-core/src/args/rpc_server_args.rs +++ b/crates/node-core/src/args/rpc_server_args.rs @@ -13,8 +13,9 @@ use clap::{ Arg, Args, Command, }; use rand::Rng; +use reth_engine_primitives::EngineTypes; +use reth_evm::ConfigureEvm; use reth_network_api::{NetworkInfo, Peers}; -use reth_node_api::{ConfigureEvm, EngineTypes}; use reth_provider::{ AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, StateProviderFactory, diff --git a/crates/node-core/src/engine_api_store.rs b/crates/node-core/src/engine_api_store.rs index 4a273f439f2f3..1ff5a0a305ed9 100644 --- a/crates/node-core/src/engine_api_store.rs +++ b/crates/node-core/src/engine_api_store.rs @@ -1,7 +1,7 @@ //! Stores engine API messages to disk for later inspection and replay. use reth_beacon_consensus::BeaconEngineMessage; -use reth_node_api::EngineTypes; +use reth_engine_primitives::EngineTypes; use reth_primitives::fs::{self}; use reth_rpc_types::{ engine::{CancunPayloadFields, ForkchoiceState}, diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index 00ca2432cf99a..dd2b824192ea1 100644 --- a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml @@ -20,7 +20,7 @@ reth-provider.workspace = true reth-payload-builder.workspace = true reth-tasks.workspace = true reth-interfaces.workspace = true -reth-node-api.workspace = true +reth-engine-primitives.workspace = true # ethereum alloy-rlp.workspace = true diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index ef8ce91087117..359a0fb16a036 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -11,8 +11,8 @@ use crate::metrics::PayloadBuilderMetrics; use futures_core::ready; use futures_util::FutureExt; +use reth_engine_primitives::{BuiltPayload, 
PayloadBuilderAttributes}; use reth_interfaces::RethResult; -use reth_node_api::{BuiltPayload, PayloadBuilderAttributes}; use reth_payload_builder::{ database::CachedReads, error::PayloadBuilderError, KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJobGenerator, diff --git a/crates/payload/optimism/Cargo.toml b/crates/payload/optimism/Cargo.toml index 8cafb343e0ed8..ebc776e746a45 100644 --- a/crates/payload/optimism/Cargo.toml +++ b/crates/payload/optimism/Cargo.toml @@ -19,7 +19,8 @@ reth-transaction-pool.workspace = true reth-provider.workspace = true reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true -reth-node-api.workspace = true +reth-engine-primitives.workspace = true +reth-evm.workspace = true reth-payload-builder.workspace = true reth-basic-payload-builder.workspace = true diff --git a/crates/payload/optimism/src/builder.rs b/crates/payload/optimism/src/builder.rs index 06583af3227c3..1d1a2dadecdcb 100644 --- a/crates/payload/optimism/src/builder.rs +++ b/crates/payload/optimism/src/builder.rs @@ -5,7 +5,7 @@ use crate::{ payload::{OptimismBuiltPayload, OptimismPayloadBuilderAttributes}, }; use reth_basic_payload_builder::*; -use reth_node_api::ConfigureEvm; +use reth_evm::ConfigureEvm; use reth_payload_builder::error::PayloadBuilderError; use reth_primitives::{ constants::{BEACON_NONCE, EMPTY_RECEIPTS, EMPTY_TRANSACTIONS}, diff --git a/crates/payload/optimism/src/payload.rs b/crates/payload/optimism/src/payload.rs index 7a25689c4629e..0e4b0c82615a5 100644 --- a/crates/payload/optimism/src/payload.rs +++ b/crates/payload/optimism/src/payload.rs @@ -3,7 +3,7 @@ //! 
Optimism builder support use alloy_rlp::Encodable; -use reth_node_api::{BuiltPayload, PayloadBuilderAttributes}; +use reth_engine_primitives::{BuiltPayload, PayloadBuilderAttributes}; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_primitives::{ revm::config::revm_spec_by_timestamp_after_merge, diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index d8bf076288dec..c2ada1e881927 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -15,7 +15,7 @@ workspace = true # reth reth-primitives.workspace = true reth-rpc-types.workspace = true -reth-node-api.workspace = true +reth-engine-primitives.workspace = true # misc jsonrpsee = { workspace = true, features = ["server", "macros"] } diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index acdc99a7e0400..9304bbc5b8b4a 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -4,7 +4,7 @@ //! the consensus client. use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_node_api::EngineTypes; +use reth_engine_primitives::EngineTypes; use reth_primitives::{Address, BlockHash, BlockId, BlockNumberOrTag, Bytes, B256, U256, U64}; use reth_rpc_types::{ engine::{ diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 1769e78600549..ef79a7ed3df77 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -20,7 +20,8 @@ reth-rpc.workspace = true reth-rpc-api.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true -reth-node-api.workspace = true +reth-evm.workspace = true +reth-engine-primitives.workspace = true # rpc/net jsonrpsee = { workspace = true, features = ["server"] } diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index c6fa2f5613403..3daa1dc2786f4 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs 
@@ -16,8 +16,9 @@ use jsonrpsee::{ use reth_ipc::client::IpcClientBuilder; pub use reth_ipc::server::{Builder as IpcServerBuilder, Endpoint}; +use reth_engine_primitives::EngineTypes; +use reth_evm::ConfigureEvm; use reth_network_api::{NetworkInfo, Peers}; -use reth_node_api::{ConfigureEvm, EngineTypes}; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, HeaderProvider, ReceiptProviderIdExt, StateProviderFactory, diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index ec29a6640bc06..ef5b8868c6f17 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -17,8 +17,8 @@ //! Configure only an http server with a selection of [RethRpcModule]s //! //! ``` +//! use reth_evm::ConfigureEvm; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_node_api::ConfigureEvm; //! use reth_provider::{ //! AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, //! ChangeSetReader, EvmEnvProvider, StateProviderFactory, @@ -77,8 +77,9 @@ //! //! //! ``` +//! use reth_engine_primitives::EngineTypes; +//! use reth_evm::ConfigureEvm; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_node_api::{ConfigureEvm, EngineTypes}; //! use reth_provider::{ //! AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, //! 
ChangeSetReader, EvmEnvProvider, StateProviderFactory, @@ -167,10 +168,11 @@ use jsonrpsee::{ server::{AlreadyStoppedError, IdProvider, RpcServiceBuilder, Server, ServerHandle}, Methods, RpcModule, }; +use reth_engine_primitives::EngineTypes; +use reth_evm::ConfigureEvm; use reth_ipc::server::IpcServer; pub use reth_ipc::server::{Builder as IpcServerBuilder, Endpoint}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; -use reth_node_api::{ConfigureEvm, EngineTypes}; use reth_provider::{ AccountReader, BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, StateProviderFactory, @@ -498,8 +500,8 @@ where /// # Example /// /// ```no_run + /// use reth_evm::ConfigureEvm; /// use reth_network_api::noop::NoopNetwork; - /// use reth_node_api::ConfigureEvm; /// use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; /// use reth_rpc_builder::RpcModuleBuilder; /// use reth_tasks::TokioTaskExecutor; diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 5ffb09294150d..2713639634e84 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -22,7 +22,7 @@ reth-beacon-consensus.workspace = true reth-payload-builder.workspace = true reth-tasks.workspace = true reth-rpc-types-compat.workspace = true -reth-node-api.workspace = true +reth-engine-primitives.workspace = true # async tokio = { workspace = true, features = ["sync"] } diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 32a267f53fa61..d84b6ed22e79d 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -2,11 +2,11 @@ use crate::{metrics::EngineApiMetrics, EngineApiError, EngineApiResult}; use async_trait::async_trait; use jsonrpsee_core::RpcResult; use reth_beacon_consensus::BeaconConsensusEngineHandle; -use 
reth_interfaces::consensus::ForkchoiceState; -use reth_node_api::{ +use reth_engine_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, EngineTypes, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, }; +use reth_interfaces::consensus::ForkchoiceState; use reth_payload_builder::PayloadStore; use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Hardfork, B256, U64}; use reth_provider::{BlockReader, EvmEnvProvider, HeaderProvider, StateProviderFactory}; diff --git a/crates/rpc/rpc-engine-api/src/error.rs b/crates/rpc/rpc-engine-api/src/error.rs index 8a9a00a3b0469..8a7790cf03bab 100644 --- a/crates/rpc/rpc-engine-api/src/error.rs +++ b/crates/rpc/rpc-engine-api/src/error.rs @@ -2,7 +2,7 @@ use jsonrpsee_types::error::{ INTERNAL_ERROR_CODE, INVALID_PARAMS_CODE, INVALID_PARAMS_MSG, SERVER_ERROR_MSG, }; use reth_beacon_consensus::{BeaconForkChoiceUpdateError, BeaconOnNewPayloadError}; -use reth_node_api::EngineObjectValidationError; +use reth_engine_primitives::EngineObjectValidationError; use reth_payload_builder::error::PayloadBuilderError; use reth_primitives::{B256, U256}; use thiserror::Error; From 8fc124f2e3c8640b404ed4e0ffd91b14f8b87ac0 Mon Sep 17 00:00:00 2001 From: Delweng Date: Wed, 17 Apr 2024 03:37:55 +0800 Subject: [PATCH 187/700] chore(node): unify the format of command args (#7656) Signed-off-by: jsvisa Co-authored-by: Matthias Seitz Co-authored-by: Oliver Nordbjerg --- book/cli/reth/node.md | 236 ++++++++++--------- crates/node-core/src/args/rpc_server_args.rs | 36 +-- crates/node-core/src/args/txpool_args.rs | 18 +- 3 files changed, 157 insertions(+), 133 deletions(-) diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index ccbc9cd3f529e..dbfe7b1d49178 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -4,18 +4,21 @@ Start the node ```bash $ reth node --help + +Start the node + Usage: reth node [OPTIONS] Options: --datadir The path to the data dir for all reth files and 
subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --config @@ -24,26 +27,26 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] --with-unused-ports Sets all ports to unused, allowing the OS to choose random unused ports when sockets are bound. - + Mutually exclusive with `--instance`. -h, --help @@ -52,7 +55,7 @@ Options: Metrics: --metrics Enable Prometheus metrics. - + The metrics will be served at the given interface and port. 
Networking: @@ -65,19 +68,32 @@ Networking: --disable-discv4-discovery Disable Discv4 discovery + --enable-discv5-discovery + Enable Discv5 discovery + --discovery.addr - The UDP address to use for P2P discovery/networking - + The UDP address to use for devp2p peer discovery version 4 + [default: 0.0.0.0] --discovery.port - The UDP port to use for P2P discovery/networking - + The UDP port to use for devp2p peer discovery version 4 + [default: 30303] + --discovery.v5.addr + The UDP address to use for devp2p peer discovery version 5 + + [default: 0.0.0.0] + + --discovery.v5.port + The UDP port to use for devp2p peer discovery version 5 + + [default: 9000] + --trusted-peers Comma separated enode URLs of trusted peers for P2P connections. - + --trusted-peers enode://abcd@192.168.0.1:30303 --trusted-only @@ -85,7 +101,7 @@ Networking: --bootnodes Comma separated enode URLs for P2P discovery bootstrap. - + Will fall back to a network-specific default if not specified. --peers-file @@ -94,12 +110,12 @@ Networking: --identity Custom node identity - + [default: reth/-/-gnu] --p2p-secret-key Secret key to use for this node. - + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. --no-persist-peers @@ -107,17 +123,17 @@ Networking: --nat NAT resolution method (any|none|upnp|publicip|extip:\) - + [default: any] --addr Network listening address - + [default: 0.0.0.0] --port Network listening port - + [default: 30303] --max-outbound-peers @@ -128,14 +144,14 @@ Networking: --pooled-tx-response-soft-limit Soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. Spec'd at 2 MiB. - + . - + [default: 2097152] --pooled-tx-pack-soft-limit Default soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. 
This defaults to less than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when assembling a `PooledTransactions` response. Default is 128 KiB - + [default: 131072] RPC: @@ -144,17 +160,17 @@ RPC: --http.addr Http server address to listen on - + [default: 127.0.0.1] --http.port Http server port to listen on - + [default: 8545] --http.api Rpc Modules to be configured for the HTTP server - + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, eth-call-bundle] --http.corsdomain @@ -165,12 +181,12 @@ RPC: --ws.addr Ws server address to listen on - + [default: 127.0.0.1] --ws.port Ws server port to listen on - + [default: 8546] --ws.origins @@ -178,176 +194,184 @@ RPC: --ws.api Rpc Modules to be configured for the WS server - + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, eth-call-bundle] --ipcdisable - Disable the IPC-RPC server + Disable the IPC-RPC server --ipcpath Filename for IPC socket/pipe within the datadir - - [default: .ipc] + + [default: /tmp/reth.ipc] --authrpc.addr Auth server address to listen on - + [default: 127.0.0.1] --authrpc.port Auth server port to listen on - + [default: 8551] --authrpc.jwtsecret Path to a JWT secret to use for the authenticated engine-API RPC server. - + This will enforce JWT authentication for all requests coming from the consensus layer. - + If no path is provided, a secret will be generated and stored in the datadir under `//jwt.hex`. For mainnet this would be `~/.reth/mainnet/jwt.hex` by default. + --auth-ipc + Enable auth engine api over IPC + + --auth-ipc.path + Filename for auth IPC socket/pipe within the datadir + + [default: /tmp/reth_engine_api.ipc] + --rpc.jwtsecret Hex encoded JWT secret to authenticate the regular RPC server(s), see `--http.api` and `--ws.api`. - + This is __not__ used for the authenticated engine-API RPC server, see `--authrpc.jwtsecret`. 
- --rpc-max-request-size + --rpc.max-request-size Set the maximum RPC request payload size for both HTTP and WS in megabytes - + [default: 15] - --rpc-max-response-size + --rpc.max-response-size Set the maximum RPC response payload size for both HTTP and WS in megabytes - + [default: 160] - [aliases: --rpc.returndata.limit] + [aliases: rpc.returndata.limit] - --rpc-max-subscriptions-per-connection + --rpc.max-subscriptions-per-connection Set the maximum concurrent subscriptions per connection - + [default: 1024] - --rpc-max-connections + --rpc.max-connections Maximum number of RPC server connections - + [default: 500] - --rpc-max-tracing-requests + --rpc.max-tracing-requests Maximum number of concurrent tracing requests - + [default: 14] - --rpc-max-blocks-per-filter + --rpc.max-blocks-per-filter Maximum number of blocks that could be scanned per filter request. (0 = entire chain) - + [default: 100000] - --rpc-max-logs-per-response + --rpc.max-logs-per-response Maximum number of logs that can be returned in a single response. 
(0 = no limit) - + [default: 20000] - --rpc-gas-cap + --rpc.gascap Maximum gas limit for `eth_call` and call tracing RPC methods - + [default: 50000000] RPC State Cache: --rpc-cache.max-blocks Max number of blocks in cache - + [default: 5000] --rpc-cache.max-receipts Max number receipts in cache - + [default: 2000] --rpc-cache.max-envs Max number of bytes for cached env data - + [default: 1000] --rpc-cache.max-concurrent-db-requests Max number of concurrent database requests - + [default: 512] Gas Price Oracle: --gpo.blocks Number of recent blocks to check for gas price - + [default: 20] --gpo.ignoreprice Gas Price below which gpo will ignore transactions - + [default: 2] --gpo.maxprice Maximum transaction priority fee(or gasprice before London Fork) to be recommended by gpo - + [default: 500000000000] --gpo.percentile The percentile of gas prices to use for the estimate - + [default: 60] TxPool: - --txpool.pending_max_count + --txpool.pending-max-count Max number of transaction in the pending sub-pool - + [default: 10000] - --txpool.pending_max_size + --txpool.pending-max-size Max size of the pending sub-pool in megabytes - + [default: 20] - --txpool.basefee_max_count + --txpool.basefee-max-count Max number of transaction in the basefee sub-pool - + [default: 10000] - --txpool.basefee_max_size + --txpool.basefee-max-size Max size of the basefee sub-pool in megabytes - + [default: 20] - --txpool.queued_max_count + --txpool.queued-max-count Max number of transaction in the queued sub-pool - + [default: 10000] - --txpool.queued_max_size + --txpool.queued-max-size Max size of the queued sub-pool in megabytes - + [default: 20] --txpool.max_account_slots Max number of executable transaction slots guaranteed per account - + [default: 16] --txpool.pricebump Price bump (in %) for the transaction pool underpriced check - + [default: 10] --blobpool.pricebump Price bump percentage to replace an already existing blob transaction - + [default: 100] - --txpool.max_tx_input_bytes 
+ --txpool.max-tx-input-bytes Max size in bytes of a single transaction allowed to enter the pool - + [default: 131072] - --txpool.max_cached_entries + --txpool.max-cached-entries The maximum number of blobs to keep in the in memory blob cache - + [default: 100] --txpool.nolocals @@ -362,33 +386,33 @@ TxPool: Builder: --builder.extradata Block extra data set by the payload builder - - [default: reth//] + + [default: reth/v0.2.0-beta.5/linux] --builder.gaslimit Target gas ceiling for built blocks - + [default: 30000000] --builder.interval The interval at which the job should build a new payload after the last (in seconds) - + [default: 1] --builder.deadline The deadline for when the payload builder job should resolve - + [default: 12] --builder.max-tasks Maximum number of tasks to spawn for building a payload - + [default: 3] Debug: --debug.continuous Prompt the downloader to download blocks one at a time. - + NOTE: This is for testing purposes only. --debug.terminate @@ -396,7 +420,7 @@ Debug: --debug.tip Set the chain tip manually for testing purposes. - + NOTE: This is a temporary flag --debug.max-block @@ -433,13 +457,13 @@ Database: --db.exclusive Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - + [possible values: true, false] Dev testnet: --dev Start the node in dev mode - + This mode uses a local proof-of-authority consensus engine with either fixed block times or automatically mined blocks. Disables network discovery and enables local http server. @@ -451,7 +475,7 @@ Dev testnet: --dev.block-time Interval between blocks. 
- + Parses strings using [humantime::parse_duration] --dev.block-time 12s @@ -462,7 +486,7 @@ Pruning: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -472,12 +496,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -487,22 +511,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - - [default: /logs] + + [default: /root/.cache/reth/logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -510,12 +534,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -526,7 +550,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. 
- + -v Errors -vv Warnings -vvv Info @@ -535,4 +559,4 @@ Display: -q, --quiet Silence all log output -``` \ No newline at end of file +``` diff --git a/crates/node-core/src/args/rpc_server_args.rs b/crates/node-core/src/args/rpc_server_args.rs index e9a21616a540f..b12f2740aa68e 100644 --- a/crates/node-core/src/args/rpc_server_args.rs +++ b/crates/node-core/src/args/rpc_server_args.rs @@ -99,7 +99,7 @@ pub struct RpcServerArgs { #[arg(long = "ws.api", value_parser = RpcModuleSelectionValueParser::default())] pub ws_api: Option, - /// Disable the IPC-RPC server + /// Disable the IPC-RPC server #[arg(long)] pub ipcdisable: bool, @@ -124,6 +124,14 @@ pub struct RpcServerArgs { #[arg(long = "authrpc.jwtsecret", value_name = "PATH", global = true, required = false)] pub auth_jwtsecret: Option, + /// Enable auth engine API over IPC + #[arg(long)] + pub auth_ipc: bool, + + /// Filename for auth IPC socket/pipe within the datadir + #[arg(long = "auth-ipc.path", default_value_t = constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string())] + pub auth_ipc_path: String, + /// Hex encoded JWT secret to authenticate the regular RPC server(s), see `--http.api` and /// `--ws.api`. /// @@ -133,37 +141,37 @@ pub struct RpcServerArgs { pub rpc_jwtsecret: Option, /// Set the maximum RPC request payload size for both HTTP and WS in megabytes. - #[arg(long, default_value_t = RPC_DEFAULT_MAX_REQUEST_SIZE_MB.into())] + #[arg(long = "rpc.max-request-size", alias = "rpc-max-request-size", default_value_t = RPC_DEFAULT_MAX_REQUEST_SIZE_MB.into())] pub rpc_max_request_size: MaxU32, /// Set the maximum RPC response payload size for both HTTP and WS in megabytes. 
- #[arg(long, visible_alias = "--rpc.returndata.limit", default_value_t = RPC_DEFAULT_MAX_RESPONSE_SIZE_MB.into())] + #[arg(long = "rpc.max-response-size", alias = "rpc-max-response-size", visible_alias = "rpc.returndata.limit", default_value_t = RPC_DEFAULT_MAX_RESPONSE_SIZE_MB.into())] pub rpc_max_response_size: MaxU32, /// Set the maximum concurrent subscriptions per connection. - #[arg(long, default_value_t = RPC_DEFAULT_MAX_SUBS_PER_CONN.into())] + #[arg(long = "rpc.max-subscriptions-per-connection", alias = "rpc-max-subscriptions-per-connection", default_value_t = RPC_DEFAULT_MAX_SUBS_PER_CONN.into())] pub rpc_max_subscriptions_per_connection: MaxU32, /// Maximum number of RPC server connections. - #[arg(long, value_name = "COUNT", default_value_t = RPC_DEFAULT_MAX_CONNECTIONS.into())] + #[arg(long = "rpc.max-connections", alias = "rpc-max-connections", value_name = "COUNT", default_value_t = RPC_DEFAULT_MAX_CONNECTIONS.into())] pub rpc_max_connections: MaxU32, /// Maximum number of concurrent tracing requests. - #[arg(long, value_name = "COUNT", default_value_t = constants::default_max_tracing_requests())] + #[arg(long = "rpc.max-tracing-requests", alias = "rpc-max-tracing-requests", value_name = "COUNT", default_value_t = constants::default_max_tracing_requests())] pub rpc_max_tracing_requests: usize, /// Maximum number of blocks that could be scanned per filter request. (0 = entire chain) - #[arg(long, value_name = "COUNT", default_value_t = ZeroAsNoneU64::new(constants::DEFAULT_MAX_BLOCKS_PER_FILTER))] + #[arg(long = "rpc.max-blocks-per-filter", alias = "rpc-max-blocks-per-filter", value_name = "COUNT", default_value_t = ZeroAsNoneU64::new(constants::DEFAULT_MAX_BLOCKS_PER_FILTER))] pub rpc_max_blocks_per_filter: ZeroAsNoneU64, /// Maximum number of logs that can be returned in a single response. 
(0 = no limit) - #[arg(long, value_name = "COUNT", default_value_t = ZeroAsNoneU64::new(constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64))] + #[arg(long = "rpc.max-logs-per-response", alias = "rpc-max-logs-per-response", value_name = "COUNT", default_value_t = ZeroAsNoneU64::new(constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64))] pub rpc_max_logs_per_response: ZeroAsNoneU64, /// Maximum gas limit for `eth_call` and call tracing RPC methods. #[arg( - long, - alias = "rpc.gascap", + long = "rpc.gascap", + alias = "rpc-gascap", value_name = "GAS_CAP", value_parser = RangedU64ValueParser::::new().range(1..), default_value_t = RPC_DEFAULT_GAS_CAP.into() @@ -177,14 +185,6 @@ pub struct RpcServerArgs { /// Gas price oracle configuration. #[command(flatten)] pub gas_price_oracle: GasPriceOracleArgs, - - /// Enable auth engine api over IPC - #[arg(long)] - pub auth_ipc: bool, - - /// Filename for auth IPC socket/pipe within the datadir - #[arg(long = "auth-ipc.path", default_value_t = constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string())] - pub auth_ipc_path: String, } impl RpcServerArgs { diff --git a/crates/node-core/src/args/txpool_args.rs b/crates/node-core/src/args/txpool_args.rs index ea1bf67b80ef0..db9e43d82bd59 100644 --- a/crates/node-core/src/args/txpool_args.rs +++ b/crates/node-core/src/args/txpool_args.rs @@ -14,28 +14,28 @@ use reth_transaction_pool::{ #[command(next_help_heading = "TxPool")] pub struct TxPoolArgs { /// Max number of transaction in the pending sub-pool. - #[arg(long = "txpool.pending_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] + #[arg(long = "txpool.pending-max-count", alias = "txpool.pending_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] pub pending_max_count: usize, /// Max size of the pending sub-pool in megabytes. 
- #[arg(long = "txpool.pending_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] + #[arg(long = "txpool.pending-max-size", alias = "txpool.pending_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] pub pending_max_size: usize, /// Max number of transaction in the basefee sub-pool - #[arg(long = "txpool.basefee_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] + #[arg(long = "txpool.basefee-max-count", alias = "txpool.basefee_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] pub basefee_max_count: usize, /// Max size of the basefee sub-pool in megabytes. - #[arg(long = "txpool.basefee_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] + #[arg(long = "txpool.basefee-max-size", alias = "txpool.basefee_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] pub basefee_max_size: usize, /// Max number of transaction in the queued sub-pool - #[arg(long = "txpool.queued_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] + #[arg(long = "txpool.queued-max-count", alias = "txpool.queued_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] pub queued_max_count: usize, /// Max size of the queued sub-pool in megabytes. - #[arg(long = "txpool.queued_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] + #[arg(long = "txpool.queued-max-size", alias = "txpool.queued_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] pub queued_max_size: usize, /// Max number of executable transaction slots guaranteed per account - #[arg(long = "txpool.max_account_slots", default_value_t = TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER)] + #[arg(long = "txpool.max-account-slots", long = "txpool.max_account_slots", default_value_t = TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER)] pub max_account_slots: usize, /// Price bump (in %) for the transaction pool underpriced check. 
@@ -47,11 +47,11 @@ pub struct TxPoolArgs { pub blob_transaction_price_bump: u128, /// Max size in bytes of a single transaction allowed to enter the pool - #[arg(long = "txpool.max_tx_input_bytes", default_value_t = DEFAULT_MAX_TX_INPUT_BYTES)] + #[arg(long = "txpool.max-tx-input-bytes", alias = "txpool.max_tx_input_bytes", default_value_t = DEFAULT_MAX_TX_INPUT_BYTES)] pub max_tx_input_bytes: usize, /// The maximum number of blobs to keep in the in memory blob cache. - #[arg(long = "txpool.max_cached_entries", default_value_t = DEFAULT_MAX_CACHED_BLOBS)] + #[arg(long = "txpool.max-cached-entries", alias = "txpool.max_cached_entries", default_value_t = DEFAULT_MAX_CACHED_BLOBS)] pub max_cached_entries: u32, /// Flag to disable local transaction exemptions. From bb67c40d56870c9e5311ff78805c2224612c3c08 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 16 Apr 2024 15:50:12 -0400 Subject: [PATCH 188/700] chore: remove DOS line endings from basefee.rs (#7685) --- crates/primitives/src/basefee.rs | 222 +++++++++++++++---------------- 1 file changed, 111 insertions(+), 111 deletions(-) diff --git a/crates/primitives/src/basefee.rs b/crates/primitives/src/basefee.rs index fb5ca7571f546..b886b41e92990 100644 --- a/crates/primitives/src/basefee.rs +++ b/crates/primitives/src/basefee.rs @@ -1,111 +1,111 @@ -//! 
Helpers for working with EIP-1559 base fee - -// re-export -#[doc(inline)] -pub use alloy_eips::eip1559::calc_next_block_base_fee; - -#[cfg(test)] -mod tests { - use super::*; - - #[cfg(feature = "optimism")] - use crate::chain::{OP_BASE_FEE_PARAMS, OP_SEPOLIA_BASE_FEE_PARAMS}; - - #[test] - fn calculate_base_fee_success() { - let base_fee = [ - 1000000000, 1000000000, 1000000000, 1072671875, 1059263476, 1049238967, 1049238967, 0, - 1, 2, - ]; - let gas_used = [ - 10000000, 10000000, 10000000, 9000000, 10001000, 0, 10000000, 10000000, 10000000, - 10000000, - ]; - let gas_limit = [ - 10000000, 12000000, 14000000, 10000000, 14000000, 2000000, 18000000, 18000000, - 18000000, 18000000, - ]; - let next_base_fee = [ - 1125000000, 1083333333, 1053571428, 1179939062, 1116028649, 918084097, 1063811730, 1, - 2, 3, - ]; - - for i in 0..base_fee.len() { - assert_eq!( - next_base_fee[i], - calc_next_block_base_fee( - gas_used[i] as u128, - gas_limit[i] as u128, - base_fee[i] as u128, - crate::BaseFeeParams::ethereum(), - ) as u64 - ); - } - } - - #[cfg(feature = "optimism")] - #[test] - fn calculate_optimism_base_fee_success() { - let base_fee = [ - 1000000000, 1000000000, 1000000000, 1072671875, 1059263476, 1049238967, 1049238967, 0, - 1, 2, - ]; - let gas_used = [ - 10000000, 10000000, 10000000, 9000000, 10001000, 0, 10000000, 10000000, 10000000, - 10000000, - ]; - let gas_limit = [ - 10000000, 12000000, 14000000, 10000000, 14000000, 2000000, 18000000, 18000000, - 18000000, 18000000, - ]; - let next_base_fee = [ - 1100000048, 1080000000, 1065714297, 1167067046, 1128881311, 1028254188, 1098203452, 1, - 2, 3, - ]; - - for i in 0..base_fee.len() { - assert_eq!( - next_base_fee[i], - calc_next_block_base_fee( - gas_used[i] as u128, - gas_limit[i] as u128, - base_fee[i] as u128, - OP_BASE_FEE_PARAMS, - ) as u64 - ); - } - } - - #[cfg(feature = "optimism")] - #[test] - fn calculate_optimism_sepolia_base_fee_success() { - let base_fee = [ - 1000000000, 1000000000, 1000000000, 
1072671875, 1059263476, 1049238967, 1049238967, 0, - 1, 2, - ]; - let gas_used = [ - 10000000, 10000000, 10000000, 9000000, 10001000, 0, 10000000, 10000000, 10000000, - 10000000, - ]; - let gas_limit = [ - 10000000, 12000000, 14000000, 10000000, 14000000, 2000000, 18000000, 18000000, - 18000000, 18000000, - ]; - let next_base_fee = [ - 1180000000, 1146666666, 1122857142, 1244299375, 1189416692, 1028254188, 1144836295, 1, - 2, 3, - ]; - - for i in 0..base_fee.len() { - assert_eq!( - next_base_fee[i], - calc_next_block_base_fee( - gas_used[i] as u128, - gas_limit[i] as u128, - base_fee[i] as u128, - OP_SEPOLIA_BASE_FEE_PARAMS, - ) as u64 - ); - } - } -} +//! Helpers for working with EIP-1559 base fee + +// re-export +#[doc(inline)] +pub use alloy_eips::eip1559::calc_next_block_base_fee; + +#[cfg(test)] +mod tests { + use super::*; + + #[cfg(feature = "optimism")] + use crate::chain::{OP_BASE_FEE_PARAMS, OP_SEPOLIA_BASE_FEE_PARAMS}; + + #[test] + fn calculate_base_fee_success() { + let base_fee = [ + 1000000000, 1000000000, 1000000000, 1072671875, 1059263476, 1049238967, 1049238967, 0, + 1, 2, + ]; + let gas_used = [ + 10000000, 10000000, 10000000, 9000000, 10001000, 0, 10000000, 10000000, 10000000, + 10000000, + ]; + let gas_limit = [ + 10000000, 12000000, 14000000, 10000000, 14000000, 2000000, 18000000, 18000000, + 18000000, 18000000, + ]; + let next_base_fee = [ + 1125000000, 1083333333, 1053571428, 1179939062, 1116028649, 918084097, 1063811730, 1, + 2, 3, + ]; + + for i in 0..base_fee.len() { + assert_eq!( + next_base_fee[i], + calc_next_block_base_fee( + gas_used[i] as u128, + gas_limit[i] as u128, + base_fee[i] as u128, + crate::BaseFeeParams::ethereum(), + ) as u64 + ); + } + } + + #[cfg(feature = "optimism")] + #[test] + fn calculate_optimism_base_fee_success() { + let base_fee = [ + 1000000000, 1000000000, 1000000000, 1072671875, 1059263476, 1049238967, 1049238967, 0, + 1, 2, + ]; + let gas_used = [ + 10000000, 10000000, 10000000, 9000000, 10001000, 0, 
10000000, 10000000, 10000000, + 10000000, + ]; + let gas_limit = [ + 10000000, 12000000, 14000000, 10000000, 14000000, 2000000, 18000000, 18000000, + 18000000, 18000000, + ]; + let next_base_fee = [ + 1100000048, 1080000000, 1065714297, 1167067046, 1128881311, 1028254188, 1098203452, 1, + 2, 3, + ]; + + for i in 0..base_fee.len() { + assert_eq!( + next_base_fee[i], + calc_next_block_base_fee( + gas_used[i] as u128, + gas_limit[i] as u128, + base_fee[i] as u128, + OP_BASE_FEE_PARAMS, + ) as u64 + ); + } + } + + #[cfg(feature = "optimism")] + #[test] + fn calculate_optimism_sepolia_base_fee_success() { + let base_fee = [ + 1000000000, 1000000000, 1000000000, 1072671875, 1059263476, 1049238967, 1049238967, 0, + 1, 2, + ]; + let gas_used = [ + 10000000, 10000000, 10000000, 9000000, 10001000, 0, 10000000, 10000000, 10000000, + 10000000, + ]; + let gas_limit = [ + 10000000, 12000000, 14000000, 10000000, 14000000, 2000000, 18000000, 18000000, + 18000000, 18000000, + ]; + let next_base_fee = [ + 1180000000, 1146666666, 1122857142, 1244299375, 1189416692, 1028254188, 1144836295, 1, + 2, 3, + ]; + + for i in 0..base_fee.len() { + assert_eq!( + next_base_fee[i], + calc_next_block_base_fee( + gas_used[i] as u128, + gas_limit[i] as u128, + base_fee[i] as u128, + OP_SEPOLIA_BASE_FEE_PARAMS, + ) as u64 + ); + } + } +} From d39a8d74041c690b254c0de38e892e551b32ea1a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 16 Apr 2024 22:45:01 +0200 Subject: [PATCH 189/700] feat: add native compact impl for alloy TxKind (#7686) --- crates/storage/codecs/src/alloy/log.rs | 2 ++ crates/storage/codecs/src/alloy/mod.rs | 1 + crates/storage/codecs/src/alloy/txkind.rs | 31 +++++++++++++++++++++++ 3 files changed, 34 insertions(+) create mode 100644 crates/storage/codecs/src/alloy/txkind.rs diff --git a/crates/storage/codecs/src/alloy/log.rs b/crates/storage/codecs/src/alloy/log.rs index 762a1eae0234a..a374b3680c351 100644 --- a/crates/storage/codecs/src/alloy/log.rs +++ 
b/crates/storage/codecs/src/alloy/log.rs @@ -1,3 +1,5 @@ +//! Native Compact codec impl for primitive alloy log types. + use crate::Compact; use alloy_primitives::{Address, Bytes, Log, LogData}; use bytes::BufMut; diff --git a/crates/storage/codecs/src/alloy/mod.rs b/crates/storage/codecs/src/alloy/mod.rs index 5785546c319e6..7d7a794fe6708 100644 --- a/crates/storage/codecs/src/alloy/mod.rs +++ b/crates/storage/codecs/src/alloy/mod.rs @@ -1,2 +1,3 @@ mod access_list; mod log; +mod txkind; diff --git a/crates/storage/codecs/src/alloy/txkind.rs b/crates/storage/codecs/src/alloy/txkind.rs new file mode 100644 index 0000000000000..220384bdde730 --- /dev/null +++ b/crates/storage/codecs/src/alloy/txkind.rs @@ -0,0 +1,31 @@ +//! Native Compact codec impl for primitive alloy [TxKind]. + +use crate::Compact; +use alloy_primitives::{Address, TxKind}; + +impl Compact for TxKind { + fn to_compact(self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + match self { + TxKind::Create => 0, + TxKind::Call(address) => { + address.to_compact(buf); + 1 + } + } + } + fn from_compact(buf: &[u8], identifier: usize) -> (Self, &[u8]) { + match identifier { + 0 => (TxKind::Create, buf), + 1 => { + let (addr, buf) = Address::from_compact(buf, buf.len()); + (TxKind::Call(addr), buf) + } + _ => { + unreachable!("Junk data in database: unknown TransactionKind variant",) + } + } + } +} From 86d8f0b0ec3c3caf7d7c84d68a6f546b0a679893 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 16 Apr 2024 23:05:08 +0200 Subject: [PATCH 190/700] fix: add enable missing arbitrary feature (#7688) --- crates/primitives/Cargo.toml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index f2e7ba78916e0..daddf3e9b337d 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -64,14 +64,17 @@ proptest-derive = { workspace = true, optional = true } strum = { workspace = true, features 
= ["derive"] } [dev-dependencies] +# eth +revm-primitives = { workspace = true, features = ["arbitrary"] } +nybbles = { workspace = true, features = ["arbitrary"] } +alloy-trie = { workspace = true, features = ["arbitrary"] } +alloy-eips = { workspace = true, features = ["arbitrary"] } + arbitrary = { workspace = true, features = ["derive"] } assert_matches.workspace = true proptest.workspace = true proptest-derive.workspace = true rand.workspace = true -revm-primitives = { workspace = true, features = ["arbitrary"] } -nybbles = { workspace = true, features = ["arbitrary"] } -alloy-trie = { workspace = true, features = ["arbitrary"] } serde_json.workspace = true test-fuzz.workspace = true toml.workspace = true @@ -99,6 +102,7 @@ arbitrary = [ "nybbles/arbitrary", "alloy-trie/arbitrary", "alloy-chains/arbitrary", + "alloy-eips/arbitrary", "dep:arbitrary", "dep:proptest", "dep:proptest-derive", From b386a8d5049b7d47a9d2871f82aad0dbfdf2da7e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 17 Apr 2024 00:13:35 +0200 Subject: [PATCH 191/700] fix(discv5): remove todo (#7684) --- crates/net/network/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index c5a1e12188311..f9c9212d97ec2 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -125,7 +125,7 @@ impl NetworkConfig { let boot_nodes = self.boot_nodes.clone(); let mut builder = - reth_discv5::Config::builder(rlpx_port).add_unsigned_boot_nodes(boot_nodes.into_iter()); // todo: store discv5 peers in separate file + reth_discv5::Config::builder(rlpx_port).add_unsigned_boot_nodes(boot_nodes.into_iter()); if chain.is_optimism() { builder = builder.fork(OPSTACK, fork_id) From 24225d0a8830ff9034db6a31c948b5a5824d5875 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 17 Apr 2024 08:23:14 +0200 Subject: [PATCH 192/700] chore(engine): dedup consistent forkchoice state check (#7677) --- 
crates/consensus/beacon/src/engine/mod.rs | 77 ++++++----------------- 1 file changed, 18 insertions(+), 59 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 3f4bd2ed1df16..bea6159e7d643 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -416,29 +416,26 @@ where elapsed, )); } + }; + + // Validate that the forkchoice state is consistent. + if let Some(invalid_fcu_response) = + self.ensure_consistent_forkchoice_state(state)? + { + trace!(target: "consensus::engine", ?state, "Forkchoice state is inconsistent, returning invalid response"); + return Ok(invalid_fcu_response) } if let Some(attrs) = attrs { - // if we return early then we wouldn't perform these consistency checks, so we - // need to do them here, and should do them before we process any payload - // attributes - if let Some(invalid_fcu_response) = self.ensure_consistent_state(state)? { - trace!(target: "consensus::engine", ?state, head=?state.head_block_hash, "Forkchoice state is inconsistent, returning invalid response"); - return Ok(invalid_fcu_response) - } - // the CL requested to build a new payload on top of this new VALID head - let payload_response = self.process_payload_attributes( - attrs, - outcome.into_header().unseal(), - state, - ); - - trace!(target: "consensus::engine", status = ?payload_response, ?state, "Returning forkchoice status"); - return Ok(payload_response) + let head = outcome.into_header().unseal(); + self.process_payload_attributes(attrs, head, state) + } else { + OnForkChoiceUpdated::valid(PayloadStatus::new( + PayloadStatusEnum::Valid, + Some(state.head_block_hash), + )) } - - PayloadStatus::new(PayloadStatusEnum::Valid, Some(state.head_block_hash)) } Err(err) => { if err.is_fatal() { @@ -446,19 +443,12 @@ where return Err(err.into()) } - self.on_failed_canonical_forkchoice_update(&state, err) + 
OnForkChoiceUpdated::valid(self.on_failed_canonical_forkchoice_update(&state, err)) } }; - if let Some(invalid_fcu_response) = - self.ensure_consistent_state_with_status(state, &status)? - { - trace!(target: "consensus::engine", ?status, ?state, "Forkchoice state is inconsistent, returning invalid response"); - return Ok(invalid_fcu_response) - } - trace!(target: "consensus::engine", ?status, ?state, "Returning forkchoice status"); - Ok(OnForkChoiceUpdated::valid(status)) + Ok(status) } /// Invoked when head hash references a `VALID` block that is already canonical. @@ -830,37 +820,6 @@ where elapsed } - /// Ensures that the given forkchoice state is consistent, assuming the head block has been - /// made canonical. This takes a status as input, and will only perform consistency checks if - /// the input status is VALID. - /// - /// If the forkchoice state is consistent, this will return Ok(None). Otherwise, this will - /// return an instance of [OnForkChoiceUpdated] that is INVALID. - /// - /// This also updates the safe and finalized blocks in the [CanonChainTracker], if they are - /// consistent with the head block. - fn ensure_consistent_state_with_status( - &mut self, - state: ForkchoiceState, - status: &PayloadStatus, - ) -> RethResult> { - // We only perform consistency checks if the status is VALID because if the status is - // INVALID, we want to return the correct _type_ of error to the CL so we can properly - // describe the reason it is invalid. For example, it's possible that the status is invalid - // because the safe block has an invalid state root. In that case, we want to preserve the - // correct `latestValidHash`, instead of returning a generic "invalid state" error that - // does not contain a `latestValidHash`. - // - // We also should not perform these checks if the status is SYNCING, because in that case - // we likely do not have the finalized or safe blocks, and would return an incorrect - // INVALID status instead. 
- if status.is_valid() { - return self.ensure_consistent_state(state) - } - - Ok(None) - } - /// Ensures that the given forkchoice state is consistent, assuming the head block has been /// made canonical. /// @@ -869,7 +828,7 @@ where /// /// This also updates the safe and finalized blocks in the [CanonChainTracker], if they are /// consistent with the head block. - fn ensure_consistent_state( + fn ensure_consistent_forkchoice_state( &mut self, state: ForkchoiceState, ) -> RethResult> { From 70db3d827677a1890b6e3146de2fe8d696df40f3 Mon Sep 17 00:00:00 2001 From: Mateusz Morusiewicz <11313015+Ruteri@users.noreply.github.com> Date: Wed, 17 Apr 2024 08:39:21 +0200 Subject: [PATCH 193/700] storage: fixes behaviour of cursor delete_current on start item (#7646) Co-authored-by: Roman Krasiuk --- crates/storage/db/src/abstraction/cursor.rs | 4 ++ .../storage/db/src/implementation/mdbx/mod.rs | 40 +++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/crates/storage/db/src/abstraction/cursor.rs b/crates/storage/db/src/abstraction/cursor.rs index 306b010f53bfc..eb7a209da4073 100644 --- a/crates/storage/db/src/abstraction/cursor.rs +++ b/crates/storage/db/src/abstraction/cursor.rs @@ -181,6 +181,7 @@ impl<'cursor, T: Table, CURSOR: DbCursorRO> Walker<'cursor, T, CURSOR> { impl<'cursor, T: Table, CURSOR: DbCursorRW + DbCursorRO> Walker<'cursor, T, CURSOR> { /// Delete current item that walker points to. pub fn delete_current(&mut self) -> Result<(), DatabaseError> { + self.start.take(); self.cursor.delete_current() } } @@ -223,6 +224,7 @@ impl<'cursor, T: Table, CURSOR: DbCursorRO> ReverseWalker<'cursor, T, CURSOR> impl<'cursor, T: Table, CURSOR: DbCursorRW + DbCursorRO> ReverseWalker<'cursor, T, CURSOR> { /// Delete current item that walker points to. 
pub fn delete_current(&mut self) -> Result<(), DatabaseError> { + self.start.take(); self.cursor.delete_current() } } @@ -321,6 +323,7 @@ impl<'cursor, T: Table, CURSOR: DbCursorRO> RangeWalker<'cursor, T, CURSOR> { impl<'cursor, T: Table, CURSOR: DbCursorRW + DbCursorRO> RangeWalker<'cursor, T, CURSOR> { /// Delete current item that walker points to. pub fn delete_current(&mut self) -> Result<(), DatabaseError> { + self.start.take(); self.cursor.delete_current() } } @@ -353,6 +356,7 @@ where impl<'cursor, T: DupSort, CURSOR: DbCursorRW + DbDupCursorRO> DupWalker<'cursor, T, CURSOR> { /// Delete current item that walker points to. pub fn delete_current(&mut self) -> Result<(), DatabaseError> { + self.start.take(); self.cursor.delete_current() } } diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 244c4d5b0caee..53594f671a47d 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -482,6 +482,7 @@ mod tests { const ERROR_APPEND: &str = "Not able to append the value to the table."; const ERROR_UPSERT: &str = "Not able to upsert the value to the table."; const ERROR_GET: &str = "Not able to get value from table."; + const ERROR_DEL: &str = "Not able to delete from table."; const ERROR_COMMIT: &str = "Not able to commit transaction."; const ERROR_RETURN_VALUE: &str = "Mismatching result."; const ERROR_INIT_TX: &str = "Failed to create a MDBX transaction."; @@ -511,6 +512,45 @@ mod tests { tx.commit().expect(ERROR_COMMIT); } + #[test] + fn db_dup_cursor_delete_first() { + let db: Arc = create_test_db(DatabaseEnvKind::RW); + let tx = db.tx_mut().expect(ERROR_INIT_TX); + + let mut dup_cursor = tx.cursor_dup_write::().unwrap(); + + let entry_0 = StorageEntry { key: B256::with_last_byte(1), value: U256::from(0) }; + let entry_1 = StorageEntry { key: B256::with_last_byte(1), value: U256::from(1) }; + + 
dup_cursor.upsert(Address::with_last_byte(1), entry_0).expect(ERROR_UPSERT); + dup_cursor.upsert(Address::with_last_byte(1), entry_1).expect(ERROR_UPSERT); + + assert_eq!( + dup_cursor.walk(None).unwrap().collect::, _>>(), + Ok(vec![(Address::with_last_byte(1), entry_0), (Address::with_last_byte(1), entry_1),]) + ); + + let mut walker = dup_cursor.walk(None).unwrap(); + walker.delete_current().expect(ERROR_DEL); + + assert_eq!(walker.next(), Some(Ok((Address::with_last_byte(1), entry_1)))); + + // Check the tx view - it correctly holds entry_1 + assert_eq!( + tx.cursor_dup_read::() + .unwrap() + .walk(None) + .unwrap() + .collect::, _>>(), + Ok(vec![ + (Address::with_last_byte(1), entry_1), // This is ok - we removed entry_0 + ]) + ); + + // Check the remainder of walker + assert_eq!(walker.next(), None); + } + #[test] fn db_cursor_walk() { let env = create_test_db(DatabaseEnvKind::RW); From f4386c9cd5e03f763026b0b2f82588c780aa63a6 Mon Sep 17 00:00:00 2001 From: Kyrylo Riabov Date: Wed, 17 Apr 2024 10:25:01 +0300 Subject: [PATCH 194/700] fix(docker): Add support for RUSTFLAGS and feature flags in Dockerfile (#7658) --- Dockerfile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 5dac92bb9db7e..f1cc4d804ee95 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,6 +16,10 @@ COPY --from=planner /app/recipe.json recipe.json ARG BUILD_PROFILE=release ENV BUILD_PROFILE $BUILD_PROFILE +# Extra Cargo flags +ARG RUSTFLAGS="" +ENV RUSTFLAGS "$RUSTFLAGS" + # Extra Cargo features ARG FEATURES="" ENV FEATURES $FEATURES @@ -24,7 +28,7 @@ ENV FEATURES $FEATURES RUN apt-get update && apt-get -y upgrade && apt-get install -y libclang-dev pkg-config # Builds dependencies -RUN cargo chef cook --profile $BUILD_PROFILE --recipe-path recipe.json +RUN cargo chef cook --profile $BUILD_PROFILE --features "$FEATURES" --recipe-path recipe.json # Build application COPY . . 
From 4c4aaafff472312b216e3fa0a2b61e5e6fc37bee Mon Sep 17 00:00:00 2001 From: int88 <106391185+int88@users.noreply.github.com> Date: Wed, 17 Apr 2024 17:44:30 +0800 Subject: [PATCH 195/700] test: add `test_requests_timeout()` for disv4 (#7693) --- crates/net/discv4/src/lib.rs | 65 ++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index bfd64d6236090..48e25c163826a 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -2560,6 +2560,71 @@ mod tests { let _ = discv4.lookup_self().await; } + #[tokio::test] + async fn test_requests_timeout() { + reth_tracing::init_test_tracing(); + let fork_id = ForkId { hash: ForkHash(hex!("743f3d89")), next: 16191202 }; + + let config = Discv4Config::builder() + .request_timeout(Duration::from_millis(200)) + .ping_expiration(Duration::from_millis(200)) + .add_eip868_pair("eth", fork_id) + .build(); + let (_disv4, mut service) = create_discv4_with_config(config).await; + + let id = PeerId::random(); + let key = kad_key(id); + let record = NodeRecord::new("0.0.0.0:0".parse().unwrap(), id); + + let _ = service.kbuckets.insert_or_update( + &key, + NodeEntry::new_proven(record), + NodeStatus { + direction: ConnectionDirection::Incoming, + state: ConnectionState::Connected, + }, + ); + + service.lookup_self(); + assert_eq!(service.pending_find_nodes.len(), 1); + + let ctx = service.pending_find_nodes.values().next().unwrap().lookup_context.clone(); + + service.pending_lookup.insert(record.id, (Instant::now(), ctx)); + + assert_eq!(service.pending_lookup.len(), 1); + + let ping = Ping { + from: service.local_node_record.into(), + to: record.into(), + expire: service.ping_expiration(), + enr_sq: service.enr_seq(), + }; + let echo_hash = service.send_packet(Message::Ping(ping), record.udp_addr()); + let ping_request = PingRequest { + sent_at: Instant::now(), + node: record, + echo_hash, + reason: PingReason::InitialInsert, + }; 
+ service.pending_pings.insert(record.id, ping_request); + + assert_eq!(service.pending_pings.len(), 1); + + tokio::time::sleep(Duration::from_secs(1)).await; + + poll_fn(|cx| { + let _ = service.poll(cx); + + assert_eq!(service.pending_find_nodes.len(), 0); + assert_eq!(service.pending_lookup.len(), 0); + assert_eq!(service.pending_pings.len(), 0); + + Poll::Ready(()) + }) + .await; + } + // sends a PING packet with wrong 'to' field and expects a PONG response. #[tokio::test(flavor = "multi_thread")] async fn test_check_wrong_to() { From 4911febe6f1f2bd7abc5768c4950b305f5127c1e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 17 Apr 2024 12:56:06 +0200 Subject: [PATCH 196/700] feat: add Prague hardfork variant (#7694) --- crates/ethereum-forks/src/hardfork.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/crates/ethereum-forks/src/hardfork.rs b/crates/ethereum-forks/src/hardfork.rs index b098f5fca5805..6ccb306973cb5 100644 --- a/crates/ethereum-forks/src/hardfork.rs +++ b/crates/ethereum-forks/src/hardfork.rs @@ -69,6 +69,10 @@ pub enum Hardfork { #[cfg(feature = "optimism")] Ecotone, // ArbOS20Atlas, + + // Upcoming + /// Prague: + Prague, } impl Hardfork { @@ -551,6 +555,7 @@ impl FromStr for Hardfork { "canyon" => Hardfork::Canyon, #[cfg(feature = "optimism")] "ecotone" => Hardfork::Ecotone, + "prague" => Hardfork::Prague, // "arbos11" => Hardfork::ArbOS11, // "arbos20atlas" => Hardfork::ArbOS20Atlas, _ => return Err(format!("Unknown hardfork: {s}")), @@ -588,6 +593,7 @@ mod tests { "PARIS", "ShAnGhAI", "CaNcUn", + "PrAguE", ]; let expected_hardforks = [ Hardfork::Frontier, @@ -607,6 +613,7 @@ mod tests { Hardfork::Paris, Hardfork::Shanghai, Hardfork::Cancun, + Hardfork::Prague, ]; let hardforks: Vec = From dc39fd68f4f2c2b57645b1edaefa1b15cc0794a5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 17 Apr 2024 15:21:25 +0200 Subject: [PATCH 197/700] fix: check for genesis block on pool validator init (#7699) --- 
crates/node-optimism/src/txpool.rs | 14 +++++++++++--- crates/revm/src/optimism/mod.rs | 2 ++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/crates/node-optimism/src/txpool.rs b/crates/node-optimism/src/txpool.rs index c3fc8a8de490a..417b65f628dfa 100644 --- a/crates/node-optimism/src/txpool.rs +++ b/crates/node-optimism/src/txpool.rs @@ -52,7 +52,14 @@ where if let Ok(Some(block)) = this.inner.client().block_by_number_or_tag(reth_primitives::BlockNumberOrTag::Latest) { - this.update_l1_block_info(&block); + // genesis block has no txs, so we can't extract L1 info, we set the block info to empty + // so that we will accept txs into the pool before the first block + if block.number == 0 { + this.block_info.timestamp.store(block.timestamp, Ordering::Relaxed); + *this.block_info.l1_block_info.write() = Some(Default::default()) + } else { + this.update_l1_block_info(&block); + } } this @@ -69,8 +76,9 @@ where /// Update the L1 block info. fn update_l1_block_info(&self, block: &Block) { self.block_info.timestamp.store(block.timestamp, Ordering::Relaxed); - let cost_addition = reth_revm::optimism::extract_l1_info(block).ok(); - *self.block_info.l1_block_info.write() = cost_addition; + if let Ok(cost_addition) = reth_revm::optimism::extract_l1_info(block) { + *self.block_info.l1_block_info.write() = Some(cost_addition); + } } /// Validates a single transaction. diff --git a/crates/revm/src/optimism/mod.rs b/crates/revm/src/optimism/mod.rs index c23d9b38f47b8..470e7a914468e 100644 --- a/crates/revm/src/optimism/mod.rs +++ b/crates/revm/src/optimism/mod.rs @@ -26,6 +26,8 @@ const L1_BLOCK_ECOTONE_SELECTOR: [u8; 4] = hex!("440a5e20"); /// Extracts the [L1BlockInfo] from the L2 block. The L1 info transaction is always the first /// transaction in the L2 block. +/// +/// Returns an error if the L1 info transaction is not found, if the block is empty. 
pub fn extract_l1_info(block: &Block) -> Result { let l1_info_tx_data = block .body From 7cda5945d74a664d6fadc604196d5b93ebea6fe0 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 17 Apr 2024 15:25:44 +0200 Subject: [PATCH 198/700] chore(engine): refactor pipeline outcome processing (#7692) --- crates/consensus/beacon/src/engine/mod.rs | 408 ++++++++++------------ 1 file changed, 191 insertions(+), 217 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index bea6159e7d643..91197205a43fc 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -33,7 +33,7 @@ use reth_provider::{ use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, PayloadStatus, PayloadStatusEnum, PayloadValidationError, }; -use reth_stages::{ControlFlow, Pipeline, PipelineError}; +use reth_stages::{ControlFlow, Pipeline}; use reth_tasks::TaskSpawner; use reth_tokio_util::EventListeners; use std::{ @@ -784,16 +784,11 @@ where /// Checks if the given `head` points to an invalid header, which requires a specific response /// to a forkchoice update. 
fn check_invalid_ancestor(&mut self, head: B256) -> Option { - let parent_hash = { - // check if the head was previously marked as invalid - let header = self.invalid_headers.get(&head)?; - header.parent_hash - }; + // check if the head was previously marked as invalid + let header = self.invalid_headers.get(&head)?; // populate the latest valid hash field - let status = self.prepare_invalid_response(parent_hash); - - Some(status) + Some(self.prepare_invalid_response(header.parent_hash)) } /// Record latency metrics for one call to make a block canonical @@ -816,7 +811,6 @@ where } Err(_) => self.metrics.make_canonical_error_latency.record(elapsed), } - elapsed } @@ -1404,17 +1398,15 @@ where if let Err((hash, error)) = self.try_make_sync_target_canonical(downloaded_num_hash) { - if !error.is_block_hash_not_found() { - if error.is_fatal() { - error!(target: "consensus::engine", %error, "Encountered fatal error while making sync target canonical: {:?}, {:?}", error, hash); - } else { - debug!( - target: "consensus::engine", - "Unexpected error while making sync target canonical: {:?}, {:?}", - error, - hash - ) - } + if error.is_fatal() { + error!(target: "consensus::engine", %error, "Encountered fatal error while making sync target canonical: {:?}, {:?}", error, hash); + } else if !error.is_block_hash_not_found() { + debug!( + target: "consensus::engine", + "Unexpected error while making sync target canonical: {:?}, {:?}", + error, + hash + ) } } } @@ -1502,59 +1494,57 @@ where &mut self, inserted: BlockNumHash, ) -> Result<(), (B256, CanonicalError)> { - if let Some(target) = self.forkchoice_state_tracker.sync_target_state() { - // optimistically try to make the head of the current FCU target canonical, the sync - // target might have changed since the block download request was issued - // (new FCU received) - let start = Instant::now(); - let make_canonical_result = self.blockchain.make_canonical(target.head_block_hash); - let elapsed = 
self.record_make_canonical_latency(start, &make_canonical_result); - match make_canonical_result { - Ok(outcome) => { - if let CanonicalOutcome::Committed { head } = &outcome { - self.listeners.notify(BeaconConsensusEngineEvent::CanonicalChainCommitted( - Box::new(head.clone()), - elapsed, - )); - } - - let new_head = outcome.into_header(); - debug!(target: "consensus::engine", hash=?new_head.hash(), number=new_head.number, "Canonicalized new head"); + let Some(target) = self.forkchoice_state_tracker.sync_target_state() else { return Ok(()) }; - // we can update the FCU blocks - if let Err(err) = self.update_canon_chain(new_head, &target) { - debug!(target: "consensus::engine", ?err, ?target, "Failed to update the canonical chain tracker"); - } + // optimistically try to make the head of the current FCU target canonical, the sync + // target might have changed since the block download request was issued + // (new FCU received) + let start = Instant::now(); + let make_canonical_result = self.blockchain.make_canonical(target.head_block_hash); + let elapsed = self.record_make_canonical_latency(start, &make_canonical_result); + match make_canonical_result { + Ok(outcome) => { + if let CanonicalOutcome::Committed { head } = &outcome { + self.listeners.notify(BeaconConsensusEngineEvent::CanonicalChainCommitted( + Box::new(head.clone()), + elapsed, + )); + } - // we're no longer syncing - self.sync_state_updater.update_sync_state(SyncState::Idle); + let new_head = outcome.into_header(); + debug!(target: "consensus::engine", hash=?new_head.hash(), number=new_head.number, "Canonicalized new head"); - // clear any active block requests - self.sync.clear_block_download_requests(); - Ok(()) + // we can update the FCU blocks + if let Err(err) = self.update_canon_chain(new_head, &target) { + debug!(target: "consensus::engine", ?err, ?target, "Failed to update the canonical chain tracker"); } - Err(err) => { - // if we failed to make the FCU's head canonical, because we don't 
have that - // block yet, then we can try to make the inserted block canonical if we know - // it's part of the canonical chain: if it's the safe or the finalized block - if err.is_block_hash_not_found() { - // if the inserted block is the currently targeted `finalized` or `safe` - // block, we will attempt to make them canonical, - // because they are also part of the canonical chain and - // their missing block range might already be downloaded (buffered). - if let Some(target_hash) = ForkchoiceStateHash::find(&target, inserted.hash) - .filter(|h| !h.is_head()) - { - // TODO: do not ignore this - let _ = self.blockchain.make_canonical(*target_hash.as_ref()); - } - } - Err((target.head_block_hash, err)) + // we're no longer syncing + self.sync_state_updater.update_sync_state(SyncState::Idle); + + // clear any active block requests + self.sync.clear_block_download_requests(); + Ok(()) + } + Err(err) => { + // if we failed to make the FCU's head canonical, because we don't have that + // block yet, then we can try to make the inserted block canonical if we know + // it's part of the canonical chain: if it's the safe or the finalized block + if err.is_block_hash_not_found() { + // if the inserted block is the currently targeted `finalized` or `safe` + // block, we will attempt to make them canonical, + // because they are also part of the canonical chain and + // their missing block range might already be downloaded (buffered). 
+ if let Some(target_hash) = + ForkchoiceStateHash::find(&target, inserted.hash).filter(|h| !h.is_head()) + { + // TODO: do not ignore this + let _ = self.blockchain.make_canonical(*target_hash.as_ref()); + } } + + Err((target.head_block_hash, err)) } - } else { - Ok(()) } } @@ -1564,168 +1554,144 @@ where fn on_sync_event( &mut self, event: EngineSyncEvent, - ) -> Option> { - match event { + ) -> Result { + let outcome = match event { EngineSyncEvent::FetchedFullBlock(block) => { self.on_downloaded_block(block); + SyncEventOutcome::Processed } EngineSyncEvent::PipelineStarted(target) => { trace!(target: "consensus::engine", ?target, continuous = target.is_none(), "Started the pipeline"); self.metrics.pipeline_runs.increment(1); self.sync_state_updater.update_sync_state(SyncState::Syncing); + SyncEventOutcome::Processed + } + EngineSyncEvent::PipelineFinished { result, reached_max_block } => { + trace!(target: "consensus::engine", ?result, ?reached_max_block, "Pipeline finished"); + // Any pipeline error at this point is fatal. + let ctrl = result?; + if reached_max_block { + // Terminate the sync early if it's reached the maximum user-configured block. + SyncEventOutcome::ReachedMaxBlock + } else { + self.on_pipeline_outcome(ctrl)?; + SyncEventOutcome::Processed + } } EngineSyncEvent::PipelineTaskDropped => { error!(target: "consensus::engine", "Failed to receive spawned pipeline"); - return Some(Err(BeaconConsensusEngineError::PipelineChannelClosed)) - } - EngineSyncEvent::PipelineFinished { result, reached_max_block } => { - return self.on_pipeline_finished(result, reached_max_block) + return Err(BeaconConsensusEngineError::PipelineChannelClosed) } }; - None + Ok(outcome) } - /// Invoked when the pipeline has finished. + /// Invoked when the pipeline has successfully finished. /// - /// Returns an Option to indicate whether the engine future should resolve: - /// - /// Returns a result if: - /// - Ok(()) if the pipeline finished successfully - /// - Err(..) 
if the pipeline failed fatally - /// - /// Returns None if the pipeline finished successfully and engine should continue. - fn on_pipeline_finished( - &mut self, - result: Result, - reached_max_block: bool, - ) -> Option> { - trace!(target: "consensus::engine", ?result, ?reached_max_block, "Pipeline finished"); - match result { - Ok(ctrl) => { - if reached_max_block { - // Terminate the sync early if it's reached the maximum user - // configured block. - return Some(Ok(())) - } + /// Updates the internal sync state depending on the pipeline configuration, + /// the outcome of the pipeline run and the last observed forkchoice state. + fn on_pipeline_outcome(&mut self, ctrl: ControlFlow) -> RethResult<()> { + // Pipeline unwound, memorize the invalid block and + // wait for CL for further sync instructions. + if let ControlFlow::Unwind { bad_block, .. } = ctrl { + warn!(target: "consensus::engine", invalid_hash=?bad_block.hash(), invalid_number=?bad_block.number, "Bad block detected in unwind"); + // update the `invalid_headers` cache with the new invalid header + self.invalid_headers.insert(*bad_block); + return Ok(()) + } - if let ControlFlow::Unwind { bad_block, .. } = ctrl { - warn!(target: "consensus::engine", invalid_hash=?bad_block.hash(), invalid_number=?bad_block.number, "Bad block detected in unwind"); + // update the canon chain if continuous is enabled + if self.sync.run_pipeline_continuously() { + let max_block = ctrl.block_number().unwrap_or_default(); + let max_header = self.blockchain.sealed_header(max_block) + .inspect_err(|error| { + error!(target: "consensus::engine", %error, "Error getting canonical header for continuous sync"); + })? 
+ .ok_or_else(|| ProviderError::HeaderNotFound(max_block.into()))?; + self.blockchain.set_canonical_head(max_header); + } - // update the `invalid_headers` cache with the new invalid headers - self.invalid_headers.insert(*bad_block); - return None - } + let sync_target_state = match self.forkchoice_state_tracker.sync_target_state() { + Some(current_state) => current_state, + None => { + // This is only possible if the node was run with `debug.tip` + // argument and without CL. + warn!(target: "consensus::engine", "No fork choice state available"); + return Ok(()) + } + }; - // update the canon chain if continuous is enabled - if self.sync.run_pipeline_continuously() { - let max_block = ctrl.block_number().unwrap_or_default(); - let max_header = match self.blockchain.sealed_header(max_block) { - Ok(header) => match header { - Some(header) => header, - None => { - return Some(Err(RethError::Provider( - ProviderError::HeaderNotFound(max_block.into()), - ) - .into())) - } - }, - Err(error) => { - error!(target: "consensus::engine", %error, "Error getting canonical header for continuous sync"); - return Some(Err(RethError::Provider(error).into())) - } - }; - self.blockchain.set_canonical_head(max_header); - } + // Next, we check if we need to schedule another pipeline run or transition + // to live sync via tree. + // This can arise if we buffer the forkchoice head, and if the head is an + // ancestor of an invalid block. + // + // * The forkchoice head could be buffered if it were first sent as a `newPayload` request. + // + // In this case, we won't have the head hash in the database, so we would + // set the pipeline sync target to a known-invalid head. + // + // This is why we check the invalid header cache here. 
+ let lowest_buffered_ancestor = + self.lowest_buffered_ancestor_or(sync_target_state.head_block_hash); + + // this inserts the head into invalid headers cache + // if the lowest buffered ancestor is invalid + if self + .check_invalid_ancestor_with_head( + lowest_buffered_ancestor, + sync_target_state.head_block_hash, + ) + .is_some() + { + warn!( + target: "consensus::engine", + invalid_ancestor = %lowest_buffered_ancestor, + head = %sync_target_state.head_block_hash, + "Current head has an invalid ancestor" + ); + return Ok(()) + } - let sync_target_state = match self.forkchoice_state_tracker.sync_target_state() { - Some(current_state) => current_state, - None => { - // This is only possible if the node was run with `debug.tip` - // argument and without CL. - warn!(target: "consensus::engine", "No fork choice state available"); - return None - } - }; + // get the block number of the finalized block, if we have it + let newest_finalized = self + .blockchain + .buffered_header_by_hash(sync_target_state.finalized_block_hash) + .map(|header| header.number); - // Next, we check if we need to schedule another pipeline run or transition - // to live sync via tree. - // This can arise if we buffer the forkchoice head, and if the head is an - // ancestor of an invalid block. - // - // * The forkchoice head could be buffered if it were first sent as a `newPayload` - // request. - // - // In this case, we won't have the head hash in the database, so we would - // set the pipeline sync target to a known-invalid head. - // - // This is why we check the invalid header cache here. 
- let lowest_buffered_ancestor = - self.lowest_buffered_ancestor_or(sync_target_state.head_block_hash); - - // this inserts the head if the lowest buffered ancestor is invalid - if self - .check_invalid_ancestor_with_head( - lowest_buffered_ancestor, - sync_target_state.head_block_hash, - ) - .is_none() - { - // get the block number of the finalized block, if we have it - let newest_finalized = self - .forkchoice_state_tracker - .sync_target_state() - .map(|s| s.finalized_block_hash) - .and_then(|h| self.blockchain.buffered_header_by_hash(h)) - .map(|header| header.number); - - // The block number that the pipeline finished at - if the progress or newest - // finalized is None then we can't check the distance anyways. - // - // If both are Some, we perform another distance check and return the desired - // pipeline target - let pipeline_target = if let (Some(progress), Some(finalized_number)) = - (ctrl.block_number(), newest_finalized) - { - // Determines whether or not we should run the pipeline again, in case the - // new gap is large enough to warrant running the pipeline. - self.can_pipeline_sync_to_finalized(progress, finalized_number, None) - } else { - None - }; + // The block number that the pipeline finished at - if the progress or newest + // finalized is None then we can't check the distance anyways. + // + // If both are Some, we perform another distance check and return the desired + // pipeline target + let pipeline_target = + ctrl.block_number().zip(newest_finalized).and_then(|(progress, finalized_number)| { + // Determines whether or not we should run the pipeline again, in case + // the new gap is large enough to warrant + // running the pipeline. + self.can_pipeline_sync_to_finalized(progress, finalized_number, None) + }); - // If the distance is large enough, we should run the pipeline again to prevent - // the tree update from executing too many blocks and blocking. 
- if let Some(target) = pipeline_target { - // run the pipeline to the target since the distance is sufficient - self.sync.set_pipeline_sync_target(target); - } else { - // Update the state and hashes of the blockchain tree if possible. - match self.update_tree_on_finished_pipeline( - sync_target_state.finalized_block_hash, - ) { - Ok(synced) => { - if !synced { - // We don't have the finalized block in the database, so - // we need to run another pipeline. - self.sync.set_pipeline_sync_target( - sync_target_state.finalized_block_hash, - ); - } - } - Err(error) => { - error!(target: "consensus::engine", %error, "Error restoring blockchain tree state"); - return Some(Err(error.into())) - } - }; - } - } + // If the distance is large enough, we should run the pipeline again to prevent + // the tree update from executing too many blocks and blocking. + if let Some(target) = pipeline_target { + // run the pipeline to the target since the distance is sufficient + self.sync.set_pipeline_sync_target(target); + } else { + // Update the state and hashes of the blockchain tree if possible. + let synced = self.update_tree_on_finished_pipeline(sync_target_state.finalized_block_hash).inspect_err(|error| { + error!(target: "consensus::engine", %error, "Error restoring blockchain tree state"); + })?; + + if !synced { + // We don't have the finalized block in the database, so + // we need to run another pipeline. + self.sync.set_pipeline_sync_target(sync_target_state.finalized_block_hash); } - // Any pipeline error at this point is fatal. 
- Err(error) => return Some(Err(error.into())), - }; + } - None + Ok(()) } fn on_hook_result(&self, polled_hook: PolledHook) -> Result<(), BeaconConsensusEngineError> { @@ -1847,18 +1813,17 @@ where } // process sync events if any - match this.sync.poll(cx) { - Poll::Ready(sync_event) => { - if let Some(res) = this.on_sync_event(sync_event) { - return Poll::Ready(res) - } - // this could have taken a while, so we start the next cycle to handle any new - // engine messages - continue 'main - } - Poll::Pending => { - // no more sync events to process + if let Poll::Ready(sync_event) = this.sync.poll(cx) { + match this.on_sync_event(sync_event)? { + // Sync event was successfully processed + SyncEventOutcome::Processed => (), + // Max block has been reached, exit the engine loop + SyncEventOutcome::ReachedMaxBlock => return Poll::Ready(Ok(())), } + + // this could have taken a while, so we start the next cycle to handle any new + // engine messages + continue 'main } // at this point, all engine messages and sync events are fully drained @@ -1898,6 +1863,15 @@ enum OnForkchoiceUpdateOutcome { Fatal(BlockExecutionError), } +/// Represents outcomes of processing a sync event +#[derive(Debug)] +enum SyncEventOutcome { + /// Sync event was processed successfully, engine should continue. + Processed, + /// Sync event was processed successfully and reached max block. 
+ ReachedMaxBlock, +} + #[cfg(test)] mod tests { use super::*; @@ -1911,7 +1885,7 @@ mod tests { use reth_provider::{BlockWriter, ProviderFactory}; use reth_rpc_types::engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; use reth_rpc_types_compat::engine::payload::try_block_to_payload_v1; - use reth_stages::{ExecOutput, StageError}; + use reth_stages::{ExecOutput, PipelineError, StageError}; use std::{collections::VecDeque, sync::Arc}; use tokio::sync::oneshot::error::TryRecvError; From 9286cc6c8401fd28a5631aaa371287b60456e95c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 17 Apr 2024 16:34:14 +0200 Subject: [PATCH 199/700] chore: bump alloy (#7701) --- Cargo.lock | 36 ++++++++++++++-------------- Cargo.toml | 26 ++++++++++---------- crates/rpc/rpc/src/eth/revm_utils.rs | 2 +- 3 files changed, 32 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 62e25f7dace80..656071711557b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,7 +146,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" +source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -178,7 +178,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" +source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -196,7 +196,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" +source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" dependencies = [ "alloy-primitives", 
"alloy-serde", @@ -218,7 +218,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" +source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" dependencies = [ "alloy-primitives", "serde", @@ -230,7 +230,7 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" +source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -246,7 +246,7 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" +source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -288,7 +288,7 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" +source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -338,7 +338,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" +source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -358,7 +358,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" +source = 
"git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -380,7 +380,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" +source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" dependencies = [ "alloy-primitives", "alloy-serde", @@ -390,7 +390,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" +source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -408,7 +408,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" +source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -420,7 +420,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" +source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" dependencies = [ "alloy-primitives", "serde", @@ -430,7 +430,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" +source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" dependencies = [ "alloy-primitives", "async-trait", @@ -443,7 +443,7 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" 
-source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" +source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" dependencies = [ "alloy-consensus", "alloy-network", @@ -518,7 +518,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" +source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" dependencies = [ "alloy-json-rpc", "base64 0.22.0", @@ -536,7 +536,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=a32e6f7#a32e6f7f84cc1f2a1b244cc79a20057b0a3d4cba" +source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -7560,7 +7560,7 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=67f9968#67f9968fe56e5968ada322d084a98dd6a405ccdb" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=735f67c#735f67cd450fe952625eb777b86d0e48df3ef28c" dependencies = [ "alloy-primitives", "alloy-rpc-types", diff --git a/Cargo.toml b/Cargo.toml index 6ed1ab9adce9a..2c4e3f020ba02 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -267,7 +267,7 @@ revm = { version = "8.0.0", features = [ revm-primitives = { version = "3.1.0", features = [ "std", ], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "67f9968" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "735f67c" } # eth alloy-chains = "0.1.15" @@ -276,20 +276,20 @@ alloy-dyn-abi = "0.7.0" alloy-sol-types = "0.7.0" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = 
"https://github.com/alloy-rs/alloy", rev = "a32e6f7" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "a32e6f7" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } +alloy-consensus = { 
git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } # misc aquamarine = "0.5" diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index 374918d399da0..4b00d4662cb9f 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -217,7 +217,7 @@ pub(crate) fn create_txn_env( request: TransactionRequest, ) -> EthResult { // Ensure that if versioned hashes are set, they're not empty - if request.has_empty_blob_hashes() { + if request.blob_versioned_hashes.as_ref().map_or(false, |hashes| hashes.is_empty()) { return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into()) } From 3508e565186f95b46b53457b5b72aad5ad7f3db3 Mon Sep 17 00:00:00 2001 From: Delweng Date: Wed, 17 Apr 2024 22:38:50 +0800 Subject: [PATCH 200/700] chore: convert tab to space and trim trailing space (#7705) Signed-off-by: jsvisa --- crates/net/discv5/src/config.rs | 11 ++++++- crates/primitives/src/net.rs | 51 +++++++++++++++++---------------- 2 files changed, 36 insertions(+), 26 deletions(-) diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 8f517f422e06f..bcb2df7b24a73 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -271,7 +271,16 @@ mod test { use super::*; const MULTI_ADDRESSES: &str = "/ip4/184.72.129.189/udp/30301/p2p/16Uiu2HAmSG2hdLwyQHQmG4bcJBgD64xnW63WMTLcrNq6KoZREfGb,/ip4/3.231.11.52/udp/30301/p2p/16Uiu2HAmMy4V8bi3XP7KDfSLQcLACSvTLroRRwEsTyFUKo8NCkkp,/ip4/54.198.153.150/udp/30301/p2p/16Uiu2HAmSVsb7MbRf1jg3Dvd6a3n5YNqKQwn1fqHCFgnbqCsFZKe,/ip4/3.220.145.177/udp/30301/p2p/16Uiu2HAm74pBDGdQ84XCZK27GRQbGFFwQ7RsSqsPwcGmCR3Cwn3B,/ip4/3.231.138.188/udp/30301/p2p/16Uiu2HAmMnTiJwgFtSVGV14ZNpwAvS1LUoF4pWWeNtURuV6C3zYB"; - const BOOT_NODES_OP_MAINNET_AND_BASE_MAINNET: &[&str] = &["enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305", 
"enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305", "enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305", "enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301", "enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301", "enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301", "enode://8a5a5006159bf079d06a04e5eceab2a1ce6e0f721875b2a9c96905336219dbe14203d38f70f3754686a6324f786c2f9852d8c0dd3adac2d080f4db35efc678c5@3.231.11.52:30301", "enode://cdadbe835308ad3557f9a1de8db411da1a260a98f8421d62da90e71da66e55e98aaa8e90aa7ce01b408a54e4bd2253d701218081ded3dbe5efbbc7b41d7cef79@54.198.153.150:30301"]; + const BOOT_NODES_OP_MAINNET_AND_BASE_MAINNET: &[&str] = &[ + "enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305", + "enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305", + "enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305", + "enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301", + "enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301", + 
"enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301", + "enode://8a5a5006159bf079d06a04e5eceab2a1ce6e0f721875b2a9c96905336219dbe14203d38f70f3754686a6324f786c2f9852d8c0dd3adac2d080f4db35efc678c5@3.231.11.52:30301", + "enode://cdadbe835308ad3557f9a1de8db411da1a260a98f8421d62da90e71da66e55e98aaa8e90aa7ce01b408a54e4bd2253d701218081ded3dbe5efbbc7b41d7cef79@54.198.153.150:30301" + ]; #[test] fn parse_boot_nodes() { diff --git a/crates/primitives/src/net.rs b/crates/primitives/src/net.rs index 48307d1b766e5..c8ff2a3ccf4ae 100644 --- a/crates/primitives/src/net.rs +++ b/crates/primitives/src/net.rs @@ -1,6 +1,7 @@ pub use reth_rpc_types::{NodeRecord, NodeRecordParseError}; -// +// Ethereum bootnodes come from +// OP bootnodes come from /// Ethereum Foundation Go Bootnodes pub static MAINNET_BOOTNODES : [&str; 4] = [ @@ -10,16 +11,16 @@ pub static MAINNET_BOOTNODES : [&str; 4] = [ "enode://4aeb4ab6c14b23e2c4cfdce879c04b0748a20d8e9b59e25ded2a08143e265c6c25936e74cbc8e641e3312ca288673d91f2f93f8e277de3cfa444ecdaaf982052@157.90.35.166:30303", // bootnode-hetzner-fsn ]; -/// Ethereum Foundation SEPOLIA BOOTNODES +/// Ethereum Foundation Sepolia Bootnodes pub static SEPOLIA_BOOTNODES : [&str; 5] = [ - "enode://4e5e92199ee224a01932a377160aa432f31d0b351f84ab413a8e0a42f4f36476f8fb1cbe914af0d9aef0d51665c214cf653c651c4bbd9d5550a934f241f1682b@138.197.51.181:30303", // sepolia-bootnode-1-nyc3 - "enode://143e11fb766781d22d92a2e33f8f104cddae4411a122295ed1fdb6638de96a6ce65f5b7c964ba3763bba27961738fef7d3ecc739268f3e5e771fb4c87b6234ba@146.190.1.103:30303", // sepolia-bootnode-1-sfo3 - "enode://8b61dc2d06c3f96fddcbebb0efb29d60d3598650275dc469c22229d3e5620369b0d3dedafd929835fe7f489618f19f456fe7c0df572bf2d914a9f4e006f783a9@170.64.250.88:30303", // sepolia-bootnode-1-syd1 - 
"enode://10d62eff032205fcef19497f35ca8477bea0eadfff6d769a147e895d8b2b8f8ae6341630c645c30f5df6e67547c03494ced3d9c5764e8622a26587b083b028e8@139.59.49.206:30303", // sepolia-bootnode-1-blr1 - "enode://9e9492e2e8836114cc75f5b929784f4f46c324ad01daf87d956f98b3b6c5fcba95524d6e5cf9861dc96a2c8a171ea7105bb554a197455058de185fa870970c7c@138.68.123.152:30303", // sepolia-bootnode-1-ams3 + "enode://4e5e92199ee224a01932a377160aa432f31d0b351f84ab413a8e0a42f4f36476f8fb1cbe914af0d9aef0d51665c214cf653c651c4bbd9d5550a934f241f1682b@138.197.51.181:30303", // sepolia-bootnode-1-nyc3 + "enode://143e11fb766781d22d92a2e33f8f104cddae4411a122295ed1fdb6638de96a6ce65f5b7c964ba3763bba27961738fef7d3ecc739268f3e5e771fb4c87b6234ba@146.190.1.103:30303", // sepolia-bootnode-1-sfo3 + "enode://8b61dc2d06c3f96fddcbebb0efb29d60d3598650275dc469c22229d3e5620369b0d3dedafd929835fe7f489618f19f456fe7c0df572bf2d914a9f4e006f783a9@170.64.250.88:30303", // sepolia-bootnode-1-syd1 + "enode://10d62eff032205fcef19497f35ca8477bea0eadfff6d769a147e895d8b2b8f8ae6341630c645c30f5df6e67547c03494ced3d9c5764e8622a26587b083b028e8@139.59.49.206:30303", // sepolia-bootnode-1-blr1 + "enode://9e9492e2e8836114cc75f5b929784f4f46c324ad01daf87d956f98b3b6c5fcba95524d6e5cf9861dc96a2c8a171ea7105bb554a197455058de185fa870970c7c@138.68.123.152:30303", // sepolia-bootnode-1-ams3 ]; -/// GOERLI bootnodes +/// Görli Bootnodes pub static GOERLI_BOOTNODES : [&str; 7] = [ // Upstream bootnodes "enode://011f758e6552d105183b1761c5e2dea0111bc20fd5f6422bc7f91e0fabbec9a6595caf6239b37feb773dddd3f87240d99d859431891e4a642cf2a0a9e6cbb98a@51.141.78.53:30303", @@ -35,43 +36,43 @@ pub static GOERLI_BOOTNODES : [&str; 7] = [ "enode://d2b720352e8216c9efc470091aa91ddafc53e222b32780f505c817ceef69e01d5b0b0797b69db254c586f493872352f5a022b4d8479a00fc92ec55f9ad46a27e@88.99.70.182:30303", ]; -/// Ethereum Foundation Holesky BOOTNODES +/// Ethereum Foundation Holesky Bootnodes pub static HOLESKY_BOOTNODES : [&str; 2] = [ 
"enode://ac906289e4b7f12df423d654c5a962b6ebe5b3a74cc9e06292a85221f9a64a6f1cfdd6b714ed6dacef51578f92b34c60ee91e9ede9c7f8fadc4d347326d95e2b@146.190.13.128:30303", "enode://a3435a0155a3e837c02f5e7f5662a2f1fbc25b48e4dc232016e1c51b544cb5b4510ef633ea3278c0e970fa8ad8141e2d4d0f9f95456c537ff05fdf9b31c15072@178.128.136.233:30303", ]; #[cfg(feature = "optimism")] -/// OP Mainnet BOOTNODES +/// OP Mainnet Bootnodes pub static OP_BOOTNODES: [&str; 3] = [ - "enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305", - "enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305", - "enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305", + "enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305", + "enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305", + "enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305", ]; #[cfg(feature = "optimism")] -/// OP TESTNET BOOTNODES +/// OP Testnet Bootnodes pub static OP_TESTNET_BOOTNODES: [&str; 3] = [ "enode://2bd2e657bb3c8efffb8ff6db9071d9eb7be70d7c6d7d980ff80fc93b2629675c5f750bc0a5ef27cd788c2e491b8795a7e9a4a6e72178c14acc6753c0e5d77ae4@34.65.205.244:30305", - "enode://db8e1cab24624cc62fc35dbb9e481b88a9ef0116114cd6e41034c55b5b4f18755983819252333509bd8e25f6b12aadd6465710cd2e956558faf17672cce7551f@34.65.173.88:30305", - 
"enode://bfda2e0110cfd0f4c9f7aa5bf5ec66e6bd18f71a2db028d36b8bf8b0d6fdb03125c1606a6017b31311d96a36f5ef7e1ad11604d7a166745e6075a715dfa67f8a@34.65.229.245:30305", + "enode://db8e1cab24624cc62fc35dbb9e481b88a9ef0116114cd6e41034c55b5b4f18755983819252333509bd8e25f6b12aadd6465710cd2e956558faf17672cce7551f@34.65.173.88:30305", + "enode://bfda2e0110cfd0f4c9f7aa5bf5ec66e6bd18f71a2db028d36b8bf8b0d6fdb03125c1606a6017b31311d96a36f5ef7e1ad11604d7a166745e6075a715dfa67f8a@34.65.229.245:30305", ]; #[cfg(feature = "optimism")] -/// Base BOOTNODES +/// Base Mainnet Bootnodes pub static BASE_BOOTNODES: [&str; 5] = [ - "enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301", - "enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301", - "enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301", - "enode://8a5a5006159bf079d06a04e5eceab2a1ce6e0f721875b2a9c96905336219dbe14203d38f70f3754686a6324f786c2f9852d8c0dd3adac2d080f4db35efc678c5@3.231.11.52:30301", + "enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301", + "enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301", + "enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301", + "enode://8a5a5006159bf079d06a04e5eceab2a1ce6e0f721875b2a9c96905336219dbe14203d38f70f3754686a6324f786c2f9852d8c0dd3adac2d080f4db35efc678c5@3.231.11.52:30301", 
"enode://cdadbe835308ad3557f9a1de8db411da1a260a98f8421d62da90e71da66e55e98aaa8e90aa7ce01b408a54e4bd2253d701218081ded3dbe5efbbc7b41d7cef79@54.198.153.150:30301" ]; #[cfg(feature = "optimism")] -/// Base Testnet BOOTNODES +/// Base Testnet Bootnodes pub static BASE_TESTNET_BOOTNODES: [&str; 2] = [ "enode://548f715f3fc388a7c917ba644a2f16270f1ede48a5d88a4d14ea287cc916068363f3092e39936f1a3e7885198bef0e5af951f1d7b1041ce8ba4010917777e71f@18.210.176.114:30301", - "enode://6f10052847a966a725c9f4adf6716f9141155b99a0fb487fea3f51498f4c2a2cb8d534e680ee678f9447db85b93ff7c74562762c3714783a7233ac448603b25f@107.21.251.55:30301", + "enode://6f10052847a966a725c9f4adf6716f9141155b99a0fb487fea3f51498f4c2a2cb8d534e680ee678f9447db85b93ff7c74562762c3714783a7233ac448603b25f@107.21.251.55:30301", ]; /// Returns parsed mainnet nodes @@ -107,13 +108,13 @@ pub fn op_testnet_nodes() -> Vec { } #[cfg(feature = "optimism")] -/// Returns parsed op-stack mainnet nodes +/// Returns parsed op-stack base mainnet nodes pub fn base_nodes() -> Vec { parse_nodes(&BASE_BOOTNODES[..]) } #[cfg(feature = "optimism")] -/// Returns parsed op-stack testnet nodes +/// Returns parsed op-stack base testnet nodes pub fn base_testnet_nodes() -> Vec { parse_nodes(&BASE_TESTNET_BOOTNODES[..]) } From 0a62b2735d2cbe16cf4dabfb1be39ed53ffd0b6f Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 17 Apr 2024 16:40:58 +0200 Subject: [PATCH 201/700] chore(engine): inline `update_tree_on_finished_pipeline` method (#7703) --- crates/consensus/beacon/src/engine/mod.rs | 41 ++++++----------------- 1 file changed, 11 insertions(+), 30 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 91197205a43fc..5f18314d8a76c 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1335,25 +1335,6 @@ where } } - /// Attempt to restore the tree with the given block hash. 
- /// - /// This is invoked after a full pipeline to update the tree with the most recent canonical - /// hashes. - /// - /// If the given block is missing from the database, this will return `false`. Otherwise, `true` - /// is returned: the database contains the hash and the tree was updated. - fn update_tree_on_finished_pipeline(&mut self, block_hash: B256) -> RethResult { - let synced_to_finalized = match self.blockchain.block_number(block_hash)? { - Some(number) => { - // Attempt to restore the tree. - self.blockchain.connect_buffered_blocks_to_canonical_hashes_and_finalize(number)?; - true - } - None => false, - }; - Ok(synced_to_finalized) - } - /// Invoked if we successfully downloaded a new block from the network. /// /// This will attempt to insert the block into the tree. @@ -1592,8 +1573,7 @@ where /// Updates the internal sync state depending on the pipeline configuration, /// the outcome of the pipeline run and the last observed forkchoice state. fn on_pipeline_outcome(&mut self, ctrl: ControlFlow) -> RethResult<()> { - // Pipeline unwound, memorize the invalid block and - // wait for CL for further sync instructions. + // Pipeline unwound, memorize the invalid block and wait for CL for next sync target. if let ControlFlow::Unwind { bad_block, .. } = ctrl { warn!(target: "consensus::engine", invalid_hash=?bad_block.hash(), invalid_number=?bad_block.number, "Bad block detected in unwind"); // update the `invalid_headers` cache with the new invalid header @@ -1678,17 +1658,18 @@ where if let Some(target) = pipeline_target { // run the pipeline to the target since the distance is sufficient self.sync.set_pipeline_sync_target(target); - } else { - // Update the state and hashes of the blockchain tree if possible. - let synced = self.update_tree_on_finished_pipeline(sync_target_state.finalized_block_hash).inspect_err(|error| { + } else if let Some(number) = + self.blockchain.block_number(sync_target_state.finalized_block_hash)? 
+ { + // Finalized block is in the database, attempt to restore the tree with + // the most recent canonical hashes. + self.blockchain.connect_buffered_blocks_to_canonical_hashes_and_finalize(number).inspect_err(|error| { error!(target: "consensus::engine", %error, "Error restoring blockchain tree state"); })?; - - if !synced { - // We don't have the finalized block in the database, so - // we need to run another pipeline. - self.sync.set_pipeline_sync_target(sync_target_state.finalized_block_hash); - } + } else { + // We don't have the finalized block in the database, so we need to + // trigger another pipeline run. + self.sync.set_pipeline_sync_target(sync_target_state.finalized_block_hash); } Ok(()) From 4f8d90b104f3eb478ca05722463b7182fe5cdf5b Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 17 Apr 2024 16:41:10 +0200 Subject: [PATCH 202/700] chore(engine): refactor sync target determination on failed fcu (#7704) --- crates/consensus/beacon/src/engine/mod.rs | 40 ++++++++++------------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 5f18314d8a76c..e5e8c7c162075 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -988,42 +988,38 @@ where } } - // we assume the FCU is valid and at least the head is missing, so we need to start syncing - // to it - let target = if self.forkchoice_state_tracker.is_empty() { - // find the appropriate target to sync to, if we don't have the safe block hash then we - // start syncing to the safe block via pipeline first - let target = if !state.safe_block_hash.is_zero() && - self.blockchain.block_number(state.safe_block_hash).ok().flatten().is_none() - { - state.safe_block_hash - } else { - state.head_block_hash - }; - - // we need to first check the buffer for the head and its ancestors - let lowest_unknown_hash = self.lowest_buffered_ancestor_or(target); - 
trace!(target: "consensus::engine", request=?lowest_unknown_hash, "Triggering full block download for missing ancestors of the new head"); - lowest_unknown_hash + // we assume the FCU is valid and at least the head is missing, + // so we need to start syncing to it + // + // find the appropriate target to sync to, if we don't have the safe block hash then we + // start syncing to the safe block via pipeline first + let target = if self.forkchoice_state_tracker.is_empty() && + // check that safe block is valid and missing + !state.safe_block_hash.is_zero() && + self.blockchain.block_number(state.safe_block_hash).ok().flatten().is_none() + { + state.safe_block_hash } else { - // we need to first check the buffer for the head and its ancestors - let lowest_unknown_hash = self.lowest_buffered_ancestor_or(state.head_block_hash); - trace!(target: "consensus::engine", request=?lowest_unknown_hash, "Triggering full block download for missing ancestors of the new head"); - lowest_unknown_hash + state.head_block_hash }; + // we need to first check the buffer for the target and its ancestors + let target = self.lowest_buffered_ancestor_or(target); + // if the threshold is zero, we should not download the block first, and just use the // pipeline. 
Otherwise we use the tree to insert the block first if self.pipeline_run_threshold == 0 { // use the pipeline to sync to the target + trace!(target: "consensus::engine", %target, "Triggering pipeline run to sync missing ancestors of the new head"); self.sync.set_pipeline_sync_target(target); } else { // trigger a full block download for missing hash, or the parent of its lowest buffered // ancestor + trace!(target: "consensus::engine", request=%target, "Triggering full block download for missing ancestors of the new head"); self.sync.download_full_block(target); } - debug!(target: "consensus::engine", ?target, "Syncing to new target"); + debug!(target: "consensus::engine", %target, "Syncing to new target"); PayloadStatus::from_status(PayloadStatusEnum::Syncing) } From db4a4a34e41203920797bbb0ebcea6b5fc6cbb3f Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 17 Apr 2024 15:46:36 +0100 Subject: [PATCH 203/700] fix: add discv5 config to p2p cmd (#7662) --- Cargo.lock | 1 + bin/reth/Cargo.toml | 3 ++ bin/reth/src/commands/p2p/mod.rs | 38 ++++++++++++++++------- crates/node-core/src/args/network_args.rs | 3 +- 4 files changed, 33 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 656071711557b..83455ba8b5e5b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6073,6 +6073,7 @@ dependencies = [ "comfy-table", "confy", "crossterm", + "discv5", "eyre", "fdlimit", "futures", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 0ee2ecf0dadca..8d1b0538b7616 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -94,6 +94,9 @@ rayon.workspace = true boyer-moore-magiclen = "0.2.16" ahash = "0.8" +# p2p +discv5.workspace = true + [target.'cfg(unix)'.dependencies] tikv-jemallocator = { version = "0.5.0", optional = true } libc = "0.2" diff --git a/bin/reth/src/commands/p2p/mod.rs b/bin/reth/src/commands/p2p/mod.rs index b70c848459535..b67881e64ec65 100644 --- a/bin/reth/src/commands/p2p/mod.rs +++ 
b/bin/reth/src/commands/p2p/mod.rs @@ -11,13 +11,14 @@ use crate::{ }; use backon::{ConstantBuilder, Retryable}; use clap::{Parser, Subcommand}; +use discv5::ListenConfig; use reth_config::Config; use reth_db::create_db; use reth_discv4::NatResolver; use reth_interfaces::p2p::bodies::client::BodiesClient; use reth_primitives::{BlockHashOrNumber, ChainSpec, NodeRecord}; use reth_provider::ProviderFactory; -use std::{path::PathBuf, sync::Arc}; +use std::{net::SocketAddr, path::PathBuf, sync::Arc}; /// `reth p2p` command #[derive(Debug, Parser)] @@ -122,20 +123,35 @@ impl Command { let secret_key_path = self.p2p_secret_key.clone().unwrap_or(default_secret_key_path); let p2p_secret_key = get_secret_key(&secret_key_path)?; - let mut network_config_builder = - config.network_config(self.nat, None, p2p_secret_key).chain_spec(self.chain.clone()); + let mut network_config_builder = config + .network_config(self.nat, None, p2p_secret_key) + .chain_spec(self.chain.clone()) + .boot_nodes(self.chain.bootnodes().unwrap_or_default()); network_config_builder = self.discovery.apply_to_builder(network_config_builder); - let network = network_config_builder - .build(Arc::new(ProviderFactory::new( - noop_db, - self.chain.clone(), - data_dir.static_files_path(), - )?)) - .start_network() - .await?; + let mut network_config = network_config_builder.build(Arc::new(ProviderFactory::new( + noop_db, + self.chain.clone(), + data_dir.static_files_path(), + )?)); + + if self.discovery.enable_discv5_discovery { + network_config = network_config.discovery_v5_with_config_builder(|builder| { + let DiscoveryArgs { discv5_addr, discv5_port, .. 
} = self.discovery; + builder + .discv5_config( + discv5::ConfigBuilder::new(ListenConfig::from(Into::::into(( + discv5_addr, + discv5_port, + )))) + .build(), + ) + .build() + }); + } + let network = network_config.start_network().await?; let fetch_client = network.fetch_client().await?; let retries = self.retries.max(1); let backoff = ConstantBuilder::default().with_max_times(retries); diff --git a/crates/node-core/src/args/network_args.rs b/crates/node-core/src/args/network_args.rs index 88f448ee99286..59dc6ceba6232 100644 --- a/crates/node-core/src/args/network_args.rs +++ b/crates/node-core/src/args/network_args.rs @@ -252,6 +252,7 @@ impl DiscoveryArgs { } if !self.disable_discovery && (self.enable_discv5_discovery || cfg!(feature = "optimism")) { + network_config_builder = network_config_builder.disable_discv4_discovery(); network_config_builder = network_config_builder.enable_discv5_discovery(); } @@ -271,7 +272,7 @@ impl Default for DiscoveryArgs { Self { disable_discovery: false, disable_dns_discovery: false, - disable_discv4_discovery: false, + disable_discv4_discovery: cfg!(feature = "optimism"), enable_discv5_discovery: cfg!(feature = "optimism"), addr: DEFAULT_DISCOVERY_ADDR, port: DEFAULT_DISCOVERY_PORT, From 21ab76b89cba7fd6c37f91699c092cb8f7c25579 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 17 Apr 2024 11:46:03 -0400 Subject: [PATCH 204/700] fix: disable read timeout for tx in db list (#7689) Co-authored-by: Roman Krasiuk --- bin/reth/src/commands/db/list.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bin/reth/src/commands/db/list.rs b/bin/reth/src/commands/db/list.rs index df05638bc9e95..1c1839188d247 100644 --- a/bin/reth/src/commands/db/list.rs +++ b/bin/reth/src/commands/db/list.rs @@ -90,6 +90,8 @@ impl TableViewer<()> for ListTableViewer<'_> { fn view(&self) -> Result<(), Self::Error> { self.tool.provider_factory.db_ref().view(|tx| { + // Disable timeout because we are entering a TUI 
which might read for a long time + tx.inner.disable_timeout(); let table_db = tx.inner.open_db(Some(self.args.table.name())).wrap_err("Could not open db.")?; let stats = tx.inner.db_stat(&table_db).wrap_err(format!("Could not find table: {}", stringify!($table)))?; let total_entries = stats.entries(); From 992e08cc7f82e7430d57588bea3bf65207ba55ac Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 17 Apr 2024 18:21:39 +0200 Subject: [PATCH 205/700] fix(docker): obsolete syntax compose file (#7697) --- etc/docker-compose.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/etc/docker-compose.yml b/etc/docker-compose.yml index 89a06c8b859ab..8e69dc34ec33c 100644 --- a/etc/docker-compose.yml +++ b/etc/docker-compose.yml @@ -1,4 +1,3 @@ -version: '3.9' name: 'reth' services: From 69370e989e226ad61d2b3b293f5da9fc182faef2 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 17 Apr 2024 18:22:02 +0200 Subject: [PATCH 206/700] fix(discv5): fix panel filtered peers (#7696) --- etc/grafana/dashboards/reth-discovery.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/etc/grafana/dashboards/reth-discovery.json b/etc/grafana/dashboards/reth-discovery.json index 62ee81480772d..53d71cd766439 100644 --- a/etc/grafana/dashboards/reth-discovery.json +++ b/etc/grafana/dashboards/reth-discovery.json @@ -766,7 +766,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_discv5_total_established_sessions_custom_filtered{instance=\"$instance\"}[$__rate_interval])", + "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval]) - rate(reth_discv5_total_established_sessions_custom_filtered{instance=\"$instance\"}[$__rate_interval])", "fullMetaSearch": false, "hide": false, "includeNullMetadata": false, From 9557ce05ad7c17195fd52e0bedf3430af53fca28 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 17 Apr 2024 19:52:15 +0200 Subject: [PATCH 207/700] perf(discv5): boost bootstrap lookups (#7695) --- 
crates/net/discv5/src/config.rs  |  2 +-
 crates/net/discv5/src/lib.rs     | 99 ++++++++++++++++++++++----------
 crates/net/discv5/src/metrics.rs | 14 ++---
 3 files changed, 76 insertions(+), 39 deletions(-)

diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs
index bcb2df7b24a73..809f0fa325075 100644
--- a/crates/net/discv5/src/config.rs
+++ b/crates/net/discv5/src/config.rs
@@ -19,7 +19,7 @@ pub const ETH2: &[u8] = b"eth2";
 /// Optimism
 pub const OPSTACK: &[u8] = b"opstack";
 
-/// Default interval in seconds at which to run a self-lookup up query.
+/// Default interval in seconds at which to run a lookup query.
 ///
 /// Default is 60 seconds.
 const DEFAULT_SECONDS_LOOKUP_INTERVAL: u64 = 60;
diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs
index 7618c511a3dc5..07e6291a79a7c 100644
--- a/crates/net/discv5/src/lib.rs
+++ b/crates/net/discv5/src/lib.rs
@@ -39,7 +39,17 @@ pub use config::{BootNode, Config, ConfigBuilder};
 pub use enr::enr_to_discv4_id;
 pub use error::Error;
 pub use filter::{FilterOutcome, MustNotIncludeKeys};
-use metrics::Discv5Metrics;
+use metrics::{DiscoveredPeersMetrics, Discv5Metrics};
+
+/// Default number of times to do pulse lookup queries, at bootstrap (5 second intervals).
+///
+/// Default is 100 lookups (i.e. 500 seconds).
+pub const DEFAULT_COUNT_PULSE_LOOKUPS_AT_BOOTSTRAP: u64 = 100;
+
+/// Default duration of the lookup interval, for pulse lookups at bootstrap.
+///
+/// Default is 5 seconds.
+pub const DEFAULT_SECONDS_PULSE_LOOKUP_INTERVAL: u64 = 5;
 
 /// The max log2 distance, is equivalent to the index of the last bit in a discv5 node id.
const MAX_LOG2_DISTANCE: usize = 255; @@ -295,31 +305,46 @@ impl Discv5 { metrics: Discv5Metrics, discv5: Arc, ) { - // initiate regular lookups to populate kbuckets task::spawn({ let local_node_id = discv5.local_enr().node_id(); let lookup_interval = Duration::from_secs(lookup_interval); - let mut metrics = metrics.discovered_peers; + let metrics = metrics.discovered_peers; let mut log2_distance = 0usize; + let pulse_lookup_interval = Duration::from_secs(DEFAULT_SECONDS_PULSE_LOOKUP_INTERVAL); // todo: graceful shutdown async move { - loop { - metrics.set_total_sessions(discv5.metrics().active_sessions); - metrics.set_total_kbucket_peers( - discv5.with_kbuckets(|kbuckets| kbuckets.read().iter_ref().count()), + // make many fast lookup queries at bootstrap, trying to fill kbuckets at furthest + // log2distance from local node + for i in (0..DEFAULT_COUNT_PULSE_LOOKUPS_AT_BOOTSTRAP).rev() { + let target = discv5::enr::NodeId::random(); + + trace!(target: "net::discv5", + %target, + bootstrap_boost_runs_count_down=i, + lookup_interval=format!("{:#?}", pulse_lookup_interval), + "starting bootstrap boost lookup query" ); + lookup(target, &discv5, &metrics).await; + + tokio::time::sleep(pulse_lookup_interval).await; + } + + // initiate regular lookups to populate kbuckets + loop { // make sure node is connected to each subtree in the network by target // selection (ref kademlia) let target = get_lookup_target(log2_distance, local_node_id); trace!(target: "net::discv5", - target=format!("{:#?}", target), + %target, lookup_interval=format!("{:#?}", lookup_interval), "starting periodic lookup query" ); + lookup(target, &discv5, &metrics).await; + if log2_distance < MAX_LOG2_DISTANCE { // try to populate bucket one step further away log2_distance += 1 @@ -327,30 +352,7 @@ impl Discv5 { // start over with self lookup log2_distance = 0 } - match discv5.find_node(target).await { - Err(err) => trace!(target: "net::discv5", - lookup_interval=format!("{:#?}", lookup_interval), - 
%err, - "periodic lookup query failed" - ), - Ok(peers) => trace!(target: "net::discv5", - target=format!("{:#?}", target), - lookup_interval=format!("{:#?}", lookup_interval), - peers_count=peers.len(), - peers=format!("[{:#}]", peers.iter() - .map(|enr| enr.node_id() - ).format(", ")), - "peers returned by periodic lookup query" - ), - } - // `Discv5::connected_peers` can be subset of sessions, not all peers make it - // into kbuckets, e.g. incoming sessions from peers with - // unreachable enrs - debug!(target: "net::discv5", - connected_peers=discv5.connected_peers(), - "connected peers in routing table" - ); tokio::time::sleep(lookup_interval).await; } } @@ -545,6 +547,41 @@ pub fn get_lookup_target( target.into() } +/// Runs a [`discv5::Discv5`] lookup query. +pub async fn lookup( + target: discv5::enr::NodeId, + discv5: &discv5::Discv5, + metrics: &DiscoveredPeersMetrics, +) { + metrics.set_total_sessions(discv5.metrics().active_sessions); + metrics.set_total_kbucket_peers( + discv5.with_kbuckets(|kbuckets| kbuckets.read().iter_ref().count()), + ); + + match discv5.find_node(target).await { + Err(err) => trace!(target: "net::discv5", + %err, + "lookup query failed" + ), + Ok(peers) => trace!(target: "net::discv5", + target=format!("{:#?}", target), + peers_count=peers.len(), + peers=format!("[{:#}]", peers.iter() + .map(|enr| enr.node_id() + ).format(", ")), + "peers returned by lookup query" + ), + } + + // `Discv5::connected_peers` can be subset of sessions, not all peers make it + // into kbuckets, e.g. 
incoming sessions from peers with + // unreachable enrs + debug!(target: "net::discv5", + connected_peers=discv5.connected_peers(), + "connected peers in routing table" + ); +} + #[cfg(test)] mod tests { use ::enr::{CombinedKey, EnrKey}; diff --git a/crates/net/discv5/src/metrics.rs b/crates/net/discv5/src/metrics.rs index e38fa0fae17f8..72ea5fc0e0cb5 100644 --- a/crates/net/discv5/src/metrics.rs +++ b/crates/net/discv5/src/metrics.rs @@ -52,34 +52,34 @@ pub struct DiscoveredPeersMetrics { impl DiscoveredPeersMetrics { /// Sets current total number of peers in [`discv5::Discv5`]'s kbuckets. - pub fn set_total_kbucket_peers(&mut self, num: usize) { + pub fn set_total_kbucket_peers(&self, num: usize) { self.total_kbucket_peers_raw.set(num as f64) } /// Increments the number of kbucket insertions in [`discv5::Discv5`]. - pub fn increment_kbucket_insertions(&mut self, num: u64) { + pub fn increment_kbucket_insertions(&self, num: u64) { self.total_inserted_kbucket_peers_raw.increment(num) } /// Sets current total number of peers connected to [`discv5::Discv5`]. - pub fn set_total_sessions(&mut self, num: usize) { + pub fn set_total_sessions(&self, num: usize) { self.total_sessions_raw.set(num as f64) } /// Increments number of sessions established by [`discv5::Discv5`]. - pub fn increment_established_sessions_raw(&mut self, num: u64) { + pub fn increment_established_sessions_raw(&self, num: u64) { self.total_established_sessions_raw.increment(num) } /// Increments number of sessions established by [`discv5::Discv5`], with peers that don't have /// a reachable node record. - pub fn increment_established_sessions_unreachable_enr(&mut self, num: u64) { + pub fn increment_established_sessions_unreachable_enr(&self, num: u64) { self.total_established_sessions_unreachable_enr.increment(num) } /// Increments number of sessions established by [`discv5::Discv5`], that pass configured /// [`filter`](crate::filter) rules. 
- pub fn increment_established_sessions_filtered(&mut self, num: u64) { + pub fn increment_established_sessions_filtered(&self, num: u64) { self.total_established_sessions_custom_filtered.increment(num) } } @@ -103,7 +103,7 @@ pub struct AdvertisedChainMetrics { impl AdvertisedChainMetrics { /// Counts each recognised network type that is advertised on node record, once. - pub fn increment_once_by_network_type(&mut self, enr: &discv5::Enr) { + pub fn increment_once_by_network_type(&self, enr: &discv5::Enr) { if enr.get_raw_rlp(OPSTACK).is_some() { self.opstack.increment(1u64) } From b846f47fdfa821ceb478af181f63b102633ae7c4 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 18 Apr 2024 08:26:19 +0200 Subject: [PATCH 208/700] feat: `--debug.skip-fcu` (#7709) --- crates/consensus/beacon/src/engine/message.rs | 2 +- crates/consensus/beacon/src/engine/mod.rs | 3 +- crates/node-builder/src/builder.rs | 12 +++++ crates/node-core/src/args/debug_args.rs | 4 ++ crates/node-core/src/engine_api_store.rs | 2 +- crates/node-core/src/engine_skip_fcu.rs | 51 +++++++++++++++++++ crates/node-core/src/lib.rs | 1 + 7 files changed, 71 insertions(+), 4 deletions(-) create mode 100644 crates/node-core/src/engine_skip_fcu.rs diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index 9b4324e5ac46d..464dcedb295b7 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -41,7 +41,7 @@ impl OnForkChoiceUpdated { } /// Creates a new instance of `OnForkChoiceUpdated` for the `SYNCING` state - pub(crate) fn syncing() -> Self { + pub fn syncing() -> Self { let status = PayloadStatus::from_status(PayloadStatusEnum::Syncing); Self { forkchoice_status: ForkchoiceStatus::from_payload_status(&status.status), diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index e5e8c7c162075..e0af48a348dd6 100644 --- 
a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1,7 +1,6 @@ use crate::{ engine::{ forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker}, - message::OnForkChoiceUpdated, metrics::EngineMetrics, }, hooks::{EngineHookContext, EngineHooksController}, @@ -51,7 +50,7 @@ use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::*; mod message; -pub use message::BeaconEngineMessage; +pub use message::{BeaconEngineMessage, OnForkChoiceUpdated}; mod error; pub use error::{ diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index 6e05e93d05f49..0589b9ce60c24 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -37,6 +37,7 @@ use reth_node_core::{ cli::config::{PayloadBuilderConfig, RethRpcConfig, RethTransactionPoolConfig}, dirs::{ChainPath, DataDirPath, MaybePlatformPath}, engine_api_store::EngineApiStore, + engine_skip_fcu::EngineApiSkipFcu, events::cl::ConsensusLayerHealthEvents, exit::NodeExitFuture, init::init_genesis, @@ -665,6 +666,17 @@ where let network_client = network.fetch_client().await?; let (consensus_engine_tx, mut consensus_engine_rx) = unbounded_channel(); + if let Some(skip_fcu_threshold) = config.debug.skip_fcu { + debug!(target: "reth::cli", "spawning skip FCU task"); + let (skip_fcu_tx, skip_fcu_rx) = unbounded_channel(); + let engine_skip_fcu = EngineApiSkipFcu::new(skip_fcu_threshold); + executor.spawn_critical( + "skip FCU interceptor", + engine_skip_fcu.intercept(consensus_engine_rx, skip_fcu_tx), + ); + consensus_engine_rx = skip_fcu_rx; + } + if let Some(store_path) = config.debug.engine_api_store.clone() { debug!(target: "reth::cli", "spawning engine API store"); let (engine_intercept_tx, engine_intercept_rx) = unbounded_channel(); diff --git a/crates/node-core/src/args/debug_args.rs b/crates/node-core/src/args/debug_args.rs index 6e93dd58ec8f4..916b4a1efa04d 100644 --- a/crates/node-core/src/args/debug_args.rs +++ 
b/crates/node-core/src/args/debug_args.rs
@@ -59,6 +59,10 @@ pub struct DebugArgs {
     )]
     pub hook_all: bool,
 
+    /// If provided, the engine will skip `n` consecutive FCUs.
+    #[arg(long = "debug.skip-fcu", help_heading = "Debug")]
+    pub skip_fcu: Option<usize>,
+
     /// The path to store engine API messages at.
     /// If specified, all of the intercepted engine API messages
     /// will be written to specified location.
diff --git a/crates/node-core/src/engine_api_store.rs b/crates/node-core/src/engine_api_store.rs
index 1ff5a0a305ed9..5552137f621a4 100644
--- a/crates/node-core/src/engine_api_store.rs
+++ b/crates/node-core/src/engine_api_store.rs
@@ -2,7 +2,7 @@
 
 use reth_beacon_consensus::BeaconEngineMessage;
 use reth_engine_primitives::EngineTypes;
-use reth_primitives::fs::{self};
+use reth_primitives::fs;
 use reth_rpc_types::{
     engine::{CancunPayloadFields, ForkchoiceState},
     ExecutionPayload,
diff --git a/crates/node-core/src/engine_skip_fcu.rs b/crates/node-core/src/engine_skip_fcu.rs
new file mode 100644
index 0000000000000..c6bbd791792dc
--- /dev/null
+++ b/crates/node-core/src/engine_skip_fcu.rs
@@ -0,0 +1,51 @@
+//! Intercepts engine API messages and skips a configured number of consecutive FCUs.
+
+use reth_beacon_consensus::{BeaconEngineMessage, OnForkChoiceUpdated};
+use reth_engine_primitives::EngineTypes;
+use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
+
+/// Intercept Engine API message and skip FCUs.
+#[derive(Debug)]
+pub struct EngineApiSkipFcu {
+    /// The number of FCUs to skip.
+    threshold: usize,
+    /// Current count of skipped FCUs.
+    skipped: usize,
+}
+
+impl EngineApiSkipFcu {
+    /// Creates new [EngineApiSkipFcu] interceptor.
+    pub fn new(threshold: usize) -> Self {
+        Self { threshold, skipped: 0 }
+    }
+
+    /// Intercepts an incoming engine API message, skips FCU or forwards it
+    /// to the engine depending on current number of skipped FCUs.
+ pub async fn intercept<Engine>( + mut self, + mut rx: UnboundedReceiver<BeaconEngineMessage<Engine>>, + to_engine: UnboundedSender<BeaconEngineMessage<Engine>>, + ) where + Engine: EngineTypes, + BeaconEngineMessage<Engine>: std::fmt::Debug, + { + while let Some(msg) = rx.recv().await { + if let BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } = msg { + if self.skipped < self.threshold { + self.skipped += 1; + tracing::warn!(target: "engine::intercept", ?state, ?payload_attrs, threshold=self.threshold, skipped=self.skipped, "Skipping FCU"); + let _ = tx.send(Ok(OnForkChoiceUpdated::syncing())); + } else { + self.skipped = 0; + let _ = to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + }); + } + } else { + let _ = to_engine.send(msg); + } + } + } +} diff --git a/crates/node-core/src/lib.rs b/crates/node-core/src/lib.rs index eae853d1b70b7..ba25a12eb4909 100644 --- a/crates/node-core/src/lib.rs +++ b/crates/node-core/src/lib.rs @@ -12,6 +12,7 @@ pub mod args; pub mod cli; pub mod dirs; pub mod engine_api_store; +pub mod engine_skip_fcu; pub mod events; pub mod exit; pub mod init; From 27ddfae9a4c15df974e8fef92b8a4b5f8c5505e0 Mon Sep 17 00:00:00 2001 From: qedk <1994constant@gmail.com> Date: Thu, 18 Apr 2024 12:44:07 +0400 Subject: [PATCH 209/700] ci: update runners for apple builds (#7408) --- .github/workflows/release.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e16e2c1c3e9fe..55ce0843fb828 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -38,10 +38,10 @@ jobs: os: ubuntu-20.04 profile: maxperf - target: x86_64-apple-darwin - os: macos-latest + os: macos-13 profile: maxperf - target: aarch64-apple-darwin - os: macos-latest + os: macos-14 profile: maxperf - target: x86_64-pc-windows-gnu os: ubuntu-20.04 From 0ff3b0e3afcd319057cf7df80c510bcc65766234 Mon Sep 17 00:00:00 2001 From: Zaki Saad Date: Thu, 18 Apr 2024 19:22:14 +1000 Subject: [PATCH 210/700]
Dencun readiness - pin Lighthouse image tag in base Docker Compose setup (#7066) Co-authored-by: Oliver Nordbjerg --- etc/lighthouse.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/etc/lighthouse.yml b/etc/lighthouse.yml index fc8f00f15d919..966e4454db5bc 100644 --- a/etc/lighthouse.yml +++ b/etc/lighthouse.yml @@ -4,7 +4,7 @@ name: reth services: lighthouse: restart: unless-stopped - image: sigp/lighthouse + image: sigp/lighthouse:v5.1.3-modern depends_on: - reth ports: From 3a3b9579619f7aefee6f939584760ef839e6813d Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Thu, 18 Apr 2024 12:55:54 +0200 Subject: [PATCH 211/700] fix: don't import ipc client on windows (#7712) --- crates/rpc/rpc-builder/src/auth.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index 3daa1dc2786f4..2349c6e8562cc 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -13,7 +13,6 @@ use jsonrpsee::{ server::{AlreadyStoppedError, RpcModule}, Methods, }; -use reth_ipc::client::IpcClientBuilder; pub use reth_ipc::server::{Builder as IpcServerBuilder, Endpoint}; use reth_engine_primitives::EngineTypes; @@ -445,6 +444,8 @@ impl AuthServerHandle { /// Returns an ipc client connected to the server. 
#[cfg(unix)] pub async fn ipc_client(&self) -> Option { + use reth_ipc::client::IpcClientBuilder; + if let Some(ipc_endpoint) = self.ipc_endpoint.clone() { return Some( IpcClientBuilder::default() From 979e5427b3b80af64234a48fdadc0f95815da98d Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Thu, 18 Apr 2024 12:57:03 +0200 Subject: [PATCH 212/700] nit: use ref slice for `random_account_change` (#7714) --- crates/interfaces/src/test_utils/generators.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/interfaces/src/test_utils/generators.rs b/crates/interfaces/src/test_utils/generators.rs index b3cd847b248af..e601d96291055 100644 --- a/crates/interfaces/src/test_utils/generators.rs +++ b/crates/interfaces/src/test_utils/generators.rs @@ -196,7 +196,6 @@ pub type ChangeSet = Vec<(Address, Account, Vec)>; type AccountState = (Account, Vec); /// Generate a range of changesets for given blocks and accounts. -/// Assumes all accounts start with an empty storage. /// /// Returns a Vec of account and storage changes for each block, /// along with the final state of all accounts and storages. @@ -216,7 +215,7 @@ where .map(|(addr, (acc, st))| (addr, (acc, st.into_iter().map(|e| (e.key, e.value)).collect()))) .collect(); - let valid_addresses = state.keys().copied().collect(); + let valid_addresses = state.keys().copied().collect::>(); let mut changesets = Vec::new(); @@ -279,7 +278,7 @@ where /// Returns two addresses, a balance_change, and a Vec of new storage entries. pub fn random_account_change( rng: &mut R, - valid_addresses: &Vec
, + valid_addresses: &[Address], n_storage_changes: Range, key_range: Range, ) -> (Address, Address, U256, Vec) { @@ -340,6 +339,7 @@ pub fn random_contract_account_range( let mut accounts = Vec::with_capacity(acc_range.end.saturating_sub(acc_range.start) as usize); for _ in acc_range { let (address, eoa_account) = random_eoa_account(rng); + // todo: can a non-eoa account have a nonce > 0? let account = Account { bytecode_hash: Some(rng.gen()), ..eoa_account }; accounts.push((address, account)) } From 027f920eb43f4d17c2eae9939dbfcc4d36cd2ce5 Mon Sep 17 00:00:00 2001 From: Kolby Moroz Liebl <31669092+KolbyML@users.noreply.github.com> Date: Thu, 18 Apr 2024 03:58:03 -0700 Subject: [PATCH 213/700] ci: enable eth_getProof hive test (#7710) --- .github/workflows/hive.yml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index eb8bb79454634..184ee0df0bdb5 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -116,17 +116,13 @@ jobs: - eth_getBlockBy - eth_getBlockTransactionCountBy - eth_getCode + - eth_getProof - eth_getStorage - eth_getTransactionBy - eth_getTransactionCount - eth_getTransactionReceipt - eth_sendRawTransaction - eth_syncing - # not running eth_getProof tests because we do not support - # eth_getProof yet - # - sim: ethereum/rpc-compat - # include: [eth_getProof/get-account-proof-with-storage, eth_getProof/get-account-proof] - # experimental: true # debug_ rpc methods - sim: ethereum/rpc-compat include: [debug_] From 7b16f0d0b668475596ce1cb17f9da0add2c39716 Mon Sep 17 00:00:00 2001 From: "Supernovahs.eth" <91280922+supernovahs@users.noreply.github.com> Date: Thu, 18 Apr 2024 16:53:54 +0530 Subject: [PATCH 214/700] chore: Move node-core/events to standalone crate (#7713) Co-authored-by: Matthias Seitz Co-authored-by: Oliver Nordbjerg --- Cargo.lock | 27 +++++++++++--- Cargo.toml | 3 ++ bin/reth/Cargo.toml | 1 + bin/reth/src/commands/debug_cmd/execution.rs | 
2 +- bin/reth/src/commands/import.rs | 5 +-- crates/node-builder/Cargo.toml | 1 + crates/node-builder/src/builder.rs | 11 +++--- crates/node-core/Cargo.toml | 6 +--- crates/node-core/src/events/mod.rs | 4 --- crates/node-core/src/lib.rs | 1 - crates/node-events/Cargo.toml | 35 +++++++++++++++++++ .../src/events => node-events/src}/cl.rs | 0 crates/node-events/src/lib.rs | 12 +++++++ .../src/events => node-events/src}/node.rs | 2 +- 14 files changed, 85 insertions(+), 25 deletions(-) delete mode 100644 crates/node-core/src/events/mod.rs create mode 100644 crates/node-events/Cargo.toml rename crates/{node-core/src/events => node-events/src}/cl.rs (100%) create mode 100644 crates/node-events/src/lib.rs rename crates/{node-core/src/events => node-events/src}/node.rs (99%) diff --git a/Cargo.lock b/Cargo.lock index 83455ba8b5e5b..610064f817129 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6104,6 +6104,7 @@ dependencies = [ "reth-node-builder", "reth-node-core", "reth-node-ethereum", + "reth-node-events", "reth-node-optimism", "reth-payload-builder", "reth-payload-validator", @@ -6864,6 +6865,7 @@ dependencies = [ "reth-network", "reth-node-api", "reth-node-core", + "reth-node-events", "reth-payload-builder", "reth-primitives", "reth-provider", @@ -6899,7 +6901,6 @@ dependencies = [ "metrics-process", "metrics-util", "once_cell", - "pin-project", "procfs", "proptest", "rand 0.8.5", @@ -6918,15 +6919,12 @@ dependencies = [ "reth-network-api", "reth-primitives", "reth-provider", - "reth-prune", "reth-rpc", "reth-rpc-api", "reth-rpc-builder", "reth-rpc-engine-api", "reth-rpc-types", "reth-rpc-types-compat", - "reth-stages", - "reth-static-file", "reth-tasks", "reth-tracing", "reth-transaction-pool", @@ -6963,6 +6961,27 @@ dependencies = [ "reth-transaction-pool", ] +[[package]] +name = "reth-node-events" +version = "0.2.0-beta.5" +dependencies = [ + "futures", + "humantime", + "pin-project", + "reth-beacon-consensus", + "reth-db", + "reth-interfaces", + "reth-network", + 
"reth-network-api", + "reth-primitives", + "reth-provider", + "reth-prune", + "reth-stages", + "reth-static-file", + "tokio", + "tracing", +] + [[package]] name = "reth-node-optimism" version = "0.2.0-beta.5" diff --git a/Cargo.toml b/Cargo.toml index 2c4e3f020ba02..3ac244aadb660 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,6 +34,7 @@ members = [ "crates/primitives/", "crates/prune/", "crates/revm/", + "crates/node-events/", "crates/rpc/ipc/", "crates/rpc/rpc/", "crates/rpc/rpc-api/", @@ -258,6 +259,7 @@ reth-tracing = { path = "crates/tracing" } reth-transaction-pool = { path = "crates/transaction-pool" } reth-trie = { path = "crates/trie" } reth-trie-parallel = { path = "crates/trie-parallel" } +reth-node-events = {path = "crates/node-events"} # revm revm = { version = "8.0.0", features = [ @@ -304,6 +306,7 @@ thiserror = "1.0" serde_json = "1.0.94" serde = { version = "1.0", default-features = false } serde_with = "3.3.0" +humantime = "2.1" humantime-serde = "1.1" rand = "0.8.5" schnellru = "0.2" diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 8d1b0538b7616..b57ea46ec7ca6 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -51,6 +51,7 @@ reth-node-ethereum.workspace = true reth-node-optimism = { workspace = true, optional = true, features = ["optimism"] } reth-node-core.workspace = true reth-node-builder.workspace = true +reth-node-events.workspace = true # crypto alloy-rlp.workspace = true diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 3a6f5710099ec..9a2f58d4cabfe 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -260,7 +260,7 @@ impl Command { ); ctx.task_executor.spawn_critical( "events task", - reth_node_core::events::node::handle_events( + reth_node_events::node::handle_events( Some(network.clone()), latest_block_number, events, diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs 
index 74e694388acaa..934ff80db599e 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -26,8 +26,9 @@ use reth_interfaces::{ headers::downloader::{HeaderDownloader, SyncTarget}, }, }; -use reth_node_core::{events::node::NodeEvent, init::init_genesis}; +use reth_node_core::init::init_genesis; use reth_node_ethereum::EthEvmConfig; +use reth_node_events::node::NodeEvent; use reth_primitives::{stage::StageId, ChainSpec, PruneModes, B256, OP_RETH_MAINNET_BELOW_BEDROCK}; use reth_provider::{HeaderSyncMode, ProviderFactory, StageCheckpointReader}; use reth_stages::{ @@ -168,7 +169,7 @@ impl ImportCommand { let latest_block_number = provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); - tokio::spawn(reth_node_core::events::node::handle_events( + tokio::spawn(reth_node_events::node::handle_events( None, latest_block_number, events, diff --git a/crates/node-builder/Cargo.toml b/crates/node-builder/Cargo.toml index 944d35b49df18..c245203ca0846 100644 --- a/crates/node-builder/Cargo.toml +++ b/crates/node-builder/Cargo.toml @@ -36,6 +36,7 @@ reth-prune.workspace = true reth-stages.workspace = true reth-config.workspace = true reth-downloaders.workspace = true +reth-node-events.workspace = true ## async futures.workspace = true diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index 0589b9ce60c24..bff7473452c53 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -38,13 +38,15 @@ use reth_node_core::{ dirs::{ChainPath, DataDirPath, MaybePlatformPath}, engine_api_store::EngineApiStore, engine_skip_fcu::EngineApiSkipFcu, - events::cl::ConsensusLayerHealthEvents, exit::NodeExitFuture, init::init_genesis, node_config::NodeConfig, primitives::{kzg::KzgSettings, Head}, utils::write_peers_to_file, }; +use reth_node_events::node; + +use reth_node_events::cl::ConsensusLayerHealthEvents; use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, 
format_ether, ChainSpec}; use reth_provider::{ providers::BlockchainProvider, CanonStateSubscriptions, ChainSpecProvider, ProviderFactory, @@ -816,12 +818,7 @@ where ); executor.spawn_critical( "events task", - reth_node_core::events::node::handle_events( - Some(network.clone()), - Some(head.number), - events, - database.clone(), - ), + node::handle_events(Some(network.clone()), Some(head.number), events, database.clone()), ); let engine_api = EngineApi::new( diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index 763cd00c6bf97..cdf42cee247dc 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -35,9 +35,6 @@ reth-tasks.workspace = true reth-consensus-common.workspace = true reth-auto-seal-consensus.workspace = true reth-beacon-consensus.workspace = true -reth-stages.workspace = true -reth-prune.workspace = true -reth-static-file.workspace = true # ethereum discv5.workspace = true @@ -56,11 +53,10 @@ reth-metrics.workspace = true # misc eyre.workspace = true clap = { workspace = true, features = ["derive"] } -humantime = "2.1.0" +humantime.workspace = true thiserror.workspace = true const-str = "0.5.6" rand.workspace = true -pin-project.workspace = true derive_more.workspace = true # io diff --git a/crates/node-core/src/events/mod.rs b/crates/node-core/src/events/mod.rs deleted file mode 100644 index 5931e72c313f7..0000000000000 --- a/crates/node-core/src/events/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -//! Various event handlers for the node. 
- -pub mod cl; -pub mod node; diff --git a/crates/node-core/src/lib.rs b/crates/node-core/src/lib.rs index ba25a12eb4909..3d73e0e610796 100644 --- a/crates/node-core/src/lib.rs +++ b/crates/node-core/src/lib.rs @@ -13,7 +13,6 @@ pub mod cli; pub mod dirs; pub mod engine_api_store; pub mod engine_skip_fcu; -pub mod events; pub mod exit; pub mod init; pub mod metrics; diff --git a/crates/node-events/Cargo.toml b/crates/node-events/Cargo.toml new file mode 100644 index 0000000000000..9c66f146931c9 --- /dev/null +++ b/crates/node-events/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "reth-node-events" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +reth-provider.workspace = true +reth-beacon-consensus.workspace = true +reth-network = { workspace = true, features = ["serde"] } +reth-network-api.workspace = true +reth-stages.workspace = true +reth-prune.workspace = true +reth-static-file.workspace = true +reth-interfaces.workspace = true +reth-db.workspace = true +reth-primitives.workspace = true + +# async +tokio.workspace = true + +# async +futures.workspace = true + +tracing.workspace = true + +#misc +pin-project.workspace = true +humantime.workspace = true diff --git a/crates/node-core/src/events/cl.rs b/crates/node-events/src/cl.rs similarity index 100% rename from crates/node-core/src/events/cl.rs rename to crates/node-events/src/cl.rs diff --git a/crates/node-events/src/lib.rs b/crates/node-events/src/lib.rs new file mode 100644 index 0000000000000..e4665066c70c9 --- /dev/null +++ b/crates/node-events/src/lib.rs @@ -0,0 +1,12 @@ +//! Various event handlers for the node. 
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +pub mod cl; +pub mod node; diff --git a/crates/node-core/src/events/node.rs b/crates/node-events/src/node.rs similarity index 99% rename from crates/node-core/src/events/node.rs rename to crates/node-events/src/node.rs index c1a9c1b0f364e..b18cc5f0b6710 100644 --- a/crates/node-core/src/events/node.rs +++ b/crates/node-events/src/node.rs @@ -1,6 +1,6 @@ //! Support for handling events emitted by node components. -use crate::events::cl::ConsensusLayerHealthEvent; +use crate::cl::ConsensusLayerHealthEvent; use futures::Stream; use reth_beacon_consensus::{ BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, ForkchoiceStatus, From 58cb524d73e1935a263ab8cd08777580cf67e15a Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 18 Apr 2024 13:39:57 +0200 Subject: [PATCH 215/700] chore: rename `parent_block_hashed` to `parent_block_hashes` (#7715) --- crates/blockchain-tree/src/blockchain_tree.rs | 10 +++++----- crates/blockchain-tree/src/bundle.rs | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index a278263e8cd8a..3f836849c4925 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -267,18 +267,18 @@ where let state = chain.state_at_block(block_number)?; // get parent hashes - let mut parent_block_hashed = self.all_chain_hashes(chain_id); + let mut parent_block_hashes = self.all_chain_hashes(chain_id); let first_pending_block_number = - *parent_block_hashed.first_key_value().expect("There is at least one block 
hash").0; + *parent_block_hashes.first_key_value().expect("There is at least one block hash").0; let canonical_chain = canonical_chain .iter() .filter(|&(key, _)| key < first_pending_block_number) .collect::>(); - parent_block_hashed.extend(canonical_chain); + parent_block_hashes.extend(canonical_chain); // get canonical fork. let canonical_fork = self.canonical_fork(chain_id)?; - return Some(BundleStateData { state, parent_block_hashed, canonical_fork }) + return Some(BundleStateData { state, parent_block_hashes, canonical_fork }) } // check if there is canonical block @@ -287,7 +287,7 @@ where return Some(BundleStateData { canonical_fork: ForkBlock { number: canonical_number, hash: block_hash }, state: BundleStateWithReceipts::default(), - parent_block_hashed: canonical_chain.inner().clone(), + parent_block_hashes: canonical_chain.inner().clone(), }) } diff --git a/crates/blockchain-tree/src/bundle.rs b/crates/blockchain-tree/src/bundle.rs index 1e7eb31820042..d8c93439e5b17 100644 --- a/crates/blockchain-tree/src/bundle.rs +++ b/crates/blockchain-tree/src/bundle.rs @@ -44,7 +44,7 @@ pub struct BundleStateData { /// Parent block hashes needs for evm BLOCKHASH opcode. /// NOTE: it does not mean that all hashes are there but all until finalized are there. /// Other hashes can be obtained from provider - pub parent_block_hashed: BTreeMap, + pub parent_block_hashes: BTreeMap, /// Canonical block where state forked from. 
pub canonical_fork: ForkBlock, } @@ -55,7 +55,7 @@ impl BundleStateDataProvider for BundleStateData { } fn block_hash(&self, block_number: BlockNumber) -> Option { - self.parent_block_hashed.get(&block_number).cloned() + self.parent_block_hashes.get(&block_number).cloned() } fn canonical_fork(&self) -> ForkBlock { From 4ca86fb4d2c435c62e878766a39527f5560ffbcf Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 18 Apr 2024 16:23:27 +0200 Subject: [PATCH 216/700] chore(storage): use chain spec from provider field (#7723) --- bin/reth/src/commands/debug_cmd/execution.rs | 2 +- bin/reth/src/commands/stage/unwind.rs | 2 +- crates/blockchain-tree/src/blockchain_tree.rs | 5 +---- .../provider/src/providers/database/provider.rs | 7 +++---- crates/storage/provider/src/traits/block.rs | 11 ++++------- 5 files changed, 10 insertions(+), 17 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 9a2f58d4cabfe..db54bbb0dfbe6 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -285,7 +285,7 @@ impl Command { { provider_factory .provider_rw()? 
- .take_block_and_execution_range(&self.chain, next_block..=target_block)?; + .take_block_and_execution_range(next_block..=target_block)?; } // Update latest block diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index c7483870a8c7b..8e9141399f4ca 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -126,7 +126,7 @@ impl Command { let provider = provider_factory.provider_rw()?; let _ = provider - .take_block_and_execution_range(&self.chain, range.clone()) + .take_block_and_execution_range(range.clone()) .map_err(|err| eyre::eyre!("Transaction error on unwind: {err}"))?; provider.commit()?; diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 3f836849c4925..28094853566ca 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1213,10 +1213,7 @@ where info!(target: "blockchain_tree", "REORG: revert canonical from database by unwinding chain blocks {:?}", revert_range); // read block and execution result from database. and remove traces of block from tables. 
let blocks_and_execution = provider_rw - .take_block_and_execution_range( - self.externals.provider_factory.chain_spec().as_ref(), - revert_range, - ) + .take_block_and_execution_range(revert_range) .map_err(|e| CanonicalError::CanonicalRevert(e.to_string()))?; provider_rw.commit()?; diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 01c03e9564d5b..a2b2ec74a52e0 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -709,7 +709,6 @@ impl DatabaseProvider { /// Return range of blocks and its execution result fn get_take_block_range( &self, - chain_spec: &ChainSpec, range: impl RangeBounds + Clone, ) -> ProviderResult> { // For block we need Headers, Bodies, Uncles, withdrawals, Transactions, Signers @@ -768,7 +767,8 @@ impl DatabaseProvider { }; // withdrawal can be missing - let shanghai_is_active = chain_spec.is_shanghai_active_at_timestamp(header.timestamp); + let shanghai_is_active = + self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp); let mut withdrawals = Some(Withdrawals::default()); if shanghai_is_active { if let Some((block_number, _)) = block_withdrawals.as_ref() { @@ -2376,7 +2376,6 @@ impl BlockExecutionWriter for DatabaseProvider { /// Return range of blocks and its execution result fn get_or_take_block_and_execution_range( &self, - chain_spec: &ChainSpec, range: RangeInclusive, ) -> ProviderResult { if TAKE { @@ -2447,7 +2446,7 @@ impl BlockExecutionWriter for DatabaseProvider { } // get blocks - let blocks = self.get_take_block_range::(chain_spec, range.clone())?; + let blocks = self.get_take_block_range::(range.clone())?; let unwind_to = blocks.first().map(|b| b.number.saturating_sub(1)); // get execution res let execution_state = self.unwind_or_peek_state::(range.clone())?; diff --git a/crates/storage/provider/src/traits/block.rs 
b/crates/storage/provider/src/traits/block.rs index b8ae1bdb145b9..1b767f350a39c 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -6,8 +6,8 @@ use auto_impl::auto_impl; use reth_db::models::StoredBlockBodyIndices; use reth_interfaces::provider::ProviderResult; use reth_primitives::{ - Block, BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, BlockWithSenders, ChainSpec, - Header, PruneModes, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, B256, + Block, BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, BlockWithSenders, Header, + PruneModes, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, B256, }; use reth_trie::{updates::TrieUpdates, HashedPostState}; use std::ops::RangeInclusive; @@ -268,25 +268,22 @@ pub trait BlockExecutionWriter: BlockWriter + BlockReader + Send + Sync { /// Get range of blocks and its execution result fn get_block_and_execution_range( &self, - chain_spec: &ChainSpec, range: RangeInclusive, ) -> ProviderResult { - self.get_or_take_block_and_execution_range::(chain_spec, range) + self.get_or_take_block_and_execution_range::(range) } /// Take range of blocks and its execution result fn take_block_and_execution_range( &self, - chain_spec: &ChainSpec, range: RangeInclusive, ) -> ProviderResult { - self.get_or_take_block_and_execution_range::(chain_spec, range) + self.get_or_take_block_and_execution_range::(range) } /// Return range of blocks and its execution result fn get_or_take_block_and_execution_range( &self, - chain_spec: &ChainSpec, range: RangeInclusive, ) -> ProviderResult; } From f57bef718165d5f632b85b03bfdf3426e6bff865 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 18 Apr 2024 18:10:22 +0200 Subject: [PATCH 217/700] Revert "feat(op): import bodies (#7659)" (#7724) --- bin/reth/src/commands/import.rs | 4 +- crates/primitives/src/lib.rs | 2 +- crates/primitives/src/transaction/mod.rs | 40 +++++-------------- 
.../primitives/src/transaction/signature.rs | 13 ------ 4 files changed, 14 insertions(+), 45 deletions(-) diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 934ff80db599e..d87328e33ae56 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -29,7 +29,7 @@ use reth_interfaces::{ use reth_node_core::init::init_genesis; use reth_node_ethereum::EthEvmConfig; use reth_node_events::node::NodeEvent; -use reth_primitives::{stage::StageId, ChainSpec, PruneModes, B256, OP_RETH_MAINNET_BELOW_BEDROCK}; +use reth_primitives::{stage::StageId, ChainSpec, PruneModes, B256}; use reth_provider::{HeaderSyncMode, ProviderFactory, StageCheckpointReader}; use reth_stages::{ prelude::*, @@ -75,7 +75,7 @@ pub struct ImportCommand { /// Import OP Mainnet chain below Bedrock. Caution! Flag must be set as env var, since the env /// var is read by another process too, in order to make below Bedrock import work. - #[arg(long, verbatim_doc_comment, env = OP_RETH_MAINNET_BELOW_BEDROCK)] + #[arg(long, verbatim_doc_comment, env = "OP_RETH_MAINNET_BELOW_BEDROCK")] op_mainnet_below_bedrock: bool, /// Chunk byte length. 
diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index c14f719647283..167a645451397 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -100,7 +100,7 @@ pub use transaction::{ Transaction, TransactionKind, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TryFromRecoveredTransaction, TxEip1559, TxEip2930, TxEip4844, TxHashOrNumber, TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, - LEGACY_TX_TYPE_ID, OP_RETH_MAINNET_BELOW_BEDROCK, + LEGACY_TX_TYPE_ID, }; pub use withdrawal::{Withdrawal, Withdrawals}; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index c72c70627ad2a..5cd3001f7003d 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -32,7 +32,7 @@ pub use sidecar::generate_blob_sidecar; #[cfg(feature = "c-kzg")] pub use sidecar::{BlobTransaction, BlobTransactionSidecar, BlobTransactionValidationError}; -pub use signature::{Signature, OP_RETH_MAINNET_BELOW_BEDROCK}; +pub use signature::Signature; pub use tx_type::{ TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; @@ -963,8 +963,8 @@ impl TransactionSignedNoHash { // Optimism's Deposit transaction does not have a signature. Directly return the // `from` address. #[cfg(feature = "optimism")] - if let Some(address) = get_deposit_or_null_address(&self.transaction, &self.signature) { - return Some(address) + if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { + return Some(from) } let signature_hash = self.signature_hash(); @@ -983,9 +983,11 @@ impl TransactionSignedNoHash { buffer.clear(); self.transaction.encode_without_signature(buffer); + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. 
#[cfg(feature = "optimism")] - if let Some(address) = get_deposit_or_null_address(&self.transaction, &self.signature) { - return Some(address) + if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { + return Some(from) } self.signature.recover_signer_unchecked(keccak256(buffer)) @@ -1194,8 +1196,8 @@ impl TransactionSigned { // Optimism's Deposit transaction does not have a signature. Directly return the // `from` address. #[cfg(feature = "optimism")] - if let Some(address) = get_deposit_or_null_address(&self.transaction, &self.signature) { - return Some(address) + if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { + return Some(from) } let signature_hash = self.signature_hash(); self.signature.recover_signer(signature_hash) @@ -1210,8 +1212,8 @@ impl TransactionSigned { // Optimism's Deposit transaction does not have a signature. Directly return the // `from` address. #[cfg(feature = "optimism")] - if let Some(address) = get_deposit_or_null_address(&self.transaction, &self.signature) { - return Some(address) + if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { + return Some(from) } let signature_hash = self.signature_hash(); self.signature.recover_signer_unchecked(signature_hash) @@ -1800,26 +1802,6 @@ impl IntoRecoveredTransaction for TransactionSignedEcRecovered { } } -#[cfg(feature = "optimism")] -fn get_deposit_or_null_address( - transaction: &Transaction, - signature: &Signature, -) -> Option
{ - // Optimism's Deposit transaction does not have a signature. Directly return the - // `from` address. - if let Transaction::Deposit(TxDeposit { from, .. }) = transaction { - return Some(*from) - } - // OP blocks below bedrock include transactions sent from the null address - if std::env::var_os(OP_RETH_MAINNET_BELOW_BEDROCK).as_deref() == Some("true".as_ref()) && - *signature == Signature::optimism_deposit_tx_signature() - { - return Some(Address::default()) - } - - None -} - #[cfg(test)] mod tests { use crate::{ diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index a6bc8905acbb0..200bf6989abe4 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -14,9 +14,6 @@ const SECP256K1N_HALF: U256 = U256::from_be_bytes([ 0x5D, 0x57, 0x6E, 0x73, 0x57, 0xA4, 0x50, 0x1D, 0xDF, 0xE9, 0x2F, 0x46, 0x68, 0x1B, 0x20, 0xA0, ]); -/// Running OP Mainnet migration for chain below bedrock.] -pub const OP_RETH_MAINNET_BELOW_BEDROCK: &str = "OP_RETH_MAINNET_BELOW_BEDROCK"; - /// r, s: Values corresponding to the signature of the /// transaction and used to determine the sender of /// the transaction; formally Tr and Ts. This is expanded in Appendix F of yellow paper. 
@@ -85,12 +82,6 @@ impl Signature { // EIP-155: v = {0, 1} + CHAIN_ID * 2 + 35 self.odd_y_parity as u64 + chain_id * 2 + 35 } else { - #[cfg(feature = "optimism")] - if std::env::var_os(OP_RETH_MAINNET_BELOW_BEDROCK).as_deref() == Some("true".as_ref()) && - *self == Self::optimism_deposit_tx_signature() - { - return 0 - } self.odd_y_parity as u64 + 27 } } @@ -107,10 +98,6 @@ impl Signature { if v < 35 { // non-EIP-155 legacy scheme, v = 27 for even y-parity, v = 28 for odd y-parity if v != 27 && v != 28 { - #[cfg(feature = "optimism")] - if std::env::var(OP_RETH_MAINNET_BELOW_BEDROCK) == Ok(true.to_string()) && v == 0 { - return Ok((Signature { r, s, odd_y_parity: false }, None)) - } return Err(RlpError::Custom("invalid Ethereum signature (V is not 27 or 28)")) } let odd_y_parity = v == 28; From 6863cdb42bbac29de57b66739c8e0fc7b4d5dbaa Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 18 Apr 2024 18:33:29 +0200 Subject: [PATCH 218/700] chore: introduce node dir (#7720) Co-authored-by: Alexey Shekhirin --- Cargo.toml | 4 ++-- crates/{node-events => node/events}/Cargo.toml | 0 crates/{node-events => node/events}/src/cl.rs | 0 crates/{node-events => node/events}/src/lib.rs | 0 crates/{node-events => node/events}/src/node.rs | 0 5 files changed, 2 insertions(+), 2 deletions(-) rename crates/{node-events => node/events}/Cargo.toml (100%) rename crates/{node-events => node/events}/src/cl.rs (100%) rename crates/{node-events => node/events}/src/lib.rs (100%) rename crates/{node-events => node/events}/src/node.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index 3ac244aadb660..6aace0ad1853c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,7 +34,7 @@ members = [ "crates/primitives/", "crates/prune/", "crates/revm/", - "crates/node-events/", + "crates/node/events/", "crates/rpc/ipc/", "crates/rpc/rpc/", "crates/rpc/rpc-api/", @@ -259,7 +259,7 @@ reth-tracing = { path = "crates/tracing" } reth-transaction-pool = { path = "crates/transaction-pool" } reth-trie = { path = 
"crates/trie" } reth-trie-parallel = { path = "crates/trie-parallel" } -reth-node-events = {path = "crates/node-events"} +reth-node-events = { path = "crates/node/events" } # revm revm = { version = "8.0.0", features = [ diff --git a/crates/node-events/Cargo.toml b/crates/node/events/Cargo.toml similarity index 100% rename from crates/node-events/Cargo.toml rename to crates/node/events/Cargo.toml diff --git a/crates/node-events/src/cl.rs b/crates/node/events/src/cl.rs similarity index 100% rename from crates/node-events/src/cl.rs rename to crates/node/events/src/cl.rs diff --git a/crates/node-events/src/lib.rs b/crates/node/events/src/lib.rs similarity index 100% rename from crates/node-events/src/lib.rs rename to crates/node/events/src/lib.rs diff --git a/crates/node-events/src/node.rs b/crates/node/events/src/node.rs similarity index 100% rename from crates/node-events/src/node.rs rename to crates/node/events/src/node.rs From e401c4848a8c28754b1c5cb6025cfe756076329b Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 18 Apr 2024 19:15:28 +0200 Subject: [PATCH 219/700] feat(pruner): respect ExEx finished height (#7673) --- Cargo.lock | 1 + .../consensus/beacon/src/engine/test_utils.rs | 3 +- crates/exex/src/manager.rs | 4 +- crates/node-builder/src/builder.rs | 24 +++++-- crates/primitives/src/exex/mod.rs | 9 ++- crates/prune/Cargo.toml | 1 + crates/prune/src/builder.rs | 28 ++++++-- crates/prune/src/pruner.rs | 70 +++++++++++++++++-- 8 files changed, 117 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 610064f817129..5c48a9d7456aa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7170,6 +7170,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "thiserror", + "tokio", "tokio-stream", "tracing", ] diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 7aeb8d746d339..42f85282c64f7 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ 
b/crates/consensus/beacon/src/engine/test_utils.rs @@ -22,7 +22,7 @@ use reth_interfaces::{ }; use reth_node_ethereum::{EthEngineTypes, EthEvmConfig}; use reth_payload_builder::test_utils::spawn_test_payload_service; -use reth_primitives::{BlockNumber, ChainSpec, PruneModes, B256}; +use reth_primitives::{BlockNumber, ChainSpec, FinishedExExHeight, PruneModes, B256}; use reth_provider::{ providers::BlockchainProvider, test_utils::{create_test_provider_factory_with_chain_spec, TestExecutorFactory}, @@ -435,6 +435,7 @@ where self.base_config.chain_spec.prune_delete_limit, config.max_reorg_depth() as usize, None, + watch::channel(FinishedExExHeight::NoExExs).1, ); let mut hooks = EngineHooks::new(); diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 32c4d8e26d219..332650607d9c2 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -396,8 +396,8 @@ impl ExExManagerHandle { } /// The finished height of all ExEx's. - pub fn finished_height(&mut self) -> FinishedExExHeight { - *self.finished_height.borrow_and_update() + pub fn finished_height(&self) -> watch::Receiver { + self.finished_height.clone() } /// Wait until the manager is ready for new notifications. 
diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index bff7473452c53..76df0fc8f56f6 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -641,20 +641,21 @@ where future::join_all(exexs).await; // spawn exex manager - if !exex_handles.is_empty() { + let exex_manager_handle = if !exex_handles.is_empty() { debug!(target: "reth::cli", "spawning exex manager"); // todo(onbjerg): rm magic number let exex_manager = ExExManager::new(exex_handles, 1024); - let mut exex_manager_handle = exex_manager.handle(); + let exex_manager_handle = exex_manager.handle(); executor.spawn_critical("exex manager", async move { exex_manager.await.expect("exex manager crashed"); }); // send notifications from the blockchain tree to exex manager let mut canon_state_notifications = blockchain_tree.subscribe_to_canonical_state(); + let mut handle = exex_manager_handle.clone(); executor.spawn_critical("exex manager blockchain tree notifications", async move { while let Ok(notification) = canon_state_notifications.recv().await { - exex_manager_handle + handle .send_async(notification) .await .expect("blockchain tree notification could not be sent to exex manager"); @@ -662,7 +663,11 @@ where }); info!(target: "reth::cli", "ExEx Manager started"); - } + + Some(exex_manager_handle) + } else { + None + }; // create pipeline let network_client = network.fetch_client().await?; @@ -773,11 +778,16 @@ where let initial_target = config.initial_pipeline_target(genesis_hash); let prune_config = prune_config.unwrap_or_default(); - let mut pruner = PrunerBuilder::new(prune_config.clone()) + let mut pruner_builder = PrunerBuilder::new(prune_config.clone()) .max_reorg_depth(tree_config.max_reorg_depth() as usize) .prune_delete_limit(config.chain.prune_delete_limit) - .timeout(PrunerBuilder::DEFAULT_TIMEOUT) - .build(provider_factory.clone()); + .timeout(PrunerBuilder::DEFAULT_TIMEOUT); + if let Some(exex_manager_handle) = 
&exex_manager_handle { + pruner_builder = + pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); + } + + let mut pruner = pruner_builder.build(provider_factory.clone()); let pruner_events = pruner.events(); hooks.add(PruneHook::new(pruner, Box::new(executor.clone()))); diff --git a/crates/primitives/src/exex/mod.rs b/crates/primitives/src/exex/mod.rs index 9fc2ace66dd1f..82730f2972ff5 100644 --- a/crates/primitives/src/exex/mod.rs +++ b/crates/primitives/src/exex/mod.rs @@ -5,7 +5,7 @@ use crate::BlockNumber; pub enum FinishedExExHeight { /// No ExEx's are installed, so there is no finished height. NoExExs, - /// Not all ExExs emitted a `FinishedHeight` event yet. + /// Not all ExExs have emitted a `FinishedHeight` event yet. NotReady, /// The finished height of all ExEx's. /// @@ -16,3 +16,10 @@ pub enum FinishedExExHeight { /// The number is inclusive, i.e. all blocks `<= finished_height` are safe to prune. Height(BlockNumber), } + +impl FinishedExExHeight { + /// Returns `true` if not all ExExs have emitted a `FinishedHeight` event yet. 
+ pub const fn is_not_ready(&self) -> bool { + matches!(self, Self::NotReady) + } +} diff --git a/crates/prune/Cargo.toml b/crates/prune/Cargo.toml index 3a8971a667d32..cc24e68b8341d 100644 --- a/crates/prune/Cargo.toml +++ b/crates/prune/Cargo.toml @@ -29,6 +29,7 @@ tracing.workspace = true thiserror.workspace = true itertools.workspace = true rayon.workspace = true +tokio.workspace = true tokio-stream.workspace = true [dev-dependencies] diff --git a/crates/prune/src/builder.rs b/crates/prune/src/builder.rs index 377a986647e17..8a14ccf4aaeaa 100644 --- a/crates/prune/src/builder.rs +++ b/crates/prune/src/builder.rs @@ -3,24 +3,27 @@ use std::time::Duration; use crate::{segments::SegmentSet, Pruner}; use reth_config::PruneConfig; use reth_db::database::Database; -use reth_primitives::{PruneModes, MAINNET}; +use reth_primitives::{FinishedExExHeight, PruneModes, MAINNET}; use reth_provider::ProviderFactory; +use tokio::sync::watch; /// Contains the information required to build a pruner -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone)] pub struct PrunerBuilder { /// Minimum pruning interval measured in blocks. - pub block_interval: usize, + block_interval: usize, /// Pruning configuration for every part of the data that can be pruned. - pub segments: PruneModes, + segments: PruneModes, /// The number of blocks that can be re-orged. - pub max_reorg_depth: usize, + max_reorg_depth: usize, /// The delete limit for pruner, per block. In the actual pruner run it will be multiplied by /// the amount of blocks between pruner runs to account for the difference in amount of new /// data coming in. - pub prune_delete_limit: usize, + prune_delete_limit: usize, /// Time a pruner job can run before timing out. - pub timeout: Option, + timeout: Option, + /// The finished height of all ExEx's. + finished_exex_height: watch::Receiver, } impl PrunerBuilder { @@ -67,6 +70,15 @@ impl PrunerBuilder { self } + /// Sets the receiver for the finished height of all ExEx's. 
+ pub fn finished_exex_height( + mut self, + finished_exex_height: watch::Receiver, + ) -> Self { + self.finished_exex_height = finished_exex_height; + self + } + /// Builds a [Pruner] from the current configuration. pub fn build(self, provider_factory: ProviderFactory) -> Pruner { let segments = SegmentSet::::from_prune_modes(self.segments); @@ -78,6 +90,7 @@ impl PrunerBuilder { self.prune_delete_limit, self.max_reorg_depth, self.timeout, + self.finished_exex_height, ) } } @@ -90,6 +103,7 @@ impl Default for PrunerBuilder { max_reorg_depth: 64, prune_delete_limit: MAINNET.prune_delete_limit, timeout: Some(Self::DEFAULT_TIMEOUT), + finished_exex_height: watch::channel(FinishedExExHeight::NoExExs).1, } } } diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index 6bd3749c59f38..f3bf963e0e13c 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -7,8 +7,8 @@ use crate::{ }; use reth_db::database::Database; use reth_primitives::{ - BlockNumber, PruneLimiter, PruneMode, PruneProgress, PrunePurpose, PruneSegment, - StaticFileSegment, + BlockNumber, FinishedExExHeight, PruneLimiter, PruneMode, PruneProgress, PrunePurpose, + PruneSegment, StaticFileSegment, }; use reth_provider::{DatabaseProviderRW, ProviderFactory, PruneCheckpointReader}; use reth_tokio_util::EventListeners; @@ -16,6 +16,7 @@ use std::{ collections::BTreeMap, time::{Duration, Instant}, }; +use tokio::sync::watch; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::debug; @@ -46,6 +47,8 @@ pub struct Pruner { prune_max_blocks_per_run: usize, /// Maximum time for a one pruner run. timeout: Option, + /// The finished height of all ExEx's. 
+ finished_exex_height: watch::Receiver, #[doc(hidden)] metrics: Metrics, listeners: EventListeners, @@ -60,6 +63,7 @@ impl Pruner { delete_limit: usize, prune_max_blocks_per_run: usize, timeout: Option, + finished_exex_height: watch::Receiver, ) -> Self { Self { provider_factory, @@ -69,6 +73,7 @@ impl Pruner { delete_limit_per_block: delete_limit, prune_max_blocks_per_run, timeout, + finished_exex_height, metrics: Metrics::default(), listeners: Default::default(), } @@ -81,6 +86,11 @@ impl Pruner { /// Run the pruner pub fn run(&mut self, tip_block_number: BlockNumber) -> PrunerResult { + let Some(tip_block_number) = + self.adjust_tip_block_number_to_finished_exex_height(tip_block_number) + else { + return Ok(PruneProgress::Finished) + }; if tip_block_number == 0 { self.previous_tip_block_number = Some(tip_block_number); @@ -269,6 +279,12 @@ impl Pruner { /// Returns `true` if the pruning is needed at the provided tip block number. /// This determined by the check against minimum pruning interval and last pruned block number. pub fn is_pruning_needed(&self, tip_block_number: BlockNumber) -> bool { + let Some(tip_block_number) = + self.adjust_tip_block_number_to_finished_exex_height(tip_block_number) + else { + return false + }; + // Saturating subtraction is needed for the case when the chain was reverted, meaning // current block number might be less than the previous tip block number. // If that's the case, no pruning is needed as outdated data is also reverted. @@ -286,6 +302,30 @@ impl Pruner { false } } + + /// Adjusts the tip block number to the finished ExEx height. This is needed to not prune more + /// data than ExExs have processed. Depending on the height: + /// - [FinishedExExHeight::NoExExs] returns the tip block number as is as no adjustment for + /// ExExs is needed. + /// - [FinishedExExHeight::NotReady] returns `None` as not all ExExs have emitted a + /// `FinishedHeight` event yet. 
+ /// - [FinishedExExHeight::Height] returns the finished ExEx height. + fn adjust_tip_block_number_to_finished_exex_height( + &self, + tip_block_number: BlockNumber, + ) -> Option { + match *self.finished_exex_height.borrow() { + FinishedExExHeight::NoExExs => Some(tip_block_number), + FinishedExExHeight::NotReady => { + debug!(target: "pruner", %tip_block_number, "Not all ExExs have emitted a `FinishedHeight` event yet, can't prune"); + None + } + FinishedExExHeight::Height(finished_exex_height) => { + debug!(target: "pruner", %tip_block_number, %finished_exex_height, "Adjusting tip block number to the finished ExEx height"); + Some(finished_exex_height) + } + } + } } #[cfg(test)] @@ -293,7 +333,7 @@ mod tests { use crate::Pruner; use reth_db::test_utils::{create_test_rw_db, create_test_static_files_dir}; - use reth_primitives::MAINNET; + use reth_primitives::{FinishedExExHeight, MAINNET}; use reth_provider::ProviderFactory; #[test] @@ -302,7 +342,12 @@ mod tests { let (_static_dir, static_dir_path) = create_test_static_files_dir(); let provider_factory = ProviderFactory::new(db, MAINNET.clone(), static_dir_path) .expect("create provide factory with static_files"); - let mut pruner = Pruner::new(provider_factory, vec![], 5, 0, 5, None); + + let (finished_exex_height_tx, finished_exex_height_rx) = + tokio::sync::watch::channel(FinishedExExHeight::NoExExs); + + let mut pruner = + Pruner::new(provider_factory, vec![], 5, 0, 5, None, finished_exex_height_rx); // No last pruned block number was set before let first_block_number = 1; @@ -315,7 +360,22 @@ mod tests { pruner.previous_tip_block_number = Some(second_block_number); // Tip block number delta is < than min block interval - let third_block_number = second_block_number; + assert!(!pruner.is_pruning_needed(second_block_number)); + + // Tip block number delta is >= than min block interval + let third_block_number = second_block_number + pruner.min_block_interval as u64; + 
assert!(pruner.is_pruning_needed(third_block_number)); + + // Not all ExExs have emitted a `FinishedHeight` event yet + finished_exex_height_tx.send(FinishedExExHeight::NotReady).unwrap(); + assert!(!pruner.is_pruning_needed(third_block_number)); + + // Adjust tip block number to the finished ExEx height that doesn't reach the threshold + finished_exex_height_tx.send(FinishedExExHeight::Height(second_block_number)).unwrap(); assert!(!pruner.is_pruning_needed(third_block_number)); + + // Adjust tip block number to the finished ExEx height that reaches the threshold + finished_exex_height_tx.send(FinishedExExHeight::Height(third_block_number)).unwrap(); + assert!(pruner.is_pruning_needed(third_block_number)); } } From 528f1e904766bcafc3aa1e109fda27cfd991b5be Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 18 Apr 2024 18:26:29 +0100 Subject: [PATCH 220/700] fix: set ETL directory inside `datadir` on `reth run` and `reth import` (#7722) --- bin/reth/src/commands/debug_cmd/execution.rs | 10 +++- bin/reth/src/commands/import.rs | 9 +++- bin/reth/src/commands/stage/run.rs | 10 +++- crates/stages/src/sets.rs | 47 ++++++++++++------- crates/stages/src/stages/hashing_account.rs | 6 +++ crates/stages/src/stages/hashing_storage.rs | 6 +++ .../src/stages/index_account_history.rs | 6 +++ .../src/stages/index_storage_history.rs | 6 +++ crates/stages/src/stages/tx_lookup.rs | 6 +++ 9 files changed, 84 insertions(+), 22 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index db54bbb0dfbe6..99edd2ef25104 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -13,7 +13,7 @@ use crate::{ use clap::Parser; use futures::{stream::select as stream_select, StreamExt}; use reth_beacon_consensus::BeaconConsensus; -use reth_config::Config; +use reth_config::{config::EtlConfig, Config}; use reth_db::{database::Database, 
init_db, DatabaseEnv}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, @@ -200,10 +200,16 @@ impl Command { /// Execute `execution-debug` command pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { - let config = Config::default(); + let mut config = Config::default(); let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); let db_path = data_dir.db_path(); + + // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to + if config.stages.etl.dir.is_none() { + config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); + } + fs::create_dir_all(&db_path)?; let db = Arc::new(init_db(db_path, self.db.database_args())?); let provider_factory = diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index d87328e33ae56..ce9cd3efe7afe 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -12,7 +12,7 @@ use clap::Parser; use eyre::Context; use futures::{Stream, StreamExt}; use reth_beacon_consensus::BeaconConsensus; -use reth_config::Config; +use reth_config::{config::EtlConfig, Config}; use reth_db::{database::Database, init_db}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, @@ -115,9 +115,14 @@ impl ImportCommand { let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path()); - let config: Config = self.load_config(config_path.clone())?; + let mut config: Config = self.load_config(config_path.clone())?; info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); + // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to + if config.stages.etl.dir.is_none() { + config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); + } + let db_path = data_dir.db_path(); info!(target: "reth::cli", path = ?db_path, "Opening database"); diff --git a/bin/reth/src/commands/stage/run.rs 
b/bin/reth/src/commands/stage/run.rs index 47e70ae7c0ac3..b8f9bc527893a 100644 --- a/bin/reth/src/commands/stage/run.rs +++ b/bin/reth/src/commands/stage/run.rs @@ -256,8 +256,14 @@ impl Command { Box::new(MerkleStage::default_execution()), Some(Box::new(MerkleStage::default_unwind())), ), - StageEnum::AccountHistory => (Box::::default(), None), - StageEnum::StorageHistory => (Box::::default(), None), + StageEnum::AccountHistory => ( + Box::new(IndexAccountHistoryStage::default().with_etl_config(etl_config)), + None, + ), + StageEnum::StorageHistory => ( + Box::new(IndexStorageHistoryStage::default().with_etl_config(etl_config)), + None, + ), _ => return Ok(()), }; if let Some(unwind_stage) = &unwind_stage { diff --git a/crates/stages/src/sets.rs b/crates/stages/src/sets.rs index 4e24e9a9234bd..1185de25f0ae2 100644 --- a/crates/stages/src/sets.rs +++ b/crates/stages/src/sets.rs @@ -17,6 +17,7 @@ //! # use reth_node_ethereum::EthEvmConfig; //! # use reth_provider::test_utils::create_test_provider_factory; //! # use reth_static_file::StaticFileProducer; +//! # use reth_config::config::EtlConfig; //! //! # let executor_factory = EvmProcessorFactory::new(MAINNET.clone(), EthEvmConfig::default()); //! # let provider_factory = create_test_provider_factory(); @@ -27,7 +28,7 @@ //! ); //! // Build a pipeline with all offline stages. //! # let pipeline = Pipeline::builder() -//! .add_stages(OfflineStages::new(executor_factory)) +//! .add_stages(OfflineStages::new(executor_factory, EtlConfig::default())) //! .build(provider_factory, static_file_producer); //! ``` //! @@ -37,11 +38,13 @@ //! # use reth_revm::EvmProcessorFactory; //! # use reth_node_ethereum::EthEvmConfig; //! # use reth_primitives::MAINNET; +//! # use reth_config::config::EtlConfig; +//! //! // Build a pipeline with all offline stages and a custom stage at the end. //! # let executor_factory = EvmProcessorFactory::new(MAINNET.clone(), EthEvmConfig::default()); //! Pipeline::builder() //! .add_stages( -//! 
OfflineStages::new(executor_factory).builder().add_stage(MyCustomStage) +//! OfflineStages::new(executor_factory, EtlConfig::default()).builder().add_stage(MyCustomStage) //! ) //! .build(); //! ``` @@ -90,6 +93,8 @@ pub struct DefaultStages { online: OnlineStages, /// Executor factory needs for execution stage executor_factory: EF, + /// ETL configuration + etl_config: EtlConfig, } impl DefaultStages { @@ -113,9 +118,10 @@ impl DefaultStages { consensus, header_downloader, body_downloader, - etl_config, + etl_config.clone(), ), executor_factory, + etl_config, } } } @@ -128,10 +134,11 @@ where pub fn add_offline_stages( default_offline: StageSetBuilder, executor_factory: EF, + etl_config: EtlConfig, ) -> StageSetBuilder { StageSetBuilder::default() .add_set(default_offline) - .add_set(OfflineStages::new(executor_factory)) + .add_set(OfflineStages::new(executor_factory, etl_config)) .add_stage(FinishStage) } } @@ -145,7 +152,7 @@ where DB: Database + 'static, { fn builder(self) -> StageSetBuilder { - Self::add_offline_stages(self.online.builder(), self.executor_factory) + Self::add_offline_stages(self.online.builder(), self.executor_factory, self.etl_config) } } @@ -250,12 +257,14 @@ where pub struct OfflineStages { /// Executor factory needs for execution stage pub executor_factory: EF, + /// ETL configuration + etl_config: EtlConfig, } impl OfflineStages { /// Create a new set of offline stages with default values. 
- pub fn new(executor_factory: EF) -> Self { - Self { executor_factory } + pub fn new(executor_factory: EF, etl_config: EtlConfig) -> Self { + Self { executor_factory, etl_config } } } @@ -263,8 +272,8 @@ impl StageSet for OfflineStages { fn builder(self) -> StageSetBuilder { ExecutionStages::new(self.executor_factory) .builder() - .add_set(HashingStages) - .add_set(HistoryIndexingStages) + .add_set(HashingStages { etl_config: self.etl_config.clone() }) + .add_set(HistoryIndexingStages { etl_config: self.etl_config }) } } @@ -294,14 +303,17 @@ impl StageSet for ExecutionStages { /// A set containing all stages that hash account state. #[derive(Debug, Default)] #[non_exhaustive] -pub struct HashingStages; +pub struct HashingStages { + /// ETL configuration + etl_config: EtlConfig, +} impl StageSet for HashingStages { fn builder(self) -> StageSetBuilder { StageSetBuilder::default() .add_stage(MerkleStage::default_unwind()) - .add_stage(AccountHashingStage::default()) - .add_stage(StorageHashingStage::default()) + .add_stage(AccountHashingStage::default().with_etl_config(self.etl_config.clone())) + .add_stage(StorageHashingStage::default().with_etl_config(self.etl_config)) .add_stage(MerkleStage::default_execution()) } } @@ -309,13 +321,16 @@ impl StageSet for HashingStages { /// A set containing all stages that do additional indexing for historical state. 
#[derive(Debug, Default)] #[non_exhaustive] -pub struct HistoryIndexingStages; +pub struct HistoryIndexingStages { + /// ETL configuration + etl_config: EtlConfig, +} impl StageSet for HistoryIndexingStages { fn builder(self) -> StageSetBuilder { StageSetBuilder::default() - .add_stage(TransactionLookupStage::default()) - .add_stage(IndexStorageHistoryStage::default()) - .add_stage(IndexAccountHistoryStage::default()) + .add_stage(TransactionLookupStage::default().with_etl_config(self.etl_config.clone())) + .add_stage(IndexStorageHistoryStage::default().with_etl_config(self.etl_config.clone())) + .add_stage(IndexAccountHistoryStage::default().with_etl_config(self.etl_config)) } } diff --git a/crates/stages/src/stages/hashing_account.rs b/crates/stages/src/stages/hashing_account.rs index 372ce46eab598..4afccc77f8f06 100644 --- a/crates/stages/src/stages/hashing_account.rs +++ b/crates/stages/src/stages/hashing_account.rs @@ -47,6 +47,12 @@ impl AccountHashingStage { pub fn new(clean_threshold: u64, commit_threshold: u64, etl_config: EtlConfig) -> Self { Self { clean_threshold, commit_threshold, etl_config } } + + /// Set the ETL configuration to use. + pub fn with_etl_config(mut self, etl_config: EtlConfig) -> Self { + self.etl_config = etl_config; + self + } } impl Default for AccountHashingStage { diff --git a/crates/stages/src/stages/hashing_storage.rs b/crates/stages/src/stages/hashing_storage.rs index 059fdf0fca016..54f4b9520eb69 100644 --- a/crates/stages/src/stages/hashing_storage.rs +++ b/crates/stages/src/stages/hashing_storage.rs @@ -48,6 +48,12 @@ impl StorageHashingStage { pub fn new(clean_threshold: u64, commit_threshold: u64, etl_config: EtlConfig) -> Self { Self { clean_threshold, commit_threshold, etl_config } } + + /// Set the ETL configuration to use. 
+ pub fn with_etl_config(mut self, etl_config: EtlConfig) -> Self { + self.etl_config = etl_config; + self + } } impl Default for StorageHashingStage { diff --git a/crates/stages/src/stages/index_account_history.rs b/crates/stages/src/stages/index_account_history.rs index 76d01dbdd6a66..89c77d6e1b869 100644 --- a/crates/stages/src/stages/index_account_history.rs +++ b/crates/stages/src/stages/index_account_history.rs @@ -37,6 +37,12 @@ impl IndexAccountHistoryStage { ) -> Self { Self { commit_threshold, prune_mode, etl_config } } + + /// Set the ETL configuration to use. + pub fn with_etl_config(mut self, etl_config: EtlConfig) -> Self { + self.etl_config = etl_config; + self + } } impl Default for IndexAccountHistoryStage { diff --git a/crates/stages/src/stages/index_storage_history.rs b/crates/stages/src/stages/index_storage_history.rs index c00de1632eaef..b321f1c56211e 100644 --- a/crates/stages/src/stages/index_storage_history.rs +++ b/crates/stages/src/stages/index_storage_history.rs @@ -41,6 +41,12 @@ impl IndexStorageHistoryStage { ) -> Self { Self { commit_threshold, prune_mode, etl_config } } + + /// Set the ETL configuration to use. + pub fn with_etl_config(mut self, etl_config: EtlConfig) -> Self { + self.etl_config = etl_config; + self + } } impl Default for IndexStorageHistoryStage { diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index 03a8aa2eceb4b..7bdeb4e1a5933 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -48,6 +48,12 @@ impl TransactionLookupStage { pub fn new(chunk_size: u64, etl_config: EtlConfig, prune_mode: Option) -> Self { Self { chunk_size, etl_config, prune_mode } } + + /// Set the ETL configuration to use. 
+ pub fn with_etl_config(mut self, etl_config: EtlConfig) -> Self { + self.etl_config = etl_config; + self + } } impl Stage for TransactionLookupStage { From 643cae776603d40e1ad158e9f8fea972ff4fd62b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 18 Apr 2024 20:21:33 +0200 Subject: [PATCH 221/700] feat: add pre bedrock recovery check (#7726) --- crates/primitives/src/transaction/mod.rs | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 5cd3001f7003d..3fd21c9a3d655 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -979,6 +979,11 @@ impl TransactionSignedNoHash { /// /// Returns `None` if the transaction's signature is invalid, see also /// [Signature::recover_signer_unchecked]. + /// + /// # Optimism + /// + /// For optimism this will return [Address::ZERO] if the Signature is empty, this is because pre bedrock (on OP mainnet), relay messages to the L2 Cross Domain Messenger were sent as legacy transactions from the zero address with an empty signature, e.g.: + /// This makes it possible to import pre bedrock transactions via the sender recovery stage. pub fn encode_and_recover_unchecked(&self, buffer: &mut Vec) -> Option
{ buffer.clear(); self.transaction.encode_without_signature(buffer); @@ -986,8 +991,17 @@ impl TransactionSignedNoHash { // Optimism's Deposit transaction does not have a signature. Directly return the // `from` address. #[cfg(feature = "optimism")] - if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { - return Some(from) + { + if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { + return Some(from) + } + + // pre bedrock system transactions were sent from the zero address as legacy + // transactions with an empty signature Note: this is very hacky and only + // relevant for op-mainnet pre bedrock + if self.is_legacy() && self.signature == Signature::optimism_deposit_tx_signature() { + return Some(Address::ZERO) + } } self.signature.recover_signer_unchecked(keccak256(buffer)) From fae308ee70c92076af88ac8886324f6d32926e8b Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 18 Apr 2024 20:50:33 +0200 Subject: [PATCH 222/700] fix(grafana): adds missing panel for tx fetcher (#7700) --- .../net/network/src/transactions/fetcher.rs | 2 +- etc/grafana/dashboards/reth-mempool.json | 186 ++++++++++++++---- 2 files changed, 154 insertions(+), 34 deletions(-) diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 8803a99f60f1b..cbec0f1e67fb5 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -717,7 +717,7 @@ impl TransactionFetcher { /// The request hashes buffer is filled as if it's an eth68 request, i.e. smartly assemble /// the request based on expected response size. For any hash missing size metadata, it is /// guessed at [`AVERAGE_BYTE_SIZE_TX_ENCODED`]. - + /// /// Loops through hashes pending fetch and does: /// /// 1. Check if a hash pending fetch is seen by peer. 
diff --git a/etc/grafana/dashboards/reth-mempool.json b/etc/grafana/dashboards/reth-mempool.json index f7437ad8af08d..07212ac3bdbfa 100644 --- a/etc/grafana/dashboards/reth-mempool.json +++ b/etc/grafana/dashboards/reth-mempool.json @@ -2324,7 +2324,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "Frequency of transaction types seen in announcements", + "description": "Duration of one call to `TransactionFetcher::on_fetch_pending_hashes`.\n\nFind Peer - find an idle fallback peer for a hash pending fetch.\n\nFill Request - fill `GetPooledTransactions` request, for the found peer, with more hashes from cache of hashes pending fetch. ", "fieldConfig": { "defaults": { "color": { @@ -2376,7 +2376,7 @@ } ] }, - "unit": "cps", + "unit": "s", "unitScale": true }, "overrides": [] @@ -2387,7 +2387,7 @@ "x": 12, "y": 53 }, - "id": 214, + "id": 215, "options": { "legend": { "calcs": [], @@ -2408,14 +2408,14 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_network_transaction_fetcher_legacy_sum{instance=\"$instance\"}[$__rate_interval])", + "expr": "reth_network_duration_find_idle_fallback_peer_for_any_pending_hash{instance=\"$instance\"}", "fullMetaSearch": false, "hide": false, - "includeNullMetadata": false, + "includeNullMetadata": true, "instant": false, - "legendFormat": "Legacy", + "legendFormat": "Find Idle Peer", "range": true, - "refId": "A", + "refId": "C", "useBackend": false }, { @@ -2425,16 +2425,102 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_network_transaction_fetcher_eip2930_sum{instance=\"$instance\"}[$__rate_interval])", + "expr": "reth_network_duration_fill_request_from_hashes_pending_fetch{instance=\"$instance\"}", "fullMetaSearch": false, "hide": false, - "includeNullMetadata": false, + "includeNullMetadata": true, "instant": false, - "legendFormat": "Eip2930", + "legendFormat": "Fill Request", "range": true, "refId": "B", "useBackend": false + } + ], + "title": 
"Fetch Hashes Pending Fetch Duration", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Durations of one call to poll `NetworkManager` future, and its nested function calls.\n\nNetwork Handle Message - stream network handle messages from `TransactionsManager`;\nSwarm Events - stream transaction gossip from `Swarm`", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s", + "unitScale": true }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 61 + }, + "id": 209, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus", @@ -2442,14 +2528,14 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_network_transaction_fetcher_eip1559_sum{instance=\"$instance\"}[$__rate_interval])", + "expr": "reth_network_duration_poll_network_handle{instance=\"$instance\"}", "fullMetaSearch": false, "hide": false, - "includeNullMetadata": false, + "includeNullMetadata": true, "instant": false, - 
"legendFormat": "Eip1559", + "legendFormat": "Network Handle Messages", "range": true, - "refId": "C", + "refId": "A", "useBackend": false }, { @@ -2459,18 +2545,35 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_network_transaction_fetcher_eip4844_sum{instance=\"$instance\"}[$__rate_interval])", + "expr": "reth_network_duration_poll_swarm{instance=\"$instance\"}", "fullMetaSearch": false, "hide": false, - "includeNullMetadata": false, + "includeNullMetadata": true, "instant": false, - "legendFormat": "Eip4844", + "legendFormat": "Swarm Events", "range": true, - "refId": "D", + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_duration_poll_network_manager{instance=\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Total Network Manager Future", + "range": true, + "refId": "C", "useBackend": false } ], - "title": "Announced Transactions by Type", + "title": "Network Manager Poll Duration", "type": "timeseries" }, { @@ -2478,7 +2581,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "Durations of one call to poll `NetworkManager` future, and its nested function calls.\n\nNetwork Handle Message - stream network handle messages from `TransactionsManager`;\nSwarm Events - stream transaction gossip from `Swarm`", + "description": "Frequency of transaction types seen in announcements", "fieldConfig": { "defaults": { "color": { @@ -2530,7 +2633,7 @@ } ] }, - "unit": "s", + "unit": "cps", "unitScale": true }, "overrides": [] @@ -2538,10 +2641,10 @@ "gridPos": { "h": 8, "w": 12, - "x": 0, + "x": 12, "y": 61 }, - "id": 209, + "id": 214, "options": { "legend": { "calcs": [], @@ -2562,12 +2665,12 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": 
"reth_network_duration_poll_network_handle{instance=\"$instance\"}", + "expr": "rate(reth_network_transaction_fetcher_legacy_sum{instance=\"$instance\"}[$__rate_interval])", "fullMetaSearch": false, "hide": false, - "includeNullMetadata": true, + "includeNullMetadata": false, "instant": false, - "legendFormat": "Network Handle Messages", + "legendFormat": "Legacy", "range": true, "refId": "A", "useBackend": false @@ -2579,12 +2682,12 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_network_duration_poll_swarm{instance=\"$instance\"}", + "expr": "rate(reth_network_transaction_fetcher_eip2930_sum{instance=\"$instance\"}[$__rate_interval])", "fullMetaSearch": false, "hide": false, - "includeNullMetadata": true, + "includeNullMetadata": false, "instant": false, - "legendFormat": "Swarm Events", + "legendFormat": "Eip2930", "range": true, "refId": "B", "useBackend": false @@ -2596,18 +2699,35 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_network_duration_poll_network_manager{instance=\"$instance\"}", + "expr": "rate(reth_network_transaction_fetcher_eip1559_sum{instance=\"$instance\"}[$__rate_interval])", "fullMetaSearch": false, "hide": false, - "includeNullMetadata": true, + "includeNullMetadata": false, "instant": false, - "legendFormat": "Total Network Manager Future", + "legendFormat": "Eip1559", "range": true, "refId": "C", "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_transaction_fetcher_eip4844_sum{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Eip4844", + "range": true, + "refId": "D", + "useBackend": false } ], - "title": "Network Manager Poll Duration", + "title": "Announced Transactions by Type", "type": "timeseries" }, { From 
18725f142545f6d2ddd4df764c2b772f06740a31 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 18 Apr 2024 21:27:00 +0200 Subject: [PATCH 223/700] fix(examples): send finished height from minimal exex (#7730) --- examples/exex/minimal/src/main.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/examples/exex/minimal/src/main.rs b/examples/exex/minimal/src/main.rs index ff71a71aca409..1c2463cdaa16a 100644 --- a/examples/exex/minimal/src/main.rs +++ b/examples/exex/minimal/src/main.rs @@ -1,5 +1,5 @@ use futures::Future; -use reth_exex::ExExContext; +use reth_exex::{ExExContext, ExExEvent}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; use reth_provider::CanonStateNotification; @@ -16,8 +16,8 @@ async fn exex_init( /// An ExEx is just a future, which means you can implement all of it in an async function! /// -/// This ExEx just prints out whenever a state transition happens, either a new chain segment being -/// added, or a chain segment being re-orged. +/// This ExEx just prints out whenever either a new chain of blocks being added, or a chain of +/// blocks being re-orged. After processing the chain, emits an [ExExEvent::FinishedHeight] event. 
async fn exex(mut ctx: ExExContext) -> eyre::Result<()> { while let Some(notification) = ctx.notifications.recv().await { match ¬ification { @@ -32,6 +32,8 @@ async fn exex(mut ctx: ExExContext) -> eyre::Res ); } }; + + ctx.events.send(ExExEvent::FinishedHeight(notification.tip().number))?; } Ok(()) } From 1a61d29afd2819a2a8e6dd71c3eac14092802e0c Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 18 Apr 2024 21:45:16 +0200 Subject: [PATCH 224/700] chore(engine): remove `find_canonical_ancestor` (#7716) --- crates/blockchain-tree/src/block_indices.rs | 5 ----- crates/blockchain-tree/src/noop.rs | 4 ---- crates/blockchain-tree/src/shareable.rs | 15 -------------- crates/consensus/beacon/src/engine/mod.rs | 21 ++++++-------------- crates/interfaces/src/blockchain_tree/mod.rs | 9 --------- crates/storage/provider/src/providers/mod.rs | 4 ---- 6 files changed, 6 insertions(+), 52 deletions(-) diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index 28b07145342aa..a262148b9fed1 100644 --- a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -109,11 +109,6 @@ impl BlockIndices { self.canonical_chain.get_canonical_block_number(self.last_finalized_block, block_hash) } - /// Check if block hash belongs to canonical chain. 
- pub(crate) fn is_block_hash_canonical(&self, block_hash: &BlockHash) -> bool { - self.get_canonical_block_number(block_hash).is_some() - } - /// Last finalized block pub fn last_finalized_block(&self) -> BlockNumber { self.last_finalized_block diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index a9fc43eb8344e..eff385fb6ef73 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -87,10 +87,6 @@ impl BlockchainTreeViewer for NoopBlockchainTree { Default::default() } - fn find_canonical_ancestor(&self, _parent_hash: BlockHash) -> Option { - None - } - fn is_canonical(&self, _block_hash: BlockHash) -> Result { Ok(false) } diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 03adfe2afa110..f839e20186d69 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -141,21 +141,6 @@ where self.tree.read().block_indices().canonical_chain().inner().clone() } - fn find_canonical_ancestor(&self, mut parent: BlockHash) -> Option { - let tree = self.tree.read(); - - // walk up the tree and check if the parent is in the sidechain - while let Some(block) = tree.block_by_hash(parent) { - parent = block.parent_hash; - } - - if tree.block_indices().is_block_hash_canonical(&parent) { - return Some(parent) - } - - None - } - fn is_canonical(&self, hash: BlockHash) -> Result { trace!(target: "blockchain_tree", ?hash, "Checking if block is canonical"); self.tree.read().is_block_hash_canonical(&hash) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index e0af48a348dd6..636adce8833d4 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -720,23 +720,14 @@ where return Some(B256::ZERO) } - // If this is sent from new payload then the parent hash could be in a side chain, and is - // not necessarily canonical - if 
self.blockchain.header_by_hash(parent_hash).is_some() { - // parent is in side-chain: validated but not canonical yet + // Check if parent exists in side chain or in canonical chain. + if matches!(self.blockchain.find_block_by_hash(parent_hash, BlockSource::Any), Ok(Some(_))) + { Some(parent_hash) } else { - let parent_hash = self.blockchain.find_canonical_ancestor(parent_hash)?; - let parent_header = self.blockchain.header(&parent_hash).ok().flatten()?; - - // we need to check if the parent block is the last POW block, if so then the payload is - // the first POS. The engine API spec mandates a zero hash to be returned: - if !parent_header.is_zero_difficulty() { - return Some(B256::ZERO) - } - - // parent is canonical POS block - Some(parent_hash) + // TODO: attempt to iterate over ancestors in the invalid cache + // until we encounter the first valid ancestor + None } } diff --git a/crates/interfaces/src/blockchain_tree/mod.rs b/crates/interfaces/src/blockchain_tree/mod.rs index f512d46be8ca8..d8ad667fcbbf2 100644 --- a/crates/interfaces/src/blockchain_tree/mod.rs +++ b/crates/interfaces/src/blockchain_tree/mod.rs @@ -288,15 +288,6 @@ pub trait BlockchainTreeViewer: Send + Sync { /// Canonical block number and hashes best known by the tree. fn canonical_blocks(&self) -> BTreeMap; - /// Given the parent hash of a block, this tries to find the last ancestor that is part of the - /// canonical chain. - /// - /// In other words, this will walk up the (side) chain starting with the given hash and return - /// the first block that's canonical. - /// - /// Note: this could be the given `parent_hash` if it's already canonical. - fn find_canonical_ancestor(&self, parent_hash: BlockHash) -> Option; - /// Return whether or not the block is known and in the canonical chain. 
fn is_canonical(&self, hash: BlockHash) -> Result; diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index bc5d6a3dfb5ba..f696c86d7bf6d 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -714,10 +714,6 @@ where self.tree.canonical_blocks() } - fn find_canonical_ancestor(&self, hash: BlockHash) -> Option { - self.tree.find_canonical_ancestor(hash) - } - fn is_canonical(&self, hash: BlockHash) -> Result { self.tree.is_canonical(hash) } From a1059bed994e278b696bc715ca72b27c8e154ebc Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 18 Apr 2024 21:50:41 +0200 Subject: [PATCH 225/700] chore: extract cli runner from node-core (#7719) --- Cargo.lock | 11 ++++++++++ Cargo.toml | 2 ++ bin/reth/Cargo.toml | 1 + bin/reth/src/cli/mod.rs | 2 +- .../src/commands/debug_cmd/build_block.rs | 2 +- bin/reth/src/commands/debug_cmd/execution.rs | 2 +- .../commands/debug_cmd/in_memory_merkle.rs | 2 +- bin/reth/src/commands/debug_cmd/merkle.rs | 2 +- bin/reth/src/commands/debug_cmd/mod.rs | 3 ++- .../src/commands/debug_cmd/replay_engine.rs | 2 +- bin/reth/src/commands/node/mod.rs | 2 +- bin/reth/src/commands/recover/mod.rs | 2 +- .../src/commands/recover/storage_tries.rs | 2 +- bin/reth/src/lib.rs | 4 ++++ crates/cli/runner/Cargo.toml | 22 +++++++++++++++++++ .../cli/runner.rs => cli/runner/src/lib.rs} | 12 ++++++++++ crates/node-core/src/cli/mod.rs | 1 - 17 files changed, 63 insertions(+), 11 deletions(-) create mode 100644 crates/cli/runner/Cargo.toml rename crates/{node-core/src/cli/runner.rs => cli/runner/src/lib.rs} (93%) diff --git a/Cargo.lock b/Cargo.lock index 5c48a9d7456aa..b71d76871c537 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6090,6 +6090,7 @@ dependencies = [ "reth-basic-payload-builder", "reth-beacon-consensus", "reth-blockchain-tree", + "reth-cli-runner", "reth-config", "reth-consensus-common", "reth-db", @@ -6244,6 +6245,16 @@ dependencies = [ 
"tracing", ] +[[package]] +name = "reth-cli-runner" +version = "0.2.0-beta.5" +dependencies = [ + "futures", + "reth-tasks", + "tokio", + "tracing", +] + [[package]] name = "reth-codecs" version = "0.2.0-beta.5" diff --git a/Cargo.toml b/Cargo.toml index 6aace0ad1853c..2ce5d813daf54 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,6 +2,7 @@ members = [ "bin/reth/", "crates/blockchain-tree/", + "crates/cli/runner/", "crates/config/", "crates/consensus/auto-seal/", "crates/consensus/beacon/", @@ -202,6 +203,7 @@ reth-basic-payload-builder = { path = "crates/payload/basic" } reth-beacon-consensus = { path = "crates/consensus/beacon" } reth-beacon-consensus-core = { path = "crates/consensus/beacon-core" } reth-blockchain-tree = { path = "crates/blockchain-tree" } +reth-cli-runner = { path = "crates/cli/runner" } reth-codecs = { path = "crates/storage/codecs" } reth-config = { path = "crates/config" } reth-consensus-common = { path = "crates/consensus/common" } diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index b57ea46ec7ca6..bf8922ccf07cc 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -24,6 +24,7 @@ reth-interfaces = { workspace = true, features = ["clap"] } reth-transaction-pool.workspace = true reth-beacon-consensus.workspace = true reth-auto-seal-consensus.workspace = true +reth-cli-runner.workspace = true reth-consensus-common.workspace = true reth-blockchain-tree.workspace = true reth-rpc-engine-api.workspace = true diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index ff206eefcb6bc..1adc4975fdd6f 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -9,10 +9,10 @@ use crate::{ config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, node, node::NoArgs, p2p, recover, stage, test_vectors, }, - core::cli::runner::CliRunner, version::{LONG_VERSION, SHORT_VERSION}, }; use clap::{value_parser, Parser, Subcommand}; +use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; use reth_node_builder::{InitState, 
WithLaunchContext}; use reth_primitives::ChainSpec; diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 063af9fc3ce3f..566198ec8255f 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -5,7 +5,6 @@ use crate::{ utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, DatabaseArgs, }, - core::cli::runner::CliContext, dirs::{DataDirPath, MaybePlatformPath}, }; use alloy_rlp::Decodable; @@ -18,6 +17,7 @@ use reth_beacon_consensus::BeaconConsensus; use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; +use reth_cli_runner::CliContext; use reth_db::{init_db, DatabaseEnv}; use reth_interfaces::{consensus::Consensus, RethResult}; use reth_node_api::PayloadBuilderAttributes; diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 99edd2ef25104..839e037ef771d 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -6,13 +6,13 @@ use crate::{ utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, DatabaseArgs, NetworkArgs, }, - core::cli::runner::CliContext, dirs::{DataDirPath, MaybePlatformPath}, utils::get_single_header, }; use clap::Parser; use futures::{stream::select as stream_select, StreamExt}; use reth_beacon_consensus::BeaconConsensus; +use reth_cli_runner::CliContext; use reth_config::{config::EtlConfig, Config}; use reth_db::{database::Database, init_db, DatabaseEnv}; use reth_downloaders::{ diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index e579e446a9a4a..3632f4cff6f92 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -6,12 +6,12 @@ use crate::{ utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, DatabaseArgs, 
NetworkArgs, }, - core::cli::runner::CliContext, dirs::{DataDirPath, MaybePlatformPath}, utils::{get_single_body, get_single_header}, }; use backon::{ConstantBuilder, Retryable}; use clap::Parser; +use reth_cli_runner::CliContext; use reth_config::Config; use reth_db::{init_db, DatabaseEnv}; use reth_interfaces::executor::BlockValidationError; diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 5db0503e5e401..742e51c707e5a 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -6,13 +6,13 @@ use crate::{ utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, DatabaseArgs, NetworkArgs, }, - core::cli::runner::CliContext, dirs::{DataDirPath, MaybePlatformPath}, utils::get_single_header, }; use backon::{ConstantBuilder, Retryable}; use clap::Parser; use reth_beacon_consensus::BeaconConsensus; +use reth_cli_runner::CliContext; use reth_config::Config; use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx, DatabaseEnv}; use reth_interfaces::{consensus::Consensus, p2p::full_block::FullBlockClient}; diff --git a/bin/reth/src/commands/debug_cmd/mod.rs b/bin/reth/src/commands/debug_cmd/mod.rs index 512df32e85b5d..c3704aff4e1ff 100644 --- a/bin/reth/src/commands/debug_cmd/mod.rs +++ b/bin/reth/src/commands/debug_cmd/mod.rs @@ -1,7 +1,8 @@ //! `reth debug` command. Collection of various debugging routines. 
-use crate::core::cli::runner::CliContext; use clap::{Parser, Subcommand}; +use reth_cli_runner::CliContext; + mod build_block; mod execution; mod in_memory_merkle; diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index d1b99f074a67e..0ef866396f77a 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -4,7 +4,6 @@ use crate::{ utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, DatabaseArgs, NetworkArgs, }, - core::cli::runner::CliContext, dirs::{DataDirPath, MaybePlatformPath}, }; use clap::Parser; @@ -14,6 +13,7 @@ use reth_beacon_consensus::{hooks::EngineHooks, BeaconConsensus, BeaconConsensus use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; +use reth_cli_runner::CliContext; use reth_config::Config; use reth_db::{init_db, DatabaseEnv}; use reth_interfaces::consensus::Consensus; diff --git a/bin/reth/src/commands/node/mod.rs b/bin/reth/src/commands/node/mod.rs index 786d6d1a125e2..3491304865342 100644 --- a/bin/reth/src/commands/node/mod.rs +++ b/bin/reth/src/commands/node/mod.rs @@ -6,10 +6,10 @@ use crate::{ DatabaseArgs, DebugArgs, DevArgs, NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, }, - core::cli::runner::CliContext, dirs::{DataDirPath, MaybePlatformPath}, }; use clap::{value_parser, Args, Parser}; +use reth_cli_runner::CliContext; use reth_db::{init_db, DatabaseEnv}; use reth_node_builder::{InitState, NodeBuilder, WithLaunchContext}; use reth_node_core::{node_config::NodeConfig, version}; diff --git a/bin/reth/src/commands/recover/mod.rs b/bin/reth/src/commands/recover/mod.rs index b1f778feac87b..d082f4e0731f8 100644 --- a/bin/reth/src/commands/recover/mod.rs +++ b/bin/reth/src/commands/recover/mod.rs @@ -1,7 +1,7 @@ //! `reth recover` command. 
-use crate::core::cli::runner::CliContext; use clap::{Parser, Subcommand}; +use reth_cli_runner::CliContext; mod storage_tries; diff --git a/bin/reth/src/commands/recover/storage_tries.rs b/bin/reth/src/commands/recover/storage_tries.rs index 6b619b94501ba..7a1c2ccc2747c 100644 --- a/bin/reth/src/commands/recover/storage_tries.rs +++ b/bin/reth/src/commands/recover/storage_tries.rs @@ -1,9 +1,9 @@ use crate::{ args::utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, - core::cli::runner::CliContext, dirs::{DataDirPath, MaybePlatformPath}, }; use clap::Parser; +use reth_cli_runner::CliContext; use reth_db::{ cursor::{DbCursorRO, DbDupCursorRW}, init_db, tables, diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 00e2c586a20b9..42f26115c5d8e 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -158,6 +158,10 @@ pub mod rpc { } } +// re-export for convenience +#[doc(inline)] +pub use reth_cli_runner::{tokio_runtime, CliContext, CliRunner}; + #[cfg(all(unix, any(target_env = "gnu", target_os = "macos")))] pub mod sigsegv_handler; diff --git a/crates/cli/runner/Cargo.toml b/crates/cli/runner/Cargo.toml new file mode 100644 index 0000000000000..697621cee0500 --- /dev/null +++ b/crates/cli/runner/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "reth-cli-runner" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-tasks.workspace = true + +# async +futures.workspace = true +tokio = { workspace = true, features = ["macros", "rt-multi-thread", "signal"] } + +# misc +tracing.workspace = true diff --git a/crates/node-core/src/cli/runner.rs b/crates/cli/runner/src/lib.rs similarity index 93% rename from crates/node-core/src/cli/runner.rs rename to crates/cli/runner/src/lib.rs index 9c2b717cdc019..31a1356c62bcb 100644 --- a/crates/node-core/src/cli/runner.rs +++ 
b/crates/cli/runner/src/lib.rs @@ -1,3 +1,13 @@ +//! A tokio based CLI runner. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + //! Entrypoint for running commands. use futures::pin_mut; @@ -6,6 +16,8 @@ use std::future::Future; use tracing::{debug, error, trace}; /// Executes CLI commands. +/// +/// Provides utilities for running a cli command to completion. #[derive(Clone, Debug, Default)] #[non_exhaustive] pub struct CliRunner; diff --git a/crates/node-core/src/cli/mod.rs b/crates/node-core/src/cli/mod.rs index fa14c5ee8ccc2..0ae8d33c061be 100644 --- a/crates/node-core/src/cli/mod.rs +++ b/crates/node-core/src/cli/mod.rs @@ -1,4 +1,3 @@ //! Additional CLI configuration support. pub mod config; -pub mod runner; From 61acd7801d8a9501c681b64498c61b7f4c4c77de Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 18 Apr 2024 21:52:30 +0200 Subject: [PATCH 226/700] docs: update base sequencer url (#7731) --- book/run/optimism.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/run/optimism.md b/book/run/optimism.md index ef5646e102fb2..8a5392d631e47 100644 --- a/book/run/optimism.md +++ b/book/run/optimism.md @@ -73,7 +73,7 @@ First, ensure that your L1 archival node is running and synced to tip. 
Then, sta ```sh op-reth node \ --chain base \ - --rollup.sequencer-http https://sequencer.base.org \ + --rollup.sequencer-http https://mainnet-sequencer.base.org \ --http \ --ws \ --authrpc.port 9551 \ From d5858adc0ff566dfc5892b37c4ba2a935a027a4e Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 18 Apr 2024 23:34:55 +0200 Subject: [PATCH 227/700] chore(engine): remove map insert error (#7733) --- crates/consensus/beacon/src/engine/mod.rs | 47 ++++++++--------------- 1 file changed, 15 insertions(+), 32 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 636adce8833d4..6f96c3606fdfd 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1108,7 +1108,21 @@ where } Err(error) => { warn!(target: "consensus::engine", %error, "Error while processing payload"); - self.map_insert_error(error) + + // If the error was due to an invalid payload, the payload is added to the invalid + // headers cache and `Ok` with [PayloadStatusEnum::Invalid] is returned. + let (block, error) = error.split(); + if error.is_invalid_block() { + warn!(target: "consensus::engine", invalid_hash=?block.hash(), invalid_number=?block.number, %error, "Invalid block error on new payload"); + let latest_valid_hash = + self.latest_valid_hash_for_invalid_payload(block.parent_hash, Some(&error)); + // keep track of the invalid header + self.invalid_headers.insert(block.header); + let status = PayloadStatusEnum::Invalid { validation_error: error.to_string() }; + Ok(PayloadStatus::new(status, latest_valid_hash)) + } else { + Err(BeaconOnNewPayloadError::Internal(Box::new(error))) + } } }; @@ -1290,37 +1304,6 @@ where Ok(PayloadStatus::new(status, latest_valid_hash)) } - /// Maps the error, that occurred while inserting a payload into the tree to its corresponding - /// result type. 
- /// - /// If the error was due to an invalid payload, the payload is added to the invalid headers - /// cache and `Ok` with [PayloadStatusEnum::Invalid] is returned. - /// - /// This returns an error if the error was internal and assumed not be related to the payload. - fn map_insert_error( - &mut self, - err: InsertBlockError, - ) -> Result { - let (block, error) = err.split(); - - if error.is_invalid_block() { - warn!(target: "consensus::engine", invalid_hash=?block.hash(), invalid_number=?block.number, %error, "Invalid block error on new payload"); - - // all of these occurred if the payload is invalid - let parent_hash = block.parent_hash; - - // keep track of the invalid header - self.invalid_headers.insert(block.header); - - let latest_valid_hash = - self.latest_valid_hash_for_invalid_payload(parent_hash, Some(&error)); - let status = PayloadStatusEnum::Invalid { validation_error: error.to_string() }; - Ok(PayloadStatus::new(status, latest_valid_hash)) - } else { - Err(BeaconOnNewPayloadError::Internal(Box::new(error))) - } - } - /// Invoked if we successfully downloaded a new block from the network. /// /// This will attempt to insert the block into the tree. From 93871b3eaf389a85d4700149d11efe60b69a7969 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 19 Apr 2024 10:40:17 +0200 Subject: [PATCH 228/700] chore(engine): extract fcu pre-validation (#7736) --- crates/consensus/beacon/src/engine/mod.rs | 46 ++++++++++++++++------- 1 file changed, 32 insertions(+), 14 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 6f96c3606fdfd..dc5829bc85ca7 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -336,35 +336,30 @@ where }) } - /// Called to resolve chain forks and ensure that the Execution layer is working with the latest - /// valid chain. 
- /// - /// These responses should adhere to the [Engine API Spec for - /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). + /// Pre-validate forkchoice update and check whether it can be processed. /// - /// Returns an error if an internal error occurred like a database error. - fn forkchoice_updated( + /// This method returns the update outcome if validation fails or + /// the node is syncing and the update cannot be processed at the moment. + fn pre_validate_forkchoice_update( &mut self, state: ForkchoiceState, - mut attrs: Option, - ) -> RethResult { - trace!(target: "consensus::engine", ?state, "Received new forkchoice state update"); + ) -> Option { if state.head_block_hash.is_zero() { - return Ok(OnForkChoiceUpdated::invalid_state()) + return Some(OnForkChoiceUpdated::invalid_state()) } // check if the new head hash is connected to any ancestor that we previously marked as // invalid let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash); if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu) { - return Ok(OnForkChoiceUpdated::with_invalid(status)) + return Some(OnForkChoiceUpdated::with_invalid(status)) } if self.sync.is_pipeline_active() { // We can only process new forkchoice updates if the pipeline is idle, since it requires // exclusive access to the database trace!(target: "consensus::engine", "Pipeline is syncing, skipping forkchoice update"); - return Ok(OnForkChoiceUpdated::syncing()) + return Some(OnForkChoiceUpdated::syncing()) } if let Some(hook) = self.hooks.active_db_write_hook() { @@ -379,7 +374,30 @@ where "Hook is in progress, skipping forkchoice update. \ This may affect the performance of your node as a validator." 
); - return Ok(OnForkChoiceUpdated::syncing()) + return Some(OnForkChoiceUpdated::syncing()) + } + + None + } + + /// Called to resolve chain forks and ensure that the Execution layer is working with the latest + /// valid chain. + /// + /// These responses should adhere to the [Engine API Spec for + /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). + /// + /// Returns an error if an internal error occurred like a database error. + fn forkchoice_updated( + &mut self, + state: ForkchoiceState, + mut attrs: Option, + ) -> RethResult { + trace!(target: "consensus::engine", ?state, "Received new forkchoice state update"); + + // Pre-validate forkchoice state update and return if it's invalid or + // cannot be processed at the moment. + if let Some(on_updated) = self.pre_validate_forkchoice_update(state) { + return Ok(on_updated) } let start = Instant::now(); From 1c46e5ae7d21a0696bc299fefc172d5a6cb993f6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 19 Apr 2024 11:41:51 +0200 Subject: [PATCH 229/700] feat: block executor provider and ethereum + op impl (#7594) Co-authored-by: Oliver Nordbjerg Co-authored-by: Georgios Konstantopoulos --- Cargo.lock | 10 + crates/evm-ethereum/Cargo.toml | 12 + crates/evm-ethereum/src/execute.rs | 826 ++++++++++++++++++ crates/evm-ethereum/src/lib.rs | 1 + crates/evm/Cargo.toml | 1 + crates/evm/src/execute.rs | 165 ++++ crates/evm/src/lib.rs | 2 + crates/node-optimism/Cargo.toml | 6 + crates/node-optimism/src/evm/execute.rs | 745 ++++++++++++++++ .../node-optimism/src/{evm.rs => evm/mod.rs} | 3 + crates/node-optimism/src/rpc.rs | 1 - crates/revm/Cargo.toml | 2 + crates/revm/src/lib.rs | 2 +- crates/revm/src/processor.rs | 12 +- crates/revm/src/test_utils.rs | 1 + 15 files changed, 1779 insertions(+), 10 deletions(-) create mode 100644 crates/evm-ethereum/src/execute.rs create mode 100644 crates/evm/src/execute.rs create mode 100644 
crates/node-optimism/src/evm/execute.rs rename crates/node-optimism/src/{evm.rs => evm/mod.rs} (98%) diff --git a/Cargo.lock b/Cargo.lock index b71d76871c537..5f76031ce87e2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6601,6 +6601,7 @@ dependencies = [ name = "reth-evm" version = "0.2.0-beta.5" dependencies = [ + "reth-interfaces", "reth-primitives", "revm", "revm-primitives", @@ -6611,7 +6612,12 @@ name = "reth-evm-ethereum" version = "0.2.0-beta.5" dependencies = [ "reth-evm", + "reth-interfaces", "reth-primitives", + "reth-provider", + "reth-revm", + "revm-primitives", + "tracing", ] [[package]] @@ -7008,6 +7014,8 @@ dependencies = [ "reqwest 0.11.27", "reth-basic-payload-builder", "reth-db", + "reth-evm", + "reth-interfaces", "reth-network", "reth-node-api", "reth-node-builder", @@ -7022,9 +7030,11 @@ dependencies = [ "reth-tracing", "reth-transaction-pool", "revm", + "revm-primitives", "serde", "serde_json", "thiserror", + "tracing", ] [[package]] diff --git a/crates/evm-ethereum/Cargo.toml b/crates/evm-ethereum/Cargo.toml index 7a05695ab65b3..ea7cfab8c2368 100644 --- a/crates/evm-ethereum/Cargo.toml +++ b/crates/evm-ethereum/Cargo.toml @@ -11,6 +11,18 @@ repository.workspace = true workspace = true [dependencies] +# Reth reth-evm.workspace = true reth-primitives.workspace = true +reth-revm.workspace = true +reth-interfaces.workspace = true +reth-provider.workspace = true +# Ethereum +revm-primitives.workspace = true + +# misc +tracing.workspace = true + +[dev-dependencies] +reth-revm = { workspace = true, features = ["test-utils"] } \ No newline at end of file diff --git a/crates/evm-ethereum/src/execute.rs b/crates/evm-ethereum/src/execute.rs new file mode 100644 index 0000000000000..b23c35cfd53ab --- /dev/null +++ b/crates/evm-ethereum/src/execute.rs @@ -0,0 +1,826 @@ +//! Ethereum block executor. 
+ +use crate::EthEvmConfig; +use reth_evm::{ + execute::{ + BatchBlockOutput, BatchExecutor, EthBlockExecutionInput, EthBlockOutput, Executor, + ExecutorProvider, + }, + ConfigureEvm, ConfigureEvmEnv, +}; +use reth_interfaces::{ + executor::{BlockExecutionError, BlockValidationError}, + provider::ProviderError, +}; +use reth_primitives::{ + BlockWithSenders, ChainSpec, GotExpected, Hardfork, Header, PruneModes, Receipt, Receipts, + Withdrawals, U256, +}; +use reth_provider::BundleStateWithReceipts; +use reth_revm::{ + batch::{BlockBatchRecord, BlockExecutorStats}, + db::states::bundle_state::BundleRetention, + eth_dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, + processor::verify_receipt, + stack::InspectorStack, + state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, + Evm, State, +}; +use revm_primitives::{ + db::{Database, DatabaseCommit}, + BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, +}; +use std::sync::Arc; +use tracing::debug; + +/// Provides executors to execute regular ethereum blocks +#[derive(Debug, Clone)] +pub struct EthExecutorProvider { + chain_spec: Arc, + evm_config: EvmConfig, + inspector: Option, + prune_modes: PruneModes, +} + +impl EthExecutorProvider { + /// Creates a new default ethereum executor provider. + pub fn ethereum(chain_spec: Arc) -> Self { + Self::new(chain_spec, Default::default()) + } +} + +impl EthExecutorProvider { + /// Creates a new executor provider. + pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { + Self { chain_spec, evm_config, inspector: None, prune_modes: PruneModes::none() } + } + + /// Configures an optional inspector stack for debugging. + pub fn with_inspector(mut self, inspector: InspectorStack) -> Self { + self.inspector = Some(inspector); + self + } + + /// Configures the prune modes for the executor. 
+ pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { + self.prune_modes = prune_modes; + self + } +} + +impl EthExecutorProvider +where + EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvmEnv, +{ + fn eth_executor(&self, db: DB) -> EthBlockExecutor + where + DB: Database, + { + EthBlockExecutor::new( + self.chain_spec.clone(), + self.evm_config.clone(), + State::builder().with_database(db).with_bundle_update().without_state_clear().build(), + ) + .with_inspector(self.inspector.clone()) + } +} + +impl ExecutorProvider for EthExecutorProvider +where + EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvmEnv, +{ + type Executor> = EthBlockExecutor; + + type BatchExecutor> = EthBatchExecutor; + + fn executor(&self, db: DB) -> Self::Executor + where + DB: Database, + { + self.eth_executor(db) + } + + fn batch_executor(&self, db: DB) -> Self::BatchExecutor + where + DB: Database, + { + let executor = self.eth_executor(db); + EthBatchExecutor { + executor, + batch_record: BlockBatchRecord::new(self.prune_modes.clone()), + stats: BlockExecutorStats::default(), + } + } +} + +/// Helper container type for EVM with chain spec. +#[derive(Debug, Clone)] +struct EthEvmExecutor { + /// The chainspec + chain_spec: Arc, + /// How to create an EVM. + evm_config: EvmConfig, +} + +impl EthEvmExecutor +where + EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvmEnv, +{ + /// Executes the transactions in the block and returns the receipts. + /// + /// This applies the pre-execution changes, and executes the transactions. + /// + /// # Note + /// + /// It does __not__ apply post-execution changes. 
+ fn execute_pre_and_transactions( + &mut self, + block: &BlockWithSenders, + mut evm: Evm<'_, Ext, &mut State>, + ) -> Result<(Vec, u64), BlockExecutionError> + where + DB: Database, + { + // apply pre execution changes + apply_beacon_root_contract_call( + &self.chain_spec, + block.timestamp, + block.number, + block.parent_beacon_block_root, + &mut evm, + )?; + + // execute transactions + let mut cumulative_gas_used = 0; + let mut receipts = Vec::with_capacity(block.body.len()); + for (sender, transaction) in block.transactions_with_sender() { + // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, + // must be no greater than the block’s gasLimit. + let block_available_gas = block.header.gas_limit - cumulative_gas_used; + if transaction.gas_limit() > block_available_gas { + return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { + transaction_gas_limit: transaction.gas_limit(), + block_available_gas, + } + .into()) + } + + EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender, ()); + + // Execute transaction. + let ResultAndState { result, state } = evm.transact().map_err(move |err| { + // Ensure hash is calculated for error log, if not already done + BlockValidationError::EVM { + hash: transaction.recalculate_hash(), + error: err.into(), + } + })?; + evm.db_mut().commit(state); + + // append gas used + cumulative_gas_used += result.gas_used(); + + // Push transaction changeset and calculate header bloom filter for receipt. + receipts.push( + #[allow(clippy::needless_update)] // side-effect of optimism fields + Receipt { + tx_type: transaction.tx_type(), + // Success flag was added in `EIP-658: Embedding transaction status code in + // receipts`. + success: result.is_success(), + cumulative_gas_used, + // convert to reth log + logs: result.into_logs(), + ..Default::default() + }, + ); + } + drop(evm); + + // Check if gas used matches the value set in header. 
+ if block.gas_used != cumulative_gas_used { + let receipts = Receipts::from_block_receipt(receipts); + return Err(BlockValidationError::BlockGasUsed { + gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, + gas_spent_by_tx: receipts.gas_spent_by_tx()?, + } + .into()) + } + + Ok((receipts, cumulative_gas_used)) + } +} + +/// A basic Ethereum block executor. +/// +/// Expected usage: +/// - Create a new instance of the executor. +/// - Execute the block. +#[derive(Debug)] +pub struct EthBlockExecutor { + /// Chain specific evm config that's used to execute a block. + executor: EthEvmExecutor, + /// The state to use for execution + state: State, + /// Optional inspector stack for debugging + inspector: Option, +} + +impl EthBlockExecutor { + /// Creates a new Ethereum block executor. + pub fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { + Self { executor: EthEvmExecutor { chain_spec, evm_config }, state, inspector: None } + } + + /// Sets the inspector stack for debugging. + pub fn with_inspector(mut self, inspector: Option) -> Self { + self.inspector = inspector; + self + } + + #[inline] + fn chain_spec(&self) -> &ChainSpec { + &self.executor.chain_spec + } + + /// Returns mutable reference to the state that wraps the underlying database. + #[allow(unused)] + fn state_mut(&mut self) -> &mut State { + &mut self.state + } +} + +impl EthBlockExecutor +where + EvmConfig: ConfigureEvm, + // TODO(mattsse): get rid of this + EvmConfig: ConfigureEvmEnv, + DB: Database, +{ + /// Configures a new evm configuration and block environment for the given block. + /// + /// # Caution + /// + /// This does not initialize the tx environment. 
+ fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + EvmConfig::fill_cfg_and_block_env( + &mut cfg, + &mut block_env, + self.chain_spec(), + header, + total_difficulty, + ); + + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) + } + + /// Execute a single block and apply the state changes to the internal state. + /// + /// Returns the receipts of the transactions in the block and the total gas used. + /// + /// Returns an error if execution fails or receipt verification fails. + fn execute_and_verify( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result<(Vec, u64), BlockExecutionError> { + // 1. prepare state on new block + self.on_new_block(&block.header); + + // 2. configure the evm and execute + let env = self.evm_env_for_block(&block.header, total_difficulty); + + let (receipts, gas_used) = { + if let Some(inspector) = self.inspector.as_mut() { + let evm = self.executor.evm_config.evm_with_env_and_inspector( + &mut self.state, + env, + inspector, + ); + self.executor.execute_pre_and_transactions(block, evm)? + } else { + let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); + + self.executor.execute_pre_and_transactions(block, evm)? + } + }; + + // 3. apply post execution changes + self.post_execution(block, total_difficulty)?; + + // Before Byzantium, receipts contained state root that would mean that expensive + // operation as hashing that is required for state root got calculated in every + // transaction This was replaced with is_success flag. 
+ // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 + if self.chain_spec().is_byzantium_active_at_block(block.header.number) { + if let Err(error) = + verify_receipt(block.header.receipts_root, block.header.logs_bloom, receipts.iter()) + { + debug!(target: "evm", %error, ?receipts, "receipts verification failed"); + return Err(error) + }; + } + + Ok((receipts, gas_used)) + } + + /// Apply settings before a new block is executed. + pub(crate) fn on_new_block(&mut self, header: &Header) { + // Set state clear flag if the block is after the Spurious Dragon hardfork. + let state_clear_flag = self.chain_spec().is_spurious_dragon_active_at_block(header.number); + self.state.set_state_clear_flag(state_clear_flag); + } + + /// Apply post execution state changes, including block rewards, withdrawals, and irregular DAO + /// hardfork state change. + pub fn post_execution( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result<(), BlockExecutionError> { + let mut balance_increments = post_block_balance_increments( + self.chain_spec(), + block.number, + block.difficulty, + block.beneficiary, + block.timestamp, + total_difficulty, + &block.ommers, + block.withdrawals.as_ref().map(Withdrawals::as_ref), + ); + + // Irregular state change at Ethereum DAO hardfork + if self.chain_spec().fork(Hardfork::Dao).transitions_at_block(block.number) { + // drain balances from hardcoded addresses. + let drained_balance: u128 = self + .state + .drain_balances(DAO_HARDKFORK_ACCOUNTS) + .map_err(|_| BlockValidationError::IncrementBalanceFailed)? + .into_iter() + .sum(); + + // return balance to DAO beneficiary. 
+ *balance_increments.entry(DAO_HARDFORK_BENEFICIARY).or_default() += drained_balance; + } + // increment balances + self.state + .increment_balances(balance_increments) + .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; + + Ok(()) + } +} + +impl Executor for EthBlockExecutor +where + EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvmEnv, + DB: Database, +{ + type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; + type Output = EthBlockOutput; + type Error = BlockExecutionError; + + /// Executes the block and commits the state changes. + /// + /// Returns the receipts of the transactions in the block. + /// + /// Returns an error if the block could not be executed or failed verification. + /// + /// State changes are committed to the database. + fn execute(mut self, input: Self::Input<'_>) -> Result { + let EthBlockExecutionInput { block, total_difficulty } = input; + let (receipts, gas_used) = self.execute_and_verify(block, total_difficulty)?; + + // prepare the state for extraction + self.state.merge_transitions(BundleRetention::PlainState); + + Ok(EthBlockOutput { state: self.state.take_bundle(), receipts, gas_used }) + } +} + +/// An executor for a batch of blocks. +/// +/// State changes are tracked until the executor is finalized. +#[derive(Debug)] +pub struct EthBatchExecutor { + /// The executor used to execute single blocks + /// + /// All state changes are committed to the [State]. + executor: EthBlockExecutor, + /// Keeps track of the batch and records receipts based on the configured prune mode + batch_record: BlockBatchRecord, + stats: BlockExecutorStats, +} + +impl EthBatchExecutor { + /// Returns mutable reference to the state that wraps the underlying database. 
+ #[allow(unused)] + fn state_mut(&mut self) -> &mut State { + self.executor.state_mut() + } +} + +impl BatchExecutor for EthBatchExecutor +where + EvmConfig: ConfigureEvm, + // TODO(mattsse): get rid of this + EvmConfig: ConfigureEvmEnv, + DB: Database, +{ + type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; + type Output = BundleStateWithReceipts; + type Error = BlockExecutionError; + + fn execute_one(&mut self, input: Self::Input<'_>) -> Result { + let EthBlockExecutionInput { block, total_difficulty } = input; + let (receipts, _gas_used) = self.executor.execute_and_verify(block, total_difficulty)?; + + // prepare the state according to the prune mode + let retention = self.batch_record.bundle_retention(block.number); + self.executor.state.merge_transitions(retention); + + // store receipts in the set + self.batch_record.save_receipts(receipts)?; + + Ok(BatchBlockOutput { size_hint: Some(self.executor.state.bundle_size_hint()) }) + } + + fn finalize(mut self) -> Self::Output { + self.stats.log_debug(); + + BundleStateWithReceipts::new( + self.executor.state.take_bundle(), + self.batch_record.take_receipts(), + self.batch_record.first_block().unwrap_or_default(), + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::EthEvmConfig; + use reth_primitives::{ + bytes, + constants::{BEACON_ROOTS_ADDRESS, SYSTEM_ADDRESS}, + keccak256, Account, Block, Bytes, ChainSpecBuilder, ForkCondition, B256, MAINNET, + }; + use reth_revm::{ + database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, + }; + use std::collections::HashMap; + + static BEACON_ROOT_CONTRACT_CODE: Bytes = bytes!("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500"); + + fn create_state_provider_with_beacon_root_contract() -> StateProviderTest { + let mut db = StateProviderTest::default(); + + let 
beacon_root_contract_account = Account { + balance: U256::ZERO, + bytecode_hash: Some(keccak256(BEACON_ROOT_CONTRACT_CODE.clone())), + nonce: 1, + }; + + db.insert_account( + BEACON_ROOTS_ADDRESS, + beacon_root_contract_account, + Some(BEACON_ROOT_CONTRACT_CODE.clone()), + HashMap::new(), + ); + + db + } + + fn executor_provider(chain_spec: Arc) -> EthExecutorProvider { + EthExecutorProvider { + chain_spec, + evm_config: Default::default(), + inspector: None, + prune_modes: Default::default(), + } + } + + #[test] + fn eip_4788_non_genesis_call() { + let mut header = + Header { timestamp: 1, number: 1, excess_blob_gas: Some(0), ..Header::default() }; + + let db = create_state_provider_with_beacon_root_contract(); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) + .build(), + ); + + let provider = executor_provider(chain_spec); + + // attempt to execute a block without parent beacon block root, expect err + let err = provider + .executor(StateProviderDatabase::new(&db)) + .execute( + ( + &BlockWithSenders { + block: Block { + header: header.clone(), + body: vec![], + ommers: vec![], + withdrawals: None, + }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .expect_err( + "Executing cancun block without parent beacon block root field should fail", + ); + assert_eq!( + err, + BlockExecutionError::Validation(BlockValidationError::MissingParentBeaconBlockRoot) + ); + + // fix header, set a gas limit + header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); + + let mut executor = provider.executor(StateProviderDatabase::new(&db)); + + // Now execute a block with the fixed header, ensure that it does not fail + executor + .execute_and_verify( + &BlockWithSenders { + block: Block { + header: header.clone(), + body: vec![], + ommers: vec![], + withdrawals: None, + }, + senders: vec![], + }, + U256::ZERO, + ) + .unwrap(); + + // check the actual storage 
of the contract - it should be: + // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be + // header.timestamp + // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH + // // should be parent_beacon_block_root + let history_buffer_length = 8191u64; + let timestamp_index = header.timestamp % history_buffer_length; + let parent_beacon_block_root_index = + timestamp_index % history_buffer_length + history_buffer_length; + + // get timestamp storage and compare + let timestamp_storage = + executor.state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap(); + assert_eq!(timestamp_storage, U256::from(header.timestamp)); + + // get parent beacon block root storage and compare + let parent_beacon_block_root_storage = executor + .state + .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) + .expect("storage value should exist"); + assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); + } + + #[test] + fn eip_4788_no_code_cancun() { + // This test ensures that we "silently fail" when cancun is active and there is no code at + // // BEACON_ROOTS_ADDRESS + let header = Header { + timestamp: 1, + number: 1, + parent_beacon_block_root: Some(B256::with_last_byte(0x69)), + excess_blob_gas: Some(0), + ..Header::default() + }; + + let db = StateProviderTest::default(); + + // DON'T deploy the contract at genesis + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) + .build(), + ); + + let provider = executor_provider(chain_spec); + + // attempt to execute an empty block with parent beacon block root, this should not fail + provider + .executor(StateProviderDatabase::new(&db)) + .execute( + ( + &BlockWithSenders { + block: Block { header, body: vec![], ommers: vec![], withdrawals: None }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .expect( + "Executing a block with no 
transactions while cancun is active should not fail", + ); + } + + #[test] + fn eip_4788_empty_account_call() { + // This test ensures that we do not increment the nonce of an empty SYSTEM_ADDRESS account + // // during the pre-block call + + let mut db = create_state_provider_with_beacon_root_contract(); + + // insert an empty SYSTEM_ADDRESS + db.insert_account(SYSTEM_ADDRESS, Account::default(), None, HashMap::new()); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) + .build(), + ); + + let provider = executor_provider(chain_spec); + + // construct the header for block one + let header = Header { + timestamp: 1, + number: 1, + parent_beacon_block_root: Some(B256::with_last_byte(0x69)), + excess_blob_gas: Some(0), + ..Header::default() + }; + + let mut executor = provider.executor(StateProviderDatabase::new(&db)); + + // attempt to execute an empty block with parent beacon block root, this should not fail + executor + .execute_and_verify( + &BlockWithSenders { + block: Block { header, body: vec![], ommers: vec![], withdrawals: None }, + senders: vec![], + }, + U256::ZERO, + ) + .expect( + "Executing a block with no transactions while cancun is active should not fail", + ); + + // ensure that the nonce of the system address account has not changed + let nonce = executor.state_mut().basic(SYSTEM_ADDRESS).unwrap().unwrap().nonce; + assert_eq!(nonce, 0); + } + + #[test] + fn eip_4788_genesis_call() { + let db = create_state_provider_with_beacon_root_contract(); + + // activate cancun at genesis + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(0)) + .build(), + ); + + let mut header = chain_spec.genesis_header(); + + let provider = executor_provider(chain_spec); + + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + // attempt to execute the 
genesis block with non-zero parent beacon block root, expect err + header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); + let _err = executor + .execute_one( + ( + &BlockWithSenders { + block: Block { + header: header.clone(), + body: vec![], + ommers: vec![], + withdrawals: None, + }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .expect_err( + "Executing genesis cancun block with non-zero parent beacon block root field + should fail", + ); + + // fix header + header.parent_beacon_block_root = Some(B256::ZERO); + + // now try to process the genesis block again, this time ensuring that a system contract + // call does not occur + executor + .execute_one( + ( + &BlockWithSenders { + block: Block { header, body: vec![], ommers: vec![], withdrawals: None }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .unwrap(); + + // there is no system contract call so there should be NO STORAGE CHANGES + // this means we'll check the transition state + let transition_state = executor + .state_mut() + .transition_state + .take() + .expect("the evm should be initialized with bundle updates"); + + // assert that it is the default (empty) transition state + assert_eq!(transition_state, TransitionState::default()); + } + + #[test] + fn eip_4788_high_base_fee() { + // This test ensures that if we have a base fee, then we don't return an error when the + // system contract is called, due to the gas price being less than the base fee. 
+ let header = Header { + timestamp: 1, + number: 1, + parent_beacon_block_root: Some(B256::with_last_byte(0x69)), + base_fee_per_gas: Some(u64::MAX), + excess_blob_gas: Some(0), + ..Header::default() + }; + + let db = create_state_provider_with_beacon_root_contract(); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) + .build(), + ); + + let provider = executor_provider(chain_spec); + + // execute header + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + // Now execute a block with the fixed header, ensure that it does not fail + executor + .execute_one( + ( + &BlockWithSenders { + block: Block { + header: header.clone(), + body: vec![], + ommers: vec![], + withdrawals: None, + }, + senders: vec![], + }, + U256::ZERO, + ) + .into(), + ) + .unwrap(); + + // check the actual storage of the contract - it should be: + // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be + // header.timestamp + // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH + // // should be parent_beacon_block_root + let history_buffer_length = 8191u64; + let timestamp_index = header.timestamp % history_buffer_length; + let parent_beacon_block_root_index = + timestamp_index % history_buffer_length + history_buffer_length; + + // get timestamp storage and compare + let timestamp_storage = executor + .state_mut() + .storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)) + .unwrap(); + assert_eq!(timestamp_storage, U256::from(header.timestamp)); + + // get parent beacon block root storage and compare + let parent_beacon_block_root_storage = executor + .state_mut() + .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) + .unwrap(); + assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); + } +} diff --git a/crates/evm-ethereum/src/lib.rs b/crates/evm-ethereum/src/lib.rs 
index 9a195adef136c..a320a2b3c62a8 100644 --- a/crates/evm-ethereum/src/lib.rs +++ b/crates/evm-ethereum/src/lib.rs @@ -14,6 +14,7 @@ use reth_primitives::{ revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, Address, ChainSpec, Head, Header, Transaction, U256, }; +pub mod execute; /// Ethereum-related EVM configuration. #[derive(Debug, Clone, Copy, Default)] diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index b100c83b7b668..f13c471a7a4bd 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -15,4 +15,5 @@ workspace = true reth-primitives.workspace = true revm-primitives.workspace = true revm.workspace = true +reth-interfaces.workspace = true diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs new file mode 100644 index 0000000000000..b8c1536029a9d --- /dev/null +++ b/crates/evm/src/execute.rs @@ -0,0 +1,165 @@ +//! Traits for execution. + +use reth_interfaces::provider::ProviderError; +use reth_primitives::U256; +use revm::db::BundleState; +use revm_primitives::db::Database; + +/// A general purpose executor trait that executes on an input (e.g. blocks) and produces an output +/// (e.g. state changes and receipts). +pub trait Executor { + /// The input type for the executor. + type Input<'a>; + /// The output type for the executor. + type Output; + /// The error type returned by the executor. + type Error; + + /// Consumes the type and executes the block. + /// + /// Returns the output of the block execution. + fn execute(self, input: Self::Input<'_>) -> Result; +} + +/// An executor that can execute multiple blocks in a row and keep track of the state over the +/// entire batch. +pub trait BatchExecutor { + /// The input type for the executor. + type Input<'a>; + /// The output type for the executor. + type Output; + /// The error type returned by the executor. + type Error; + + /// Executes the next block in the batch and update the state internally. 
+ fn execute_one(&mut self, input: Self::Input<'_>) -> Result; + + /// Finishes the batch and return the final state. + fn finalize(self) -> Self::Output; +} + +/// The output of an executed block in a batch. +#[derive(Debug, Clone, Copy)] +pub struct BatchBlockOutput { + /// The size hint of the batch's tracked state. + pub size_hint: Option, +} + +/// The output of an ethereum block. +/// +/// Contains the state changes, transaction receipts, and total gas used in the block. +/// +/// TODO(mattsse): combine with BundleStateWithReceipts +#[derive(Debug)] +pub struct EthBlockOutput { + /// The changed state of the block after execution. + pub state: BundleState, + /// All the receipts of the transactions in the block. + pub receipts: Vec, + /// The total gas used by the block. + pub gas_used: u64, +} + +/// A helper type for ethereum block inputs that consists of a block and the total difficulty. +#[derive(Debug)] +pub struct EthBlockExecutionInput<'a, Block> { + /// The block to execute. + pub block: &'a Block, + /// The total difficulty of the block. + pub total_difficulty: U256, +} + +impl<'a, Block> EthBlockExecutionInput<'a, Block> { + /// Creates a new input. + pub fn new(block: &'a Block, total_difficulty: U256) -> Self { + Self { block, total_difficulty } + } +} + +impl<'a, Block> From<(&'a Block, U256)> for EthBlockExecutionInput<'a, Block> { + fn from((block, total_difficulty): (&'a Block, U256)) -> Self { + Self::new(block, total_difficulty) + } +} + +/// A type that can create a new executor. +pub trait ExecutorProvider: Send + Sync + Clone { + /// An executor that can execute a single block given a database. + type Executor>: Executor; + /// An executor that can execute a batch of blocks given a database. + + type BatchExecutor>: BatchExecutor; + /// Creates a new executor for single block execution. 
+ fn executor(&self, db: DB) -> Self::Executor + where + DB: Database; + + /// Creates a new batch executor + fn batch_executor(&self, db: DB) -> Self::BatchExecutor + where + DB: Database; +} + +#[cfg(test)] +mod tests { + use super::*; + use revm::db::{CacheDB, EmptyDBTyped}; + use std::marker::PhantomData; + + #[derive(Clone, Default)] + struct TestExecutorProvider; + + impl ExecutorProvider for TestExecutorProvider { + type Executor> = TestExecutor; + type BatchExecutor> = TestExecutor; + + fn executor(&self, _db: DB) -> Self::Executor + where + DB: Database, + { + TestExecutor(PhantomData) + } + + fn batch_executor(&self, _db: DB) -> Self::BatchExecutor + where + DB: Database, + { + TestExecutor(PhantomData) + } + } + + struct TestExecutor(PhantomData); + + impl Executor for TestExecutor { + type Input<'a> = &'static str; + type Output = (); + type Error = String; + + fn execute(self, _input: Self::Input<'_>) -> Result { + Ok(()) + } + } + + impl BatchExecutor for TestExecutor { + type Input<'a> = &'static str; + type Output = (); + type Error = String; + + fn execute_one( + &mut self, + _input: Self::Input<'_>, + ) -> Result { + Ok(BatchBlockOutput { size_hint: None }) + } + + fn finalize(self) -> Self::Output {} + } + + #[test] + fn test_provider() { + let provider = TestExecutorProvider; + let db = CacheDB::>::default(); + let executor = provider.executor(db); + executor.execute("test").unwrap(); + } +} diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 78dd0cb07e159..78a76e54ccd5f 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -12,6 +12,8 @@ use reth_primitives::{revm::env::fill_block_env, Address, ChainSpec, Header, Tra use revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, SpecId, TxEnv}; +pub mod execute; + /// Trait for configuring the EVM for executing full blocks. 
pub trait ConfigureEvm: ConfigureEvmEnv { /// Returns new EVM with the given database diff --git a/crates/node-optimism/Cargo.toml b/crates/node-optimism/Cargo.toml index 7851a259d2920..c6d2ece405e24 100644 --- a/crates/node-optimism/Cargo.toml +++ b/crates/node-optimism/Cargo.toml @@ -25,8 +25,12 @@ reth-tracing.workspace = true reth-provider.workspace = true reth-transaction-pool.workspace = true reth-network.workspace = true +reth-interfaces.workspace = true +reth-evm.workspace = true reth-revm.workspace = true + revm.workspace = true +revm-primitives.workspace = true # async async-trait.workspace = true @@ -36,6 +40,7 @@ http-body = "0.4.5" reqwest = { version = "0.11", default-features = false, features = [ "rustls-tls", ]} +tracing.workspace = true # misc clap.workspace = true @@ -48,6 +53,7 @@ jsonrpsee.workspace = true [dev-dependencies] reth-db.workspace = true +reth-revm = { workspace = true, features = ["test-utils"]} [features] optimism = [ diff --git a/crates/node-optimism/src/evm/execute.rs b/crates/node-optimism/src/evm/execute.rs new file mode 100644 index 0000000000000..cca13fb7d205e --- /dev/null +++ b/crates/node-optimism/src/evm/execute.rs @@ -0,0 +1,745 @@ +//! Optimism block executor. 
+ +use crate::OptimismEvmConfig; +use reth_evm::{ + execute::{ + BatchBlockOutput, BatchExecutor, EthBlockExecutionInput, EthBlockOutput, Executor, + ExecutorProvider, + }, + ConfigureEvm, ConfigureEvmEnv, +}; +use reth_interfaces::{ + executor::{BlockExecutionError, BlockValidationError, OptimismBlockExecutionError}, + provider::ProviderError, +}; +use reth_primitives::{ + proofs::calculate_receipt_root_optimism, BlockWithSenders, Bloom, Bytes, ChainSpec, + GotExpected, Hardfork, Header, PruneModes, Receipt, ReceiptWithBloom, Receipts, TxType, + Withdrawals, B256, U256, +}; +use reth_provider::BundleStateWithReceipts; +use reth_revm::{ + batch::{BlockBatchRecord, BlockExecutorStats}, + db::states::bundle_state::BundleRetention, + optimism::ensure_create2_deployer, + processor::compare_receipts_root_and_logs_bloom, + stack::InspectorStack, + state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, + Evm, State, +}; +use revm_primitives::{ + db::{Database, DatabaseCommit}, + BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, +}; +use std::sync::Arc; +use tracing::{debug, trace}; + +/// Provides executors to execute regular ethereum blocks +#[derive(Debug, Clone)] +pub struct OpExecutorProvider { + chain_spec: Arc, + evm_config: EvmConfig, + inspector: Option, + prune_modes: PruneModes, +} + +impl OpExecutorProvider { + /// Creates a new default optimism executor provider. + pub fn optimism(chain_spec: Arc) -> Self { + Self::new(chain_spec, Default::default()) + } +} + +impl OpExecutorProvider { + /// Creates a new executor provider. + pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { + Self { chain_spec, evm_config, inspector: None, prune_modes: PruneModes::none() } + } + + /// Configures an optional inspector stack for debugging. + pub fn with_inspector(mut self, inspector: Option) -> Self { + self.inspector = inspector; + self + } + + /// Configures the prune modes for the executor. 
+ pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { + self.prune_modes = prune_modes; + self + } +} + +impl OpExecutorProvider +where + EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvmEnv, +{ + fn op_executor(&self, db: DB) -> OpBlockExecutor + where + DB: Database, + { + OpBlockExecutor::new( + self.chain_spec.clone(), + self.evm_config.clone(), + State::builder().with_database(db).with_bundle_update().without_state_clear().build(), + ) + .with_inspector(self.inspector.clone()) + } +} + +impl ExecutorProvider for OpExecutorProvider +where + EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvmEnv, +{ + type Executor> = OpBlockExecutor; + + type BatchExecutor> = OpBatchExecutor; + fn executor(&self, db: DB) -> Self::Executor + where + DB: Database, + { + self.op_executor(db) + } + + fn batch_executor(&self, db: DB) -> Self::BatchExecutor + where + DB: Database, + { + let executor = self.op_executor(db); + OpBatchExecutor { + executor, + batch_record: BlockBatchRecord::new(self.prune_modes.clone()), + stats: BlockExecutorStats::default(), + } + } +} + +/// Helper container type for EVM with chain spec. +#[derive(Debug, Clone)] +struct OpEvmExecutor { + /// The chainspec + chain_spec: Arc, + /// How to create an EVM. + evm_config: EvmConfig, +} + +impl OpEvmExecutor +where + EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvmEnv, +{ + /// Executes the transactions in the block and returns the receipts. + /// + /// This applies the pre-execution changes, and executes the transactions. + /// + /// # Note + /// + /// It does __not__ apply post-execution changes. 
+ fn execute_pre_and_transactions( + &mut self, + block: &BlockWithSenders, + mut evm: Evm<'_, Ext, &mut State>, + ) -> Result<(Vec, u64), BlockExecutionError> + where + DB: Database, + { + // apply pre execution changes + apply_beacon_root_contract_call( + &self.chain_spec, + block.timestamp, + block.number, + block.parent_beacon_block_root, + &mut evm, + )?; + + // execute transactions + let is_regolith = + self.chain_spec.fork(Hardfork::Regolith).active_at_timestamp(block.timestamp); + + // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism + // blocks will always have at least a single transaction in them (the L1 info transaction), + // so we can safely assume that this will always be triggered upon the transition and that + // the above check for empty blocks will never be hit on OP chains. + ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()).map_err( + |_| { + BlockExecutionError::OptimismBlockExecution( + OptimismBlockExecutionError::ForceCreate2DeployerFail, + ) + }, + )?; + + let mut cumulative_gas_used = 0; + let mut receipts = Vec::with_capacity(block.body.len()); + for (sender, transaction) in block.transactions_with_sender() { + // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, + // must be no greater than the block’s gasLimit. + let block_available_gas = block.header.gas_limit - cumulative_gas_used; + if transaction.gas_limit() > block_available_gas && + (is_regolith || !transaction.is_system_transaction()) + { + return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { + transaction_gas_limit: transaction.gas_limit(), + block_available_gas, + } + .into()) + } + + // An optimism block should never contain blob transactions. 
+ if matches!(transaction.tx_type(), TxType::Eip4844) { + return Err(BlockExecutionError::OptimismBlockExecution( + OptimismBlockExecutionError::BlobTransactionRejected, + )) + } + + // Cache the depositor account prior to the state transition for the deposit nonce. + // + // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces + // were not introduced in Bedrock. In addition, regular transactions don't have deposit + // nonces, so we don't need to touch the DB for those. + let depositor = (is_regolith && transaction.is_deposit()) + .then(|| { + evm.db_mut() + .load_cache_account(*sender) + .map(|acc| acc.account_info().unwrap_or_default()) + }) + .transpose() + .map_err(|_| { + BlockExecutionError::OptimismBlockExecution( + OptimismBlockExecutionError::AccountLoadFailed(*sender), + ) + })?; + + let mut buf = Vec::with_capacity(transaction.length_without_header()); + transaction.encode_enveloped(&mut buf); + EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender, buf.into()); + + // Execute transaction. + let ResultAndState { result, state } = evm.transact().map_err(move |err| { + // Ensure hash is calculated for error log, if not already done + BlockValidationError::EVM { + hash: transaction.recalculate_hash(), + error: err.into(), + } + })?; + + trace!( + target: "evm", + ?transaction, + "Executed transaction" + ); + + evm.db_mut().commit(state); + + // append gas used + cumulative_gas_used += result.gas_used(); + + // Push transaction changeset and calculate header bloom filter for receipt. + receipts.push(Receipt { + tx_type: transaction.tx_type(), + // Success flag was added in `EIP-658: Embedding transaction status code in + // receipts`. + success: result.is_success(), + cumulative_gas_used, + logs: result.into_logs(), + deposit_nonce: depositor.map(|account| account.nonce), + // The deposit receipt version was introduced in Canyon to indicate an update to how + // receipt hashes should be computed when set. 
The state transition process ensures + // this is only set for post-Canyon deposit transactions. + deposit_receipt_version: (transaction.is_deposit() && + self.chain_spec + .is_fork_active_at_timestamp(Hardfork::Canyon, block.timestamp)) + .then_some(1), + }); + } + drop(evm); + + // Check if gas used matches the value set in header. + if block.gas_used != cumulative_gas_used { + let receipts = Receipts::from_block_receipt(receipts); + return Err(BlockValidationError::BlockGasUsed { + gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, + gas_spent_by_tx: receipts.gas_spent_by_tx()?, + } + .into()) + } + + Ok((receipts, cumulative_gas_used)) + } +} + +/// A basic Ethereum block executor. +/// +/// Expected usage: +/// - Create a new instance of the executor. +/// - Execute the block. +#[derive(Debug)] +pub struct OpBlockExecutor { + /// Chain specific evm config that's used to execute a block. + executor: OpEvmExecutor, + /// The state to use for execution + state: State, + /// Optional inspector stack for debugging + inspector: Option, +} + +impl OpBlockExecutor { + /// Creates a new Ethereum block executor. + pub fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { + Self { executor: OpEvmExecutor { chain_spec, evm_config }, state, inspector: None } + } + + /// Sets the inspector stack for debugging. + pub fn with_inspector(mut self, inspector: Option) -> Self { + self.inspector = inspector; + self + } + + #[inline] + fn chain_spec(&self) -> &ChainSpec { + &self.executor.chain_spec + } + + /// Returns mutable reference to the state that wraps the underlying database. + #[allow(unused)] + fn state_mut(&mut self) -> &mut State { + &mut self.state + } +} + +impl OpBlockExecutor +where + EvmConfig: ConfigureEvm, + // TODO(mattsse): get rid of this + EvmConfig: ConfigureEvmEnv, + DB: Database, +{ + /// Configures a new evm configuration and block environment for the given block. 
+ /// + /// Caution: this does not initialize the tx environment. + fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + EvmConfig::fill_cfg_and_block_env( + &mut cfg, + &mut block_env, + self.chain_spec(), + header, + total_difficulty, + ); + + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) + } + + /// Execute a single block and apply the state changes to the internal state. + /// + /// Returns the receipts of the transactions in the block and the total gas used. + /// + /// Returns an error if execution fails or receipt verification fails. + fn execute_and_verify( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result<(Vec, u64), BlockExecutionError> { + // 1. prepare state on new block + self.on_new_block(&block.header); + + // 2. configure the evm and execute + let env = self.evm_env_for_block(&block.header, total_difficulty); + + let (receipts, gas_used) = { + if let Some(inspector) = self.inspector.as_mut() { + let evm = self.executor.evm_config.evm_with_env_and_inspector( + &mut self.state, + env, + inspector, + ); + self.executor.execute_pre_and_transactions(block, evm)? + } else { + let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); + + self.executor.execute_pre_and_transactions(block, evm)? + } + }; + + // 3. apply post execution changes + self.post_execution(block, total_difficulty)?; + + // Before Byzantium, receipts contained state root that would mean that expensive + // operation as hashing that is required for state root got calculated in every + // transaction This was replaced with is_success flag. 
+ // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 + if self.chain_spec().is_byzantium_active_at_block(block.header.number) { + if let Err(error) = verify_receipt_optimism( + block.header.receipts_root, + block.header.logs_bloom, + receipts.iter(), + self.chain_spec(), + block.timestamp, + ) { + debug!(target: "evm", %error, ?receipts, "receipts verification failed"); + return Err(error) + }; + } + + Ok((receipts, gas_used)) + } + + /// Apply settings before a new block is executed. + pub(crate) fn on_new_block(&mut self, header: &Header) { + // Set state clear flag if the block is after the Spurious Dragon hardfork. + let state_clear_flag = self.chain_spec().is_spurious_dragon_active_at_block(header.number); + self.state.set_state_clear_flag(state_clear_flag); + } + + /// Apply post execution state changes, including block rewards, withdrawals, and irregular DAO + /// hardfork state change. + pub fn post_execution( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result<(), BlockExecutionError> { + let balance_increments = post_block_balance_increments( + self.chain_spec(), + block.number, + block.difficulty, + block.beneficiary, + block.timestamp, + total_difficulty, + &block.ommers, + block.withdrawals.as_ref().map(Withdrawals::as_ref), + ); + // increment balances + self.state + .increment_balances(balance_increments) + .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; + + Ok(()) + } +} + +impl Executor for OpBlockExecutor +where + EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvmEnv, + DB: Database, +{ + type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; + type Output = EthBlockOutput; + type Error = BlockExecutionError; + + /// Executes the block and commits the state changes. + /// + /// Returns the receipts of the transactions in the block. + /// + /// Returns an error if the block could not be executed or failed verification. + /// + /// State changes are committed to the database. 
+ fn execute(mut self, input: Self::Input<'_>) -> Result { + let EthBlockExecutionInput { block, total_difficulty } = input; + let (receipts, gas_used) = self.execute_and_verify(block, total_difficulty)?; + + // prepare the state for extraction + self.state.merge_transitions(BundleRetention::PlainState); + + Ok(EthBlockOutput { state: self.state.take_bundle(), receipts, gas_used }) + } +} + +/// An executor for a batch of blocks. +/// +/// State changes are tracked until the executor is finalized. +#[derive(Debug)] +pub struct OpBatchExecutor { + /// The executor used to execute blocks. + executor: OpBlockExecutor, + /// Keeps track of the batch and record receipts based on the configured prune mode + batch_record: BlockBatchRecord, + stats: BlockExecutorStats, +} + +impl OpBatchExecutor { + /// Returns the receipts of the executed blocks. + pub fn receipts(&self) -> &Receipts { + self.batch_record.receipts() + } + + /// Returns mutable reference to the state that wraps the underlying database. 
+ #[allow(unused)] + fn state_mut(&mut self) -> &mut State { + self.executor.state_mut() + } +} + +impl BatchExecutor for OpBatchExecutor +where + EvmConfig: ConfigureEvm, + // TODO: get rid of this + EvmConfig: ConfigureEvmEnv, + DB: Database, +{ + type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; + type Output = BundleStateWithReceipts; + type Error = BlockExecutionError; + + fn execute_one(&mut self, input: Self::Input<'_>) -> Result { + let EthBlockExecutionInput { block, total_difficulty } = input; + let (receipts, _gas_used) = self.executor.execute_and_verify(block, total_difficulty)?; + + // prepare the state according to the prune mode + let retention = self.batch_record.bundle_retention(block.number); + self.executor.state.merge_transitions(retention); + + // store receipts in the set + self.batch_record.save_receipts(receipts)?; + + Ok(BatchBlockOutput { size_hint: Some(self.executor.state.bundle_size_hint()) }) + } + + fn finalize(mut self) -> Self::Output { + // TODO: track stats + self.stats.log_debug(); + + BundleStateWithReceipts::new( + self.executor.state.take_bundle(), + self.batch_record.take_receipts(), + self.batch_record.first_block().unwrap_or_default(), + ) + } +} + +/// Verify the calculated receipts root against the expected receipts root. +pub fn verify_receipt_optimism<'a>( + expected_receipts_root: B256, + expected_logs_bloom: Bloom, + receipts: impl Iterator + Clone, + chain_spec: &ChainSpec, + timestamp: u64, +) -> Result<(), BlockExecutionError> { + // Calculate receipts root. + let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); + let receipts_root = + calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); + + // Create header log bloom. 
+ let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + + compare_receipts_root_and_logs_bloom( + receipts_root, + logs_bloom, + expected_receipts_root, + expected_logs_bloom, + )?; + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_primitives::{ + b256, Account, Address, Block, ChainSpecBuilder, Signature, StorageKey, StorageValue, + Transaction, TransactionKind, TransactionSigned, TxEip1559, BASE_MAINNET, + }; + use reth_revm::database::StateProviderDatabase; + use revm::L1_BLOCK_CONTRACT; + use std::{collections::HashMap, str::FromStr}; + + use crate::OptimismEvmConfig; + use reth_revm::test_utils::StateProviderTest; + + fn create_op_state_provider() -> StateProviderTest { + let mut db = StateProviderTest::default(); + + let l1_block_contract_account = + Account { balance: U256::ZERO, bytecode_hash: None, nonce: 1 }; + + let mut l1_block_storage = HashMap::new(); + // base fee + l1_block_storage.insert(StorageKey::with_last_byte(1), StorageValue::from(1000000000)); + // l1 fee overhead + l1_block_storage.insert(StorageKey::with_last_byte(5), StorageValue::from(188)); + // l1 fee scalar + l1_block_storage.insert(StorageKey::with_last_byte(6), StorageValue::from(684000)); + // l1 free scalars post ecotone + l1_block_storage.insert( + StorageKey::with_last_byte(3), + StorageValue::from_str( + "0x0000000000000000000000000000000000001db0000d27300000000000000005", + ) + .unwrap(), + ); + + db.insert_account(L1_BLOCK_CONTRACT, l1_block_contract_account, None, l1_block_storage); + + db + } + + fn executor_provider(chain_spec: Arc) -> OpExecutorProvider { + OpExecutorProvider { + chain_spec, + evm_config: Default::default(), + inspector: None, + prune_modes: Default::default(), + } + } + + #[test] + fn op_deposit_fields_pre_canyon() { + let header = Header { + timestamp: 1, + number: 1, + gas_limit: 1_000_000, + gas_used: 42_000, + receipts_root: b256!( + 
"83465d1e7d01578c0d609be33570f91242f013e9e295b0879905346abbd63731" + ), + ..Default::default() + }; + + let mut db = create_op_state_provider(); + + let addr = Address::ZERO; + let account = Account { balance: U256::MAX, ..Account::default() }; + db.insert_account(addr, account, None, HashMap::new()); + + let chain_spec = + Arc::new(ChainSpecBuilder::from(&*BASE_MAINNET).regolith_activated().build()); + + let tx = TransactionSigned::from_transaction_and_signature( + Transaction::Eip1559(TxEip1559 { + chain_id: chain_spec.chain.id(), + nonce: 0, + gas_limit: 21_000, + to: TransactionKind::Call(addr), + ..Default::default() + }), + Signature::default(), + ); + + let tx_deposit = TransactionSigned::from_transaction_and_signature( + Transaction::Deposit(reth_primitives::TxDeposit { + from: addr, + to: TransactionKind::Call(addr), + gas_limit: 21_000, + ..Default::default() + }), + Signature::default(), + ); + + let provider = executor_provider(chain_spec); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + + // Attempt to execute a block with one deposit and one non-deposit transaction + executor + .execute_one( + ( + &BlockWithSenders { + block: Block { + header, + body: vec![tx, tx_deposit], + ommers: vec![], + withdrawals: None, + }, + senders: vec![addr, addr], + }, + U256::ZERO, + ) + .into(), + ) + .unwrap(); + + let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); + let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); + + // deposit_receipt_version is not present in pre canyon transactions + assert!(deposit_receipt.deposit_receipt_version.is_none()); + assert!(tx_receipt.deposit_receipt_version.is_none()); + + // deposit_nonce is present only in deposit transactions + assert!(deposit_receipt.deposit_nonce.is_some()); + assert!(tx_receipt.deposit_nonce.is_none()); + } + + #[test] + fn op_deposit_fields_post_canyon() { + // 
ensure_create2_deployer will fail if timestamp is set to less then 2 + let header = Header { + timestamp: 2, + number: 1, + gas_limit: 1_000_000, + gas_used: 42_000, + receipts_root: b256!( + "fffc85c4004fd03c7bfbe5491fae98a7473126c099ac11e8286fd0013f15f908" + ), + ..Default::default() + }; + + let mut db = create_op_state_provider(); + let addr = Address::ZERO; + let account = Account { balance: U256::MAX, ..Account::default() }; + + db.insert_account(addr, account, None, HashMap::new()); + + let chain_spec = + Arc::new(ChainSpecBuilder::from(&*BASE_MAINNET).canyon_activated().build()); + + let tx = TransactionSigned::from_transaction_and_signature( + Transaction::Eip1559(TxEip1559 { + chain_id: chain_spec.chain.id(), + nonce: 0, + gas_limit: 21_000, + to: TransactionKind::Call(addr), + ..Default::default() + }), + Signature::default(), + ); + + let tx_deposit = TransactionSigned::from_transaction_and_signature( + Transaction::Deposit(reth_primitives::TxDeposit { + from: addr, + to: TransactionKind::Call(addr), + gas_limit: 21_000, + ..Default::default() + }), + Signature::optimism_deposit_tx_signature(), + ); + + let provider = executor_provider(chain_spec); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + + // attempt to execute an empty block with parent beacon block root, this should not fail + executor + .execute_one( + ( + &BlockWithSenders { + block: Block { + header, + body: vec![tx, tx_deposit], + ommers: vec![], + withdrawals: None, + }, + senders: vec![addr, addr], + }, + U256::ZERO, + ) + .into(), + ) + .expect("Executing a block while canyon is active should not fail"); + + let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); + let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); + + // deposit_receipt_version is set to 1 for post canyon deposit transactions + assert_eq!(deposit_receipt.deposit_receipt_version, Some(1)); + 
assert!(tx_receipt.deposit_receipt_version.is_none()); + + // deposit_nonce is present only in deposit transactions + assert!(deposit_receipt.deposit_nonce.is_some()); + assert!(tx_receipt.deposit_nonce.is_none()); + } +} diff --git a/crates/node-optimism/src/evm.rs b/crates/node-optimism/src/evm/mod.rs similarity index 98% rename from crates/node-optimism/src/evm.rs rename to crates/node-optimism/src/evm/mod.rs index 6470f1cf4da0c..086253a0d8bb9 100644 --- a/crates/node-optimism/src/evm.rs +++ b/crates/node-optimism/src/evm/mod.rs @@ -6,6 +6,9 @@ use reth_primitives::{ }; use revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; +mod execute; +pub use execute::*; + /// Optimism-related EVM configuration. #[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] diff --git a/crates/node-optimism/src/rpc.rs b/crates/node-optimism/src/rpc.rs index 633a62b0b88d3..66eb824505e3e 100644 --- a/crates/node-optimism/src/rpc.rs +++ b/crates/node-optimism/src/rpc.rs @@ -6,7 +6,6 @@ use reth_rpc::eth::{ error::{EthApiError, EthResult, ToRpcError}, traits::RawTransactionForwarder, }; -use reth_tracing::tracing; use std::sync::{atomic::AtomicUsize, Arc}; /// Error type when interacting with the Sequencer diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 2bd04d27847bd..5c62f324eb107 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -18,6 +18,7 @@ reth-interfaces.workspace = true reth-provider.workspace = true reth-consensus-common.workspace = true reth-evm.workspace = true +reth-trie = { workspace = true, optional = true } # revm revm.workspace = true @@ -30,6 +31,7 @@ tracing.workspace = true reth-trie.workspace = true [features] +test-utils = ["dep:reth-trie"] optimism = [ "revm/optimism", "reth-primitives/optimism", diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index 05f60cfba8eaa..f4ed01ada2f33 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -38,7 +38,7 @@ pub mod stack; pub mod 
optimism; /// Common test helpers -#[cfg(test)] +#[cfg(any(test, feature = "test-utils"))] pub mod test_utils; // Convenience re-exports. diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs index fbed5eae072b8..f467b22a05c13 100644 --- a/crates/revm/src/processor.rs +++ b/crates/revm/src/processor.rs @@ -460,20 +460,16 @@ pub fn compare_receipts_root_and_logs_bloom( #[cfg(test)] mod tests { - use std::collections::HashMap; - - use revm::{Database, TransitionState}; - + use super::*; + use crate::test_utils::{StateProviderTest, TestEvmConfig}; use reth_primitives::{ bytes, constants::{BEACON_ROOTS_ADDRESS, EIP1559_INITIAL_BASE_FEE, SYSTEM_ADDRESS}, keccak256, Account, Bytes, ChainSpecBuilder, ForkCondition, Signature, Transaction, TransactionKind, TxEip1559, MAINNET, }; - - use crate::test_utils::{StateProviderTest, TestEvmConfig}; - - use super::*; + use revm::{Database, TransitionState}; + use std::collections::HashMap; static BEACON_ROOT_CONTRACT_CODE: Bytes = bytes!("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500"); diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index cf0b8299807f9..1937369872321 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -25,6 +25,7 @@ use { }, }; +/// Mock state for testing #[derive(Debug, Default, Clone, Eq, PartialEq)] pub struct StateProviderTest { accounts: HashMap, Account)>, From 04aef71d723dceeecd10bee1cf2eaada3f4806af Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 19 Apr 2024 11:42:59 +0200 Subject: [PATCH 230/700] chore(engine): extract fcu make canonical result processing (#7737) --- crates/consensus/beacon/src/engine/mod.rs | 87 +++++++++++++---------- 1 file changed, 49 insertions(+), 38 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs 
b/crates/consensus/beacon/src/engine/mod.rs index dc5829bc85ca7..bb369605015ad 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -390,7 +390,7 @@ where fn forkchoice_updated( &mut self, state: ForkchoiceState, - mut attrs: Option, + attrs: Option, ) -> RethResult { trace!(target: "consensus::engine", ?state, "Received new forkchoice state update"); @@ -404,46 +404,57 @@ where let make_canonical_result = self.blockchain.make_canonical(state.head_block_hash); let elapsed = self.record_make_canonical_latency(start, &make_canonical_result); - let status = match make_canonical_result { + let status = self.on_forkchoice_updated_make_canonical_result( + state, + attrs, + make_canonical_result, + elapsed, + )?; + trace!(target: "consensus::engine", ?status, ?state, "Returning forkchoice status"); + Ok(status) + } + + /// Process the result of attempting to make forkchoice state head hash canonical. + /// + /// # Returns + /// + /// A forkchoice state update outcome or fatal error. 
+ fn on_forkchoice_updated_make_canonical_result( + &mut self, + state: ForkchoiceState, + mut attrs: Option, + make_canonical_result: Result, + elapsed: Duration, + ) -> RethResult { + match make_canonical_result { Ok(outcome) => { - match &outcome { + let should_update_head = match &outcome { CanonicalOutcome::AlreadyCanonical { header } => { - if self.on_head_already_canonical(header, &mut attrs) { - let _ = self.update_head(header.clone()); - self.listeners.notify( - BeaconConsensusEngineEvent::CanonicalChainCommitted( - Box::new(header.clone()), - elapsed, - ), - ); - } + self.on_head_already_canonical(header, &mut attrs) } CanonicalOutcome::Committed { head } => { - debug!( - target: "consensus::engine", - hash=?state.head_block_hash, - number=head.number, - "Canonicalized new head" - ); - // new VALID update that moved the canonical chain forward - let _ = self.update_head(head.clone()); - self.listeners.notify(BeaconConsensusEngineEvent::CanonicalChainCommitted( - Box::new(head.clone()), - elapsed, - )); + debug!(target: "consensus::engine", hash=?state.head_block_hash, number=head.number, "Canonicalized new head"); + true } }; + if should_update_head { + let head = outcome.header(); + let _ = self.update_head(head.clone()); + self.listeners.notify(BeaconConsensusEngineEvent::CanonicalChainCommitted( + Box::new(head.clone()), + elapsed, + )); + } + // Validate that the forkchoice state is consistent. - if let Some(invalid_fcu_response) = + let on_updated = if let Some(invalid_fcu_response) = self.ensure_consistent_forkchoice_state(state)? 
{ - trace!(target: "consensus::engine", ?state, "Forkchoice state is inconsistent, returning invalid response"); - return Ok(invalid_fcu_response) - } - - if let Some(attrs) = attrs { + trace!(target: "consensus::engine", ?state, "Forkchoice state is inconsistent"); + invalid_fcu_response + } else if let Some(attrs) = attrs { // the CL requested to build a new payload on top of this new VALID head let head = outcome.into_header().unseal(); self.process_payload_attributes(attrs, head, state) @@ -452,20 +463,20 @@ where PayloadStatusEnum::Valid, Some(state.head_block_hash), )) - } + }; + Ok(on_updated) } Err(err) => { if err.is_fatal() { error!(target: "consensus::engine", %err, "Encountered fatal error"); - return Err(err.into()) + Err(err.into()) + } else { + Ok(OnForkChoiceUpdated::valid( + self.on_failed_canonical_forkchoice_update(&state, err), + )) } - - OnForkChoiceUpdated::valid(self.on_failed_canonical_forkchoice_update(&state, err)) } - }; - - trace!(target: "consensus::engine", ?status, ?state, "Returning forkchoice status"); - Ok(status) + } } /// Invoked when head hash references a `VALID` block that is already canonical. 
From e7945f92198bbe558a88b4bd82b94cc562b5f840 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 19 Apr 2024 12:08:41 +0200 Subject: [PATCH 231/700] fix(engine): remove block execution error matching (#7735) --- crates/consensus/beacon/src/engine/mod.rs | 59 +++++++++++------------ 1 file changed, 27 insertions(+), 32 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index bb369605015ad..8a1a4266a88b1 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -15,8 +15,9 @@ use reth_interfaces::{ BlockStatus, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, }, consensus::ForkchoiceState, - executor::{BlockExecutionError, BlockValidationError}, + executor::BlockValidationError, p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}, + provider::ProviderResult, sync::{NetworkSyncUpdater, SyncState}, RethError, RethResult, }; @@ -391,7 +392,7 @@ where &mut self, state: ForkchoiceState, attrs: Option, - ) -> RethResult { + ) -> Result { trace!(target: "consensus::engine", ?state, "Received new forkchoice state update"); // Pre-validate forkchoice state update and return if it's invalid or @@ -425,7 +426,7 @@ where mut attrs: Option, make_canonical_result: Result, elapsed: Duration, - ) -> RethResult { + ) -> Result { match make_canonical_result { Ok(outcome) => { let should_update_head = match &outcome { @@ -469,7 +470,7 @@ where Err(err) => { if err.is_fatal() { error!(target: "consensus::engine", %err, "Encountered fatal error"); - Err(err.into()) + Err(err) } else { Ok(OnForkChoiceUpdated::valid( self.on_failed_canonical_forkchoice_update(&state, err), @@ -527,23 +528,21 @@ where state: ForkchoiceState, attrs: Option, tx: oneshot::Sender>, - ) -> OnForkchoiceUpdateOutcome { + ) -> Result { self.metrics.forkchoice_updated_messages.increment(1); self.blockchain.on_forkchoice_update_received(&state); let on_updated = match 
self.forkchoice_updated(state, attrs) { Ok(response) => response, Err(error) => { - if let RethError::Execution(ref err) = error { - if err.is_fatal() { - // FCU resulted in a fatal error from which we can't recover - let err = err.clone(); - let _ = tx.send(Err(error)); - return OnForkchoiceUpdateOutcome::Fatal(err) - } + if error.is_fatal() { + // FCU resulted in a fatal error from which we can't recover + let err = error.clone(); + let _ = tx.send(Err(RethError::Canonical(error))); + return Err(err) } - let _ = tx.send(Err(error)); - return OnForkchoiceUpdateOutcome::Processed + let _ = tx.send(Err(RethError::Canonical(error))); + return Ok(OnForkchoiceUpdateOutcome::Processed) } }; @@ -568,7 +567,7 @@ where if self.sync.has_reached_max_block(tip_number) { // Terminate the sync early if it's reached the maximum user // configured block. - return OnForkchoiceUpdateOutcome::ReachedMaxBlock + return Ok(OnForkchoiceUpdateOutcome::ReachedMaxBlock) } } ForkchoiceStatus::Syncing => { @@ -580,7 +579,7 @@ where // notify listeners about new processed FCU self.listeners.notify(BeaconConsensusEngineEvent::ForkchoiceUpdated(state, fcu_status)); - OnForkchoiceUpdateOutcome::Processed + Ok(OnForkchoiceUpdateOutcome::Processed) } /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less @@ -844,7 +843,7 @@ where fn ensure_consistent_forkchoice_state( &mut self, state: ForkchoiceState, - ) -> RethResult> { + ) -> ProviderResult> { // Ensure that the finalized block, if not zero, is known and in the canonical chain // after the head block is canonicalized. // @@ -924,17 +923,17 @@ where /// /// Returns an error if the block is not found. #[inline] - fn update_safe_block(&self, safe_block_hash: B256) -> RethResult<()> { + fn update_safe_block(&self, safe_block_hash: B256) -> ProviderResult<()> { if !safe_block_hash.is_zero() { if self.blockchain.safe_block_hash()? 
== Some(safe_block_hash) { // nothing to update return Ok(()) } - let safe = - self.blockchain.find_block_by_hash(safe_block_hash, BlockSource::Any)?.ok_or_else( - || RethError::Provider(ProviderError::UnknownBlockHash(safe_block_hash)), - )?; + let safe = self + .blockchain + .find_block_by_hash(safe_block_hash, BlockSource::Any)? + .ok_or_else(|| ProviderError::UnknownBlockHash(safe_block_hash))?; self.blockchain.set_safe(safe.header.seal(safe_block_hash)); } Ok(()) @@ -944,7 +943,7 @@ where /// /// Returns an error if the block is not found. #[inline] - fn update_finalized_block(&self, finalized_block_hash: B256) -> RethResult<()> { + fn update_finalized_block(&self, finalized_block_hash: B256) -> ProviderResult<()> { if !finalized_block_hash.is_zero() { if self.blockchain.finalized_block_hash()? == Some(finalized_block_hash) { // nothing to update @@ -954,9 +953,7 @@ where let finalized = self .blockchain .find_block_by_hash(finalized_block_hash, BlockSource::Any)? - .ok_or_else(|| { - RethError::Provider(ProviderError::UnknownBlockHash(finalized_block_hash)) - })?; + .ok_or_else(|| ProviderError::UnknownBlockHash(finalized_block_hash))?; self.blockchain.finalize_block(finalized.number); self.blockchain.set_finalized(finalized.header.seal(finalized_block_hash)); } @@ -1762,14 +1759,14 @@ where match msg { BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { match this.on_forkchoice_updated(state, payload_attrs, tx) { - OnForkchoiceUpdateOutcome::Processed => {} - OnForkchoiceUpdateOutcome::ReachedMaxBlock => { + Ok(OnForkchoiceUpdateOutcome::Processed) => {} + Ok(OnForkchoiceUpdateOutcome::ReachedMaxBlock) => { // reached the max block, we can terminate the future return Poll::Ready(Ok(())) } - OnForkchoiceUpdateOutcome::Fatal(err) => { + Err(err) => { // fatal error, we can terminate the future - return Poll::Ready(Err(RethError::Execution(err).into())) + return Poll::Ready(Err(RethError::Canonical(err).into())) } } } @@ -1838,8 +1835,6 @@ 
enum OnForkchoiceUpdateOutcome { Processed, /// FCU was processed successfully and reached max block. ReachedMaxBlock, - /// FCU resulted in a __fatal__ block execution error from which we can't recover. - Fatal(BlockExecutionError), } /// Represents outcomes of processing a sync event From f14bf14d19511853559997d867d99d0cb1a797fe Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 19 Apr 2024 12:18:23 +0200 Subject: [PATCH 232/700] chore: relax some trait bounds (#7739) --- crates/blockchain-tree/src/blockchain_tree.rs | 28 ++++++++++--------- crates/blockchain-tree/src/shareable.rs | 14 ++++------ crates/node-builder/src/builder.rs | 4 +-- 3 files changed, 21 insertions(+), 25 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 28094853566ca..7c51590ebd566 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -57,7 +57,7 @@ use tracing::{debug, error, info, instrument, trace, warn}; /// * [BlockchainTree::make_canonical]: Check if we have the hash of a block that is the current /// canonical head and commit it to db. #[derive(Debug)] -pub struct BlockchainTree { +pub struct BlockchainTree { /// The state of the tree /// /// Tracks all the chains, the block indices, and the block buffer. @@ -75,6 +75,20 @@ pub struct BlockchainTree { prune_modes: Option, } +impl BlockchainTree { + /// Subscribe to new blocks events. + /// + /// Note: Only canonical blocks are emitted by the tree. + pub fn subscribe_canon_state(&self) -> CanonStateNotifications { + self.canon_state_notification_sender.subscribe() + } + + /// Returns a clone of the sender for the canonical state notifications. 
+ pub fn canon_state_notification_sender(&self) -> CanonStateNotificationSender { + self.canon_state_notification_sender.clone() + } +} + impl BlockchainTree where DB: Database + Clone, @@ -1104,18 +1118,6 @@ where Ok(outcome) } - /// Subscribe to new blocks events. - /// - /// Note: Only canonical blocks are emitted by the tree. - pub fn subscribe_canon_state(&self) -> CanonStateNotifications { - self.canon_state_notification_sender.subscribe() - } - - /// Returns a clone of the sender for the canonical state notifications. - pub fn canon_state_notification_sender(&self) -> CanonStateNotificationSender { - self.canon_state_notification_sender.clone() - } - /// Write the given chain to the database as canonical. fn commit_canonical_to_database( &self, diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index f839e20186d69..7a0eb36fa49bd 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -25,18 +25,14 @@ use std::{ }; use tracing::trace; -/// Shareable blockchain tree that is behind tokio::RwLock +/// Shareable blockchain tree that is behind a RwLock #[derive(Clone, Debug)] -pub struct ShareableBlockchainTree { +pub struct ShareableBlockchainTree { /// BlockchainTree pub tree: Arc>>, } -impl ShareableBlockchainTree -where - DB: Database + Clone, - EF: ExecutorFactory, -{ +impl ShareableBlockchainTree { /// Create a new shareable database. 
pub fn new(tree: BlockchainTree) -> Self { Self { tree: Arc::new(RwLock::new(tree)) } @@ -202,8 +198,8 @@ where impl CanonStateSubscriptions for ShareableBlockchainTree where - DB: Database + Clone, - EF: ExecutorFactory, + DB: Send + Sync, + EF: Send + Sync, { fn subscribe_to_canonical_state(&self) -> reth_provider::CanonStateNotifications { trace!(target: "blockchain_tree", "Registered subscriber for canonical state"); diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index 76df0fc8f56f6..7b84fad6b395c 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -44,9 +44,7 @@ use reth_node_core::{ primitives::{kzg::KzgSettings, Head}, utils::write_peers_to_file, }; -use reth_node_events::node; - -use reth_node_events::cl::ConsensusLayerHealthEvents; +use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, format_ether, ChainSpec}; use reth_provider::{ providers::BlockchainProvider, CanonStateSubscriptions, ChainSpecProvider, ProviderFactory, From defe5ff0afa12bb4537fe7ea7ea4e3245b384eca Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Fri, 19 Apr 2024 19:35:20 +0800 Subject: [PATCH 233/700] refactor: split reth-stages into reth-stages-api and reth-stages (#7666) Co-authored-by: Oliver Nordbjerg --- Cargo.lock | 27 ++++++++- Cargo.toml | 4 +- bin/reth/src/commands/import.rs | 1 + crates/blockchain-tree/Cargo.toml | 2 +- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/consensus/auto-seal/Cargo.toml | 2 +- crates/consensus/auto-seal/src/task.rs | 2 +- crates/node-e2e-tests/tests/it/eth.rs | 1 - crates/stages-api/Cargo.toml | 40 +++++++++++++ .../docs/mermaid/pipeline.mmd | 0 crates/{stages => stages-api}/src/error.rs | 7 ++- crates/stages-api/src/lib.rs | 14 +++++ .../src/metrics/listener.rs | 3 +- .../{stages => stages-api}/src/metrics/mod.rs | 0 .../src/metrics/sync_metrics.rs | 20 +++---- 
.../src/pipeline/builder.rs | 0 .../src/pipeline/ctrl.rs | 0 .../src/pipeline/event.rs | 0 .../src/pipeline/mod.rs | 39 ++++++++----- .../src/pipeline/progress.rs | 11 ++-- .../src/pipeline/set.rs | 0 crates/{stages => stages-api}/src/stage.rs | 0 crates/stages-api/src/test_utils/mod.rs | 2 + .../src/test_utils/stage.rs | 0 crates/{stages => stages-api}/src/util.rs | 0 crates/stages/Cargo.toml | 3 +- crates/stages/benches/criterion.rs | 2 +- crates/stages/benches/setup/mod.rs | 2 +- crates/stages/src/lib.rs | 12 +--- crates/stages/src/prelude.rs | 10 +--- crates/stages/src/stages/bodies.rs | 56 +++++++++++-------- crates/stages/src/stages/execution.rs | 9 +-- crates/stages/src/stages/finish.rs | 2 +- crates/stages/src/stages/hashing_account.rs | 2 +- crates/stages/src/stages/hashing_storage.rs | 2 +- crates/stages/src/stages/headers.rs | 4 +- .../src/stages/index_account_history.rs | 2 +- .../src/stages/index_storage_history.rs | 2 +- crates/stages/src/stages/merkle.rs | 4 +- crates/stages/src/stages/mod.rs | 3 +- crates/stages/src/stages/sender_recovery.rs | 4 +- crates/stages/src/stages/tx_lookup.rs | 2 +- crates/stages/src/stages/utils.rs | 2 +- crates/stages/src/test_utils/macros.rs | 14 ++--- crates/stages/src/test_utils/mod.rs | 3 - crates/stages/src/test_utils/runner.rs | 4 +- crates/stages/src/test_utils/set.rs | 5 +- 47 files changed, 212 insertions(+), 114 deletions(-) create mode 100644 crates/stages-api/Cargo.toml rename crates/{stages => stages-api}/docs/mermaid/pipeline.mmd (100%) rename crates/{stages => stages-api}/src/error.rs (98%) create mode 100644 crates/stages-api/src/lib.rs rename crates/{stages => stages-api}/src/metrics/listener.rs (98%) rename crates/{stages => stages-api}/src/metrics/mod.rs (100%) rename crates/{stages => stages-api}/src/metrics/sync_metrics.rs (64%) rename crates/{stages => stages-api}/src/pipeline/builder.rs (100%) rename crates/{stages => stages-api}/src/pipeline/ctrl.rs (100%) rename crates/{stages => 
stages-api}/src/pipeline/event.rs (100%) rename crates/{stages => stages-api}/src/pipeline/mod.rs (98%) rename crates/{stages => stages-api}/src/pipeline/progress.rs (77%) rename crates/{stages => stages-api}/src/pipeline/set.rs (100%) rename crates/{stages => stages-api}/src/stage.rs (100%) create mode 100644 crates/stages-api/src/test_utils/mod.rs rename crates/{stages => stages-api}/src/test_utils/stage.rs (100%) rename crates/{stages => stages-api}/src/util.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 5f76031ce87e2..b08629d28d385 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6147,7 +6147,7 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-revm", - "reth-stages", + "reth-stages-api", "reth-transaction-pool", "tokio", "tokio-stream", @@ -6238,7 +6238,7 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-revm", - "reth-stages", + "reth-stages-api", "reth-trie", "reth-trie-parallel", "tokio", @@ -7427,6 +7427,7 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-revm", + "reth-stages-api", "reth-static-file", "reth-tokio-util", "reth-trie", @@ -7438,6 +7439,28 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-stages-api" +version = "0.2.0-beta.5" +dependencies = [ + "aquamarine", + "assert_matches", + "auto_impl", + "futures-util", + "metrics", + "reth-db", + "reth-interfaces", + "reth-metrics", + "reth-primitives", + "reth-provider", + "reth-static-file", + "reth-tokio-util", + "thiserror", + "tokio", + "tokio-stream", + "tracing", +] + [[package]] name = "reth-static-file" version = "0.2.0-beta.5" diff --git a/Cargo.toml b/Cargo.toml index 2ce5d813daf54..e025f29d52e97 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -53,6 +53,7 @@ members = [ "crates/node-api/", "crates/node-e2e-tests/", "crates/stages/", + "crates/stages-api", "crates/static-file/", "crates/storage/codecs/", "crates/storage/codecs/derive/", @@ -95,9 +96,9 @@ resolver = "2" rust.missing_debug_implementations = "warn" rust.missing_docs = "warn" 
rust.unreachable_pub = "warn" -rustdoc.all = "warn" rust.unused_must_use = "deny" rust.rust_2018_idioms = "deny" +rustdoc.all = "warn" [workspace.lints.clippy] # These are some of clippy's nursery (i.e., experimental) lints that we like. @@ -254,6 +255,7 @@ reth-rpc-engine-api = { path = "crates/rpc/rpc-engine-api" } reth-rpc-types = { path = "crates/rpc/rpc-types" } reth-rpc-types-compat = { path = "crates/rpc/rpc-types-compat" } reth-stages = { path = "crates/stages" } +reth-stages-api = { path = "crates/stages-api" } reth-static-file = { path = "crates/static-file" } reth-tasks = { path = "crates/tasks" } reth-tokio-util = { path = "crates/tokio-util" } diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index ce9cd3efe7afe..d15106ce3ffbe 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -34,6 +34,7 @@ use reth_provider::{HeaderSyncMode, ProviderFactory, StageCheckpointReader}; use reth_stages::{ prelude::*, stages::{ExecutionStage, ExecutionStageThresholds, SenderRecoveryStage}, + Pipeline, StageSet, }; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 81bf9ea053361..3a6ab1439e58f 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -16,7 +16,7 @@ reth-primitives.workspace = true reth-interfaces.workspace = true reth-db.workspace = true reth-provider.workspace = true -reth-stages.workspace = true +reth-stages-api.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-trie-parallel = { workspace = true, features = ["parallel"] } diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 7c51590ebd566..eb699ff1af632 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -26,7 +26,7 @@ use reth_provider::{ 
CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, Chain, ChainSpecProvider, DisplayBlocksChain, ExecutorFactory, HeaderProvider, ProviderError, }; -use reth_stages::{MetricEvent, MetricEventsSender}; +use reth_stages_api::{MetricEvent, MetricEventsSender}; use std::{ collections::{btree_map::Entry, BTreeMap, HashSet}, sync::Arc, diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml index 72a593b5a64fb..5fbf4f07a5e13 100644 --- a/crates/consensus/auto-seal/Cargo.toml +++ b/crates/consensus/auto-seal/Cargo.toml @@ -17,7 +17,7 @@ reth-beacon-consensus.workspace = true reth-primitives.workspace = true reth-interfaces.workspace = true reth-provider.workspace = true -reth-stages.workspace = true +reth-stages-api.workspace = true reth-revm.workspace = true reth-transaction-pool.workspace = true reth-evm.workspace = true diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index 6d7a29a330038..e76b4333e8cf1 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -6,7 +6,7 @@ use reth_evm::ConfigureEvm; use reth_interfaces::consensus::ForkchoiceState; use reth_primitives::{Block, ChainSpec, IntoRecoveredTransaction, SealedBlockWithSenders}; use reth_provider::{CanonChainTracker, CanonStateNotificationSender, Chain, StateProviderFactory}; -use reth_stages::PipelineEvent; +use reth_stages_api::PipelineEvent; use reth_transaction_pool::{TransactionPool, ValidPoolTransaction}; use std::{ collections::VecDeque, diff --git a/crates/node-e2e-tests/tests/it/eth.rs b/crates/node-e2e-tests/tests/it/eth.rs index 5686c6e18e5a7..a2c761221dfcc 100644 --- a/crates/node-e2e-tests/tests/it/eth.rs +++ b/crates/node-e2e-tests/tests/it/eth.rs @@ -1,6 +1,5 @@ use node_e2e_tests::{node::NodeHelper, wallet::Wallet}; use reth::{ - self, args::RpcServerArgs, builder::{NodeBuilder, NodeConfig, NodeHandle}, tasks::TaskManager, diff --git 
a/crates/stages-api/Cargo.toml b/crates/stages-api/Cargo.toml new file mode 100644 index 0000000000000..8d1eccd6eae3e --- /dev/null +++ b/crates/stages-api/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "reth-stages-api" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[dependencies] +# reth +reth-primitives.workspace = true +reth-provider.workspace = true +reth-db.workspace = true +reth-interfaces.workspace = true +reth-static-file.workspace = true +assert_matches.workspace = true +reth-tokio-util.workspace = true + +# metrics +reth-metrics.workspace = true +metrics.workspace = true + +# async +tokio = { workspace = true, features = ["sync"] } +tokio-stream.workspace = true +futures-util.workspace = true + +# misc +thiserror.workspace = true +tracing.workspace = true +auto_impl = "1" +aquamarine.workspace = true + +[features] +test-utils = [] + +[lints] +workspace = true diff --git a/crates/stages/docs/mermaid/pipeline.mmd b/crates/stages-api/docs/mermaid/pipeline.mmd similarity index 100% rename from crates/stages/docs/mermaid/pipeline.mmd rename to crates/stages-api/docs/mermaid/pipeline.mmd diff --git a/crates/stages/src/error.rs b/crates/stages-api/src/error.rs similarity index 98% rename from crates/stages/src/error.rs rename to crates/stages-api/src/error.rs index e8a5e3a71ff78..3b744e7cbe2c6 100644 --- a/crates/stages/src/error.rs +++ b/crates/stages-api/src/error.rs @@ -1,10 +1,11 @@ -use crate::pipeline::PipelineEvent; use reth_interfaces::{ - consensus, db::DatabaseError as DbError, executor, p2p::error::DownloadError, - provider::ProviderError, RethError, + consensus, db::DatabaseError as DbError, executor, p2p::error::DownloadError, RethError, }; use reth_primitives::{BlockNumber, SealedHeader, StaticFileSegment, TxNumber}; +use reth_provider::ProviderError; use thiserror::Error; + +use crate::PipelineEvent; 
use tokio::sync::mpsc::error::SendError; /// Represents the specific error type within a block error. diff --git a/crates/stages-api/src/lib.rs b/crates/stages-api/src/lib.rs new file mode 100644 index 0000000000000..fa6cd74e6407c --- /dev/null +++ b/crates/stages-api/src/lib.rs @@ -0,0 +1,14 @@ +//! Staged syncing primitives for reth. +mod error; +mod metrics; +mod pipeline; +mod stage; +#[allow(missing_docs)] +#[cfg(any(test, feature = "test-utils"))] +pub mod test_utils; +mod util; + +pub use crate::metrics::*; +pub use error::*; +pub use pipeline::*; +pub use stage::*; diff --git a/crates/stages/src/metrics/listener.rs b/crates/stages-api/src/metrics/listener.rs similarity index 98% rename from crates/stages/src/metrics/listener.rs rename to crates/stages-api/src/metrics/listener.rs index 39ccc29a31af5..2aa5667440830 100644 --- a/crates/stages/src/metrics/listener.rs +++ b/crates/stages-api/src/metrics/listener.rs @@ -45,7 +45,8 @@ pub enum MetricEvent { #[derive(Debug)] pub struct MetricsListener { events_rx: UnboundedReceiver, - pub(crate) sync_metrics: SyncMetrics, + /// underline metrics of stages + pub sync_metrics: SyncMetrics, } impl MetricsListener { diff --git a/crates/stages/src/metrics/mod.rs b/crates/stages-api/src/metrics/mod.rs similarity index 100% rename from crates/stages/src/metrics/mod.rs rename to crates/stages-api/src/metrics/mod.rs diff --git a/crates/stages/src/metrics/sync_metrics.rs b/crates/stages-api/src/metrics/sync_metrics.rs similarity index 64% rename from crates/stages/src/metrics/sync_metrics.rs rename to crates/stages-api/src/metrics/sync_metrics.rs index 148368f024eb5..64c38c21e1d1b 100644 --- a/crates/stages/src/metrics/sync_metrics.rs +++ b/crates/stages-api/src/metrics/sync_metrics.rs @@ -6,14 +6,14 @@ use reth_primitives::stage::StageId; use std::collections::HashMap; #[derive(Debug, Default)] -pub(crate) struct SyncMetrics { - pub(crate) stages: HashMap, - pub(crate) execution_stage: ExecutionStageMetrics, +pub struct 
SyncMetrics { + pub stages: HashMap, + pub execution_stage: ExecutionStageMetrics, } impl SyncMetrics { /// Returns existing or initializes a new instance of [StageMetrics] for the provided [StageId]. - pub(crate) fn get_stage_metrics(&mut self, stage_id: StageId) -> &mut StageMetrics { + pub fn get_stage_metrics(&mut self, stage_id: StageId) -> &mut StageMetrics { self.stages .entry(stage_id) .or_insert_with(|| StageMetrics::new_with_labels(&[("stage", stage_id.to_string())])) @@ -22,19 +22,19 @@ impl SyncMetrics { #[derive(Metrics)] #[metrics(scope = "sync")] -pub(crate) struct StageMetrics { +pub struct StageMetrics { /// The block number of the last commit for a stage. - pub(crate) checkpoint: Gauge, + pub checkpoint: Gauge, /// The number of processed entities of the last commit for a stage, if applicable. - pub(crate) entities_processed: Gauge, + pub entities_processed: Gauge, /// The number of total entities of the last commit for a stage, if applicable. - pub(crate) entities_total: Gauge, + pub entities_total: Gauge, } /// Execution stage metrics. 
#[derive(Metrics)] #[metrics(scope = "sync.execution")] -pub(crate) struct ExecutionStageMetrics { +pub struct ExecutionStageMetrics { /// The total amount of gas processed (in millions) - pub(crate) mgas_processed_total: Counter, + pub mgas_processed_total: Counter, } diff --git a/crates/stages/src/pipeline/builder.rs b/crates/stages-api/src/pipeline/builder.rs similarity index 100% rename from crates/stages/src/pipeline/builder.rs rename to crates/stages-api/src/pipeline/builder.rs diff --git a/crates/stages/src/pipeline/ctrl.rs b/crates/stages-api/src/pipeline/ctrl.rs similarity index 100% rename from crates/stages/src/pipeline/ctrl.rs rename to crates/stages-api/src/pipeline/ctrl.rs diff --git a/crates/stages/src/pipeline/event.rs b/crates/stages-api/src/pipeline/event.rs similarity index 100% rename from crates/stages/src/pipeline/event.rs rename to crates/stages-api/src/pipeline/event.rs diff --git a/crates/stages/src/pipeline/mod.rs b/crates/stages-api/src/pipeline/mod.rs similarity index 98% rename from crates/stages/src/pipeline/mod.rs rename to crates/stages-api/src/pipeline/mod.rs index eb1f40cbd17c2..d19325a3cada8 100644 --- a/crates/stages/src/pipeline/mod.rs +++ b/crates/stages-api/src/pipeline/mod.rs @@ -1,6 +1,8 @@ -use crate::{ - error::*, ExecInput, ExecOutput, MetricEvent, MetricEventsSender, Stage, StageExt, UnwindInput, -}; +mod ctrl; +mod event; +pub use crate::pipeline::ctrl::ControlFlow; +pub use event::*; + use futures_util::Future; use reth_db::database::Database; use reth_interfaces::RethResult; @@ -20,15 +22,22 @@ use tokio::sync::watch; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::*; -mod builder; -mod ctrl; -mod event; -mod progress; -mod set; +// (todo) remove it +#[allow(missing_docs)] +pub mod builder; -pub use crate::pipeline::ctrl::ControlFlow; +// (todo) remove it +#[allow(missing_docs)] +pub mod progress; +// (todo) remove it +#[allow(missing_docs)] +pub mod set; + +use crate::{ + BlockErrorKind, ExecInput, 
ExecOutput, MetricEvent, MetricEventsSender, PipelineError, Stage, + StageError, StageExt, UnwindInput, +}; pub use builder::*; -pub use event::*; use progress::*; pub use set::*; @@ -372,7 +381,7 @@ where let exec_input = ExecInput { target, checkpoint: prev_checkpoint }; self.listeners.notify(PipelineEvent::Prepare { - pipeline_stages_progress: event::PipelineStagesProgress { + pipeline_stages_progress: PipelineStagesProgress { current: stage_index + 1, total: total_stages, }, @@ -390,7 +399,7 @@ where } self.listeners.notify(PipelineEvent::Run { - pipeline_stages_progress: event::PipelineStagesProgress { + pipeline_stages_progress: PipelineStagesProgress { current: stage_index + 1, total: total_stages, }, @@ -415,7 +424,7 @@ where provider_rw.save_stage_checkpoint(stage_id, checkpoint)?; self.listeners.notify(PipelineEvent::Ran { - pipeline_stages_progress: event::PipelineStagesProgress { + pipeline_stages_progress: PipelineStagesProgress { current: stage_index + 1, total: total_stages, }, @@ -896,9 +905,9 @@ mod tests { /// /// - Stage A syncs to block 10 /// - Stage B triggers an unwind, marking block 5 as bad - /// - Stage B unwinds to it's previous progress, block 0 but since it is still at block 0, it is + /// - Stage B unwinds to its previous progress, block 0 but since it is still at block 0, it is /// skipped entirely (there is nothing to unwind) - /// - Stage A unwinds to it's previous progress, block 0 + /// - Stage A unwinds to its previous progress, block 0 /// - Stage A syncs back up to block 10 /// - Stage B syncs to block 10 /// - The pipeline finishes diff --git a/crates/stages/src/pipeline/progress.rs b/crates/stages-api/src/pipeline/progress.rs similarity index 77% rename from crates/stages/src/pipeline/progress.rs rename to crates/stages-api/src/pipeline/progress.rs index 1c4bbcf6cb014..cb124a8bc00cb 100644 --- a/crates/stages/src/pipeline/progress.rs +++ b/crates/stages-api/src/pipeline/progress.rs @@ -1,15 +1,14 @@ -use 
super::ctrl::ControlFlow; -use crate::util::opt; +use crate::{util::opt, ControlFlow}; use reth_primitives::BlockNumber; #[derive(Debug, Default)] -pub(crate) struct PipelineProgress { +pub struct PipelineProgress { /// Block number reached by the stage. - pub(crate) block_number: Option, + pub block_number: Option, /// The maximum block number achieved by any stage during the execution of the pipeline. - pub(crate) maximum_block_number: Option, + pub maximum_block_number: Option, /// The minimum block number achieved by any stage during the execution of the pipeline. - pub(crate) minimum_block_number: Option, + pub minimum_block_number: Option, } impl PipelineProgress { diff --git a/crates/stages/src/pipeline/set.rs b/crates/stages-api/src/pipeline/set.rs similarity index 100% rename from crates/stages/src/pipeline/set.rs rename to crates/stages-api/src/pipeline/set.rs diff --git a/crates/stages/src/stage.rs b/crates/stages-api/src/stage.rs similarity index 100% rename from crates/stages/src/stage.rs rename to crates/stages-api/src/stage.rs diff --git a/crates/stages-api/src/test_utils/mod.rs b/crates/stages-api/src/test_utils/mod.rs new file mode 100644 index 0000000000000..1a44c2fa82743 --- /dev/null +++ b/crates/stages-api/src/test_utils/mod.rs @@ -0,0 +1,2 @@ +mod stage; +pub use stage::TestStage; diff --git a/crates/stages/src/test_utils/stage.rs b/crates/stages-api/src/test_utils/stage.rs similarity index 100% rename from crates/stages/src/test_utils/stage.rs rename to crates/stages-api/src/test_utils/stage.rs diff --git a/crates/stages/src/util.rs b/crates/stages-api/src/util.rs similarity index 100% rename from crates/stages/src/util.rs rename to crates/stages-api/src/util.rs diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 84ef28e0ff097..4c9fc8dde7db5 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -23,6 +23,7 @@ reth-tokio-util.workspace = true reth-etl.workspace = true reth-static-file.workspace = true 
reth-config.workspace = true +reth-stages-api = {workspace = true , features = ["test-utils"]} # async tokio = { workspace = true, features = ["sync"] } @@ -78,7 +79,7 @@ serde_json.workspace = true pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] } [features] -test-utils = ["reth-interfaces/test-utils", "reth-db/test-utils", "reth-provider/test-utils"] +test-utils = ["reth-interfaces/test-utils", "reth-db/test-utils", "reth-provider/test-utils", "reth-stages-api/test-utils"] [[bench]] name = "criterion" diff --git a/crates/stages/benches/criterion.rs b/crates/stages/benches/criterion.rs index 03cb52383aaed..13f7d5386a3b3 100644 --- a/crates/stages/benches/criterion.rs +++ b/crates/stages/benches/criterion.rs @@ -12,8 +12,8 @@ use reth_primitives::{stage::StageCheckpoint, BlockNumber}; use reth_stages::{ stages::{MerkleStage, SenderRecoveryStage, TransactionLookupStage}, test_utils::TestStageDB, - ExecInput, Stage, StageExt, UnwindInput, }; +use reth_stages_api::{ExecInput, Stage, StageExt, UnwindInput}; use std::{ops::RangeInclusive, sync::Arc}; mod setup; diff --git a/crates/stages/benches/setup/mod.rs b/crates/stages/benches/setup/mod.rs index 76c1faaf0a468..e94fb81b3bb9a 100644 --- a/crates/stages/benches/setup/mod.rs +++ b/crates/stages/benches/setup/mod.rs @@ -18,7 +18,6 @@ use reth_primitives::{fs, Account, Address, SealedBlock, B256, U256}; use reth_stages::{ stages::{AccountHashingStage, StorageHashingStage}, test_utils::{StorageKind, TestStageDB}, - ExecInput, Stage, UnwindInput, }; use reth_trie::StateRoot; use std::{collections::BTreeMap, path::Path, sync::Arc}; @@ -27,6 +26,7 @@ mod constants; mod account_hashing; pub use account_hashing::*; +use reth_stages_api::{ExecInput, Stage, UnwindInput}; pub(crate) type StageRange = (ExecInput, UnwindInput); diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index 6e123f750b5cc..e113025438c95 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ 
-77,12 +77,6 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod error; -mod metrics; -mod pipeline; -mod stage; -mod util; - #[allow(missing_docs)] #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; @@ -95,7 +89,5 @@ pub mod stages; pub mod sets; -pub use crate::metrics::*; -pub use error::*; -pub use pipeline::*; -pub use stage::*; +// re-export the stages API +pub use reth_stages_api::*; diff --git a/crates/stages/src/prelude.rs b/crates/stages/src/prelude.rs index 3826c8d2c350f..a6c56e56e1fac 100644 --- a/crates/stages/src/prelude.rs +++ b/crates/stages/src/prelude.rs @@ -1,8 +1,4 @@ -pub use crate::{ - error::{PipelineError, StageError}, - pipeline::{Pipeline, PipelineBuilder, PipelineEvent, StageSet, StageSetBuilder}, - sets::{ - DefaultStages, ExecutionStages, HashingStages, HistoryIndexingStages, OfflineStages, - OnlineStages, - }, +pub use crate::sets::{ + DefaultStages, ExecutionStages, HashingStages, HistoryIndexingStages, OfflineStages, + OnlineStages, }; diff --git a/crates/stages/src/stages/bodies.rs b/crates/stages/src/stages/bodies.rs index b52274b1e3dfb..6dfe7a6a8d2cd 100644 --- a/crates/stages/src/stages/bodies.rs +++ b/crates/stages/src/stages/bodies.rs @@ -1,5 +1,11 @@ -use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; +use std::{ + cmp::Ordering, + task::{ready, Context, Poll}, +}; + use futures_util::TryStreamExt; +use tracing::*; + use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, database::Database, @@ -19,11 +25,9 @@ use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, BlockReader, DatabaseProviderRW, HeaderProvider, ProviderError, StatsReader, }; -use std::{ - cmp::Ordering, - task::{ready, Context, Poll}, -}; -use tracing::*; +use reth_stages_api::{ExecInput, ExecOutput, StageError, UnwindInput, UnwindOutput}; + +use reth_stages_api::Stage; // TODO(onbjerg): Metrics and events (gradual status for e.g. CLI) /// The body stage downloads block bodies. 
@@ -374,14 +378,17 @@ fn stage_checkpoint( #[cfg(test)] mod tests { - use super::*; - use crate::test_utils::{ - stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, - }; use assert_matches::assert_matches; + use reth_primitives::stage::StageUnitCheckpoint; use test_utils::*; + use crate::test_utils::{ + stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, + }; + + use super::*; + stage_test_suite_ext!(BodyTestRunner, body); /// Checks that the stage downloads at most `batch_size` blocks. @@ -588,15 +595,16 @@ mod tests { } mod test_utils { - use crate::{ - stages::bodies::BodyStage, - test_utils::{ - ExecuteStageTestRunner, StageTestRunner, TestRunnerError, TestStageDB, - UnwindStageTestRunner, - }, - ExecInput, ExecOutput, UnwindInput, + use std::{ + collections::{HashMap, VecDeque}, + ops::RangeInclusive, + pin::Pin, + sync::Arc, + task::{Context, Poll}, }; + use futures_util::Stream; + use reth_db::{ cursor::DbCursorRO, models::{StoredBlockBodyIndices, StoredBlockOmmers}, @@ -626,12 +634,14 @@ mod tests { use reth_provider::{ providers::StaticFileWriter, HeaderProvider, ProviderFactory, TransactionsProvider, }; - use std::{ - collections::{HashMap, VecDeque}, - ops::RangeInclusive, - pin::Pin, - sync::Arc, - task::{Context, Poll}, + use reth_stages_api::{ExecInput, ExecOutput, UnwindInput}; + + use crate::{ + stages::bodies::BodyStage, + test_utils::{ + ExecuteStageTestRunner, StageTestRunner, TestRunnerError, TestStageDB, + UnwindStageTestRunner, + }, }; /// The block hash of the genesis block. 
diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 7c6cefbb83f48..ecaae33c315a7 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -1,7 +1,4 @@ -use crate::{ - stages::MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, BlockErrorKind, ExecInput, ExecOutput, - MetricEvent, MetricEventsSender, Stage, StageError, UnwindInput, UnwindOutput, -}; +use crate::stages::MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD; use num_traits::Zero; use reth_db::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, @@ -22,6 +19,10 @@ use reth_provider::{ BlockReader, DatabaseProviderRW, ExecutorFactory, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, StatsReader, TransactionVariant, }; +use reth_stages_api::{ + BlockErrorKind, ExecInput, ExecOutput, MetricEvent, MetricEventsSender, Stage, StageError, + UnwindInput, UnwindOutput, +}; use std::{ cmp::Ordering, ops::RangeInclusive, diff --git a/crates/stages/src/stages/finish.rs b/crates/stages/src/stages/finish.rs index e0e0057c3c7ec..c7b2f5a8efaec 100644 --- a/crates/stages/src/stages/finish.rs +++ b/crates/stages/src/stages/finish.rs @@ -1,7 +1,7 @@ -use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_db::database::Database; use reth_primitives::stage::{StageCheckpoint, StageId}; use reth_provider::DatabaseProviderRW; +use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; /// The finish stage. 
/// diff --git a/crates/stages/src/stages/hashing_account.rs b/crates/stages/src/stages/hashing_account.rs index 4afccc77f8f06..051b6a85f9321 100644 --- a/crates/stages/src/stages/hashing_account.rs +++ b/crates/stages/src/stages/hashing_account.rs @@ -1,4 +1,3 @@ -use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use itertools::Itertools; use reth_config::config::EtlConfig; use reth_db::{ @@ -16,6 +15,7 @@ use reth_primitives::{ Account, B256, }; use reth_provider::{AccountExtReader, DatabaseProviderRW, HashingWriter, StatsReader}; +use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use std::{ fmt::Debug, ops::{Range, RangeInclusive}, diff --git a/crates/stages/src/stages/hashing_storage.rs b/crates/stages/src/stages/hashing_storage.rs index 54f4b9520eb69..97da1278d8c0a 100644 --- a/crates/stages/src/stages/hashing_storage.rs +++ b/crates/stages/src/stages/hashing_storage.rs @@ -1,4 +1,3 @@ -use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use itertools::Itertools; use reth_config::config::EtlConfig; use reth_db::{ @@ -18,6 +17,7 @@ use reth_primitives::{ BufMut, StorageEntry, B256, }; use reth_provider::{DatabaseProviderRW, HashingWriter, StatsReader, StorageReader}; +use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use std::{ fmt::Debug, sync::mpsc::{self, Receiver}, diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index 3d71d877683d1..a862d4afcb396 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -1,4 +1,3 @@ -use crate::{BlockErrorKind, ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use futures_util::StreamExt; use reth_codecs::Compact; use reth_config::config::EtlConfig; @@ -26,6 +25,9 @@ use reth_provider::{ BlockHashReader, DatabaseProviderRW, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, 
HeaderSyncMode, }; +use reth_stages_api::{ + BlockErrorKind, ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput, +}; use std::{ sync::Arc, task::{ready, Context, Poll}, diff --git a/crates/stages/src/stages/index_account_history.rs b/crates/stages/src/stages/index_account_history.rs index 89c77d6e1b869..d4524065127d4 100644 --- a/crates/stages/src/stages/index_account_history.rs +++ b/crates/stages/src/stages/index_account_history.rs @@ -1,5 +1,4 @@ use super::{collect_history_indices, load_history_indices}; -use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_config::config::EtlConfig; use reth_db::{ database::Database, models::ShardedKey, table::Decode, tables, transaction::DbTxMut, @@ -11,6 +10,7 @@ use reth_primitives::{ use reth_provider::{ DatabaseProviderRW, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter, }; +use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use std::fmt::Debug; use tracing::info; diff --git a/crates/stages/src/stages/index_storage_history.rs b/crates/stages/src/stages/index_storage_history.rs index b321f1c56211e..6d5b6e2ade842 100644 --- a/crates/stages/src/stages/index_storage_history.rs +++ b/crates/stages/src/stages/index_storage_history.rs @@ -1,5 +1,4 @@ use super::{collect_history_indices, load_history_indices}; -use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_config::config::EtlConfig; use reth_db::{ database::Database, @@ -15,6 +14,7 @@ use reth_primitives::{ use reth_provider::{ DatabaseProviderRW, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter, }; +use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use std::fmt::Debug; use tracing::info; diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index e1d651169100f..9b4eec87f8db7 100644 --- a/crates/stages/src/stages/merkle.rs +++ 
b/crates/stages/src/stages/merkle.rs @@ -1,4 +1,3 @@ -use crate::{BlockErrorKind, ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_codecs::Compact; use reth_db::{ database::Database, @@ -15,6 +14,9 @@ use reth_provider::{ DatabaseProviderRW, HeaderProvider, ProviderError, StageCheckpointReader, StageCheckpointWriter, StatsReader, }; +use reth_stages_api::{ + BlockErrorKind, ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput, +}; use reth_trie::{IntermediateStateRootState, StateRoot, StateRootProgress}; use std::fmt::Debug; use tracing::*; diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index 8d97491f1f49b..3539451f10635 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -40,7 +40,7 @@ use utils::*; #[cfg(test)] mod tests { use super::*; - use crate::{stage::Stage, test_utils::TestStageDB, ExecInput}; + use crate::test_utils::TestStageDB; use alloy_rlp::Decodable; use reth_db::{ cursor::DbCursorRO, @@ -61,6 +61,7 @@ mod tests { StorageReader, }; use reth_revm::EvmProcessorFactory; + use reth_stages_api::{ExecInput, Stage}; use std::sync::Arc; #[tokio::test] diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index afb65c560605e..04a30cb2e7e5e 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -1,4 +1,3 @@ -use crate::{BlockErrorKind, ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_db::{ cursor::DbCursorRW, database::Database, @@ -16,6 +15,9 @@ use reth_provider::{ BlockReader, DatabaseProviderRW, HeaderProvider, ProviderError, PruneCheckpointReader, StatsReader, }; +use reth_stages_api::{ + BlockErrorKind, ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput, +}; use std::{fmt::Debug, ops::Range, sync::mpsc}; use thiserror::Error; use tracing::*; diff --git a/crates/stages/src/stages/tx_lookup.rs 
b/crates/stages/src/stages/tx_lookup.rs index 7bdeb4e1a5933..918be21c5ba94 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -1,4 +1,3 @@ -use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use num_traits::Zero; use reth_config::config::EtlConfig; use reth_db::{ @@ -18,6 +17,7 @@ use reth_provider::{ BlockReader, DatabaseProviderRW, PruneCheckpointReader, PruneCheckpointWriter, StatsReader, TransactionsProvider, TransactionsProviderExt, }; +use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use tracing::*; /// The transaction lookup stage. diff --git a/crates/stages/src/stages/utils.rs b/crates/stages/src/stages/utils.rs index e040210909576..be0ecc7f64ae1 100644 --- a/crates/stages/src/stages/utils.rs +++ b/crates/stages/src/stages/utils.rs @@ -1,5 +1,4 @@ //! Utils for `stages`. -use crate::StageError; use reth_config::config::EtlConfig; use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, @@ -10,6 +9,7 @@ use reth_db::{ }; use reth_etl::Collector; use reth_primitives::BlockNumber; +use reth_stages_api::StageError; use std::{collections::HashMap, hash::Hash, ops::RangeBounds}; use tracing::info; diff --git a/crates/stages/src/test_utils/macros.rs b/crates/stages/src/test_utils/macros.rs index 0ffb16f08611d..0ce346d704f30 100644 --- a/crates/stages/src/test_utils/macros.rs +++ b/crates/stages/src/test_utils/macros.rs @@ -9,7 +9,7 @@ macro_rules! stage_test_suite { let runner = $runner::default(); // Execute the stage with empty database - let input = crate::stage::ExecInput::default(); + let input = reth_stages_api::ExecInput::default(); // Run stage execution let result = runner.execute(input).await; @@ -34,7 +34,7 @@ macro_rules! 
stage_test_suite { // Set up the runner let mut runner = $runner::default(); - let input = crate::stage::ExecInput { + let input = reth_stages_api::ExecInput { target: Some(target), checkpoint: Some(reth_primitives::stage::StageCheckpoint::new(current_checkpoint)), }; @@ -67,10 +67,10 @@ macro_rules! stage_test_suite { async fn [< unwind_no_new_entries_ $name>] () { // Set up the runner let mut runner = $runner::default(); - let input = crate::stage::UnwindInput::default(); + let input = reth_stages_api::UnwindInput::default(); // Seed the database - runner.seed_execution(crate::stage::ExecInput::default()).expect("failed to seed"); + runner.seed_execution(reth_stages_api::ExecInput::default()).expect("failed to seed"); runner.before_unwind(input).expect("failed to execute before_unwind hook"); @@ -98,7 +98,7 @@ macro_rules! stage_test_suite { // Set up the runner let mut runner = $runner::default(); - let execute_input = crate::stage::ExecInput { + let execute_input = reth_stages_api::ExecInput { target: Some(target), checkpoint: Some(reth_primitives::stage::StageCheckpoint::new(current_checkpoint)), }; @@ -125,7 +125,7 @@ macro_rules! stage_test_suite { // Run stage unwind - let unwind_input = crate::stage::UnwindInput { + let unwind_input = reth_stages_api::UnwindInput { unwind_to: current_checkpoint, checkpoint: reth_primitives::stage::StageCheckpoint::new(target), bad_block: None, @@ -165,7 +165,7 @@ macro_rules! 
stage_test_suite_ext { // Set up the runner let mut runner = $runner::default(); - let input = crate::stage::ExecInput { + let input = reth_stages_api::ExecInput { target: Some(current_checkpoint), checkpoint: Some(reth_primitives::stage::StageCheckpoint::new(current_checkpoint)), }; diff --git a/crates/stages/src/test_utils/mod.rs b/crates/stages/src/test_utils/mod.rs index dd788bca74e36..9e60417551ab9 100644 --- a/crates/stages/src/test_utils/mod.rs +++ b/crates/stages/src/test_utils/mod.rs @@ -15,9 +15,6 @@ pub(crate) use runner::{ mod test_db; pub use test_db::{StorageKind, TestStageDB}; -mod stage; -pub use stage::TestStage; - mod set; pub use set::TestStages; diff --git a/crates/stages/src/test_utils/runner.rs b/crates/stages/src/test_utils/runner.rs index b8ef44084f3e8..fd2064ac47f99 100644 --- a/crates/stages/src/test_utils/runner.rs +++ b/crates/stages/src/test_utils/runner.rs @@ -1,8 +1,10 @@ use super::TestStageDB; -use crate::{ExecInput, ExecOutput, Stage, StageError, StageExt, UnwindInput, UnwindOutput}; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_interfaces::db::DatabaseError; use reth_provider::ProviderError; +use reth_stages_api::{ + ExecInput, ExecOutput, Stage, StageError, StageExt, UnwindInput, UnwindOutput, +}; use std::sync::Arc; use tokio::sync::oneshot; diff --git a/crates/stages/src/test_utils/set.rs b/crates/stages/src/test_utils/set.rs index c5b14928456c5..f740d4131c340 100644 --- a/crates/stages/src/test_utils/set.rs +++ b/crates/stages/src/test_utils/set.rs @@ -1,6 +1,7 @@ -use super::{TestStage, TEST_STAGE_ID}; -use crate::{ExecOutput, StageError, StageSet, StageSetBuilder, UnwindOutput}; +use super::TEST_STAGE_ID; +use crate::{StageSet, StageSetBuilder}; use reth_db::database::Database; +use reth_stages_api::{test_utils::TestStage, ExecOutput, StageError, UnwindOutput}; use std::collections::VecDeque; #[derive(Default, Debug)] From cf163ba9d81c9e11ee3798b8474c1b4c679c24e2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz 
Date: Fri, 19 Apr 2024 13:43:16 +0200 Subject: [PATCH 234/700] feat: group optimism crates (#7740) --- Cargo.toml | 4 ++-- crates/{node-optimism => optimism/node}/Cargo.toml | 0 crates/{node-optimism => optimism/node}/src/args.rs | 0 crates/{node-optimism => optimism/node}/src/engine.rs | 0 crates/{node-optimism => optimism/node}/src/evm/execute.rs | 0 crates/{node-optimism => optimism/node}/src/evm/mod.rs | 0 crates/{node-optimism => optimism/node}/src/lib.rs | 0 crates/{node-optimism => optimism/node}/src/node.rs | 0 crates/{node-optimism => optimism/node}/src/rpc.rs | 0 crates/{node-optimism => optimism/node}/src/txpool.rs | 0 crates/{node-optimism => optimism/node}/tests/it/builder.rs | 0 crates/{node-optimism => optimism/node}/tests/it/main.rs | 0 12 files changed, 2 insertions(+), 2 deletions(-) rename crates/{node-optimism => optimism/node}/Cargo.toml (100%) rename crates/{node-optimism => optimism/node}/src/args.rs (100%) rename crates/{node-optimism => optimism/node}/src/engine.rs (100%) rename crates/{node-optimism => optimism/node}/src/evm/execute.rs (100%) rename crates/{node-optimism => optimism/node}/src/evm/mod.rs (100%) rename crates/{node-optimism => optimism/node}/src/lib.rs (100%) rename crates/{node-optimism => optimism/node}/src/node.rs (100%) rename crates/{node-optimism => optimism/node}/src/rpc.rs (100%) rename crates/{node-optimism => optimism/node}/src/txpool.rs (100%) rename crates/{node-optimism => optimism/node}/tests/it/builder.rs (100%) rename crates/{node-optimism => optimism/node}/tests/it/main.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index e025f29d52e97..81f1c9d6ec8de 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,7 +48,7 @@ members = [ "crates/ethereum-engine-primitives/", "crates/node-ethereum/", "crates/node-builder/", - "crates/node-optimism/", + "crates/optimism/node/", "crates/node-core/", "crates/node-api/", "crates/node-e2e-tests/", @@ -216,7 +216,7 @@ reth-engine-primitives = { path = "crates/engine-primitives" } 
reth-ethereum-engine-primitives = { path = "crates/ethereum-engine-primitives" } reth-node-builder = { path = "crates/node-builder" } reth-node-ethereum = { path = "crates/node-ethereum" } -reth-node-optimism = { path = "crates/node-optimism" } +reth-node-optimism = { path = "crates/optimism/node" } reth-node-core = { path = "crates/node-core" } reth-node-api = { path = "crates/node-api" } reth-downloaders = { path = "crates/net/downloaders" } diff --git a/crates/node-optimism/Cargo.toml b/crates/optimism/node/Cargo.toml similarity index 100% rename from crates/node-optimism/Cargo.toml rename to crates/optimism/node/Cargo.toml diff --git a/crates/node-optimism/src/args.rs b/crates/optimism/node/src/args.rs similarity index 100% rename from crates/node-optimism/src/args.rs rename to crates/optimism/node/src/args.rs diff --git a/crates/node-optimism/src/engine.rs b/crates/optimism/node/src/engine.rs similarity index 100% rename from crates/node-optimism/src/engine.rs rename to crates/optimism/node/src/engine.rs diff --git a/crates/node-optimism/src/evm/execute.rs b/crates/optimism/node/src/evm/execute.rs similarity index 100% rename from crates/node-optimism/src/evm/execute.rs rename to crates/optimism/node/src/evm/execute.rs diff --git a/crates/node-optimism/src/evm/mod.rs b/crates/optimism/node/src/evm/mod.rs similarity index 100% rename from crates/node-optimism/src/evm/mod.rs rename to crates/optimism/node/src/evm/mod.rs diff --git a/crates/node-optimism/src/lib.rs b/crates/optimism/node/src/lib.rs similarity index 100% rename from crates/node-optimism/src/lib.rs rename to crates/optimism/node/src/lib.rs diff --git a/crates/node-optimism/src/node.rs b/crates/optimism/node/src/node.rs similarity index 100% rename from crates/node-optimism/src/node.rs rename to crates/optimism/node/src/node.rs diff --git a/crates/node-optimism/src/rpc.rs b/crates/optimism/node/src/rpc.rs similarity index 100% rename from crates/node-optimism/src/rpc.rs rename to 
crates/optimism/node/src/rpc.rs diff --git a/crates/node-optimism/src/txpool.rs b/crates/optimism/node/src/txpool.rs similarity index 100% rename from crates/node-optimism/src/txpool.rs rename to crates/optimism/node/src/txpool.rs diff --git a/crates/node-optimism/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs similarity index 100% rename from crates/node-optimism/tests/it/builder.rs rename to crates/optimism/node/tests/it/builder.rs diff --git a/crates/node-optimism/tests/it/main.rs b/crates/optimism/node/tests/it/main.rs similarity index 100% rename from crates/node-optimism/tests/it/main.rs rename to crates/optimism/node/tests/it/main.rs From f4dda95cbd23ffb883ffd498c06d31007d7578cd Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 19 Apr 2024 14:17:31 +0200 Subject: [PATCH 235/700] feat(op): allow import without state (#7661) --- Cargo.lock | 167 +++++++++++++------------- bin/reth/src/commands/import.rs | 27 +++-- crates/stages-api/src/pipeline/set.rs | 25 ++++ 3 files changed, 127 insertions(+), 92 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b08629d28d385..f63c384028052 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -332,7 +332,7 @@ checksum = "1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -471,7 +471,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", "syn-solidity", "tiny-keccak", ] @@ -489,7 +489,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.57", + "syn 2.0.58", "syn-solidity", ] @@ -652,7 +652,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -825,9 +825,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a116f46a969224200a0a97f29cfd4c50e7534e4b4826bd23ea2c3c533039c82c" +checksum = "07dbbf24db18d609b1462965249abdf49129ccad073ec257da372adc83259c60" dependencies = [ "brotli", "flate2", @@ -872,7 +872,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -883,7 +883,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -915,7 +915,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -1043,7 +1043,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.57", + "syn 2.0.58", "which", ] @@ -1240,7 +1240,7 @@ checksum = "6be9c93793b60dac381af475b98634d4b451e28336e72218cad9a20176218dbc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", "synstructure", ] @@ -1280,9 +1280,9 @@ dependencies = [ [[package]] name = "brotli" -version = "3.5.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d640d25bc63c50fb1f0b545ffd80207d2e10a4c965530809b40ba3386825c391" +checksum = "125740193d7fee5cc63ab9e16c2fdc4e07c74ba755cc53b327d6ea029e9fc569" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1291,9 +1291,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "2.5.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e2e4afe60d7dd600fdd3de8d0f08c2b7ec039712e3b6137ff98b7004e82de4f" +checksum = "65622a320492e09b5e0ac436b14c54ff68199bac392d0e89a6832c4518eea525" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1349,7 +1349,7 @@ checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] 
[[package]] @@ -1541,7 +1541,7 @@ dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.11.0", + "strsim 0.11.1", ] [[package]] @@ -1553,7 +1553,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -1630,13 +1630,13 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "comfy-table" -version = "7.1.0" +version = "7.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c64043d6c7b7a4c58e39e7efccfdea7b93d885a795d0c054a69dbbf4dd52686" +checksum = "b34115915337defe99b2aff5c2ce6771e5fbc4079f4b506301f5cf394c8452f7" dependencies = [ "crossterm", - "strum 0.25.0", - "strum_macros 0.25.3", + "strum 0.26.2", + "strum_macros 0.26.2", "unicode-width", ] @@ -1990,7 +1990,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -2147,7 +2147,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -2180,7 +2180,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core 0.20.8", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -2249,9 +2249,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", "zeroize", @@ -2286,7 +2286,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -2437,7 +2437,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - 
"syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -2634,7 +2634,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -2647,7 +2647,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -2658,7 +2658,7 @@ checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -2970,7 +2970,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -3169,9 +3169,9 @@ dependencies = [ [[package]] name = "half" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5eceaaeec696539ddaf7b333340f1af35a5aa87ae3e4f3ead0532f72affab2e" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" dependencies = [ "cfg-if", "crunchy", @@ -3462,7 +3462,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2 0.5.6", "tokio", "tower-service", "tracing", @@ -3561,7 +3561,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -3711,7 +3711,7 @@ checksum = "d2abdd3a62551e8337af119c5899e600ca0c88ec8f23a46c60ba216c803dcf1a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -4092,7 +4092,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -4388,13 +4388,12 @@ dependencies = [ [[package]] name = "libredox" -version = "0.0.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ "bitflags 
2.5.0", "libc", - "redox_syscall 0.4.1", ] [[package]] @@ -4639,7 +4638,7 @@ checksum = "38b4faf00617defe497754acde3024865bc143d44a86799b24e191ecff91354f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -4779,7 +4778,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -5051,7 +5050,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -5289,9 +5288,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.8" +version = "2.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f8023d0fb78c8e03784ea1c7f3fa36e68a723138990b8d5a47d916b651e7a8" +checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" dependencies = [ "memchr", "thiserror", @@ -5341,7 +5340,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -5370,7 +5369,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -5550,7 +5549,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d3928fb5db768cb86f891ff014f0144589297e3c6a1aba6ed7cecfdace270c7" dependencies = [ "proc-macro2", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -5908,9 +5907,9 @@ dependencies = [ [[package]] name = "redox_users" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" +checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ "getrandom 0.2.14", "libredox", @@ -5963,9 +5962,9 @@ checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "regress" -version = 
"0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d06f9a1f7cd8473611ba1a480cf35f9c5cffc2954336ba90a982fdb7e7d7f51e" +checksum = "0eae2a1ebfecc58aff952ef8ccd364329abe627762f5bf09ff42eb9d98522479" dependencies = [ "hashbrown 0.14.3", "memchr", @@ -6280,7 +6279,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -6730,7 +6729,7 @@ dependencies = [ "quote", "regex", "serial_test", - "syn 2.0.57", + "syn 2.0.58", "trybuild", ] @@ -7997,9 +7996,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" [[package]] name = "rusty-fork" @@ -8216,7 +8215,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -8290,7 +8289,7 @@ dependencies = [ "darling 0.20.8", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -8315,7 +8314,7 @@ checksum = "b93fb4adc70021ac1b47f7d45e8cc4169baaa7ea58483bc5b721d19a26202212" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -8625,9 +8624,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "strsim" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "strum" @@ -8657,7 +8656,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -8670,7 +8669,7 @@ dependencies = [ 
"proc-macro2", "quote", "rustversion", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -8738,9 +8737,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.57" +version = "2.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11a6ae1e52eb25aab8f3fb9fca13be982a373b8f1157ca14b897a825ba4a2d35" +checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" dependencies = [ "proc-macro2", "quote", @@ -8756,7 +8755,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -8773,7 +8772,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -8865,7 +8864,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -8904,7 +8903,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -9062,7 +9061,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -9275,7 +9274,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -9738,7 +9737,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", "wasm-bindgen-shared", ] @@ -9772,7 +9771,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9835,9 +9834,9 @@ dependencies = [ [[package]] name = "widestring" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" +checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "winapi" @@ -10098,9 +10097,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcb9cbac069e033553e8bb871be2fbdffcab578eb25bd0f7c508cedc6dcd75a" +checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193" [[package]] name = "xmltree" @@ -10131,7 +10130,7 @@ checksum = "9e6936f0cce458098a201c245a11bef556c6a0181129c7034d10d76d1ec3a2b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", "synstructure", ] @@ -10152,7 +10151,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -10172,7 +10171,7 @@ checksum = "e6a647510471d372f2e6c2e6b7219e44d8c574d24fdc11c610a61455782f18c3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", "synstructure", ] @@ -10193,7 +10192,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] @@ -10215,7 +10214,7 @@ checksum = "7b4e5997cbf58990550ef1f0e5124a05e47e1ebd33a84af25739be6031a62c20" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.58", ] [[package]] diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index d15106ce3ffbe..d28ebcf40e626 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -41,6 +41,17 @@ use std::{path::PathBuf, sync::Arc}; use tokio::sync::watch; use tracing::{debug, info}; +/// Stages that require state. 
+const STATE_STAGES: &[StageId] = &[ + StageId::Execution, + StageId::MerkleUnwind, + StageId::AccountHashing, + StageId::StorageHashing, + StageId::MerkleExecute, + StageId::IndexStorageHistory, + StageId::IndexAccountHistory, +]; + /// Syncs RLP encoded blocks from a file. #[derive(Debug, Parser)] pub struct ImportCommand { @@ -70,9 +81,9 @@ pub struct ImportCommand { )] chain: Arc, - /// Disables execution stage. + /// Disables stages that require state. #[arg(long, verbatim_doc_comment)] - disable_execution: bool, + no_state: bool, /// Import OP Mainnet chain below Bedrock. Caution! Flag must be set as env var, since the env /// var is read by another process too, in order to make below Bedrock import work. @@ -100,12 +111,12 @@ impl ImportCommand { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); if self.op_mainnet_below_bedrock { - self.disable_execution = true; + self.no_state = true; debug!(target: "reth::cli", "Importing OP mainnet below bedrock"); } - if self.disable_execution { - debug!(target: "reth::cli", "Execution stage disabled"); + if self.no_state { + debug!(target: "reth::cli", "Stages requiring state disabled"); } debug!(target: "reth::cli", @@ -163,7 +174,7 @@ impl ImportCommand { provider_factory.static_file_provider(), PruneModes::default(), ), - self.disable_execution, + self.no_state, ) .await?; @@ -201,7 +212,7 @@ impl ImportCommand { consensus: &Arc, file_client: Arc, static_file_producer: StaticFileProducer, - disable_execution: bool, + no_state: bool, ) -> eyre::Result<(Pipeline, impl Stream)> where DB: Database + Clone + Unpin + 'static, @@ -263,7 +274,7 @@ impl ImportCommand { .max(config.stages.storage_hashing.clean_threshold), config.prune.clone().map(|prune| prune.segments).unwrap_or_default(), )) - .disable_if(StageId::Execution, || disable_execution), + .disable_all_if(STATE_STAGES, || no_state), ) .build(provider_factory, static_file_producer); diff --git a/crates/stages-api/src/pipeline/set.rs 
b/crates/stages-api/src/pipeline/set.rs index dde9e02380185..ede824359e076 100644 --- a/crates/stages-api/src/pipeline/set.rs +++ b/crates/stages-api/src/pipeline/set.rs @@ -187,6 +187,18 @@ where self } + /// Disables all given stages. See [`disable`](Self::disable). + pub fn disable_all(mut self, stages: &[StageId]) -> Self { + for stage_id in stages { + let entry = self + .stages + .get_mut(stage_id) + .expect("Cannot disable a stage that is not in the set."); + entry.enabled = false; + } + self + } + /// Disables the given stage if the given closure returns true. /// /// See [Self::disable] @@ -200,6 +212,19 @@ where self } + /// Disables all given stages if the given closure returns true. + /// + /// See [Self::disable] + pub fn disable_all_if(self, stages: &[StageId], f: F) -> Self + where + F: FnOnce() -> bool, + { + if f() { + return self.disable_all(stages) + } + self + } + /// Consumes the builder and returns the contained [`Stage`]s in the order specified. pub fn build(mut self) -> Vec>> { let mut stages = Vec::new(); From 8a0f8cf835d9059cf334616b338fdc1ab7af4d1b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 19 Apr 2024 14:25:11 +0200 Subject: [PATCH 236/700] chore: stages crates touchups (#7742) --- Cargo.lock | 11 +--------- Cargo.toml | 1 + crates/interfaces/Cargo.toml | 2 +- crates/net/eth-wire-types/Cargo.toml | 1 - crates/net/network/Cargo.toml | 2 +- crates/stages-api/Cargo.toml | 17 ++++++++------ crates/stages-api/src/lib.rs | 16 +++++++++++++- crates/stages-api/src/pipeline/mod.rs | 1 - .../{test_utils/stage.rs => test_utils.rs} | 5 +++++ crates/stages-api/src/test_utils/mod.rs | 2 -- crates/stages/Cargo.toml | 22 +++++-------------- crates/stages/src/lib.rs | 3 ++- crates/stages/src/sets.rs | 2 +- crates/stages/src/stages/execution.rs | 2 +- crates/stages/src/stages/mod.rs | 2 +- crates/storage/provider/Cargo.toml | 2 +- crates/transaction-pool/Cargo.toml | 2 +- crates/trie/Cargo.toml | 2 +- 18 files changed, 47 insertions(+), 48 
deletions(-) rename crates/stages-api/src/{test_utils/stage.rs => test_utils.rs} (92%) delete mode 100644 crates/stages-api/src/test_utils/mod.rs diff --git a/Cargo.lock b/Cargo.lock index f63c384028052..d8bde9658f7dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7400,41 +7400,32 @@ name = "reth-stages" version = "0.2.0-beta.5" dependencies = [ "alloy-rlp", - "aquamarine", "assert_matches", - "auto_impl", "criterion", "futures-util", "itertools 0.12.1", - "metrics", "num-traits", "paste", "pprof", "rand 0.8.5", "rayon", - "reth-blockchain-tree", "reth-codecs", "reth-config", "reth-db", "reth-downloaders", - "reth-eth-wire", "reth-etl", + "reth-evm-ethereum", "reth-interfaces", - "reth-metrics", - "reth-node-ethereum", - "reth-node-optimism", "reth-primitives", "reth-provider", "reth-revm", "reth-stages-api", "reth-static-file", - "reth-tokio-util", "reth-trie", "serde_json", "tempfile", "thiserror", "tokio", - "tokio-stream", "tracing", ] diff --git a/Cargo.toml b/Cargo.toml index 81f1c9d6ec8de..b0402d44cd7c9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -298,6 +298,7 @@ alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } # misc +auto_impl = "1" aquamarine = "0.5" bytes = "1.5" bitflags = "2.4" diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index 2dde42801e3ab..8f4aa494a0ea4 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -21,7 +21,7 @@ futures.workspace = true tokio = { workspace = true, features = ["sync"] } # misc -auto_impl = "1.0" +auto_impl.workspace = true thiserror.workspace = true tracing.workspace = true secp256k1 = { workspace = true, default-features = false, features = [ diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index 54f3763a24669..7e9365d481e7a 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -7,7 
+7,6 @@ rust-version.workspace = true license.workspace = true homepage.workspace = true repository.workspace = true -exclude.workspace = true [lints] workspace = true diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 079a436348a0d..8e4c110bb501d 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -51,7 +51,7 @@ reth-metrics = { workspace = true, features = ["common"] } metrics.workspace = true # misc -auto_impl = "1" +auto_impl.workspace = true aquamarine.workspace = true tracing.workspace = true fnv = "1.0" diff --git a/crates/stages-api/Cargo.toml b/crates/stages-api/Cargo.toml index 8d1eccd6eae3e..d991a47af55f4 100644 --- a/crates/stages-api/Cargo.toml +++ b/crates/stages-api/Cargo.toml @@ -6,7 +6,9 @@ rust-version.workspace = true license.workspace = true homepage.workspace = true repository.workspace = true -exclude.workspace = true + +[lints] +workspace = true [dependencies] # reth @@ -15,7 +17,6 @@ reth-provider.workspace = true reth-db.workspace = true reth-interfaces.workspace = true reth-static-file.workspace = true -assert_matches.workspace = true reth-tokio-util.workspace = true # metrics @@ -28,13 +29,15 @@ tokio-stream.workspace = true futures-util.workspace = true # misc +aquamarine.workspace = true thiserror.workspace = true tracing.workspace = true -auto_impl = "1" -aquamarine.workspace = true +auto_impl.workspace = true + +[dev-dependencies] +assert_matches.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } +reth-interfaces = { workspace = true, features = ["test-utils"] } [features] test-utils = [] - -[lints] -workspace = true diff --git a/crates/stages-api/src/lib.rs b/crates/stages-api/src/lib.rs index fa6cd74e6407c..084680ce44615 100644 --- a/crates/stages-api/src/lib.rs +++ b/crates/stages-api/src/lib.rs @@ -1,9 +1,21 @@ //! Staged syncing primitives for reth. +//! +//! ## Feature Flags +//! +//! 
- `test-utils`: Utilities for testing + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + mod error; mod metrics; mod pipeline; mod stage; -#[allow(missing_docs)] #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; mod util; @@ -12,3 +24,5 @@ pub use crate::metrics::*; pub use error::*; pub use pipeline::*; pub use stage::*; + +use aquamarine as _; diff --git a/crates/stages-api/src/pipeline/mod.rs b/crates/stages-api/src/pipeline/mod.rs index d19325a3cada8..7f54e13ff370c 100644 --- a/crates/stages-api/src/pipeline/mod.rs +++ b/crates/stages-api/src/pipeline/mod.rs @@ -2,7 +2,6 @@ mod ctrl; mod event; pub use crate::pipeline::ctrl::ControlFlow; pub use event::*; - use futures_util::Future; use reth_db::database::Database; use reth_interfaces::RethResult; diff --git a/crates/stages-api/src/test_utils/stage.rs b/crates/stages-api/src/test_utils.rs similarity index 92% rename from crates/stages-api/src/test_utils/stage.rs rename to crates/stages-api/src/test_utils.rs index a76e46e67cd6b..3caba0737baaa 100644 --- a/crates/stages-api/src/test_utils/stage.rs +++ b/crates/stages-api/src/test_utils.rs @@ -1,9 +1,14 @@ +#![allow(missing_docs)] + use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_db::database::Database; use reth_primitives::stage::StageId; use reth_provider::DatabaseProviderRW; use std::collections::VecDeque; +/// A test stage that can be used for testing. 
+/// +/// This can be used to mock expected outputs of [Stage::execute] and [Stage::unwind] #[derive(Debug)] pub struct TestStage { id: StageId, diff --git a/crates/stages-api/src/test_utils/mod.rs b/crates/stages-api/src/test_utils/mod.rs deleted file mode 100644 index 1a44c2fa82743..0000000000000 --- a/crates/stages-api/src/test_utils/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -mod stage; -pub use stage::TestStage; diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 4c9fc8dde7db5..00aff1fd67503 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -19,46 +19,33 @@ reth-db.workspace = true reth-codecs.workspace = true reth-provider.workspace = true reth-trie = { workspace = true, features = ["metrics"] } -reth-tokio-util.workspace = true reth-etl.workspace = true -reth-static-file.workspace = true reth-config.workspace = true reth-stages-api = {workspace = true , features = ["test-utils"]} # async tokio = { workspace = true, features = ["sync"] } -tokio-stream.workspace = true futures-util.workspace = true # observability tracing.workspace = true -# io -tempfile.workspace = true - -# metrics -reth-metrics.workspace = true -metrics.workspace = true - # misc thiserror.workspace = true -aquamarine.workspace = true itertools.workspace = true rayon.workspace = true num-traits = "0.2.15" -auto_impl = "1" +tempfile = { workspace = true, optional = true} [dev-dependencies] # reth reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } reth-db = { workspace = true, features = ["test-utils", "mdbx"] } +reth-evm-ethereum.workspace = true reth-interfaces = { workspace = true, features = ["test-utils"] } reth-downloaders.workspace = true -reth-eth-wire.workspace = true # TODO(onbjerg): We only need this for [BlockBody] -reth-node-ethereum.workspace = true -reth-node-optimism.workspace = true -reth-blockchain-tree.workspace = true reth-revm.workspace = true +reth-static-file.workspace = true reth-trie = { workspace = true, 
features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } @@ -68,6 +55,7 @@ tokio = { workspace = true, features = ["rt", "sync", "macros"] } assert_matches.workspace = true rand.workspace = true paste.workspace = true +tempfile.workspace = true # Stage benchmarks criterion = { workspace = true, features = ["async_futures"] } @@ -79,7 +67,7 @@ serde_json.workspace = true pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] } [features] -test-utils = ["reth-interfaces/test-utils", "reth-db/test-utils", "reth-provider/test-utils", "reth-stages-api/test-utils"] +test-utils = ["reth-interfaces/test-utils", "reth-db/test-utils", "reth-provider/test-utils", "reth-stages-api/test-utils", "dep:tempfile"] [[bench]] name = "criterion" diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index e113025438c95..f8e427763c09b 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -22,7 +22,7 @@ //! # use reth_stages::Pipeline; //! # use reth_stages::sets::DefaultStages; //! # use tokio::sync::watch; -//! # use reth_node_ethereum::EthEvmConfig; +//! # use reth_evm_ethereum::EthEvmConfig; //! # use reth_provider::ProviderFactory; //! # use reth_provider::HeaderSyncMode; //! # use reth_provider::test_utils::create_test_provider_factory; @@ -76,6 +76,7 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] #[allow(missing_docs)] #[cfg(any(test, feature = "test-utils"))] diff --git a/crates/stages/src/sets.rs b/crates/stages/src/sets.rs index 1185de25f0ae2..a92988af9a283 100644 --- a/crates/stages/src/sets.rs +++ b/crates/stages/src/sets.rs @@ -14,7 +14,7 @@ //! # use reth_stages::sets::{OfflineStages}; //! # use reth_revm::EvmProcessorFactory; //! # use reth_primitives::{PruneModes, MAINNET}; -//! # use reth_node_ethereum::EthEvmConfig; +//! 
# use reth_evm_ethereum::EthEvmConfig; //! # use reth_provider::test_utils::create_test_provider_factory; //! # use reth_static_file::StaticFileProducer; //! # use reth_config::config::EtlConfig; diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index ecaae33c315a7..1bb21228e77c2 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -622,8 +622,8 @@ mod tests { use alloy_rlp::Decodable; use assert_matches::assert_matches; use reth_db::models::AccountBeforeTx; + use reth_evm_ethereum::EthEvmConfig; use reth_interfaces::executor::BlockValidationError; - use reth_node_ethereum::EthEvmConfig; use reth_primitives::{ address, hex_literal::hex, keccak256, stage::StageUnitCheckpoint, Account, Address, Bytecode, ChainSpecBuilder, PruneMode, ReceiptsLogPruneConfig, SealedBlock, StorageEntry, diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index 3539451f10635..fe1012f4200fa 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -50,8 +50,8 @@ mod tests { transaction::{DbTx, DbTxMut}, AccountsHistory, DatabaseEnv, }; + use reth_evm_ethereum::EthEvmConfig; use reth_interfaces::test_utils::generators::{self, random_block}; - use reth_node_ethereum::EthEvmConfig; use reth_primitives::{ address, hex_literal::hex, keccak256, Account, Bytecode, ChainSpecBuilder, PruneMode, PruneModes, SealedBlock, StaticFileSegment, U256, diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index c54adc050c5e9..6f9305e88b185 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -35,7 +35,7 @@ reth-metrics.workspace = true metrics.workspace = true # misc -auto_impl = "1.0" +auto_impl.workspace = true itertools.workspace = true pin-project.workspace = true parking_lot.workspace = true diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 
59af6730290f3..5b6b8548682ff 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -39,7 +39,7 @@ schnellru.workspace = true serde = { workspace = true, features = ["derive", "rc"], optional = true } fnv = "1.0.7" bitflags.workspace = true -auto_impl = "1.0" +auto_impl.workspace = true smallvec.workspace = true itertools.workspace = true diff --git a/crates/trie/Cargo.toml b/crates/trie/Cargo.toml index faf87452387a1..39662908673df 100644 --- a/crates/trie/Cargo.toml +++ b/crates/trie/Cargo.toml @@ -27,7 +27,7 @@ tracing.workspace = true # misc thiserror.workspace = true derive_more.workspace = true -auto_impl = "1" +auto_impl.workspace = true # `metrics` feature reth-metrics = { workspace = true, optional = true } From 4c60ed05ba31c1cea4f7f4fbdc9ce770d8a6b85e Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Fri, 19 Apr 2024 20:31:04 +0800 Subject: [PATCH 237/700] chore: remove todo comments from #7599, and improve the visibility of certain methods. 
(#7741) --- crates/stages-api/src/metrics/listener.rs | 3 +-- crates/stages-api/src/metrics/sync_metrics.rs | 20 +++++++++---------- crates/stages-api/src/pipeline/mod.rs | 13 +++--------- crates/stages-api/src/pipeline/progress.rs | 8 ++++---- 4 files changed, 18 insertions(+), 26 deletions(-) diff --git a/crates/stages-api/src/metrics/listener.rs b/crates/stages-api/src/metrics/listener.rs index 2aa5667440830..39ccc29a31af5 100644 --- a/crates/stages-api/src/metrics/listener.rs +++ b/crates/stages-api/src/metrics/listener.rs @@ -45,8 +45,7 @@ pub enum MetricEvent { #[derive(Debug)] pub struct MetricsListener { events_rx: UnboundedReceiver, - /// underline metrics of stages - pub sync_metrics: SyncMetrics, + pub(crate) sync_metrics: SyncMetrics, } impl MetricsListener { diff --git a/crates/stages-api/src/metrics/sync_metrics.rs b/crates/stages-api/src/metrics/sync_metrics.rs index 64c38c21e1d1b..148368f024eb5 100644 --- a/crates/stages-api/src/metrics/sync_metrics.rs +++ b/crates/stages-api/src/metrics/sync_metrics.rs @@ -6,14 +6,14 @@ use reth_primitives::stage::StageId; use std::collections::HashMap; #[derive(Debug, Default)] -pub struct SyncMetrics { - pub stages: HashMap, - pub execution_stage: ExecutionStageMetrics, +pub(crate) struct SyncMetrics { + pub(crate) stages: HashMap, + pub(crate) execution_stage: ExecutionStageMetrics, } impl SyncMetrics { /// Returns existing or initializes a new instance of [StageMetrics] for the provided [StageId]. - pub fn get_stage_metrics(&mut self, stage_id: StageId) -> &mut StageMetrics { + pub(crate) fn get_stage_metrics(&mut self, stage_id: StageId) -> &mut StageMetrics { self.stages .entry(stage_id) .or_insert_with(|| StageMetrics::new_with_labels(&[("stage", stage_id.to_string())])) @@ -22,19 +22,19 @@ impl SyncMetrics { #[derive(Metrics)] #[metrics(scope = "sync")] -pub struct StageMetrics { +pub(crate) struct StageMetrics { /// The block number of the last commit for a stage. 
- pub checkpoint: Gauge, + pub(crate) checkpoint: Gauge, /// The number of processed entities of the last commit for a stage, if applicable. - pub entities_processed: Gauge, + pub(crate) entities_processed: Gauge, /// The number of total entities of the last commit for a stage, if applicable. - pub entities_total: Gauge, + pub(crate) entities_total: Gauge, } /// Execution stage metrics. #[derive(Metrics)] #[metrics(scope = "sync.execution")] -pub struct ExecutionStageMetrics { +pub(crate) struct ExecutionStageMetrics { /// The total amount of gas processed (in millions) - pub mgas_processed_total: Counter, + pub(crate) mgas_processed_total: Counter, } diff --git a/crates/stages-api/src/pipeline/mod.rs b/crates/stages-api/src/pipeline/mod.rs index 7f54e13ff370c..bb15129589521 100644 --- a/crates/stages-api/src/pipeline/mod.rs +++ b/crates/stages-api/src/pipeline/mod.rs @@ -21,16 +21,9 @@ use tokio::sync::watch; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::*; -// (todo) remove it -#[allow(missing_docs)] -pub mod builder; - -// (todo) remove it -#[allow(missing_docs)] -pub mod progress; -// (todo) remove it -#[allow(missing_docs)] -pub mod set; +mod builder; +mod progress; +mod set; use crate::{ BlockErrorKind, ExecInput, ExecOutput, MetricEvent, MetricEventsSender, PipelineError, Stage, diff --git a/crates/stages-api/src/pipeline/progress.rs b/crates/stages-api/src/pipeline/progress.rs index cb124a8bc00cb..e47d4e89f4cf4 100644 --- a/crates/stages-api/src/pipeline/progress.rs +++ b/crates/stages-api/src/pipeline/progress.rs @@ -2,13 +2,13 @@ use crate::{util::opt, ControlFlow}; use reth_primitives::BlockNumber; #[derive(Debug, Default)] -pub struct PipelineProgress { +pub(crate) struct PipelineProgress { /// Block number reached by the stage. - pub block_number: Option, + pub(crate) block_number: Option, /// The maximum block number achieved by any stage during the execution of the pipeline. 
- pub maximum_block_number: Option, + pub(crate) maximum_block_number: Option, /// The minimum block number achieved by any stage during the execution of the pipeline. - pub minimum_block_number: Option, + pub(crate) minimum_block_number: Option, } impl PipelineProgress { From 6a6b3cc3e4a870210655d71cb4506a74872cb0ba Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 19 Apr 2024 15:23:45 +0200 Subject: [PATCH 238/700] chore: introduce ethereum folder (#7744) --- Cargo.toml | 4 ++-- crates/{evm-ethereum => ethereum/evm}/Cargo.toml | 0 crates/{evm-ethereum => ethereum/evm}/src/execute.rs | 0 crates/{evm-ethereum => ethereum/evm}/src/lib.rs | 0 4 files changed, 2 insertions(+), 2 deletions(-) rename crates/{evm-ethereum => ethereum/evm}/Cargo.toml (100%) rename crates/{evm-ethereum => ethereum/evm}/src/execute.rs (100%) rename crates/{evm-ethereum => ethereum/evm}/src/lib.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index b0402d44cd7c9..2c5e9d2db85b8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,7 @@ members = [ "crates/ethereum-forks/", "crates/etl/", "crates/evm/", - "crates/evm-ethereum/", + "crates/ethereum/evm", "crates/exex/", "crates/interfaces/", "crates/metrics/", @@ -227,7 +227,7 @@ reth-ethereum-forks = { path = "crates/ethereum-forks" } reth-ethereum-payload-builder = { path = "crates/payload/ethereum" } reth-etl = { path = "crates/etl" } reth-evm = { path = "crates/evm" } -reth-evm-ethereum = { path = "crates/evm-ethereum" } +reth-evm-ethereum = { path = "crates/ethereum/evm" } reth-exex = { path = "crates/exex" } reth-optimism-payload-builder = { path = "crates/payload/optimism" } reth-interfaces = { path = "crates/interfaces" } diff --git a/crates/evm-ethereum/Cargo.toml b/crates/ethereum/evm/Cargo.toml similarity index 100% rename from crates/evm-ethereum/Cargo.toml rename to crates/ethereum/evm/Cargo.toml diff --git a/crates/evm-ethereum/src/execute.rs b/crates/ethereum/evm/src/execute.rs similarity index 100% rename from 
crates/evm-ethereum/src/execute.rs rename to crates/ethereum/evm/src/execute.rs diff --git a/crates/evm-ethereum/src/lib.rs b/crates/ethereum/evm/src/lib.rs similarity index 100% rename from crates/evm-ethereum/src/lib.rs rename to crates/ethereum/evm/src/lib.rs From 6646a329ad819db9840d2caea02dd7d22894df86 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 19 Apr 2024 15:23:53 +0200 Subject: [PATCH 239/700] chore: move node-api to node dir (#7743) --- Cargo.toml | 4 ++-- crates/{node-api => node/api}/Cargo.toml | 0 crates/{node-api => node/api}/src/lib.rs | 0 crates/{node-api => node/api}/src/node.rs | 0 crates/{node-api => node/api}/src/primitives.rs | 0 5 files changed, 2 insertions(+), 2 deletions(-) rename crates/{node-api => node/api}/Cargo.toml (100%) rename crates/{node-api => node/api}/src/lib.rs (100%) rename crates/{node-api => node/api}/src/node.rs (100%) rename crates/{node-api => node/api}/src/primitives.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index 2c5e9d2db85b8..d382a5ce7a79e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,7 +50,7 @@ members = [ "crates/node-builder/", "crates/optimism/node/", "crates/node-core/", - "crates/node-api/", + "crates/node/api/", "crates/node-e2e-tests/", "crates/stages/", "crates/stages-api", @@ -218,7 +218,7 @@ reth-node-builder = { path = "crates/node-builder" } reth-node-ethereum = { path = "crates/node-ethereum" } reth-node-optimism = { path = "crates/optimism/node" } reth-node-core = { path = "crates/node-core" } -reth-node-api = { path = "crates/node-api" } +reth-node-api = { path = "crates/node/api" } reth-downloaders = { path = "crates/net/downloaders" } reth-ecies = { path = "crates/net/ecies" } reth-eth-wire = { path = "crates/net/eth-wire" } diff --git a/crates/node-api/Cargo.toml b/crates/node/api/Cargo.toml similarity index 100% rename from crates/node-api/Cargo.toml rename to crates/node/api/Cargo.toml diff --git a/crates/node-api/src/lib.rs b/crates/node/api/src/lib.rs similarity index 100% 
rename from crates/node-api/src/lib.rs rename to crates/node/api/src/lib.rs diff --git a/crates/node-api/src/node.rs b/crates/node/api/src/node.rs similarity index 100% rename from crates/node-api/src/node.rs rename to crates/node/api/src/node.rs diff --git a/crates/node-api/src/primitives.rs b/crates/node/api/src/primitives.rs similarity index 100% rename from crates/node-api/src/primitives.rs rename to crates/node/api/src/primitives.rs From 49c02c3b8e91fac28a4d5befa73e6146eb56c1e8 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Fri, 19 Apr 2024 16:39:52 +0200 Subject: [PATCH 240/700] feat: send `CanonStateNotification`s from execution stage (#7578) --- Cargo.lock | 151 +++++++-------- bin/reth/Cargo.toml | 12 +- bin/reth/src/commands/debug_cmd/execution.rs | 2 + bin/reth/src/commands/debug_cmd/merkle.rs | 2 + bin/reth/src/commands/import.rs | 4 +- bin/reth/src/commands/stage/dump/merkle.rs | 2 + bin/reth/src/commands/stage/run.rs | 2 + bin/reth/src/commands/stage/unwind.rs | 2 + crates/consensus/beacon/Cargo.toml | 4 +- crates/consensus/beacon/src/engine/error.rs | 2 +- crates/consensus/beacon/src/engine/mod.rs | 2 +- crates/consensus/beacon/src/engine/sync.rs | 2 +- crates/exex/src/manager.rs | 20 ++ crates/node-builder/src/builder.rs | 6 +- crates/node-builder/src/setup.rs | 5 + crates/stages/Cargo.toml | 19 +- crates/stages/src/stages/execution.rs | 178 ++++++++---------- crates/stages/src/stages/mod.rs | 2 + .../bundle_state_with_receipts.rs | 8 +- .../src/providers/database/provider.rs | 7 +- .../storage/provider/src/traits/executor.rs | 8 +- 21 files changed, 246 insertions(+), 194 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d8bde9658f7dc..b7cee919e3198 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -332,7 +332,7 @@ checksum = "1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -471,7 +471,7 @@ dependencies = [ "proc-macro-error", 
"proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", "syn-solidity", "tiny-keccak", ] @@ -489,7 +489,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.58", + "syn 2.0.60", "syn-solidity", ] @@ -652,7 +652,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -872,7 +872,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -883,7 +883,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -899,9 +899,9 @@ dependencies = [ [[package]] name = "aurora-engine-modexp" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfacad86e9e138fca0670949eb8ed4ffdf73a55bded8887efe0863cd1a3a6f70" +checksum = "0aef7712851e524f35fbbb74fa6599c5cd8692056a1c36f9ca0d2001b670e7e5" dependencies = [ "hex", "num", @@ -915,7 +915,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -1043,7 +1043,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.58", + "syn 2.0.60", "which", ] @@ -1240,7 +1240,7 @@ checksum = "6be9c93793b60dac381af475b98634d4b451e28336e72218cad9a20176218dbc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", "synstructure", ] @@ -1349,7 +1349,7 @@ checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -1452,9 +1452,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.37" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "8a0d04d43504c61aa6c7531f1871dd0d418d91130162063b789da00fd7057a5e" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1553,7 +1553,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -1990,7 +1990,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2147,7 +2147,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2180,7 +2180,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core 0.20.8", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2286,7 +2286,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2437,7 +2437,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2634,7 +2634,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2647,7 +2647,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2658,7 +2658,7 @@ checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2970,7 +2970,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -3471,9 +3471,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.2.0" +version = "1.3.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "186548d73ac615b32a73aafe38fb4f56c0d340e110e5a200bcadbaf2e199263a" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" dependencies = [ "bytes", "futures-channel", @@ -3529,7 +3529,7 @@ dependencies = [ "futures-util", "http 1.1.0", "http-body 1.0.0", - "hyper 1.2.0", + "hyper 1.3.1", "pin-project-lite", "socket2 0.5.6", "tokio", @@ -3561,7 +3561,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -3711,7 +3711,7 @@ checksum = "d2abdd3a62551e8337af119c5899e600ca0c88ec8f23a46c60ba216c803dcf1a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -3933,9 +3933,9 @@ checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "iri-string" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81669f3b77acd397a241a988f05190b1785cb83f0287d8fb3a05f0648405d65f" +checksum = "7f5f6c2df22c009ac44f6f1499308e7a3ac7ba42cd2378475cc691510e1eef1b" dependencies = [ "memchr", "serde", @@ -4092,7 +4092,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -4638,7 +4638,7 @@ checksum = "38b4faf00617defe497754acde3024865bc143d44a86799b24e191ecff91354f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -4778,7 +4778,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -5050,7 +5050,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -5340,7 +5340,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -5369,7 +5369,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -5544,12 +5544,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.17" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3928fb5db768cb86f891ff014f0144589297e3c6a1aba6ed7cecfdace270c7" +checksum = "5ac2cf0f2e4f42b49f5ffd07dae8d746508ef7526c13940e5f524012ae6c6550" dependencies = [ "proc-macro2", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -5609,9 +5609,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.79" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" +checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" dependencies = [ "unicode-ident", ] @@ -6026,7 +6026,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.2.0", + "hyper 1.3.1", "hyper-util", "ipnet", "js-sys", @@ -6096,6 +6096,7 @@ dependencies = [ "reth-discv4", "reth-downloaders", "reth-ethereum-payload-builder", + "reth-exex", "reth-interfaces", "reth-network", "reth-network-api", @@ -6200,6 +6201,7 @@ dependencies = [ "reth-rpc-types", "reth-rpc-types-compat", "reth-stages", + "reth-stages-api", "reth-static-file", "reth-tasks", "reth-tokio-util", @@ -6279,7 +6281,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -6729,7 +6731,7 @@ dependencies = [ "quote", "regex", "serial_test", - "syn 2.0.58", + "syn 2.0.60", "trybuild", ] @@ -7415,6 +7417,7 @@ dependencies = [ "reth-downloaders", "reth-etl", "reth-evm-ethereum", + "reth-exex", "reth-interfaces", "reth-primitives", "reth-provider", @@ -8182,9 +8185,9 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.197" +version = "1.0.198" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc" dependencies = [ "serde_derive", ] @@ -8200,20 +8203,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.197" +version = "1.0.198" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] name = "serde_json" -version = "1.0.115" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" +checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" dependencies = [ "indexmap 2.2.6", "itoa", @@ -8280,7 +8283,7 @@ dependencies = [ "darling 0.20.8", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -8305,7 +8308,7 @@ checksum = "b93fb4adc70021ac1b47f7d45e8cc4169baaa7ea58483bc5b721d19a26202212" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -8647,7 +8650,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -8660,7 +8663,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -8728,9 +8731,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.58" +version = "2.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" +checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" dependencies = [ "proc-macro2", "quote", @@ -8746,7 +8749,7 @@ dependencies = [ "paste", "proc-macro2", 
"quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -8763,7 +8766,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -8855,7 +8858,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -8894,7 +8897,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -9052,7 +9055,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -9113,7 +9116,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.9", + "toml_edit 0.22.11", ] [[package]] @@ -9149,9 +9152,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.9" +version = "0.22.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e40bb779c5187258fd7aad0eb68cb8706a0a81fa712fbea808ab43c4b8374c4" +checksum = "fb686a972ccef8537b39eead3968b0e8616cb5040dbb9bba93007c8e07c9215f" dependencies = [ "indexmap 2.2.6", "serde", @@ -9265,7 +9268,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -9728,7 +9731,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", "wasm-bindgen-shared", ] @@ -9762,7 +9765,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -10121,7 +10124,7 @@ checksum = "9e6936f0cce458098a201c245a11bef556c6a0181129c7034d10d76d1ec3a2b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", "synstructure", ] @@ 
-10142,7 +10145,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -10162,7 +10165,7 @@ checksum = "e6a647510471d372f2e6c2e6b7219e44d8c574d24fdc11c610a61455782f18c3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", "synstructure", ] @@ -10183,7 +10186,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -10205,7 +10208,7 @@ checksum = "7b4e5997cbf58990550ef1f0e5124a05e47e1ebd33a84af25739be6031a62c20" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index bf8922ccf07cc..ea1ee87f01b2f 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -17,6 +17,7 @@ workspace = true reth-config.workspace = true reth-primitives = { workspace = true, features = ["arbitrary", "clap"] } reth-db = { workspace = true, features = ["mdbx"] } +reth-exex.workspace = true reth-provider = { workspace = true } reth-revm.workspace = true reth-stages.workspace = true @@ -49,7 +50,9 @@ reth-trie = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true reth-node-api.workspace = true reth-node-ethereum.workspace = true -reth-node-optimism = { workspace = true, optional = true, features = ["optimism"] } +reth-node-optimism = { workspace = true, optional = true, features = [ + "optimism", +] } reth-node-core.workspace = true reth-node-builder.workspace = true reth-node-events.workspace = true @@ -81,7 +84,12 @@ ratatui = "0.25.0" human_bytes = "0.4.1" # async -tokio = { workspace = true, features = ["sync", "macros", "time", "rt-multi-thread"] } +tokio = { workspace = true, features = [ + "sync", + "macros", + "time", + "rt-multi-thread", +] } futures.workspace = true # misc diff --git 
a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 839e037ef771d..10f485a735802 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -19,6 +19,7 @@ use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; +use reth_exex::ExExManagerHandle; use reth_interfaces::{ consensus::Consensus, p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}, @@ -142,6 +143,7 @@ impl Command { .max(stage_conf.account_hashing.clean_threshold) .max(stage_conf.storage_hashing.clean_threshold), config.prune.clone().map(|prune| prune.segments).unwrap_or_default(), + ExExManagerHandle::empty(), )), ) .build(provider_factory, static_file_producer); diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 742e51c707e5a..ed8783e965149 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -15,6 +15,7 @@ use reth_beacon_consensus::BeaconConsensus; use reth_cli_runner::CliContext; use reth_config::Config; use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx, DatabaseEnv}; +use reth_exex::ExExManagerHandle; use reth_interfaces::{consensus::Consensus, p2p::full_block::FullBlockClient}; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; @@ -211,6 +212,7 @@ impl Command { }, MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, PruneModes::all(), + ExExManagerHandle::empty(), ); let mut account_hashing_stage = AccountHashingStage::default(); diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index d28ebcf40e626..dc31409245f62 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -19,6 +19,7 @@ use reth_downloaders::{ file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, 
headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; +use reth_exex::ExExManagerHandle; use reth_interfaces::{ consensus::Consensus, p2p::{ @@ -272,7 +273,8 @@ impl ImportCommand { .clean_threshold .max(config.stages.account_hashing.clean_threshold) .max(config.stages.storage_hashing.clean_threshold), - config.prune.clone().map(|prune| prune.segments).unwrap_or_default(), + config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(), + ExExManagerHandle::empty(), )) .disable_all_if(STATE_STAGES, || no_state), ) diff --git a/bin/reth/src/commands/stage/dump/merkle.rs b/bin/reth/src/commands/stage/dump/merkle.rs index 32f828b324106..08ac0a3aaefe0 100644 --- a/bin/reth/src/commands/stage/dump/merkle.rs +++ b/bin/reth/src/commands/stage/dump/merkle.rs @@ -3,6 +3,7 @@ use crate::utils::DbTool; use eyre::Result; use reth_config::config::EtlConfig; use reth_db::{database::Database, table::TableImporter, tables, DatabaseEnv}; +use reth_exex::ExExManagerHandle; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_node_ethereum::EthEvmConfig; use reth_primitives::{stage::StageCheckpoint, BlockNumber, PruneModes}; @@ -95,6 +96,7 @@ async fn unwind_and_copy( }, MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, PruneModes::all(), + ExExManagerHandle::empty(), ); exec_stage.unwind( diff --git a/bin/reth/src/commands/stage/run.rs b/bin/reth/src/commands/stage/run.rs index b8f9bc527893a..43bbe78592a6c 100644 --- a/bin/reth/src/commands/stage/run.rs +++ b/bin/reth/src/commands/stage/run.rs @@ -17,6 +17,7 @@ use reth_beacon_consensus::BeaconConsensus; use reth_config::{config::EtlConfig, Config}; use reth_db::init_db; use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; +use reth_exex::ExExManagerHandle; use reth_node_ethereum::EthEvmConfig; use reth_primitives::ChainSpec; use reth_provider::{ProviderFactory, StageCheckpointReader, StageCheckpointWriter}; @@ -239,6 +240,7 @@ impl Command { }, config.stages.merkle.clean_threshold, 
config.prune.map(|prune| prune.segments).unwrap_or_default(), + ExExManagerHandle::empty(), )), None, ) diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index 8e9141399f4ca..7810a4416fb3e 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -15,6 +15,7 @@ use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; +use reth_exex::ExExManagerHandle; use reth_interfaces::consensus::Consensus; use reth_node_core::{ args::{get_secret_key, NetworkArgs}, @@ -211,6 +212,7 @@ impl Command { .max(stage_conf.account_hashing.clean_threshold) .max(stage_conf.storage_hashing.clean_threshold), config.prune.clone().map(|prune| prune.segments).unwrap_or_default(), + ExExManagerHandle::empty(), )) .set(AccountHashingStage::default()) .set(StorageHashingStage::default()) diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 60279a65a3529..f195c98bd9508 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -15,7 +15,7 @@ workspace = true reth-beacon-consensus-core.workspace = true reth-primitives.workspace = true reth-interfaces.workspace = true -reth-stages.workspace = true +reth-stages-api.workspace = true reth-db.workspace = true reth-provider.workspace = true reth-rpc-types.workspace = true @@ -65,5 +65,5 @@ optimism = [ "reth-interfaces/optimism", "reth-provider/optimism", "reth-blockchain-tree/optimism", - "reth-beacon-consensus-core/optimism" + "reth-beacon-consensus-core/optimism", ] diff --git a/crates/consensus/beacon/src/engine/error.rs b/crates/consensus/beacon/src/engine/error.rs index 925414e03251c..245199f53223c 100644 --- a/crates/consensus/beacon/src/engine/error.rs +++ b/crates/consensus/beacon/src/engine/error.rs @@ -1,7 +1,7 @@ use crate::engine::hooks::EngineHookError; use reth_interfaces::RethError; use 
reth_rpc_types::engine::ForkchoiceUpdateError; -use reth_stages::PipelineError; +use reth_stages_api::PipelineError; /// Beacon engine result. pub type BeaconEngineResult = Result; diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 8a1a4266a88b1..877e6f4501993 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -33,7 +33,7 @@ use reth_provider::{ use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, PayloadStatus, PayloadStatusEnum, PayloadValidationError, }; -use reth_stages::{ControlFlow, Pipeline}; +use reth_stages_api::{ControlFlow, Pipeline}; use reth_tasks::TaskSpawner; use reth_tokio_util::EventListeners; use std::{ diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 1c41c9ffe323d..96163e9963380 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -12,7 +12,7 @@ use reth_interfaces::p2p::{ headers::client::HeadersClient, }; use reth_primitives::{BlockNumber, ChainSpec, SealedBlock, B256}; -use reth_stages::{ControlFlow, Pipeline, PipelineError, PipelineWithResult}; +use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineWithResult}; use reth_tasks::TaskSpawner; use reth_tokio_util::EventListeners; use std::{ diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 332650607d9c2..a89b09171229a 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -355,6 +355,26 @@ pub struct ExExManagerHandle { } impl ExExManagerHandle { + /// Creates an empty manager handle. + /// + /// Use this if there is no manager present. + /// + /// The handle will always be ready, and have a capacity of 0. 
+ pub fn empty() -> Self { + let (exex_tx, _) = mpsc::unbounded_channel(); + let (_, is_ready_rx) = watch::channel(true); + let (_, finished_height_rx) = watch::channel(FinishedExExHeight::NoExExs); + + Self { + exex_tx, + num_exexs: 0, + is_ready_receiver: is_ready_rx.clone(), + is_ready: WatchStream::new(is_ready_rx), + current_capacity: Arc::new(AtomicUsize::new(0)), + finished_height: finished_height_rx, + } + } + /// Synchronously send a notification over the channel to all execution extensions. /// /// Senders should call [`Self::has_capacity`] first. diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index 7b84fad6b395c..161c8c404b56b 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -27,7 +27,7 @@ use reth_db::{ test_utils::{create_test_rw_db, TempDatabase}, DatabaseEnv, }; -use reth_exex::{ExExContext, ExExHandle, ExExManager}; +use reth_exex::{ExExContext, ExExHandle, ExExManager, ExExManagerHandle}; use reth_interfaces::p2p::either::EitherDownloader; use reth_network::{NetworkBuilder, NetworkConfig, NetworkEvents, NetworkHandle}; use reth_node_api::{ @@ -711,6 +711,8 @@ where } // Configure the pipeline + let pipeline_exex_handle = + exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); let (mut pipeline, client) = if config.dev.dev { info!(target: "reth::cli", "Starting Reth in dev mode"); @@ -743,6 +745,7 @@ where max_block, static_file_producer, evm_config, + pipeline_exex_handle, ) .await?; @@ -765,6 +768,7 @@ where max_block, static_file_producer, evm_config, + pipeline_exex_handle, ) .await?; diff --git a/crates/node-builder/src/setup.rs b/crates/node-builder/src/setup.rs index 827f711cebda6..bb67cad661d30 100644 --- a/crates/node-builder/src/setup.rs +++ b/crates/node-builder/src/setup.rs @@ -7,6 +7,7 @@ use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; +use 
reth_exex::ExExManagerHandle; use reth_interfaces::{ consensus::Consensus, p2p::{ @@ -49,6 +50,7 @@ pub async fn build_networked_pipeline( max_block: Option, static_file_producer: StaticFileProducer, evm_config: EvmConfig, + exex_manager_handle: ExExManagerHandle, ) -> eyre::Result> where DB: Database + Unpin + Clone + 'static, @@ -76,6 +78,7 @@ where prune_config, static_file_producer, evm_config, + exex_manager_handle, ) .await?; @@ -96,6 +99,7 @@ pub async fn build_pipeline( prune_config: Option, static_file_producer: StaticFileProducer, evm_config: EvmConfig, + exex_manager_handle: ExExManagerHandle, ) -> eyre::Result> where DB: Database + Clone + 'static, @@ -166,6 +170,7 @@ where .max(stage_config.account_hashing.clean_threshold) .max(stage_config.storage_hashing.clean_threshold), prune_modes.clone(), + exex_manager_handle, ) .with_metrics_tx(metrics_tx), ) diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 00aff1fd67503..080b7792d5dcf 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-exex.workspace = true reth-primitives.workspace = true reth-interfaces.workspace = true reth-db.workspace = true @@ -21,7 +22,7 @@ reth-provider.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-etl.workspace = true reth-config.workspace = true -reth-stages-api = {workspace = true , features = ["test-utils"]} +reth-stages-api = { workspace = true, features = ["test-utils"] } # async tokio = { workspace = true, features = ["sync"] } @@ -35,7 +36,7 @@ thiserror.workspace = true itertools.workspace = true rayon.workspace = true num-traits = "0.2.15" -tempfile = { workspace = true, optional = true} +tempfile = { workspace = true, optional = true } [dev-dependencies] # reth @@ -64,10 +65,20 @@ criterion = { workspace = true, features = ["async_futures"] } serde_json.workspace = true [target.'cfg(not(target_os = "windows"))'.dev-dependencies] -pprof = { 
workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] } +pprof = { workspace = true, features = [ + "flamegraph", + "frame-pointer", + "criterion", +] } [features] -test-utils = ["reth-interfaces/test-utils", "reth-db/test-utils", "reth-provider/test-utils", "reth-stages-api/test-utils", "dep:tempfile"] +test-utils = [ + "reth-interfaces/test-utils", + "reth-db/test-utils", + "reth-provider/test-utils", + "reth-stages-api/test-utils", + "dep:tempfile", +] [[bench]] name = "criterion" diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 1bb21228e77c2..ac48a2aeb3c63 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -1,23 +1,20 @@ use crate::stages::MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD; use num_traits::Zero; use reth_db::{ - cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, - database::Database, - models::BlockNumberAddress, - static_file::HeaderMask, - tables, - transaction::{DbTx, DbTxMut}, + cursor::DbCursorRO, database::Database, static_file::HeaderMask, tables, transaction::DbTx, }; +use reth_exex::ExExManagerHandle; use reth_primitives::{ stage::{ CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, StageCheckpoint, StageId, }, - BlockNumber, Header, PruneModes, StaticFileSegment, U256, + BlockNumber, Header, PruneModes, StaticFileSegment, }; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, - BlockReader, DatabaseProviderRW, ExecutorFactory, HeaderProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderError, StatsReader, TransactionVariant, + BlockReader, CanonStateNotification, Chain, DatabaseProviderRW, ExecutorFactory, + HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, StatsReader, + TransactionVariant, }; use reth_stages_api::{ BlockErrorKind, ExecInput, ExecOutput, MetricEvent, MetricEventsSender, Stage, StageError, @@ -26,6 +23,8 @@ use 
reth_stages_api::{ use std::{ cmp::Ordering, ops::RangeInclusive, + sync::Arc, + task::{ready, Context, Poll}, time::{Duration, Instant}, }; use tracing::*; @@ -74,6 +73,8 @@ pub struct ExecutionStage { external_clean_threshold: u64, /// Pruning configuration. prune_modes: PruneModes, + /// Handle to communicate with ExEx manager. + exex_manager_handle: ExExManagerHandle, } impl ExecutionStage { @@ -83,6 +84,7 @@ impl ExecutionStage { thresholds: ExecutionStageThresholds, external_clean_threshold: u64, prune_modes: PruneModes, + exex_manager_handle: ExExManagerHandle, ) -> Self { Self { metrics_tx: None, @@ -90,6 +92,7 @@ impl ExecutionStage { executor_factory, thresholds, prune_modes, + exex_manager_handle, } } @@ -102,6 +105,7 @@ impl ExecutionStage { ExecutionStageThresholds::default(), MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, PruneModes::none(), + ExExManagerHandle::empty(), ) } @@ -156,6 +160,7 @@ impl ExecutionStage { let mut cumulative_gas = 0; let batch_start = Instant::now(); + let mut blocks = Vec::new(); for block_number in start_block..=max_block { // Fetch the block let fetch_block_start = Instant::now(); @@ -191,9 +196,13 @@ impl ExecutionStage { } stage_progress = block_number; - stage_checkpoint.progress.processed += block.gas_used; + // If we have ExEx's we need to save the block in memory for later + if self.exex_manager_handle.has_exexs() { + blocks.push(block); + } + // Check if we should commit now let bundle_size_hint = executor.size_hint().unwrap_or_default() as u64; if self.thresholds.is_end_of_batch( @@ -209,6 +218,25 @@ impl ExecutionStage { let state = executor.take_output_state(); let write_preparation_duration = time.elapsed(); + // Check if we should send a [`CanonStateNotification`] to execution extensions. 
+ // + // Note: Since we only write to `blocks` if there are any ExEx's we don't need to perform + // the `has_exexs` check here as well + if !blocks.is_empty() { + let chain = Arc::new(Chain::new( + blocks.into_iter().map(|block| { + let hash = block.header.hash_slow(); + block.seal(hash) + }), + state.clone(), + None, + )); + + // NOTE: We can ignore the error here, since an error means that the channel is closed, + // which means the manager has died, which then in turn means the node is shutting down. + let _ = self.exex_manager_handle.send(CanonStateNotification::Commit { new: chain }); + } + let time = Instant::now(); // write output state.write_to_storage( @@ -360,6 +388,16 @@ impl Stage for ExecutionStage { StageId::Execution } + fn poll_execute_ready( + &mut self, + cx: &mut Context<'_>, + _: ExecInput, + ) -> Poll> { + ready!(self.exex_manager_handle.poll_ready(cx)); + + Poll::Ready(Ok(())) + } + /// Execute the stage fn execute( &mut self, @@ -375,74 +413,33 @@ impl Stage for ExecutionStage { provider: &DatabaseProviderRW, input: UnwindInput, ) -> Result { - let tx = provider.tx_ref(); - // Acquire changeset cursors - let mut account_changeset = tx.cursor_dup_write::()?; - let mut storage_changeset = tx.cursor_dup_write::()?; - let (range, unwind_to, _) = input.unwind_block_range_with_threshold(self.thresholds.max_blocks.unwrap_or(u64::MAX)); - if range.is_empty() { return Ok(UnwindOutput { checkpoint: input.checkpoint.with_block_number(input.unwind_to), }) } - // get all batches for account change - // Check if walk and walk_dup would do the same thing - let account_changeset_batch = - account_changeset.walk_range(range.clone())?.collect::, _>>()?; - - // revert all changes to PlainState - for (_, changeset) in account_changeset_batch.into_iter().rev() { - if let Some(account_info) = changeset.info { - tx.put::(changeset.address, account_info)?; - } else { - tx.delete::(changeset.address, None)?; - } + // Unwind account and storage changesets, as well 
as receipts. + // + // This also updates `PlainStorageState` and `PlainAccountState`. + let bundle_state_with_receipts = provider.unwind_or_peek_state::(range.clone())?; + + // Construct a `CanonStateNotification` if we have ExEx's installed. + if self.exex_manager_handle.has_exexs() { + // Get the blocks for the unwound range. This is needed for `CanonStateNotification`. + let blocks = provider.get_take_block_range::(range.clone())?; + let chain = Chain::new(blocks, bundle_state_with_receipts, None); + + // NOTE: We can ignore the error here, since an error means that the channel is closed, + // which means the manager has died, which then in turn means the node is shutting down. + let _ = self.exex_manager_handle.send(CanonStateNotification::Reorg { + old: Arc::new(chain), + new: Arc::new(Chain::default()), + }); } - // get all batches for storage change - let storage_changeset_batch = storage_changeset - .walk_range(BlockNumberAddress::range(range.clone()))? - .collect::, _>>()?; - - // revert all changes to PlainStorage - let mut plain_storage_cursor = tx.cursor_dup_write::()?; - - for (key, storage) in storage_changeset_batch.into_iter().rev() { - let address = key.address(); - if let Some(v) = plain_storage_cursor.seek_by_key_subkey(address, storage.key)? { - if v.key == storage.key { - plain_storage_cursor.delete_current()?; - } - } - if storage.value != U256::ZERO { - plain_storage_cursor.upsert(address, storage)?; - } - } - - // Discard unwinded changesets - provider.unwind_table_by_num::(unwind_to)?; - - let mut rev_storage_changeset_walker = storage_changeset.walk_back(None)?; - while let Some((key, _)) = rev_storage_changeset_walker.next().transpose()? { - if key.block_number() < *range.start() { - break - } - // delete all changesets - rev_storage_changeset_walker.delete_current()?; - } - - // Look up the start index for the transaction range - let first_tx_num = provider - .block_body_indices(*range.start())? 
- .ok_or(ProviderError::BlockBodyIndicesNotFound(*range.start()))? - .first_tx_num(); - - let mut stage_checkpoint = input.checkpoint.execution_stage_checkpoint(); - // Unwind all receipts for transactions in the block range if self.prune_modes.receipts.is_none() && self.prune_modes.receipts_log_filter.is_empty() { // We only use static files for Receipts, if there is no receipt pruning of any kind. @@ -451,34 +448,24 @@ impl Stage for ExecutionStage { // if the expected highest receipt in the files is higher than the database. // Which is essentially what happens here when we unwind this stage. let _static_file_producer = prepare_static_file_producer(provider, *range.start())?; - - // Update the checkpoint. - if let Some(stage_checkpoint) = stage_checkpoint.as_mut() { - for block_number in range { - stage_checkpoint.progress.processed -= provider - .block_by_number(block_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))? - .gas_used; - } - } } else { - // We use database for Receipts, if there is any kind of receipt pruning/filtering, - // since it is not supported by static files. - let mut cursor = tx.cursor_write::()?; - let mut reverse_walker = cursor.walk_back(None)?; - - while let Some(Ok((tx_number, receipt))) = reverse_walker.next() { - if tx_number < first_tx_num { - break - } - reverse_walker.delete_current()?; + // If there is any kind of receipt pruning/filtering we use the database, since static + // files do not support filters. + // + // If we hit this case, the receipts have already been unwound by the call to + // `unwind_or_peek_state`. + } - if let Some(stage_checkpoint) = stage_checkpoint.as_mut() { - stage_checkpoint.progress.processed -= receipt.cumulative_gas_used; - } + // Update the checkpoint. 
+ let mut stage_checkpoint = input.checkpoint.execution_stage_checkpoint(); + if let Some(stage_checkpoint) = stage_checkpoint.as_mut() { + for block_number in range { + stage_checkpoint.progress.processed -= provider + .block_by_number(block_number)? + .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))? + .gas_used; } } - let checkpoint = if let Some(stage_checkpoint) = stage_checkpoint { StageCheckpoint::new(unwind_to).with_execution_stage_checkpoint(stage_checkpoint) } else { @@ -621,17 +608,17 @@ mod tests { use crate::test_utils::TestStageDB; use alloy_rlp::Decodable; use assert_matches::assert_matches; - use reth_db::models::AccountBeforeTx; + use reth_db::{models::AccountBeforeTx, transaction::DbTxMut}; use reth_evm_ethereum::EthEvmConfig; use reth_interfaces::executor::BlockValidationError; use reth_primitives::{ address, hex_literal::hex, keccak256, stage::StageUnitCheckpoint, Account, Address, Bytecode, ChainSpecBuilder, PruneMode, ReceiptsLogPruneConfig, SealedBlock, StorageEntry, - B256, + B256, U256, }; use reth_provider::{test_utils::create_test_provider_factory, AccountReader, ReceiptProvider}; use reth_revm::EvmProcessorFactory; - use std::{collections::BTreeMap, sync::Arc}; + use std::collections::BTreeMap; fn stage() -> ExecutionStage> { let executor_factory = EvmProcessorFactory::new( @@ -648,6 +635,7 @@ mod tests { }, MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, PruneModes::none(), + ExExManagerHandle::empty(), ) } diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index fe1012f4200fa..a40da1c496365 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -51,6 +51,7 @@ mod tests { AccountsHistory, DatabaseEnv, }; use reth_evm_ethereum::EthEvmConfig; + use reth_exex::ExExManagerHandle; use reth_interfaces::test_utils::generators::{self, random_block}; use reth_primitives::{ address, hex_literal::hex, keccak256, Account, Bytecode, ChainSpecBuilder, PruneMode, @@ -151,6 +152,7 @@ 
mod tests { }, MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, prune_modes.clone(), + ExExManagerHandle::empty(), ); execution_stage.execute(&provider, input).unwrap(); diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index 804dcece877c1..5e595532c3014 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -218,11 +218,13 @@ impl BundleStateWithReceipts { self.first_block } - /// Revert to given block number. + /// Revert the state to the given block number. /// - /// If number is in future, or in the past return false + /// Returns false if the block number is not in the bundle state. /// - /// NOTE: Provided block number will stay inside the bundle state. + /// # Note + /// + /// The provided block number will stay inside the bundle state. pub fn revert_to(&mut self, block_number: BlockNumber) -> bool { let Some(index) = self.block_number_to_index(block_number) else { return false }; diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index a2b2ec74a52e0..ba85a4a4005ad 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -363,7 +363,6 @@ impl DatabaseProvider { } // TODO(joshie) TEMPORARY should be moved to trait providers - /// Unwind or peek at last N blocks of state recreating the [`BundleStateWithReceipts`]. /// /// If UNWIND it set to true tip and latest state will be unwind @@ -388,7 +387,7 @@ impl DatabaseProvider { /// 1. Take the old value from the changeset /// 2. Take the new value from the local state /// 3. 
Set the local state to the value in the changeset - fn unwind_or_peek_state( + pub fn unwind_or_peek_state( &self, range: RangeInclusive, ) -> ProviderResult { @@ -706,8 +705,8 @@ impl DatabaseProvider { Ok(block_tx) } - /// Return range of blocks and its execution result - fn get_take_block_range( + /// Get or unwind the given range of blocks. + pub fn get_take_block_range( &self, range: impl RangeBounds + Clone, ) -> ProviderResult> { diff --git a/crates/storage/provider/src/traits/executor.rs b/crates/storage/provider/src/traits/executor.rs index bddfeb03eaf76..f12d6416949f9 100644 --- a/crates/storage/provider/src/traits/executor.rs +++ b/crates/storage/provider/src/traits/executor.rs @@ -4,14 +4,12 @@ use crate::{bundle_state::BundleStateWithReceipts, StateProvider}; use reth_interfaces::executor::BlockExecutionError; use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt, U256}; -/// Executor factory that would create the EVM with particular state provider. -/// -/// It can be used to mock executor. +/// A factory capable of creating an executor with the given state provider. pub trait ExecutorFactory: Send + Sync + 'static { /// Executor with [`StateProvider`] fn with_state<'a, SP: StateProvider + 'a>( &'a self, - _sp: SP, + sp: SP, ) -> Box + 'a>; } @@ -19,7 +17,7 @@ pub trait ExecutorFactory: Send + Sync + 'static { /// /// This type is capable of executing (multiple) blocks by applying the state changes made by each /// block. The final state of the executor can extracted using -/// [take_output_state](BlockExecutor::take_output_state). +/// [`Self::take_output_state`]. pub trait BlockExecutor { /// The error type returned by the executor. 
type Error; From d214daebbd1fda25042ff81a157bb190fa1847ce Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 19 Apr 2024 17:07:11 +0200 Subject: [PATCH 241/700] chore: move engine primitives (#7746) --- Cargo.toml | 4 ++-- .../engine-primitives}/Cargo.toml | 0 .../engine-primitives}/src/lib.rs | 0 .../engine-primitives}/src/payload.rs | 0 4 files changed, 2 insertions(+), 2 deletions(-) rename crates/{ethereum-engine-primitives => ethereum/engine-primitives}/Cargo.toml (100%) rename crates/{ethereum-engine-primitives => ethereum/engine-primitives}/src/lib.rs (100%) rename crates/{ethereum-engine-primitives => ethereum/engine-primitives}/src/payload.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index d382a5ce7a79e..688041827fb60 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,7 +45,7 @@ members = [ "crates/rpc/rpc-types/", "crates/rpc/rpc-types-compat/", "crates/engine-primitives/", - "crates/ethereum-engine-primitives/", + "crates/ethereum/engine-primitives/", "crates/node-ethereum/", "crates/node-builder/", "crates/optimism/node/", @@ -213,7 +213,7 @@ reth-discv4 = { path = "crates/net/discv4" } reth-discv5 = { path = "crates/net/discv5" } reth-dns-discovery = { path = "crates/net/dns" } reth-engine-primitives = { path = "crates/engine-primitives" } -reth-ethereum-engine-primitives = { path = "crates/ethereum-engine-primitives" } +reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } reth-node-builder = { path = "crates/node-builder" } reth-node-ethereum = { path = "crates/node-ethereum" } reth-node-optimism = { path = "crates/optimism/node" } diff --git a/crates/ethereum-engine-primitives/Cargo.toml b/crates/ethereum/engine-primitives/Cargo.toml similarity index 100% rename from crates/ethereum-engine-primitives/Cargo.toml rename to crates/ethereum/engine-primitives/Cargo.toml diff --git a/crates/ethereum-engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs similarity index 100% rename from 
crates/ethereum-engine-primitives/src/lib.rs rename to crates/ethereum/engine-primitives/src/lib.rs diff --git a/crates/ethereum-engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs similarity index 100% rename from crates/ethereum-engine-primitives/src/payload.rs rename to crates/ethereum/engine-primitives/src/payload.rs From bd4757b3b53022f871872790c27aa571d2ec2fdb Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 19 Apr 2024 17:18:43 +0200 Subject: [PATCH 242/700] chore: make alloy impls feature gated (#7747) --- Cargo.toml | 3 ++- crates/primitives/Cargo.toml | 2 +- crates/primitives/src/transaction/mod.rs | 2 +- crates/storage/codecs/Cargo.toml | 10 ++++++---- crates/storage/codecs/src/lib.rs | 5 +++++ crates/storage/db/Cargo.toml | 2 +- 6 files changed, 16 insertions(+), 8 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 688041827fb60..c1b57e008d099 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -291,7 +291,7 @@ alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "188c4f8" } alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } @@ -321,6 +321,7 @@ itertools = "0.12" parking_lot = "0.12" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation metrics = "0.21.1" +modular-bitfield = "0.11.2" hex-literal = "0.4" once_cell = "1.17" syn = "2.0" diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index daddf3e9b337d..3e08655dbb852 100644 --- a/crates/primitives/Cargo.toml +++ 
b/crates/primitives/Cargo.toml @@ -40,7 +40,7 @@ byteorder = "1" clap = { workspace = true, features = ["derive"], optional = true } derive_more.workspace = true itertools.workspace = true -modular-bitfield = "0.11.2" +modular-bitfield.workspace = true once_cell.workspace = true rayon.workspace = true serde_with.workspace = true diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 3fd21c9a3d655..f51b5ca3a8cb1 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1104,7 +1104,7 @@ impl Compact for TransactionSignedNoHash { to_compact_ztd_unaware(self, buf) } - fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { + fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) { from_compact_zstd_unaware(buf, _len) } } diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index f585accf69cf9..31f954f868d7e 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -13,15 +13,16 @@ workspace = true [dependencies] reth-codecs-derive = { path = "./derive", default-features = false } -alloy-eips.workspace = true +alloy-eips = { workspace = true, optional = true } alloy-primitives.workspace = true + bytes.workspace = true [dev-dependencies] -alloy-eips = { workspace = true, features = ["arbitrary", "serde"] } +alloy-eips = { workspace = true, default-features = false, features = ["arbitrary", "serde"] } alloy-primitives = { workspace = true, features = ["arbitrary", "serde"] } serde.workspace = true -modular-bitfield = "0.11.2" +modular-bitfield.workspace = true test-fuzz.workspace = true serde_json.workspace = true @@ -30,6 +31,7 @@ proptest.workspace = true proptest-derive.workspace = true [features] -default = ["std"] +default = ["std", "alloy"] std = ["alloy-primitives/std", "bytes/std"] +alloy = ["alloy-eips"] optimism = ["reth-codecs-derive/optimism"] diff --git a/crates/storage/codecs/src/lib.rs 
b/crates/storage/codecs/src/lib.rs index dbfb68b3c0147..9c5d757b9fdeb 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -1,4 +1,8 @@ //! Compact codec. +//! +//! ## Feature Flags +//! +//! - `alloy`: [Compact] implementation for various alloy types. #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", @@ -16,6 +20,7 @@ pub use reth_codecs_derive::*; use alloy_primitives::{Address, Bloom, Bytes, B256, B512, U256}; use bytes::Buf; +#[cfg(any(test, feature = "alloy"))] mod alloy; /// Trait that implements the `Compact` codec. diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 2986884a1868d..461a84f3e89a5 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -23,7 +23,7 @@ reth-tracing.workspace = true # codecs serde = { workspace = true, default-features = false } parity-scale-codec = { version = "3.2.1", features = ["bytes"] } -modular-bitfield = "0.11.2" +modular-bitfield.workspace = true # metrics reth-metrics.workspace = true From 34c5b425247b9050f9ad80337c3f4d0c1860be73 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 19 Apr 2024 18:31:13 +0200 Subject: [PATCH 243/700] docs: add more docs to recover_signer (#7751) --- crates/primitives/src/transaction/mod.rs | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index f51b5ca3a8cb1..9b07ce0097e5f 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1205,7 +1205,13 @@ impl TransactionSigned { /// Recover signer from signature and hash. /// - /// Returns `None` if the transaction's signature is invalid, see also [Self::recover_signer]. + /// Returns `None` if the transaction's signature is invalid following [EIP-2](https://eips.ethereum.org/EIPS/eip-2), see also [Signature::recover_signer]. 
+ /// + /// Note: + /// + /// This can fail for some early ethereum mainnet transactions pre EIP-2, use + /// [Self::recover_signer_unchecked] if you want to recover the signer without ensuring that the + /// signature has a low `s` value. pub fn recover_signer(&self) -> Option
{ // Optimism's Deposit transaction does not have a signature. Directly return the // `from` address. @@ -1221,7 +1227,7 @@ impl TransactionSigned { /// value_. /// /// Returns `None` if the transaction's signature is invalid, see also - /// [Self::recover_signer_unchecked]. + /// [Signature::recover_signer_unchecked]. pub fn recover_signer_unchecked(&self) -> Option
{ // Optimism's Deposit transaction does not have a signature. Directly return the // `from` address. @@ -2164,6 +2170,19 @@ mod tests { assert_eq!(encoded.as_ref(), data.as_slice()); } + // + // + #[test] + fn recover_pre_eip2() { + let data = hex!("f8ea0c850ba43b7400832dc6c0942935aa0a2d2fbb791622c29eb1c117b65b7a908580b884590528a9000000000000000000000001878ace42092b7f1ae1f28d16c1272b1aa80ca4670000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000d02ab486cedc0000000000000000000000000000000000000000000000000000557fe293cabc08cf1ca05bfaf3fda0a56b49cc78b22125feb5ae6a99d2b4781f00507d8b02c173771c85a0b5da0dbe6c5bc53740d0071fc83eb17ba0f709e49e9ae7df60dee625ef51afc5"); + let tx = TransactionSigned::decode_enveloped(&mut data.as_slice()).unwrap(); + let sender = tx.recover_signer(); + assert!(sender.is_none()); + let sender = tx.recover_signer_unchecked().unwrap(); + + assert_eq!(sender, address!("7e9e359edf0dbacf96a9952fa63092d919b0842b")); + } + #[test] fn min_length_encoded_legacy_transaction() { let transaction = TxLegacy::default(); From 4f7f690ac149913a614dfc8eced5b2006d9606b4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 19 Apr 2024 18:34:07 +0200 Subject: [PATCH 244/700] fix: enable --dev for op-reth (#7755) --- crates/node-core/src/args/utils.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/node-core/src/args/utils.rs b/crates/node-core/src/args/utils.rs index 13fcd500d66a4..cf0bee96c92d6 100644 --- a/crates/node-core/src/args/utils.rs +++ b/crates/node-core/src/args/utils.rs @@ -9,11 +9,13 @@ use std::{ time::Duration, }; +use reth_primitives::DEV; + #[cfg(feature = "optimism")] use reth_primitives::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}; #[cfg(not(feature = "optimism"))] -use reth_primitives::{DEV, GOERLI, HOLESKY, MAINNET, SEPOLIA}; +use reth_primitives::{GOERLI, HOLESKY, MAINNET, SEPOLIA}; #[cfg(feature = "optimism")] /// Chains supported by 
op-reth. First value should be used as the default. @@ -77,7 +79,6 @@ pub fn genesis_value_parser(s: &str) -> eyre::Result, eyre::Error "sepolia" => SEPOLIA.clone(), #[cfg(not(feature = "optimism"))] "holesky" => HOLESKY.clone(), - #[cfg(not(feature = "optimism"))] "dev" => DEV.clone(), #[cfg(feature = "optimism")] "optimism" => OP_MAINNET.clone(), From e3c56647883a21157f7d49e2fa95aa99b21ca8a0 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Fri, 19 Apr 2024 18:42:14 +0200 Subject: [PATCH 245/700] ci: stop github from fighting us over labels (#7754) --- .github/scripts/label_pr.js | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/.github/scripts/label_pr.js b/.github/scripts/label_pr.js index 85206c0e66e66..c01f4c98aac1a 100644 --- a/.github/scripts/label_pr.js +++ b/.github/scripts/label_pr.js @@ -1,3 +1,13 @@ +// Filter function for labels we do not want on PRs automatically. +function shouldIncludeLabel (label) { + const isStatus = label.startsWith('S-'); + const isTrackingIssue = label === 'C-tracking-issue'; + const isPreventStale = label === 'M-prevent-stale'; + const isDifficulty = label.startsWith('D-'); + + return !isStatus && !isTrackingIssue && !isPreventStale && !isDifficulty; +} + module.exports = async ({ github, context }) => { try { const prNumber = context.payload.pull_request.number; @@ -11,7 +21,7 @@ module.exports = async ({ github, context }) => { const issueNumber = re?.groups?.issue_number; if (!issueNumber) { - console.log("No issue reference found in PR description."); + console.log('No issue reference found in PR description.'); return; } @@ -20,16 +30,18 @@ module.exports = async ({ github, context }) => { issue_number: issueNumber, }); - const issueLabels = issue.data.labels.map(label => label.name); + const issueLabels = issue.data.labels + .map(label => label.name) + .filter(shouldIncludeLabel); if (issueLabels.length > 0) { - await github.rest.issues.setLabels({ + await 
github.rest.issues.addLabels({ ...repo, issue_number: prNumber, labels: issueLabels, }); } } catch (err) { - console.error(`Failed to label PR`); + console.error('Failed to label PR'); console.error(err); } -} \ No newline at end of file +} From 4f1099b9d83fecc8870fdeeb94412a411887cfba Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 19 Apr 2024 20:06:47 +0200 Subject: [PATCH 246/700] chore(debug): allow first fcu through in `EngineApiSkipFcu` interceptor (#7756) --- crates/node-core/src/engine_skip_fcu.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crates/node-core/src/engine_skip_fcu.rs b/crates/node-core/src/engine_skip_fcu.rs index c6bbd791792dc..a6e5e1b01d4fb 100644 --- a/crates/node-core/src/engine_skip_fcu.rs +++ b/crates/node-core/src/engine_skip_fcu.rs @@ -16,7 +16,11 @@ pub struct EngineApiSkipFcu { impl EngineApiSkipFcu { /// Creates new [EngineApiSkipFcu] interceptor. pub fn new(threshold: usize) -> Self { - Self { threshold, skipped: 0 } + Self { + threshold, + // Start with `threshold` so that the first FCU goes through. + skipped: threshold, + } } /// Intercepts an incoming engine API message, skips FCU or forwards it From 20568b8813368fcdd69ddf51141e73642fd862dc Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 19 Apr 2024 22:04:33 +0200 Subject: [PATCH 247/700] perf(discv5): populate kbuckets & improved RLPx peering (#7683) --- crates/net/discv5/src/lib.rs | 73 ++++++++++++++++++++++-------------- 1 file changed, 45 insertions(+), 28 deletions(-) diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 07e6291a79a7c..8a50528717855 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -43,7 +43,7 @@ use metrics::{DiscoveredPeersMetrics, Discv5Metrics}; /// Default number of times to do pulse lookup queries, at bootstrap (5 second intervals). /// -/// Default is 200 seconds. +/// Default is 100 seconds. 
pub const DEFAULT_COUNT_PULSE_LOOKUPS_AT_BOOTSTRAP: u64 = 100; /// Default duration of look up interval, for pulse look ups at bootstrap. @@ -51,8 +51,17 @@ pub const DEFAULT_COUNT_PULSE_LOOKUPS_AT_BOOTSTRAP: u64 = 100; /// Default is 5 seconds. pub const DEFAULT_SECONDS_PULSE_LOOKUP_INTERVAL: u64 = 5; -/// The max log2 distance, is equivalent to the index of the last bit in a discv5 node id. -const MAX_LOG2_DISTANCE: usize = 255; +/// Max kbucket index. +/// +/// This is the max log2distance for 32 byte [`NodeId`](discv5::enr::NodeId) - 1. See . +pub const MAX_KBUCKET_INDEX: usize = 255; + +/// Default lowest kbucket index to attempt filling, in periodic look up query to populate kbuckets. +/// +/// The peer at the 0th kbucket index is at log2distance 1 from the local node ID. See . +/// +/// Default is 0th index. +pub const DEFAULT_MIN_TARGET_KBUCKET_INDEX: usize = 0; /// Transparent wrapper around [`discv5::Discv5`]. #[derive(Clone)] @@ -229,7 +238,7 @@ impl Discv5 { }; // - // 3. start discv5 + // 2. start discv5 // let sk = discv5::enr::CombinedKey::secp256k1_from_bytes(&mut sk.secret_bytes()).unwrap(); let mut discv5 = match discv5::Discv5::new(enr, sk, discv5_config) { @@ -244,14 +253,14 @@ impl Discv5 { let discv5 = Arc::new(discv5); // - // 4. add boot nodes + // 3. add boot nodes // Self::bootstrap(bootstrap_nodes, &discv5).await?; let metrics = Discv5Metrics::default(); // - // 5. bg kbuckets maintenance + // 4. 
bg kbuckets maintenance // Self::spawn_populate_kbuckets_bg(lookup_interval, metrics.clone(), discv5.clone()); @@ -309,7 +318,7 @@ impl Discv5 { let local_node_id = discv5.local_enr().node_id(); let lookup_interval = Duration::from_secs(lookup_interval); let metrics = metrics.discovered_peers; - let mut log2_distance = 0usize; + let mut kbucket_index = MAX_KBUCKET_INDEX; let pulse_lookup_interval = Duration::from_secs(DEFAULT_SECONDS_PULSE_LOOKUP_INTERVAL); // todo: graceful shutdown @@ -335,7 +344,7 @@ impl Discv5 { loop { // make sure node is connected to each subtree in the network by target // selection (ref kademlia) - let target = get_lookup_target(log2_distance, local_node_id); + let target = get_lookup_target(kbucket_index, local_node_id); trace!(target: "net::discv5", %target, @@ -345,12 +354,12 @@ impl Discv5 { lookup(target, &discv5, &metrics).await; - if log2_distance < MAX_LOG2_DISTANCE { - // try to populate bucket one step further away - log2_distance += 1 + if kbucket_index > DEFAULT_MIN_TARGET_KBUCKET_INDEX { + // try to populate bucket one step closer + kbucket_index -= 1 } else { - // start over with self lookup - log2_distance = 0 + // start over with bucket furthest away + kbucket_index = MAX_KBUCKET_INDEX } tokio::time::sleep(lookup_interval).await; @@ -523,15 +532,17 @@ pub struct DiscoveredPeer { pub fork_id: Option, } -/// Gets the next lookup target, based on which distance is currently being targeted. +/// Gets the next lookup target, based on which bucket is currently being targeted. 
pub fn get_lookup_target( - log2_distance: usize, + kbucket_index: usize, local_node_id: discv5::enr::NodeId, ) -> discv5::enr::NodeId { + // init target let mut target = local_node_id.raw(); - //make sure target has a 'distance'-long suffix that differs from local node id - if log2_distance != 0 { - let suffix_bit_offset = MAX_LOG2_DISTANCE.saturating_sub(log2_distance); + + // make sure target has a 'log2distance'-long suffix that differs from local node id + if kbucket_index != 0 { + let suffix_bit_offset = MAX_KBUCKET_INDEX.saturating_sub(kbucket_index); let suffix_byte_offset = suffix_bit_offset / 8; // todo: flip the precise bit // let rel_suffix_bit_offset = suffix_bit_offset % 8; @@ -796,24 +807,30 @@ mod tests { #[test] fn select_lookup_target() { - // distance ceiled to the next byte - const fn expected_log2_distance(log2_distance: usize) -> u64 { - let log2_distance = log2_distance / 8; - ((log2_distance + 1) * 8) as u64 + // bucket index ceiled to the next multiple of 4 + const fn expected_bucket_index(kbucket_index: usize) -> u64 { + let log2distance = kbucket_index + 1; + let log2distance = log2distance / 8; + ((log2distance + 1) * 8) as u64 } - let log2_distance = rand::thread_rng().gen_range(0..=MAX_LOG2_DISTANCE); + let bucket_index = rand::thread_rng().gen_range(0..=MAX_KBUCKET_INDEX); let sk = CombinedKey::generate_secp256k1(); let local_node_id = discv5::enr::NodeId::from(sk.public()); - let target = get_lookup_target(log2_distance, local_node_id); + let target = get_lookup_target(bucket_index, local_node_id); let local_node_id = sigp::Key::from(local_node_id); let target = sigp::Key::from(target); - assert_eq!( - expected_log2_distance(log2_distance), - local_node_id.log2_distance(&target).unwrap() - ); + if bucket_index == 0 { + // log2distance undef (inf) + assert!(local_node_id.log2_distance(&target).is_none()) + } else { + assert_eq!( + expected_bucket_index(bucket_index), + local_node_id.log2_distance(&target).unwrap() + ); + } } } From 
16b747e20c2eafe4e3ebd06a4ad7068d10756b3a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 19 Apr 2024 22:43:40 +0200 Subject: [PATCH 248/700] fix(deps): bump rustls (#7758) --- Cargo.lock | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b7cee919e3198..4b2f223a91ec0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -306,7 +306,7 @@ dependencies = [ "futures", "futures-utils-wasm", "lru", - "reqwest 0.12.3", + "reqwest 0.12.4", "serde_json", "tokio", "tracing", @@ -345,7 +345,7 @@ dependencies = [ "alloy-transport-http", "futures", "pin-project", - "reqwest 0.12.3", + "reqwest 0.12.4", "serde", "serde_json", "tokio", @@ -540,7 +540,7 @@ source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea dependencies = [ "alloy-json-rpc", "alloy-transport", - "reqwest 0.12.3", + "reqwest 0.12.4", "serde_json", "tower", "url", @@ -3498,7 +3498,7 @@ dependencies = [ "http 0.2.12", "hyper 0.14.28", "log", - "rustls 0.21.10", + "rustls 0.21.11", "rustls-native-certs 0.6.3", "tokio", "tokio-rustls 0.24.1", @@ -5993,7 +5993,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.21.10", + "rustls 0.21.11", "rustls-pemfile 1.0.4", "serde", "serde_json", @@ -6015,9 +6015,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e6cc1e89e689536eb5aeede61520e874df5a4707df811cd5da4aa5fbb2aae19" +checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10" dependencies = [ "base64 0.22.0", "bytes", @@ -7893,9 +7893,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.10" +version = "0.21.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" +checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" 
dependencies = [ "log", "ring 0.17.8", @@ -7905,9 +7905,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.22.3" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99008d7ad0bbbea527ec27bddbc0e432c5b87d8175178cee68d2eec9c4a1813c" +checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" dependencies = [ "log", "ring 0.17.8", @@ -9064,7 +9064,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.10", + "rustls 0.21.11", "tokio", ] @@ -9074,7 +9074,7 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" dependencies = [ - "rustls 0.22.3", + "rustls 0.22.4", "rustls-pki-types", "tokio", ] @@ -9116,7 +9116,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.11", + "toml_edit 0.22.12", ] [[package]] @@ -9152,9 +9152,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.11" +version = "0.22.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb686a972ccef8537b39eead3968b0e8616cb5040dbb9bba93007c8e07c9215f" +checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef" dependencies = [ "indexmap 2.2.6", "serde", From d599ce46835891a1afe7970ac8b0cab4efe8b391 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 19 Apr 2024 23:18:33 +0200 Subject: [PATCH 249/700] fix: dont await changes (#7760) --- crates/exex/src/manager.rs | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index a89b09171229a..9345e21808b8d 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -1,3 +1,9 @@ +use crate::ExExEvent; +use metrics::Gauge; +use 
reth_metrics::{metrics::Counter, Metrics}; +use reth_primitives::{BlockNumber, FinishedExExHeight}; +use reth_provider::CanonStateNotification; +use reth_tracing::tracing::debug; use std::{ collections::VecDeque, future::{poll_fn, Future}, @@ -8,14 +14,6 @@ use std::{ }, task::{Context, Poll}, }; - -use crate::ExExEvent; -use futures::StreamExt; -use metrics::Gauge; -use reth_metrics::{metrics::Counter, Metrics}; -use reth_primitives::{BlockNumber, FinishedExExHeight}; -use reth_provider::CanonStateNotification; -use reth_tracing::tracing::debug; use tokio::sync::{ mpsc::{self, error::SendError, Receiver, UnboundedReceiver, UnboundedSender}, watch, @@ -347,6 +345,7 @@ pub struct ExExManagerHandle { /// otherwise unused. is_ready_receiver: watch::Receiver, /// A stream of bools denoting whether the manager is ready for new notifications. + #[allow(unused)] is_ready: WatchStream, /// The current capacity of the manager's internal notification buffer. current_capacity: Arc, @@ -426,15 +425,14 @@ impl ExExManagerHandle { } /// Wait until the manager is ready for new notifications. + #[allow(clippy::needless_pass_by_ref_mut)] pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<()> { - // if this returns `Poll::Ready(None)` the stream is exhausted, which means the underlying - // channel is closed. 
- // - // this can only happen if the manager died, and the node is shutting down, so we ignore it - let mut pinned = std::pin::pin!(&mut self.is_ready); - if pinned.poll_next_unpin(cx) == Poll::Ready(Some(true)) { + use futures as _; + // FIXME: if not ready this must be polled + if *self.is_ready_receiver.borrow() { Poll::Ready(()) } else { + cx.waker().wake_by_ref(); Poll::Pending } } From 71f8e678aa53f75c2c35badaa5848262de594cdc Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 19 Apr 2024 23:40:37 +0200 Subject: [PATCH 250/700] chore: relax some bounds (#7762) --- crates/stages/src/stages/execution.rs | 64 ++++++++++++++------------- 1 file changed, 33 insertions(+), 31 deletions(-) diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index ac48a2aeb3c63..7f22ecaef37f4 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -60,7 +60,7 @@ use tracing::*; /// to [tables::PlainStorageState] // false positive, we cannot derive it if !DB: Debug. #[allow(missing_debug_implementations)] -pub struct ExecutionStage { +pub struct ExecutionStage { metrics_tx: Option, /// The stage's internal executor executor_factory: EF, @@ -77,7 +77,7 @@ pub struct ExecutionStage { exex_manager_handle: ExExManagerHandle, } -impl ExecutionStage { +impl ExecutionStage { /// Create new execution stage with specified config. pub fn new( executor_factory: EF, @@ -115,6 +115,37 @@ impl ExecutionStage { self } + /// Adjusts the prune modes related to changesets. + /// + /// This function verifies whether the [`super::MerkleStage`] or Hashing stages will run from + /// scratch. If at least one stage isn't starting anew, it implies that pruning of + /// changesets cannot occur. This is determined by checking the highest clean threshold + /// (`self.external_clean_threshold`) across the stages. 
+ /// + /// Given that `start_block` changes with each checkpoint, it's necessary to inspect + /// [`tables::AccountsTrie`] to ensure that [`super::MerkleStage`] hasn't + /// been previously executed. + fn adjust_prune_modes( + &self, + provider: &DatabaseProviderRW, + start_block: u64, + max_block: u64, + ) -> Result { + let mut prune_modes = self.prune_modes.clone(); + + // If we're not executing MerkleStage from scratch (by threshold or first-sync), then erase + // changeset related pruning configurations + if !(max_block - start_block > self.external_clean_threshold || + provider.count_entries::()?.is_zero()) + { + prune_modes.account_history = None; + prune_modes.storage_history = None; + } + Ok(prune_modes) + } +} + +impl ExecutionStage { /// Execute the stage. pub fn execute_inner( &mut self, @@ -261,35 +292,6 @@ impl ExecutionStage { done, }) } - - /// Adjusts the prune modes related to changesets. - /// - /// This function verifies whether the [`super::MerkleStage`] or Hashing stages will run from - /// scratch. If at least one stage isn't starting anew, it implies that pruning of - /// changesets cannot occur. This is determined by checking the highest clean threshold - /// (`self.external_clean_threshold`) across the stages. - /// - /// Given that `start_block` changes with each checkpoint, it's necessary to inspect - /// [`tables::AccountsTrie`] to ensure that [`super::MerkleStage`] hasn't - /// been previously executed. 
- fn adjust_prune_modes( - &self, - provider: &DatabaseProviderRW, - start_block: u64, - max_block: u64, - ) -> Result { - let mut prune_modes = self.prune_modes.clone(); - - // If we're not executing MerkleStage from scratch (by threshold or first-sync), then erase - // changeset related pruning configurations - if !(max_block - start_block > self.external_clean_threshold || - provider.count_entries::()?.is_zero()) - { - prune_modes.account_history = None; - prune_modes.storage_history = None; - } - Ok(prune_modes) - } } fn execution_checkpoint( From 566a7804a3e3ffce45932583fadf8d0db7bd209a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 20 Apr 2024 02:00:52 +0200 Subject: [PATCH 251/700] fix(stages): block number in error (#7766) --- crates/interfaces/src/p2p/error.rs | 4 +++- crates/interfaces/src/p2p/headers/downloader.rs | 7 ++++++- crates/interfaces/src/test_utils/headers.rs | 1 + crates/net/downloaders/src/headers/reverse_headers.rs | 1 + 4 files changed, 11 insertions(+), 2 deletions(-) diff --git a/crates/interfaces/src/p2p/error.rs b/crates/interfaces/src/p2p/error.rs index 667d9d5487d98..3c4e351fc32d8 100644 --- a/crates/interfaces/src/p2p/error.rs +++ b/crates/interfaces/src/p2p/error.rs @@ -132,10 +132,12 @@ pub type DownloadResult = Result; pub enum DownloadError { /* ==================== HEADER ERRORS ==================== */ /// Header validation failed. 
- #[error("failed to validate header {hash}: {error}")] + #[error("failed to validate header {hash}, block number {number}: {error}")] HeaderValidation { /// Hash of header failing validation hash: B256, + /// Number of header failing validation + number: u64, /// The details of validation failure #[source] error: Box, diff --git a/crates/interfaces/src/p2p/headers/downloader.rs b/crates/interfaces/src/p2p/headers/downloader.rs index 35dd41a3e8d88..07633681dfc90 100644 --- a/crates/interfaces/src/p2p/headers/downloader.rs +++ b/crates/interfaces/src/p2p/headers/downloader.rs @@ -81,11 +81,16 @@ pub fn validate_header_download( ) -> DownloadResult<()> { // validate header against parent consensus.validate_header_against_parent(header, parent).map_err(|error| { - DownloadError::HeaderValidation { hash: parent.hash(), error: Box::new(error) } + DownloadError::HeaderValidation { + hash: parent.hash(), + number: header.number, + error: Box::new(error), + } })?; // validate header standalone consensus.validate_header(header).map_err(|error| DownloadError::HeaderValidation { hash: parent.hash(), + number: header.number, error: Box::new(error), })?; Ok(()) diff --git a/crates/interfaces/src/test_utils/headers.rs b/crates/interfaces/src/test_utils/headers.rs index 24b187c63d2e4..8262d9ae033d1 100644 --- a/crates/interfaces/src/test_utils/headers.rs +++ b/crates/interfaces/src/test_utils/headers.rs @@ -149,6 +149,7 @@ impl Stream for TestDownload { this.done = true; return Poll::Ready(Some(Err(DownloadError::HeaderValidation { hash: empty.hash(), + number: empty.number, error: Box::new(error), }))) } diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index e8e1fa009c29b..3af45c17259fc 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -284,6 +284,7 @@ where peer_id: Some(peer_id), error: DownloadError::HeaderValidation { hash: 
head.hash(), + number: head.number, error: Box::new(error), }, } From 9df9c03a882e08343a1b6e3f2a4fa70d314381a6 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 20 Apr 2024 02:31:14 +0200 Subject: [PATCH 252/700] fix(tracing): wrong header in error (#7767) --- crates/interfaces/src/p2p/headers/downloader.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/interfaces/src/p2p/headers/downloader.rs b/crates/interfaces/src/p2p/headers/downloader.rs index 07633681dfc90..9eea13aabf82d 100644 --- a/crates/interfaces/src/p2p/headers/downloader.rs +++ b/crates/interfaces/src/p2p/headers/downloader.rs @@ -82,14 +82,14 @@ pub fn validate_header_download( // validate header against parent consensus.validate_header_against_parent(header, parent).map_err(|error| { DownloadError::HeaderValidation { - hash: parent.hash(), + hash: header.hash(), number: header.number, error: Box::new(error), } })?; // validate header standalone consensus.validate_header(header).map_err(|error| DownloadError::HeaderValidation { - hash: parent.hash(), + hash: header.hash(), number: header.number, error: Box::new(error), })?; From db5bd0685167e875b52ea570d677c7aca8363bd9 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 20 Apr 2024 10:36:08 +0200 Subject: [PATCH 253/700] chore(lint): duplicate attributes (#7770) --- crates/primitives/src/transaction/pooled.rs | 1 - crates/primitives/src/transaction/sidecar.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index f78fe7fea844f..5588d45a78fba 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -1,7 +1,6 @@ //! Defines the types for blob transactions, legacy, and other EIP-2718 transactions included in a //! response to `GetPooledTransactions`. 
-#![cfg(feature = "c-kzg")] #![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] use super::error::TransactionConversionError; diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index 0e48e6d3a1f29..4c2751a86aa36 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -1,4 +1,3 @@ -#![cfg(feature = "c-kzg")] #![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] #[cfg(any(test, feature = "arbitrary"))] From 615e90b0f8a7444a732c6792fa8fa718f595c9ea Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Sat, 20 Apr 2024 11:00:11 +0200 Subject: [PATCH 254/700] fix(exex): properly check ready state in `poll_ready` (#7772) --- Cargo.lock | 2 -- crates/exex/Cargo.toml | 2 -- crates/exex/src/manager.rs | 43 +++++++++++++++++++------------------- 3 files changed, 22 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4b2f223a91ec0..c2883c0e0161a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6626,7 +6626,6 @@ name = "reth-exex" version = "0.2.0-beta.5" dependencies = [ "eyre", - "futures", "metrics", "reth-config", "reth-metrics", @@ -6637,7 +6636,6 @@ dependencies = [ "reth-tasks", "reth-tracing", "tokio", - "tokio-stream", "tokio-util", ] diff --git a/crates/exex/Cargo.toml b/crates/exex/Cargo.toml index d501a906ea27b..71f9c8bdef0df 100644 --- a/crates/exex/Cargo.toml +++ b/crates/exex/Cargo.toml @@ -23,9 +23,7 @@ reth-tasks.workspace = true reth-tracing.workspace = true ## async -futures.workspace = true tokio.workspace = true -tokio-stream.workspace = true tokio-util.workspace = true ## misc diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 9345e21808b8d..59f2bde58bbbc 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -12,14 +12,13 @@ use std::{ atomic::{AtomicUsize, Ordering}, Arc, }, - task::{Context, Poll}, + task::{ready, Context, Poll}, }; use tokio::sync::{ mpsc::{self, error::SendError, 
Receiver, UnboundedReceiver, UnboundedSender}, watch, }; -use tokio_stream::wrappers::WatchStream; -use tokio_util::sync::{PollSendError, PollSender}; +use tokio_util::sync::{PollSendError, PollSender, ReusableBoxFuture}; /// Metrics for an ExEx. #[derive(Metrics)] @@ -217,7 +216,7 @@ impl ExExManager { exex_tx: handle_tx, num_exexs, is_ready_receiver: is_ready_rx.clone(), - is_ready: WatchStream::new(is_ready_rx), + is_ready: ReusableBoxFuture::new(make_wait_future(is_ready_rx)), current_capacity, finished_height: finished_height_rx, }, @@ -340,13 +339,13 @@ pub struct ExExManagerHandle { num_exexs: usize, /// A watch channel denoting whether the manager is ready for new notifications or not. /// - /// This is stored internally alongside a `WatchStream` representation of the same value. This - /// field is only used to create a new `WatchStream` when the handle is cloned, but is - /// otherwise unused. + /// This is stored internally alongside a `ReusableBoxFuture` representation of the same value. + /// This field is only used to create a new `ReusableBoxFuture` when the handle is cloned, + /// but is otherwise unused. is_ready_receiver: watch::Receiver, - /// A stream of bools denoting whether the manager is ready for new notifications. - #[allow(unused)] - is_ready: WatchStream, + /// A reusable future that resolves when the manager is ready for new + /// notifications. + is_ready: ReusableBoxFuture<'static, watch::Receiver>, /// The current capacity of the manager's internal notification buffer. current_capacity: Arc, /// The finished height of all ExEx's. 
@@ -368,7 +367,7 @@ impl ExExManagerHandle { exex_tx, num_exexs: 0, is_ready_receiver: is_ready_rx.clone(), - is_ready: WatchStream::new(is_ready_rx), + is_ready: ReusableBoxFuture::new(make_wait_future(is_ready_rx)), current_capacity: Arc::new(AtomicUsize::new(0)), finished_height: finished_height_rx, } @@ -425,26 +424,28 @@ impl ExExManagerHandle { } /// Wait until the manager is ready for new notifications. - #[allow(clippy::needless_pass_by_ref_mut)] pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<()> { - use futures as _; - // FIXME: if not ready this must be polled - if *self.is_ready_receiver.borrow() { - Poll::Ready(()) - } else { - cx.waker().wake_by_ref(); - Poll::Pending - } + let rx = ready!(self.is_ready.poll(cx)); + self.is_ready.set(make_wait_future(rx)); + Poll::Ready(()) } } +/// Creates a future that resolves once the given watch channel receiver is true. +async fn make_wait_future(mut rx: watch::Receiver) -> watch::Receiver { + // NOTE(onbjerg): We can ignore the error here, because if the channel is closed, the node + // is shutting down. 
+ let _ = rx.wait_for(|ready| *ready).await; + rx +} + impl Clone for ExExManagerHandle { fn clone(&self) -> Self { Self { exex_tx: self.exex_tx.clone(), num_exexs: self.num_exexs, is_ready_receiver: self.is_ready_receiver.clone(), - is_ready: WatchStream::new(self.is_ready_receiver.clone()), + is_ready: ReusableBoxFuture::new(make_wait_future(self.is_ready_receiver.clone())), current_capacity: self.current_capacity.clone(), finished_height: self.finished_height.clone(), } From 6728a5518a82285d8501d994d469f4c5299f68e3 Mon Sep 17 00:00:00 2001 From: Rupam Dey <117000803+rupam-04@users.noreply.github.com> Date: Sat, 20 Apr 2024 15:14:17 +0530 Subject: [PATCH 255/700] feat: gracefully shutdown prometheus server (#7728) Co-authored-by: Oliver Nordbjerg Co-authored-by: Oliver Nordbjerg --- bin/reth/src/cli/mod.rs | 2 +- bin/reth/src/commands/stage/mod.rs | 5 ++-- bin/reth/src/commands/stage/run.rs | 4 ++- crates/node-builder/src/builder.rs | 1 + .../src/metrics/prometheus_exporter.rs | 29 +++++++++++++++---- crates/node-core/src/node_config.rs | 2 ++ 6 files changed, 34 insertions(+), 9 deletions(-) diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 1adc4975fdd6f..d511d7182ab66 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -148,7 +148,7 @@ impl Cli { Commands::Import(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Stage(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::Stage(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), Commands::P2P(command) => runner.run_until_ctrl_c(command.execute()), Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), diff --git a/bin/reth/src/commands/stage/mod.rs 
b/bin/reth/src/commands/stage/mod.rs index fa9bda84295e6..8f514295e25c4 100644 --- a/bin/reth/src/commands/stage/mod.rs +++ b/bin/reth/src/commands/stage/mod.rs @@ -1,6 +1,7 @@ //! `reth stage` command use clap::{Parser, Subcommand}; +use reth_cli_runner::CliContext; pub mod drop; pub mod dump; @@ -34,9 +35,9 @@ pub enum Subcommands { impl Command { /// Execute `stage` command - pub async fn execute(self) -> eyre::Result<()> { + pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { match self.command { - Subcommands::Run(command) => command.execute().await, + Subcommands::Run(command) => command.execute(ctx).await, Subcommands::Drop(command) => command.execute().await, Subcommands::Dump(command) => command.execute().await, Subcommands::Unwind(command) => command.execute().await, diff --git a/bin/reth/src/commands/stage/run.rs b/bin/reth/src/commands/stage/run.rs index 43bbe78592a6c..32550718f4dc1 100644 --- a/bin/reth/src/commands/stage/run.rs +++ b/bin/reth/src/commands/stage/run.rs @@ -14,6 +14,7 @@ use crate::{ }; use clap::Parser; use reth_beacon_consensus::BeaconConsensus; +use reth_cli_runner::CliContext; use reth_config::{config::EtlConfig, Config}; use reth_db::init_db; use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; @@ -120,7 +121,7 @@ pub struct Command { impl Command { /// Execute `stage` command - pub async fn execute(self) -> eyre::Result<()> { + pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { // Raise the fd limit of the process. // Does not do anything on windows. 
let _ = fdlimit::raise_fd_limit(); @@ -154,6 +155,7 @@ impl Command { Arc::clone(&db), factory.static_file_provider(), metrics_process::Collector::default(), + ctx.task_executor, ) .await?; } diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index 161c8c404b56b..c9178708e3051 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -514,6 +514,7 @@ where prometheus_handle, database.clone(), provider_factory.static_file_provider(), + executor.clone(), ) .await?; diff --git a/crates/node-core/src/metrics/prometheus_exporter.rs b/crates/node-core/src/metrics/prometheus_exporter.rs index 28f2a63af0d82..42b291e872d93 100644 --- a/crates/node-core/src/metrics/prometheus_exporter.rs +++ b/crates/node-core/src/metrics/prometheus_exporter.rs @@ -12,6 +12,7 @@ use metrics_util::layers::{PrefixLayer, Stack}; use reth_db::database_metrics::DatabaseMetrics; use reth_metrics::metrics::Unit; use reth_provider::providers::StaticFileProvider; +use reth_tasks::TaskExecutor; use std::{convert::Infallible, net::SocketAddr, sync::Arc}; pub(crate) trait Hook: Fn() + Send + Sync {} @@ -39,13 +40,19 @@ pub(crate) async fn serve_with_hooks( listen_addr: SocketAddr, handle: PrometheusHandle, hooks: impl IntoIterator, + task_executor: TaskExecutor, ) -> eyre::Result<()> { let hooks: Vec<_> = hooks.into_iter().collect(); // Start endpoint - start_endpoint(listen_addr, handle, Arc::new(move || hooks.iter().for_each(|hook| hook()))) - .await - .wrap_err("Could not start Prometheus endpoint")?; + start_endpoint( + listen_addr, + handle, + Arc::new(move || hooks.iter().for_each(|hook| hook())), + task_executor, + ) + .await + .wrap_err("Could not start Prometheus endpoint")?; Ok(()) } @@ -55,6 +62,7 @@ async fn start_endpoint( listen_addr: SocketAddr, handle: PrometheusHandle, hook: Arc, + task_executor: TaskExecutor, ) -> eyre::Result<()> { let make_svc = make_service_fn(move |_| { let handle = handle.clone(); @@ -67,10 +75,20 
@@ async fn start_endpoint( })) } }); + let server = Server::try_bind(&listen_addr).wrap_err("Could not bind to address")?.serve(make_svc); - tokio::spawn(async move { server.await.expect("Metrics endpoint crashed") }); + task_executor.spawn_with_graceful_shutdown_signal(move |signal| async move { + if let Err(error) = server + .with_graceful_shutdown(async move { + let _ = signal.await; + }) + .await + { + tracing::error!(%error, "metrics endpoint crashed") + } + }); Ok(()) } @@ -82,6 +100,7 @@ pub async fn serve( db: Metrics, static_file_provider: StaticFileProvider, process: metrics_process::Collector, + task_executor: TaskExecutor, ) -> eyre::Result<()> where Metrics: DatabaseMetrics + 'static + Send + Sync, @@ -102,7 +121,7 @@ where Box::new(collect_memory_stats), Box::new(collect_io_stats), ]; - serve_with_hooks(listen_addr, handle, hooks).await?; + serve_with_hooks(listen_addr, handle, hooks, task_executor).await?; // We describe the metrics after the recorder is installed, otherwise this information is not // registered diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index d6d3a63b98751..929f2f552018b 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -365,6 +365,7 @@ impl NodeConfig { prometheus_handle: PrometheusHandle, db: Metrics, static_file_provider: StaticFileProvider, + task_executor: TaskExecutor, ) -> eyre::Result<()> where Metrics: DatabaseMetrics + 'static + Send + Sync, @@ -377,6 +378,7 @@ impl NodeConfig { db, static_file_provider, metrics_process::Collector::default(), + task_executor, ) .await?; } From e35abf8423dcaae8d82fc2334ee88a7020b9294c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 20 Apr 2024 12:55:36 +0200 Subject: [PATCH 256/700] feat(op): init genesis alloc (#7748) --- bin/reth/src/commands/stage/drop.rs | 5 +-- crates/node-core/src/init.rs | 56 +++++++++++++++-------------- crates/primitives/src/account.rs | 6 ++-- crates/trie/src/proof.rs | 
5 ++- 4 files changed, 37 insertions(+), 35 deletions(-) diff --git a/bin/reth/src/commands/stage/drop.rs b/bin/reth/src/commands/stage/drop.rs index 3e33183cbff34..e79a4c33b4f19 100644 --- a/bin/reth/src/commands/stage/drop.rs +++ b/bin/reth/src/commands/stage/drop.rs @@ -133,7 +133,8 @@ impl Command { StageId::Execution.to_string(), Default::default(), )?; - insert_genesis_state::(tx, self.chain.genesis())?; + let alloc = &self.chain.genesis().alloc; + insert_genesis_state::(tx, alloc.len(), alloc.iter())?; } StageEnum::AccountHashing => { tx.clear::()?; @@ -191,7 +192,7 @@ impl Command { StageId::IndexStorageHistory.to_string(), Default::default(), )?; - insert_genesis_history(&provider_rw, &self.chain.genesis)?; + insert_genesis_history(&provider_rw, self.chain.genesis.alloc.iter())?; } StageEnum::TxLookup => { tx.clear::()?; diff --git a/crates/node-core/src/init.rs b/crates/node-core/src/init.rs index 23d91e8d5dbf6..b0a9c9c92080b 100644 --- a/crates/node-core/src/init.rs +++ b/crates/node-core/src/init.rs @@ -7,8 +7,8 @@ use reth_db::{ }; use reth_interfaces::{db::DatabaseError, provider::ProviderResult}; use reth_primitives::{ - stage::StageId, Account, Bytecode, ChainSpec, Receipts, StaticFileSegment, StorageEntry, B256, - U256, + stage::StageId, Account, Address, Bytecode, ChainSpec, GenesisAccount, Receipts, + StaticFileSegment, StorageEntry, B256, U256, }; use reth_provider::{ bundle_state::{BundleStateInit, RevertsInit}, @@ -72,17 +72,19 @@ pub fn init_genesis(factory: ProviderFactory) -> Result(&tx, &static_file_provider, chain.clone())?; - insert_genesis_state::(&tx, genesis)?; + insert_genesis_state::(&tx, alloc.len(), alloc.iter())?; // insert sync stage for stage in StageId::ALL.iter() { @@ -96,16 +98,16 @@ pub fn init_genesis(factory: ProviderFactory) -> Result( +pub fn insert_genesis_state<'a, 'b, DB: Database>( tx: &::TXMut, - genesis: &reth_primitives::Genesis, + capacity: usize, + alloc: impl Iterator, ) -> ProviderResult<()> { - let capacity 
= genesis.alloc.len(); let mut state_init: BundleStateInit = HashMap::with_capacity(capacity); let mut reverts_init = HashMap::with_capacity(capacity); let mut contracts: HashMap = HashMap::with_capacity(capacity); - for (address, account) in &genesis.alloc { + for (address, account) in alloc { let bytecode_hash = if let Some(code) = &account.code { let bytecode = Bytecode::new_raw(code.clone()); let hash = bytecode.hash_slow(); @@ -163,24 +165,24 @@ pub fn insert_genesis_state( } /// Inserts hashes for the genesis state. -pub fn insert_genesis_hashes( +pub fn insert_genesis_hashes<'a, 'b, DB: Database>( provider: &DatabaseProviderRW, - genesis: &reth_primitives::Genesis, + alloc: impl Iterator + Clone, ) -> ProviderResult<()> { // insert and hash accounts to hashing table - let alloc_accounts = genesis - .alloc - .clone() - .into_iter() - .map(|(addr, account)| (addr, Some(Account::from_genesis_account(account)))); + let alloc_accounts = + alloc.clone().map(|(addr, account)| (*addr, Some(Account::from_genesis_account(account)))); provider.insert_account_for_hashing(alloc_accounts)?; - let alloc_storage = genesis.alloc.clone().into_iter().filter_map(|(addr, account)| { + let alloc_storage = alloc.filter_map(|(addr, account)| { // only return Some if there is storage - account.storage.map(|storage| { + account.storage.as_ref().map(|storage| { ( - addr, - storage.into_iter().map(|(key, value)| StorageEntry { key, value: value.into() }), + *addr, + storage + .clone() + .into_iter() + .map(|(key, value)| StorageEntry { key, value: value.into() }), ) }) }); @@ -190,17 +192,15 @@ pub fn insert_genesis_hashes( } /// Inserts history indices for genesis accounts and storage. 
-pub fn insert_genesis_history( +pub fn insert_genesis_history<'a, 'b, DB: Database>( provider: &DatabaseProviderRW, - genesis: &reth_primitives::Genesis, + alloc: impl Iterator + Clone, ) -> ProviderResult<()> { let account_transitions = - genesis.alloc.keys().map(|addr| (*addr, vec![0])).collect::>(); + alloc.clone().map(|(addr, _)| (*addr, vec![0])).collect::>(); provider.insert_account_history_index(account_transitions)?; - let storage_transitions = genesis - .alloc - .iter() + let storage_transitions = alloc .filter_map(|(addr, account)| account.storage.as_ref().map(|storage| (addr, storage))) .flat_map(|(addr, storage)| storage.iter().map(|(key, _)| ((*addr, *key), vec![0]))) .collect::>(); @@ -235,6 +235,8 @@ pub fn insert_genesis_header( #[cfg(test)] mod tests { + use std::sync::Arc; + use super::*; use reth_db::{ @@ -244,7 +246,7 @@ mod tests { DatabaseEnv, }; use reth_primitives::{ - Address, Chain, ForkTimestamps, Genesis, GenesisAccount, IntegerList, GOERLI, + Address, Chain, ChainSpec, ForkTimestamps, Genesis, GenesisAccount, IntegerList, GOERLI, GOERLI_GENESIS_HASH, MAINNET, MAINNET_GENESIS_HASH, SEPOLIA, SEPOLIA_GENESIS_HASH, }; use reth_provider::test_utils::create_test_provider_factory_with_chain_spec; diff --git a/crates/primitives/src/account.rs b/crates/primitives/src/account.rs index 712e5e341c267..d0bc3788e485d 100644 --- a/crates/primitives/src/account.rs +++ b/crates/primitives/src/account.rs @@ -35,13 +35,13 @@ impl Account { self.bytecode_hash.map_or(true, |hash| hash == KECCAK_EMPTY) } - /// Converts [GenesisAccount] to [Account] type - pub fn from_genesis_account(value: GenesisAccount) -> Self { + /// Makes an [Account] from [GenesisAccount] type + pub fn from_genesis_account(value: &GenesisAccount) -> Self { Account { // nonce must exist, so we default to zero when converting a genesis account nonce: value.nonce.unwrap_or_default(), balance: value.balance, - bytecode_hash: value.code.map(keccak256), + bytecode_hash: 
value.code.as_ref().map(keccak256), } } diff --git a/crates/trie/src/proof.rs b/crates/trie/src/proof.rs index 1af1650e5c2f0..55eb47710f038 100644 --- a/crates/trie/src/proof.rs +++ b/crates/trie/src/proof.rs @@ -207,9 +207,8 @@ mod tests { let genesis = chain_spec.genesis(); let alloc_accounts = genesis .alloc - .clone() - .into_iter() - .map(|(addr, account)| (addr, Some(Account::from_genesis_account(account)))); + .iter() + .map(|(addr, account)| (*addr, Some(Account::from_genesis_account(account)))); provider.insert_account_for_hashing(alloc_accounts).unwrap(); let alloc_storage = genesis.alloc.clone().into_iter().filter_map(|(addr, account)| { From 66d5ecbd47340328d2ebaf3e70b06dea92b2383d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 20 Apr 2024 14:29:00 +0200 Subject: [PATCH 257/700] chore: bump alloy 39b8695 (#7774) --- Cargo.lock | 36 +++++++++++++++++----------------- Cargo.toml | 26 ++++++++++++------------ crates/primitives/src/block.rs | 2 +- 3 files changed, 32 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c2883c0e0161a..093aa53e10c30 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,7 +146,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" +source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" dependencies = [ "alloy-eips", "alloy-primitives", @@ -178,7 +178,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" +source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -196,7 +196,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" +source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" dependencies = [ "alloy-primitives", "alloy-serde", @@ -218,7 +218,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" +source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" dependencies = [ "alloy-primitives", "serde", @@ -230,7 +230,7 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" +source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" dependencies = [ "alloy-consensus", "alloy-eips", @@ -246,7 +246,7 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" +source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -288,7 +288,7 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" +source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -338,7 +338,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" +source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ 
-358,7 +358,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" +source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" dependencies = [ "alloy-consensus", "alloy-eips", @@ -380,7 +380,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" +source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" dependencies = [ "alloy-primitives", "alloy-serde", @@ -390,7 +390,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" +source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" dependencies = [ "alloy-consensus", "alloy-eips", @@ -408,7 +408,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" +source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -420,7 +420,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" +source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" dependencies = [ "alloy-primitives", "serde", @@ -430,7 +430,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" +source = 
"git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" dependencies = [ "alloy-primitives", "async-trait", @@ -443,7 +443,7 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" +source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" dependencies = [ "alloy-consensus", "alloy-network", @@ -518,7 +518,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" +source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" dependencies = [ "alloy-json-rpc", "base64 0.22.0", @@ -536,7 +536,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=188c4f8#188c4f8f6080d4beaaea653c57261cb3b53a95b3" +source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -7616,7 +7616,7 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=735f67c#735f67cd450fe952625eb777b86d0e48df3ef28c" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=dc614ee#dc614eec85ee4d4af938865b121fad58ec7dad5f" dependencies = [ "alloy-primitives", "alloy-rpc-types", diff --git a/Cargo.toml b/Cargo.toml index c1b57e008d099..a7ca32ca09f2c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -273,7 +273,7 @@ revm = { version = "8.0.0", features = [ revm-primitives = { version = "3.1.0", features = [ "std", ], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "735f67c" } +revm-inspectors = { git = 
"https://github.com/paradigmxyz/evm-inspectors", rev = "dc614ee" } # eth alloy-chains = "0.1.15" @@ -282,20 +282,20 @@ alloy-dyn-abi = "0.7.0" alloy-sol-types = "0.7.0" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "188c4f8" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "188c4f8" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = 
false, rev = "39b8695" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } # misc auto_impl = "1" diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 2b43e32514d98..06c08db1fb20e 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -657,8 +657,8 @@ impl From for BlockBody { mod tests { use super::{BlockNumberOrTag::*, *}; use crate::hex_literal::hex; + use alloy_eips::eip1898::HexStringMissingPrefixError; use alloy_rlp::{Decodable, Encodable}; - use reth_rpc_types::HexStringMissingPrefixError; use std::str::FromStr; /// Check parsing according to EIP-1898. From 3750edd905f90b3033bd225709b88f9fbd2b17bb Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Sat, 20 Apr 2024 13:58:03 +0100 Subject: [PATCH 258/700] chore(e2e): refactor e2e tests (#7773) --- .github/workflows/integration.yml | 2 +- Cargo.lock | 64 +++++++------ Cargo.toml | 3 +- .../Cargo.toml | 8 +- .../src/engine_api.rs | 43 +++++---- .../src/lib.rs | 3 + .../src/network.rs | 0 .../src/node.rs | 62 ++++++++---- .../src/payload.rs | 51 ++++------ crates/e2e-test-utils/src/traits.rs | 22 +++++ .../src/wallet.rs | 8 +- crates/node-ethereum/Cargo.toml | 9 +- .../tests}/assets/genesis.json | 0 .../it => node-ethereum/tests/e2e}/dev.rs | 0 .../it => node-ethereum/tests/e2e}/eth.rs | 17 ++-- .../it => node-ethereum/tests/e2e}/main.rs | 1 + .../it => node-ethereum/tests/e2e}/p2p.rs | 11 ++- crates/node-ethereum/tests/e2e/utils.rs | 15 +++ crates/optimism/node/Cargo.toml | 3 + .../optimism/node/tests/assets/genesis.json | 96 +++++++++++++++++++ crates/optimism/node/tests/e2e/main.rs | 7 ++ crates/optimism/node/tests/e2e/p2p.rs | 78 
+++++++++++++++ crates/optimism/node/tests/e2e/utils.rs | 22 +++++ 23 files changed, 400 insertions(+), 125 deletions(-) rename crates/{node-e2e-tests => e2e-test-utils}/Cargo.toml (84%) rename crates/{node-e2e-tests => e2e-test-utils}/src/engine_api.rs (50%) rename crates/{node-e2e-tests => e2e-test-utils}/src/lib.rs (89%) rename crates/{node-e2e-tests => e2e-test-utils}/src/network.rs (100%) rename crates/{node-e2e-tests => e2e-test-utils}/src/node.rs (67%) rename crates/{node-e2e-tests => e2e-test-utils}/src/payload.rs (56%) create mode 100644 crates/e2e-test-utils/src/traits.rs rename crates/{node-e2e-tests => e2e-test-utils}/src/wallet.rs (88%) rename crates/{node-e2e-tests => node-ethereum/tests}/assets/genesis.json (100%) rename crates/{node-e2e-tests/tests/it => node-ethereum/tests/e2e}/dev.rs (100%) rename crates/{node-e2e-tests/tests/it => node-ethereum/tests/e2e}/eth.rs (89%) rename crates/{node-e2e-tests/tests/it => node-ethereum/tests/e2e}/main.rs (78%) rename crates/{node-e2e-tests/tests/it => node-ethereum/tests/e2e}/p2p.rs (88%) create mode 100644 crates/node-ethereum/tests/e2e/utils.rs create mode 100644 crates/optimism/node/tests/assets/genesis.json create mode 100644 crates/optimism/node/tests/e2e/main.rs create mode 100644 crates/optimism/node/tests/e2e/p2p.rs create mode 100644 crates/optimism/node/tests/e2e/utils.rs diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 7d01b0030c511..6f1e63a7ccf49 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -48,7 +48,7 @@ jobs: run: | cargo nextest run \ --locked --features "asm-keccak ${{ matrix.network }}" \ - --workspace --exclude examples --exclude ef-tests node-e2e-tests \ + --workspace --exclude examples --exclude ef-tests node-ethereum \ -E "kind(test)" sync: diff --git a/Cargo.lock b/Cargo.lock index 093aa53e10c30..90699db28083c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4882,33 +4882,6 @@ dependencies = [ "libc", ] 
-[[package]] -name = "node-e2e-tests" -version = "0.0.0" -dependencies = [ - "alloy-consensus", - "alloy-network", - "alloy-rpc-types", - "alloy-signer", - "alloy-signer-wallet", - "eyre", - "futures-util", - "jsonrpsee", - "rand 0.8.5", - "reth", - "reth-db", - "reth-node-core", - "reth-node-ethereum", - "reth-payload-builder", - "reth-primitives", - "reth-rpc", - "reth-tracing", - "secp256k1 0.27.0", - "serde_json", - "tokio", - "tokio-stream", -] - [[package]] name = "nom" version = "7.1.3" @@ -6446,6 +6419,33 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-e2e-test-utils" +version = "0.2.0-beta.5" +dependencies = [ + "alloy-consensus", + "alloy-network", + "alloy-rpc-types", + "alloy-signer", + "alloy-signer-wallet", + "eyre", + "futures-util", + "jsonrpsee", + "rand 0.8.5", + "reth", + "reth-db", + "reth-node-core", + "reth-node-ethereum", + "reth-payload-builder", + "reth-primitives", + "reth-rpc", + "reth-tracing", + "secp256k1 0.27.0", + "serde_json", + "tokio", + "tokio-stream", +] + [[package]] name = "reth-ecies" version = "0.2.0-beta.5" @@ -6962,8 +6962,11 @@ version = "0.2.0-beta.5" dependencies = [ "eyre", "futures", + "futures-util", + "reth", "reth-basic-payload-builder", "reth-db", + "reth-e2e-test-utils", "reth-ethereum-engine-primitives", "reth-ethereum-payload-builder", "reth-evm-ethereum", @@ -6971,10 +6974,14 @@ dependencies = [ "reth-network", "reth-node-api", "reth-node-builder", + "reth-node-core", "reth-payload-builder", + "reth-primitives", "reth-provider", "reth-tracing", "reth-transaction-pool", + "serde_json", + "tokio", ] [[package]] @@ -7011,8 +7018,10 @@ dependencies = [ "jsonrpsee", "parking_lot 0.12.1", "reqwest 0.11.27", + "reth", "reth-basic-payload-builder", "reth-db", + "reth-e2e-test-utils", "reth-evm", "reth-interfaces", "reth-network", @@ -7033,6 +7042,7 @@ dependencies = [ "serde", "serde_json", "thiserror", + "tokio", "tracing", ] diff --git a/Cargo.toml b/Cargo.toml index a7ca32ca09f2c..96e4108293bd8 100644 
--- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,7 @@ members = [ "crates/consensus/beacon-core/", "crates/consensus/common/", "crates/ethereum-forks/", + "crates/e2e-test-utils/", "crates/etl/", "crates/evm/", "crates/ethereum/evm", @@ -51,7 +52,6 @@ members = [ "crates/optimism/node/", "crates/node-core/", "crates/node/api/", - "crates/node-e2e-tests/", "crates/stages/", "crates/stages-api", "crates/static-file/", @@ -212,6 +212,7 @@ reth-db = { path = "crates/storage/db" } reth-discv4 = { path = "crates/net/discv4" } reth-discv5 = { path = "crates/net/discv5" } reth-dns-discovery = { path = "crates/net/dns" } +reth-e2e-test-utils = { path = "crates/e2e-test-utils" } reth-engine-primitives = { path = "crates/engine-primitives" } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } reth-node-builder = { path = "crates/node-builder" } diff --git a/crates/node-e2e-tests/Cargo.toml b/crates/e2e-test-utils/Cargo.toml similarity index 84% rename from crates/node-e2e-tests/Cargo.toml rename to crates/e2e-test-utils/Cargo.toml index d3811f5f5cf1d..f32ff029c2ed9 100644 --- a/crates/node-e2e-tests/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -1,9 +1,11 @@ [package] -name = "node-e2e-tests" -version = "0.0.0" -publish = false +name = "reth-e2e-test-utils" +version.workspace = true edition.workspace = true +rust-version.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [dependencies] diff --git a/crates/node-e2e-tests/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs similarity index 50% rename from crates/node-e2e-tests/src/engine_api.rs rename to crates/e2e-test-utils/src/engine_api.rs index 06c9afa311ca6..ec8b058a30077 100644 --- a/crates/node-e2e-tests/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -1,56 +1,57 @@ +use crate::traits::PayloadEnvelopeExt; use jsonrpsee::http_client::HttpClient; use reth::{ + api::{EngineTypes, PayloadBuilderAttributes}, 
providers::CanonStateNotificationStream, - rpc::{ - api::EngineApiClient, - types::engine::{ExecutionPayloadEnvelopeV3, ForkchoiceState}, - }, + rpc::{api::EngineApiClient, types::engine::ForkchoiceState}, }; -use reth_node_ethereum::EthEngineTypes; -use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes, PayloadId}; +use reth_payload_builder::PayloadId; use reth_primitives::B256; +use std::marker::PhantomData; /// Helper for engine api operations -pub struct EngineApiHelper { +pub struct EngineApiHelper { pub canonical_stream: CanonStateNotificationStream, pub engine_api_client: HttpClient, + pub _marker: PhantomData, } -impl EngineApiHelper { +impl EngineApiHelper { /// Retrieves a v3 payload from the engine api pub async fn get_payload_v3( &self, payload_id: PayloadId, - ) -> eyre::Result { - Ok(EngineApiClient::::get_payload_v3(&self.engine_api_client, payload_id) - .await?) + ) -> eyre::Result { + Ok(EngineApiClient::::get_payload_v3(&self.engine_api_client, payload_id).await?) 
} /// Submits a payload to the engine api pub async fn submit_payload( &self, - payload: EthBuiltPayload, - eth_attr: EthPayloadBuilderAttributes, - ) -> eyre::Result { + payload: E::BuiltPayload, + payload_builder_attributes: E::PayloadBuilderAttributes, + ) -> eyre::Result + where + E::ExecutionPayloadV3: From + PayloadEnvelopeExt, + { // setup payload for submission - let envelope_v3 = ExecutionPayloadEnvelopeV3::from(payload); - let payload_v3 = envelope_v3.execution_payload; + let envelope_v3: ::ExecutionPayloadV3 = payload.into(); // submit payload to engine api - let submission = EngineApiClient::::new_payload_v3( + let submission = EngineApiClient::::new_payload_v3( &self.engine_api_client, - payload_v3, + envelope_v3.execution_payload(), vec![], - eth_attr.parent_beacon_block_root.unwrap(), + payload_builder_attributes.parent_beacon_block_root().unwrap(), ) .await?; - assert!(submission.is_valid()); + assert!(submission.is_valid(), "{}", submission); Ok(submission.latest_valid_hash.unwrap()) } /// Sends forkchoice update to the engine api pub async fn update_forkchoice(&self, hash: B256) -> eyre::Result<()> { - EngineApiClient::::fork_choice_updated_v2( + EngineApiClient::::fork_choice_updated_v2( &self.engine_api_client, ForkchoiceState { head_block_hash: hash, diff --git a/crates/node-e2e-tests/src/lib.rs b/crates/e2e-test-utils/src/lib.rs similarity index 89% rename from crates/node-e2e-tests/src/lib.rs rename to crates/e2e-test-utils/src/lib.rs index 2799c4fe3fc7a..016fb4d3e21b0 100644 --- a/crates/node-e2e-tests/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -12,3 +12,6 @@ mod network; /// Helper for engine api operations mod engine_api; + +/// Helper traits +mod traits; diff --git a/crates/node-e2e-tests/src/network.rs b/crates/e2e-test-utils/src/network.rs similarity index 100% rename from crates/node-e2e-tests/src/network.rs rename to crates/e2e-test-utils/src/network.rs diff --git a/crates/node-e2e-tests/src/node.rs 
b/crates/e2e-test-utils/src/node.rs similarity index 67% rename from crates/node-e2e-tests/src/node.rs rename to crates/e2e-test-utils/src/node.rs index f4e94b4ae47c9..d88a428f05a10 100644 --- a/crates/node-e2e-tests/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -1,8 +1,11 @@ -use crate::{engine_api::EngineApiHelper, network::NetworkHelper, payload::PayloadHelper}; +use crate::{ + engine_api::EngineApiHelper, network::NetworkHelper, payload::PayloadHelper, + traits::PayloadEnvelopeExt, +}; use alloy_rpc_types::BlockNumberOrTag; use eyre::Ok; use reth::{ - api::FullNodeComponents, + api::{BuiltPayload, EngineTypes, FullNodeComponents, PayloadBuilderAttributes}, builder::FullNode, providers::{BlockReaderIdExt, CanonStateSubscriptions}, rpc::{ @@ -10,28 +13,28 @@ use reth::{ types::engine::PayloadAttributes, }, }; - -use reth_node_ethereum::EthEngineTypes; use reth_payload_builder::EthPayloadBuilderAttributes; -use reth_primitives::{Address, Bytes, B256}; - -use std::time::{SystemTime, UNIX_EPOCH}; +use reth_primitives::{Address, BlockNumber, Bytes, B256}; +use std::{ + marker::PhantomData, + time::{SystemTime, UNIX_EPOCH}, +}; use tokio_stream::StreamExt; /// An helper struct to handle node actions pub struct NodeHelper where - Node: FullNodeComponents, + Node: FullNodeComponents, { pub inner: FullNode, payload: PayloadHelper, pub network: NetworkHelper, - pub engine_api: EngineApiHelper, + pub engine_api: EngineApiHelper, } impl NodeHelper where - Node: FullNodeComponents, + Node: FullNodeComponents, { /// Creates a new test node pub async fn new(node: FullNode) -> eyre::Result { @@ -44,17 +47,26 @@ where engine_api: EngineApiHelper { engine_api_client: node.auth_server_handle().http_client(), canonical_stream: node.provider.canonical_state_stream(), + _marker: PhantomData::, }, }) } /// Advances the node forward - pub async fn advance(&mut self, raw_tx: Bytes) -> eyre::Result<(B256, B256)> { + pub async fn advance( + &mut self, + raw_tx: Bytes, + 
attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes, + ) -> eyre::Result<(B256, B256)> + where + ::ExecutionPayloadV3: + From<::BuiltPayload> + PayloadEnvelopeExt, + { // push tx into pool via RPC server let tx_hash = self.inject_tx(raw_tx).await?; // trigger new payload building draining the pool - let eth_attr = self.payload.new_payload().await.unwrap(); + let eth_attr = self.payload.new_payload(attributes_generator).await.unwrap(); // first event is the payload attributes self.payload.expect_attr_event(eth_attr.clone()).await?; @@ -69,13 +81,14 @@ where let payload = self.payload.expect_built_payload().await?; // submit payload via engine api + let block_number = payload.block().number; let block_hash = self.engine_api.submit_payload(payload, eth_attr.clone()).await?; // trigger forkchoice update via engine api to commit the block to the blockchain self.engine_api.update_forkchoice(block_hash).await?; // assert the block has been committed to the blockchain - self.assert_new_block(tx_hash, block_hash).await?; + self.assert_new_block(tx_hash, block_hash, block_number).await?; Ok((block_hash, tx_hash)) } @@ -91,6 +104,7 @@ where &mut self, tip_tx_hash: B256, block_hash: B256, + block_number: BlockNumber, ) -> eyre::Result<()> { // get head block from notifications stream and verify the tx has been pushed to the // pool is actually present in the canonical block @@ -98,14 +112,20 @@ where let tx = head.tip().transactions().next(); assert_eq!(tx.unwrap().hash().as_slice(), tip_tx_hash.as_slice()); - // wait for the block to commit - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - - // make sure the block hash we submitted via FCU engine api is the new latest block - // using an RPC call - let latest_block = - self.inner.provider.block_by_number_or_tag(BlockNumberOrTag::Latest)?.unwrap(); - assert_eq!(latest_block.hash_slow(), block_hash); + loop { + // wait for the block to commit + 
tokio::time::sleep(std::time::Duration::from_millis(20)).await; + if let Some(latest_block) = + self.inner.provider.block_by_number_or_tag(BlockNumberOrTag::Latest)? + { + if latest_block.number == block_number { + // make sure the block hash we submitted via FCU engine api is the new latest + // block using an RPC call + assert_eq!(latest_block.hash_slow(), block_hash); + break + } + } + } Ok(()) } } diff --git a/crates/node-e2e-tests/src/payload.rs b/crates/e2e-test-utils/src/payload.rs similarity index 56% rename from crates/node-e2e-tests/src/payload.rs rename to crates/e2e-test-utils/src/payload.rs index a23f7225f19f4..37138cdd3ebfb 100644 --- a/crates/node-e2e-tests/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -1,34 +1,31 @@ -use std::time::{SystemTime, UNIX_EPOCH}; - use futures_util::StreamExt; -use reth::{ - api::{EngineTypes, PayloadBuilderAttributes}, - rpc::types::engine::PayloadAttributes, -}; -use reth_node_ethereum::EthEngineTypes; -use reth_payload_builder::{ - EthBuiltPayload, EthPayloadBuilderAttributes, Events, PayloadBuilderHandle, PayloadId, -}; -use reth_primitives::{Address, B256}; +use reth::api::{BuiltPayload, EngineTypes, PayloadBuilderAttributes}; +use reth_payload_builder::{Events, PayloadBuilderHandle, PayloadId}; use tokio_stream::wrappers::BroadcastStream; /// Helper for payload operations pub struct PayloadHelper { pub payload_event_stream: BroadcastStream>, payload_builder: PayloadBuilderHandle, + timestamp: u64, } -impl PayloadHelper { +impl PayloadHelper { /// Creates a new payload helper - pub async fn new(payload_builder: PayloadBuilderHandle) -> eyre::Result { + pub async fn new(payload_builder: PayloadBuilderHandle) -> eyre::Result { let payload_events = payload_builder.subscribe().await?; let payload_event_stream = payload_events.into_stream(); - Ok(Self { payload_event_stream, payload_builder }) + // Cancun timestamp + Ok(Self { payload_event_stream, payload_builder, timestamp: 1710338135 }) } /// Creates a 
new payload job from static attributes - pub async fn new_payload(&self) -> eyre::Result { - let attributes = eth_payload_attributes(); + pub async fn new_payload( + &mut self, + attributes_generator: impl Fn(u64) -> E::PayloadBuilderAttributes, + ) -> eyre::Result { + self.timestamp += 1; + let attributes: E::PayloadBuilderAttributes = attributes_generator(self.timestamp); self.payload_builder.new_payload(attributes.clone()).await.unwrap(); Ok(attributes) } @@ -36,11 +33,11 @@ impl PayloadHelper { /// Asserts that the next event is a payload attributes event pub async fn expect_attr_event( &mut self, - attrs: EthPayloadBuilderAttributes, + attrs: E::PayloadBuilderAttributes, ) -> eyre::Result<()> { let first_event = self.payload_event_stream.next().await.unwrap()?; if let reth::payload::Events::Attributes(attr) = first_event { - assert_eq!(attrs.timestamp, attr.timestamp()); + assert_eq!(attrs.timestamp(), attr.timestamp()); } else { panic!("Expect first event as payload attributes.") } @@ -52,7 +49,7 @@ impl PayloadHelper { loop { let payload = self.payload_builder.best_payload(payload_id).await.unwrap().unwrap(); if payload.block().body.is_empty() { - tokio::time::sleep(std::time::Duration::from_secs(1)).await; + tokio::time::sleep(std::time::Duration::from_millis(20)).await; continue; } break; @@ -60,7 +57,7 @@ impl PayloadHelper { } /// Expects the next event to be a built payload event or panics - pub async fn expect_built_payload(&mut self) -> eyre::Result { + pub async fn expect_built_payload(&mut self) -> eyre::Result { let second_event = self.payload_event_stream.next().await.unwrap()?; if let reth::payload::Events::BuiltPayload(payload) = second_event { Ok(payload) @@ -69,17 +66,3 @@ impl PayloadHelper { } } } - -/// Helper function to create a new eth payload attributes -fn eth_payload_attributes() -> EthPayloadBuilderAttributes { - let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(); - - let attributes = PayloadAttributes { 
- timestamp, - prev_randao: B256::ZERO, - suggested_fee_recipient: Address::ZERO, - withdrawals: Some(vec![]), - parent_beacon_block_root: Some(B256::ZERO), - }; - EthPayloadBuilderAttributes::new(B256::ZERO, attributes) -} diff --git a/crates/e2e-test-utils/src/traits.rs b/crates/e2e-test-utils/src/traits.rs new file mode 100644 index 0000000000000..2d8b4789da393 --- /dev/null +++ b/crates/e2e-test-utils/src/traits.rs @@ -0,0 +1,22 @@ +use reth::rpc::types::{ + engine::{ExecutionPayloadEnvelopeV3, OptimismExecutionPayloadEnvelopeV3}, + ExecutionPayloadV3, +}; + +/// The execution payload envelope type. +pub trait PayloadEnvelopeExt: Send + Sync + std::fmt::Debug { + /// Returns the execution payload V3 from the payload + fn execution_payload(&self) -> ExecutionPayloadV3; +} + +impl PayloadEnvelopeExt for OptimismExecutionPayloadEnvelopeV3 { + fn execution_payload(&self) -> ExecutionPayloadV3 { + self.execution_payload.clone() + } +} + +impl PayloadEnvelopeExt for ExecutionPayloadEnvelopeV3 { + fn execution_payload(&self) -> ExecutionPayloadV3 { + self.execution_payload.clone() + } +} diff --git a/crates/node-e2e-tests/src/wallet.rs b/crates/e2e-test-utils/src/wallet.rs similarity index 88% rename from crates/node-e2e-tests/src/wallet.rs rename to crates/e2e-test-utils/src/wallet.rs index 2351d0a0b1a2b..0428c86753217 100644 --- a/crates/node-e2e-tests/src/wallet.rs +++ b/crates/e2e-test-utils/src/wallet.rs @@ -5,19 +5,20 @@ use reth_primitives::{Address, Bytes, U256}; /// One of the accounts of the genesis allocations. 
pub struct Wallet { inner: LocalWallet, + nonce: u64, } impl Wallet { /// Creates a new account from one of the secret/pubkeys of the genesis allocations (test.json) pub(crate) fn new(phrase: &str) -> Self { let inner = MnemonicBuilder::::default().phrase(phrase).build().unwrap(); - Self { inner } + Self { inner, nonce: 0 } } /// Creates a static transfer and signs it - pub async fn transfer_tx(&self) -> Bytes { + pub async fn transfer_tx(&mut self) -> Bytes { let tx = TransactionRequest { - nonce: Some(0), + nonce: Some(self.nonce), value: Some(U256::from(100)), to: Some(Address::random()), gas_price: Some(20e9 as u128), @@ -25,6 +26,7 @@ impl Wallet { chain_id: Some(1), ..Default::default() }; + self.nonce += 1; let signer = EthereumSigner::from(self.inner.clone()); tx.build(&signer).await.unwrap().encoded_2718().into() } diff --git a/crates/node-ethereum/Cargo.toml b/crates/node-ethereum/Cargo.toml index 4380e57377be1..072f91b28fd67 100644 --- a/crates/node-ethereum/Cargo.toml +++ b/crates/node-ethereum/Cargo.toml @@ -27,8 +27,15 @@ reth-evm-ethereum.workspace = true eyre.workspace = true [dev-dependencies] +reth.workspace = true reth-db.workspace = true reth-exex.workspace = true reth-node-api.workspace = true - +reth-node-core.workspace = true +reth-primitives.workspace = true +reth-e2e-test-utils.workspace = true futures.workspace = true +tokio.workspace = true +futures-util.workspace = true +serde_json.workspace = true + diff --git a/crates/node-e2e-tests/assets/genesis.json b/crates/node-ethereum/tests/assets/genesis.json similarity index 100% rename from crates/node-e2e-tests/assets/genesis.json rename to crates/node-ethereum/tests/assets/genesis.json diff --git a/crates/node-e2e-tests/tests/it/dev.rs b/crates/node-ethereum/tests/e2e/dev.rs similarity index 100% rename from crates/node-e2e-tests/tests/it/dev.rs rename to crates/node-ethereum/tests/e2e/dev.rs diff --git a/crates/node-e2e-tests/tests/it/eth.rs b/crates/node-ethereum/tests/e2e/eth.rs 
similarity index 89% rename from crates/node-e2e-tests/tests/it/eth.rs rename to crates/node-ethereum/tests/e2e/eth.rs index a2c761221dfcc..6f9eeb999108a 100644 --- a/crates/node-e2e-tests/tests/it/eth.rs +++ b/crates/node-ethereum/tests/e2e/eth.rs @@ -1,9 +1,10 @@ -use node_e2e_tests::{node::NodeHelper, wallet::Wallet}; +use crate::utils::eth_payload_attributes; use reth::{ args::RpcServerArgs, builder::{NodeBuilder, NodeConfig, NodeHandle}, tasks::TaskManager, }; +use reth_e2e_test_utils::{node::NodeHelper, wallet::Wallet}; use reth_node_ethereum::EthereumNode; use reth_primitives::{ChainSpecBuilder, Genesis, MAINNET}; use std::sync::Arc; @@ -16,7 +17,7 @@ async fn can_run_eth_node() -> eyre::Result<()> { let exec = exec.executor(); // Chain spec with test allocs - let genesis: Genesis = serde_json::from_str(include_str!("../../assets/genesis.json")).unwrap(); + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); let chain_spec = Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) @@ -39,11 +40,11 @@ async fn can_run_eth_node() -> eyre::Result<()> { let mut node = NodeHelper::new(node).await?; // Configure wallet from test mnemonic and create dummy transfer tx - let wallet = Wallet::default(); + let mut wallet = Wallet::default(); let raw_tx = wallet.transfer_tx().await; // make the node advance - node.advance(raw_tx).await?; + node.advance(raw_tx, eth_payload_attributes).await?; Ok(()) } @@ -56,7 +57,7 @@ async fn can_run_eth_node_with_auth_engine_api_over_ipc() -> eyre::Result<()> { let exec = exec.executor(); // Chain spec with test allocs - let genesis: Genesis = serde_json::from_str(include_str!("../../assets/genesis.json")).unwrap(); + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); let chain_spec = Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) @@ -78,11 +79,11 @@ async fn can_run_eth_node_with_auth_engine_api_over_ipc() -> eyre::Result<()> { let 
mut node = NodeHelper::new(node).await?; // Configure wallet from test mnemonic and create dummy transfer tx - let wallet = Wallet::default(); + let mut wallet = Wallet::default(); let raw_tx = wallet.transfer_tx().await; // make the node advance - node.advance(raw_tx).await?; + node.advance(raw_tx, crate::utils::eth_payload_attributes).await?; Ok(()) } @@ -95,7 +96,7 @@ async fn test_failed_run_eth_node_with_no_auth_engine_api_over_ipc_opts() -> eyr let exec = exec.executor(); // Chain spec with test allocs - let genesis: Genesis = serde_json::from_str(include_str!("../../assets/genesis.json")).unwrap(); + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); let chain_spec = Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) diff --git a/crates/node-e2e-tests/tests/it/main.rs b/crates/node-ethereum/tests/e2e/main.rs similarity index 78% rename from crates/node-e2e-tests/tests/it/main.rs rename to crates/node-ethereum/tests/e2e/main.rs index ba13034645c2c..6a8a010649666 100644 --- a/crates/node-e2e-tests/tests/it/main.rs +++ b/crates/node-ethereum/tests/e2e/main.rs @@ -1,5 +1,6 @@ mod dev; mod eth; mod p2p; +mod utils; fn main() {} diff --git a/crates/node-e2e-tests/tests/it/p2p.rs b/crates/node-ethereum/tests/e2e/p2p.rs similarity index 88% rename from crates/node-e2e-tests/tests/it/p2p.rs rename to crates/node-ethereum/tests/e2e/p2p.rs index d0a2716f59658..940096e189b83 100644 --- a/crates/node-e2e-tests/tests/it/p2p.rs +++ b/crates/node-ethereum/tests/e2e/p2p.rs @@ -1,11 +1,12 @@ use std::sync::Arc; -use node_e2e_tests::{node::NodeHelper, wallet::Wallet}; +use crate::utils::eth_payload_attributes; use reth::{ args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, builder::{NodeBuilder, NodeConfig, NodeHandle}, tasks::TaskManager, }; +use reth_e2e_test_utils::{node::NodeHelper, wallet::Wallet}; use reth_node_ethereum::EthereumNode; use reth_primitives::{ChainSpecBuilder, Genesis, MAINNET}; @@ -16,7 +17,7 @@ async fn 
can_sync() -> eyre::Result<()> { let tasks = TaskManager::current(); let exec = tasks.executor(); - let genesis: Genesis = serde_json::from_str(include_str!("../../assets/genesis.json")).unwrap(); + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); let chain_spec = Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) @@ -52,7 +53,7 @@ async fn can_sync() -> eyre::Result<()> { let mut second_node = NodeHelper::new(node).await?; - let wallet = Wallet::default(); + let mut wallet = Wallet::default(); let raw_tx = wallet.transfer_tx().await; // Make them peer @@ -64,13 +65,13 @@ async fn can_sync() -> eyre::Result<()> { second_node.network.expect_session().await; // Make the first node advance - let (block_hash, tx_hash) = first_node.advance(raw_tx.clone()).await?; + let (block_hash, tx_hash) = first_node.advance(raw_tx.clone(), eth_payload_attributes).await?; // only send forkchoice update to second node second_node.engine_api.update_forkchoice(block_hash).await?; // expect second node advanced via p2p gossip - second_node.assert_new_block(tx_hash, block_hash).await?; + second_node.assert_new_block(tx_hash, block_hash, 1).await?; Ok(()) } diff --git a/crates/node-ethereum/tests/e2e/utils.rs b/crates/node-ethereum/tests/e2e/utils.rs new file mode 100644 index 0000000000000..52526c45f3092 --- /dev/null +++ b/crates/node-ethereum/tests/e2e/utils.rs @@ -0,0 +1,15 @@ +use reth::rpc::types::engine::PayloadAttributes; +use reth_payload_builder::EthPayloadBuilderAttributes; +use reth_primitives::{Address, B256}; + +/// Helper function to create a new eth payload attributes +pub(crate) fn eth_payload_attributes(timestamp: u64) -> EthPayloadBuilderAttributes { + let attributes = PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + EthPayloadBuilderAttributes::new(B256::ZERO, attributes) +} diff 
--git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index c6d2ece405e24..afa23a6c64e25 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -52,8 +52,11 @@ thiserror.workspace = true jsonrpsee.workspace = true [dev-dependencies] +reth.workspace = true reth-db.workspace = true reth-revm = { workspace = true, features = ["test-utils"]} +reth-e2e-test-utils.workspace = true +tokio.workspace = true [features] optimism = [ diff --git a/crates/optimism/node/tests/assets/genesis.json b/crates/optimism/node/tests/assets/genesis.json new file mode 100644 index 0000000000000..2bdfec4309f55 --- /dev/null +++ b/crates/optimism/node/tests/assets/genesis.json @@ -0,0 +1,96 @@ +{ + "config": { + "chainId": 1, + "homesteadBlock": 0, + "daoForkSupport": true, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "arrowGlacierBlock": 0, + "grayGlacierBlock": 0, + "shanghaiTime": 0, + "cancunTime": 0, + "terminalTotalDifficulty": "0x0", + "terminalTotalDifficultyPassed": true + }, + "nonce": "0x0", + "timestamp": "0x0", + "extraData": "0x00", + "gasLimit": "0x1c9c380", + "difficulty": "0x0", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "alloc": { + "0x14dc79964da2c08b23698b3d3cc7ca32193d9955": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x15d34aaf54267db7d7c367839aaf71a00a2c6a65": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x1cbd3b2770909d4e10f157cabc84c7264073c9ec": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x23618e81e3f5cdf7f54c3d65f7fbc0abf5b21e8f": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x2546bcd3c84621e976d8185a91a922ae77ecec30": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x3c44cdddb6a900fa2b585dd299e03d12fa4293bc": { + "balance": 
"0xd3c21bcecceda1000000" + }, + "0x70997970c51812dc3a010c7d01b50e0d17dc79c8": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x71be63f3384f5fb98995898a86b02fb2426c5788": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x8626f6940e2eb28930efb4cef49b2d1f2c9c1199": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x90f79bf6eb2c4f870365e785982e1f101e93b906": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x976ea74026e726554db657fa54763abd0c3a0aa9": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x9965507d1a55bcc2695c58ba16fb37d819b0a4dc": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x9c41de96b2088cdc640c6182dfcf5491dc574a57": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xa0ee7a142d267c1f36714e4a8f75612f20a79720": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xbcd4042de499d14e55001ccbb24a551f3b954096": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xbda5747bfd65f08deb54cb465eb87d40e51b197e": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xcd3b766ccdd6ae721141f452c550ca635964ce71": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xdd2fd4581271e230360230f9337d5c0430bf44c0": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xdf3e18d64bc6a983f673ab319ccae4f1a57c7097": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xfabb0ac9d68b0b445fb7357272ff202c5651694a": { + "balance": "0xd3c21bcecceda1000000" + } + }, + "number": "0x0" +} diff --git a/crates/optimism/node/tests/e2e/main.rs b/crates/optimism/node/tests/e2e/main.rs new file mode 100644 index 0000000000000..221f8483dd7d0 --- /dev/null +++ b/crates/optimism/node/tests/e2e/main.rs @@ -0,0 +1,7 @@ +#[cfg(feature = "optimism")] +mod p2p; + +#[cfg(feature = "optimism")] +mod utils; + +fn main() {} diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs new file mode 100644 index 0000000000000..5bf36ace982ac --- /dev/null +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -0,0 +1,78 @@ 
+use std::sync::Arc; + +use crate::utils::optimism_payload_attributes; +use reth::{ + args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, + builder::{NodeBuilder, NodeConfig, NodeHandle}, + tasks::TaskManager, +}; +use reth_e2e_test_utils::{node::NodeHelper, wallet::Wallet}; +use reth_node_optimism::node::OptimismNode; +use reth_primitives::{ChainSpecBuilder, Genesis, MAINNET}; + +#[tokio::test] +async fn can_sync() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(genesis) + .cancun_activated() + .build(), + ); + + let network_config = NetworkArgs { + discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, + ..NetworkArgs::default() + }; + + let node_config = NodeConfig::test() + .with_chain(chain_spec) + .with_network(network_config) + .with_unused_ports() + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node(exec.clone()) + .node(OptimismNode::default()) + .launch() + .await?; + + let mut first_node = NodeHelper::new(node.clone()).await?; + + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) + .testing_node(exec) + .node(OptimismNode::default()) + .launch() + .await?; + + let mut second_node = NodeHelper::new(node).await?; + + let mut wallet = Wallet::default(); + let raw_tx = wallet.transfer_tx().await; + + // Make them peer + first_node.network.add_peer(second_node.network.record()).await; + second_node.network.add_peer(first_node.network.record()).await; + + // Make sure they establish a new session + first_node.network.expect_session().await; + second_node.network.expect_session().await; + + // Make the first node 
advance + let (block_hash, tx_hash) = + first_node.advance(raw_tx.clone(), optimism_payload_attributes).await?; + + // only send forkchoice update to second node + second_node.engine_api.update_forkchoice(block_hash).await?; + + // expect second node advanced via p2p gossip + second_node.assert_new_block(tx_hash, block_hash, 1).await?; + + Ok(()) +} diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs new file mode 100644 index 0000000000000..1f655502e67b1 --- /dev/null +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -0,0 +1,22 @@ +use reth::rpc::types::engine::PayloadAttributes; +use reth_node_optimism::OptimismPayloadBuilderAttributes; +use reth_payload_builder::EthPayloadBuilderAttributes; +use reth_primitives::{Address, B256}; + +/// Helper function to create a new eth payload attributes +pub(crate) fn optimism_payload_attributes(timestamp: u64) -> OptimismPayloadBuilderAttributes { + let attributes = PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + + OptimismPayloadBuilderAttributes { + payload_attributes: EthPayloadBuilderAttributes::new(B256::ZERO, attributes), + transactions: vec![], + no_tx_pool: false, + gas_limit: Some(30_000_000), + } +} From d81cf8aa5c6ca6e5bb2bf5bc44741790deba9e7b Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 20 Apr 2024 22:39:55 +0200 Subject: [PATCH 259/700] fix(discv5): fix bug flip byte lookup tgt (#7764) Co-authored-by: DaniPopes <57450786+DaniPopes@users.noreply.github.com> --- crates/net/discv5/src/lib.rs | 73 ++++++++++++++---------------------- 1 file changed, 29 insertions(+), 44 deletions(-) diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 8a50528717855..218d4299dc837 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -22,6 +22,7 @@ use discv5::ListenConfig; use 
enr::{discv4_id_to_discv5_id, EnrCombinedKeyWrapper}; use futures::future::join_all; use itertools::Itertools; +use rand::{Rng, RngCore}; use reth_primitives::{bytes::Bytes, ForkId, NodeRecord, PeerId}; use secp256k1::SecretKey; use tokio::{sync::mpsc, task}; @@ -387,10 +388,10 @@ impl Discv5 { None } discv5::Event::SessionEstablished(enr, remote_socket) => { - // covers `reth_discv4::DiscoveryUpdate` equivalents `DiscoveryUpdate::Added(_)` + // covers `reth_discv4::DiscoveryUpdate` equivalents `DiscoveryUpdate::Added(_)` // and `DiscoveryUpdate::DiscoveredAtCapacity(_) - // peer has been discovered as part of query, or, by incoming session (peer has + // peer has been discovered as part of query, or, by incoming session (peer has // discovered us) self.metrics.discovered_peers_advertised_networks.increment_once_by_network_type(&enr); @@ -541,19 +542,24 @@ pub fn get_lookup_target( let mut target = local_node_id.raw(); // make sure target has a 'log2distance'-long suffix that differs from local node id - if kbucket_index != 0 { - let suffix_bit_offset = MAX_KBUCKET_INDEX.saturating_sub(kbucket_index); - let suffix_byte_offset = suffix_bit_offset / 8; - // todo: flip the precise bit - // let rel_suffix_bit_offset = suffix_bit_offset % 8; - target[suffix_byte_offset] = !target[suffix_byte_offset]; - - if suffix_byte_offset != 31 { - for b in target.iter_mut().take(31).skip(suffix_byte_offset + 1) { - *b = rand::random::(); - } - } + let bit_offset = MAX_KBUCKET_INDEX.saturating_sub(kbucket_index); + let (byte, bit) = (bit_offset / 8, bit_offset % 8); + // Flip the target bit. + target[byte] ^= 1 << (7 - bit); + + // Randomize the bits after the target. + let mut rng = rand::thread_rng(); + // Randomize remaining bits in the byte we modified. + if bit < 7 { + // Compute the mask of the bits that need to be randomized. + let bits_to_randomize = 0xff >> (bit + 1); + // Clear. + target[byte] &= !bits_to_randomize; + // Randomize. 
+ target[byte] |= rng.gen::() & bits_to_randomize; } + // Randomize remaining bytes. + rng.fill_bytes(&mut target[byte + 1..]); target.into() } @@ -595,13 +601,11 @@ pub async fn lookup( #[cfg(test)] mod tests { + use super::*; use ::enr::{CombinedKey, EnrKey}; - use rand::Rng; use secp256k1::rand::thread_rng; use tracing::trace; - use super::*; - fn discv5_noop() -> Discv5 { let sk = CombinedKey::generate_secp256k1(); Discv5 { @@ -786,11 +790,7 @@ mod tests { pub fn log2_distance(&self, other: &Key) -> Option { let xor_dist = self.distance(other); let log_dist = (256 - xor_dist.0.leading_zeros() as u64); - if log_dist == 0 { - None - } else { - Some(log_dist) - } + (log_dist != 0).then_some(log_dist) } } @@ -807,30 +807,15 @@ mod tests { #[test] fn select_lookup_target() { - // bucket index ceiled to the next multiple of 4 - const fn expected_bucket_index(kbucket_index: usize) -> u64 { - let log2distance = kbucket_index + 1; - let log2distance = log2distance / 8; - ((log2distance + 1) * 8) as u64 - } + for bucket_index in 0..=MAX_KBUCKET_INDEX { + let sk = CombinedKey::generate_secp256k1(); + let local_node_id = discv5::enr::NodeId::from(sk.public()); + let target = get_lookup_target(bucket_index, local_node_id); - let bucket_index = rand::thread_rng().gen_range(0..=MAX_KBUCKET_INDEX); + let local_node_id = sigp::Key::from(local_node_id); + let target = sigp::Key::from(target); - let sk = CombinedKey::generate_secp256k1(); - let local_node_id = discv5::enr::NodeId::from(sk.public()); - let target = get_lookup_target(bucket_index, local_node_id); - - let local_node_id = sigp::Key::from(local_node_id); - let target = sigp::Key::from(target); - - if bucket_index == 0 { - // log2distance undef (inf) - assert!(local_node_id.log2_distance(&target).is_none()) - } else { - assert_eq!( - expected_bucket_index(bucket_index), - local_node_id.log2_distance(&target).unwrap() - ); + assert_eq!(local_node_id.log2_distance(&target), Some(bucket_index as u64 + 1)); } } } From 
223dde200fb8432ae3eae92a7e4606fff0d533dd Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Sun, 21 Apr 2024 11:20:55 +0200 Subject: [PATCH 260/700] fix(tree): disable cached trie updates for chains with >1 block (#7753) Co-authored-by: Matthias Seitz --- crates/blockchain-tree/src/blockchain_tree.rs | 4 +-- crates/blockchain-tree/src/chain.rs | 4 +-- crates/storage/provider/src/chain.rs | 26 ++++--------------- 3 files changed, 9 insertions(+), 25 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index eb699ff1af632..02bae76bb7b7c 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1715,7 +1715,7 @@ mod tests { ); let block2_chain_id = tree.state.block_indices.get_blocks_chain_id(&block2.hash()).unwrap(); let block2_chain = tree.state.chains.get(&block2_chain_id).unwrap(); - assert!(block2_chain.trie_updates().is_some()); + assert!(block2_chain.trie_updates().is_none()); assert_eq!( tree.make_canonical(block2.hash()).unwrap(), @@ -1750,7 +1750,7 @@ mod tests { let block5_chain_id = tree.state.block_indices.get_blocks_chain_id(&block5.hash()).unwrap(); let block5_chain = tree.state.chains.get(&block5_chain_id).unwrap(); - assert!(block5_chain.trie_updates().is_some()); + assert!(block5_chain.trie_updates().is_none()); assert_eq!( tree.make_canonical(block5.hash()).unwrap(), diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index abefae581790b..2444cf24a901b 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -282,7 +282,7 @@ impl AppendableChain { canonical_fork, }; - let (block_state, trie_updates) = Self::validate_and_execute( + let (block_state, _) = Self::validate_and_execute( block.clone(), parent_block, bundle_state_data, @@ -291,7 +291,7 @@ impl AppendableChain { block_validation_kind, )?; // extend the state. 
- self.chain.append_block(block, block_state, trie_updates); + self.chain.append_block(block, block_state); Ok(()) } diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index 4114ba0970068..5acd845997483 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -26,7 +26,8 @@ pub struct Chain { /// This state also contains the individual changes that lead to the current state. state: BundleStateWithReceipts, /// State trie updates after block is added to the chain. - /// NOTE: Currently, trie updates are present only if the block extends canonical chain. + /// NOTE: Currently, trie updates are present only for + /// single-block chains that extend the canonical chain. trie_updates: Option, } @@ -210,15 +211,10 @@ impl Chain { /// Append a single block with state to the chain. /// This method assumes that blocks attachment to the chain has already been validated. - pub fn append_block( - &mut self, - block: SealedBlockWithSenders, - state: BundleStateWithReceipts, - trie_updates: Option, - ) { + pub fn append_block(&mut self, block: SealedBlockWithSenders, state: BundleStateWithReceipts) { self.blocks.insert(block.number, block); self.state.extend(state); - self.append_trie_updates(trie_updates); + self.trie_updates.take(); // reset } /// Merge two chains by appending the given chain into the current one. @@ -238,23 +234,11 @@ impl Chain { // Insert blocks from other chain self.blocks.extend(other.blocks); self.state.extend(other.state); - self.append_trie_updates(other.trie_updates); + self.trie_updates.take(); // reset Ok(()) } - /// Append trie updates. - /// If existing or incoming trie updates are not set, reset as neither is valid anymore. - fn append_trie_updates(&mut self, other_trie_updates: Option) { - if let Some((trie_updates, other)) = self.trie_updates.as_mut().zip(other_trie_updates) { - // Extend trie updates. 
- trie_updates.extend(other); - } else { - // Reset trie updates as they are no longer valid. - self.trie_updates.take(); - } - } - /// Split this chain at the given block. /// /// The given block will be the last block in the first returned chain. From 4f10d377f7b1976c823fa16089bf097779b41ca2 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 21 Apr 2024 12:13:26 +0200 Subject: [PATCH 261/700] chore(deps): weekly `cargo update` (#7778) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 90699db28083c..425b4306d06cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8890,18 +8890,18 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" +checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" +checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" dependencies = [ "proc-macro2", "quote", From 02314927f2a852425b898d3ad4b45103ebefca2b Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Sun, 21 Apr 2024 11:14:30 +0100 Subject: [PATCH 262/700] chore: enable optimism e2e tests (#7776) --- .github/workflows/integration.yml | 4 +- Cargo.lock | 1 + crates/e2e-test-utils/src/wallet.rs | 21 +- crates/optimism/node/Cargo.toml | 1 + .../optimism/node/tests/assets/genesis.json | 192 
+++++++++--------- crates/optimism/node/tests/e2e/p2p.rs | 14 +- 6 files changed, 126 insertions(+), 107 deletions(-) diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 6f1e63a7ccf49..319896154b196 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -47,9 +47,7 @@ jobs: name: Run tests run: | cargo nextest run \ - --locked --features "asm-keccak ${{ matrix.network }}" \ - --workspace --exclude examples --exclude ef-tests node-ethereum \ - -E "kind(test)" + --locked -p reth-node-optimism --features "optimism" sync: name: sync / 100k blocks diff --git a/Cargo.lock b/Cargo.lock index 425b4306d06cc..6be45296b3048 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7009,6 +7009,7 @@ dependencies = [ name = "reth-node-optimism" version = "0.2.0-beta.5" dependencies = [ + "alloy-primitives", "async-trait", "clap", "eyre", diff --git a/crates/e2e-test-utils/src/wallet.rs b/crates/e2e-test-utils/src/wallet.rs index 0428c86753217..43fe7555dc7e6 100644 --- a/crates/e2e-test-utils/src/wallet.rs +++ b/crates/e2e-test-utils/src/wallet.rs @@ -1,29 +1,42 @@ use alloy_network::{eip2718::Encodable2718, EthereumSigner, TransactionBuilder}; -use alloy_rpc_types::TransactionRequest; +use alloy_rpc_types::{TransactionInput, TransactionRequest}; use alloy_signer_wallet::{coins_bip39::English, LocalWallet, MnemonicBuilder}; use reth_primitives::{Address, Bytes, U256}; /// One of the accounts of the genesis allocations. 
pub struct Wallet { inner: LocalWallet, nonce: u64, + chain_id: u64, } impl Wallet { /// Creates a new account from one of the secret/pubkeys of the genesis allocations (test.json) pub(crate) fn new(phrase: &str) -> Self { let inner = MnemonicBuilder::::default().phrase(phrase).build().unwrap(); - Self { inner, nonce: 0 } + Self { inner, chain_id: 1, nonce: 0 } + } + + /// Sets chain id + pub fn with_chain_id(mut self, chain_id: u64) -> Self { + self.chain_id = chain_id; + self } /// Creates a static transfer and signs it pub async fn transfer_tx(&mut self) -> Bytes { + self.tx(None).await + } + + /// Creates a transaction with data and signs it + pub async fn tx(&mut self, data: Option) -> Bytes { let tx = TransactionRequest { nonce: Some(self.nonce), value: Some(U256::from(100)), to: Some(Address::random()), gas_price: Some(20e9 as u128), - gas: Some(21000), - chain_id: Some(1), + gas: Some(210000), + chain_id: Some(self.chain_id), + input: TransactionInput { input: None, data }, ..Default::default() }; self.nonce += 1; diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index afa23a6c64e25..d66aabd5d6a05 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -57,6 +57,7 @@ reth-db.workspace = true reth-revm = { workspace = true, features = ["test-utils"]} reth-e2e-test-utils.workspace = true tokio.workspace = true +alloy-primitives.workspace = true [features] optimism = [ diff --git a/crates/optimism/node/tests/assets/genesis.json b/crates/optimism/node/tests/assets/genesis.json index 2bdfec4309f55..691eefa0469f9 100644 --- a/crates/optimism/node/tests/assets/genesis.json +++ b/crates/optimism/node/tests/assets/genesis.json @@ -1,96 +1,100 @@ { - "config": { - "chainId": 1, - "homesteadBlock": 0, - "daoForkSupport": true, - "eip150Block": 0, - "eip155Block": 0, - "eip158Block": 0, - "byzantiumBlock": 0, - "constantinopleBlock": 0, - "petersburgBlock": 0, - "istanbulBlock": 0, - "muirGlacierBlock": 0, - 
"berlinBlock": 0, - "londonBlock": 0, - "arrowGlacierBlock": 0, - "grayGlacierBlock": 0, - "shanghaiTime": 0, - "cancunTime": 0, - "terminalTotalDifficulty": "0x0", - "terminalTotalDifficultyPassed": true - }, - "nonce": "0x0", - "timestamp": "0x0", - "extraData": "0x00", - "gasLimit": "0x1c9c380", - "difficulty": "0x0", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "coinbase": "0x0000000000000000000000000000000000000000", - "alloc": { - "0x14dc79964da2c08b23698b3d3cc7ca32193d9955": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x15d34aaf54267db7d7c367839aaf71a00a2c6a65": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x1cbd3b2770909d4e10f157cabc84c7264073c9ec": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x23618e81e3f5cdf7f54c3d65f7fbc0abf5b21e8f": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x2546bcd3c84621e976d8185a91a922ae77ecec30": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x3c44cdddb6a900fa2b585dd299e03d12fa4293bc": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x70997970c51812dc3a010c7d01b50e0d17dc79c8": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x71be63f3384f5fb98995898a86b02fb2426c5788": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x8626f6940e2eb28930efb4cef49b2d1f2c9c1199": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x90f79bf6eb2c4f870365e785982e1f101e93b906": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x976ea74026e726554db657fa54763abd0c3a0aa9": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x9965507d1a55bcc2695c58ba16fb37d819b0a4dc": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x9c41de96b2088cdc640c6182dfcf5491dc574a57": { - "balance": "0xd3c21bcecceda1000000" - }, - "0xa0ee7a142d267c1f36714e4a8f75612f20a79720": { - "balance": "0xd3c21bcecceda1000000" - }, - "0xbcd4042de499d14e55001ccbb24a551f3b954096": { - "balance": "0xd3c21bcecceda1000000" - }, - "0xbda5747bfd65f08deb54cb465eb87d40e51b197e": { - "balance": "0xd3c21bcecceda1000000" - }, - 
"0xcd3b766ccdd6ae721141f452c550ca635964ce71": { - "balance": "0xd3c21bcecceda1000000" - }, - "0xdd2fd4581271e230360230f9337d5c0430bf44c0": { - "balance": "0xd3c21bcecceda1000000" - }, - "0xdf3e18d64bc6a983f673ab319ccae4f1a57c7097": { - "balance": "0xd3c21bcecceda1000000" - }, - "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266": { - "balance": "0xd3c21bcecceda1000000" - }, - "0xfabb0ac9d68b0b445fb7357272ff202c5651694a": { - "balance": "0xd3c21bcecceda1000000" - } - }, - "number": "0x0" + "config": { + "chainId": 8453, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "arrowGlacierBlock": 0, + "grayGlacierBlock": 0, + "mergeNetsplitBlock": 0, + "bedrockBlock": 0, + "regolithTime": 0, + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true, + "optimism": { + "eip1559Elasticity": 6, + "eip1559Denominator": 50 + } +}, +"nonce": "0x0", +"timestamp": "0x0", +"extraData": "0x00", +"gasLimit": "0x1c9c380", +"difficulty": "0x0", +"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", +"coinbase": "0x0000000000000000000000000000000000000000", +"alloc": { + "0x14dc79964da2c08b23698b3d3cc7ca32193d9955": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x15d34aaf54267db7d7c367839aaf71a00a2c6a65": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x1cbd3b2770909d4e10f157cabc84c7264073c9ec": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x23618e81e3f5cdf7f54c3d65f7fbc0abf5b21e8f": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x2546bcd3c84621e976d8185a91a922ae77ecec30": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x3c44cdddb6a900fa2b585dd299e03d12fa4293bc": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x70997970c51812dc3a010c7d01b50e0d17dc79c8": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x71be63f3384f5fb98995898a86b02fb2426c5788": { + 
"balance": "0xd3c21bcecceda1000000" + }, + "0x8626f6940e2eb28930efb4cef49b2d1f2c9c1199": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x90f79bf6eb2c4f870365e785982e1f101e93b906": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x976ea74026e726554db657fa54763abd0c3a0aa9": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x9965507d1a55bcc2695c58ba16fb37d819b0a4dc": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x9c41de96b2088cdc640c6182dfcf5491dc574a57": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xa0ee7a142d267c1f36714e4a8f75612f20a79720": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xbcd4042de499d14e55001ccbb24a551f3b954096": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xbda5747bfd65f08deb54cb465eb87d40e51b197e": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xcd3b766ccdd6ae721141f452c550ca635964ce71": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xdd2fd4581271e230360230f9337d5c0430bf44c0": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xdf3e18d64bc6a983f673ab319ccae4f1a57c7097": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xfabb0ac9d68b0b445fb7357272ff202c5651694a": { + "balance": "0xd3c21bcecceda1000000" + } + }, + "number": "0x0" } diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index 5bf36ace982ac..5fe4daa7bfd35 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -8,7 +8,7 @@ use reth::{ }; use reth_e2e_test_utils::{node::NodeHelper, wallet::Wallet}; use reth_node_optimism::node::OptimismNode; -use reth_primitives::{ChainSpecBuilder, Genesis, MAINNET}; +use reth_primitives::{hex, Bytes, ChainSpecBuilder, Genesis, BASE_MAINNET}; #[tokio::test] async fn can_sync() -> eyre::Result<()> { @@ -20,11 +20,12 @@ async fn can_sync() -> eyre::Result<()> { let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); let chain_spec = 
Arc::new( ChainSpecBuilder::default() - .chain(MAINNET.chain) + .chain(BASE_MAINNET.chain) .genesis(genesis) - .cancun_activated() + .ecotone_activated() .build(), ); + let mut wallet = Wallet::default().with_chain_id(chain_spec.chain.into()); let network_config = NetworkArgs { discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, @@ -53,9 +54,6 @@ async fn can_sync() -> eyre::Result<()> { let mut second_node = NodeHelper::new(node).await?; - let mut wallet = Wallet::default(); - let raw_tx = wallet.transfer_tx().await; - // Make them peer first_node.network.add_peer(second_node.network.record()).await; second_node.network.add_peer(first_node.network.record()).await; @@ -64,7 +62,11 @@ async fn can_sync() -> eyre::Result<()> { first_node.network.expect_session().await; second_node.network.expect_session().await; + // Taken from optimism tests + let l1_block_info = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); + // Make the first node advance + let raw_tx = wallet.tx(Some(l1_block_info)).await; let (block_hash, tx_hash) = first_node.advance(raw_tx.clone(), optimism_payload_attributes).await?; From 612cfcbc9c81c632bc2e3d132d12d015e6f1f796 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 22 Apr 2024 11:46:12 +0200 
Subject: [PATCH 263/700] Implement `try_from` rpc tx for `TransactionSignedEcRecovered` (#7752) --- crates/primitives/src/transaction/mod.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 9b07ce0097e5f..775950eea6020 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1822,6 +1822,25 @@ impl IntoRecoveredTransaction for TransactionSignedEcRecovered { } } +impl TryFrom for TransactionSignedEcRecovered { + type Error = ConversionError; + + fn try_from(tx: reth_rpc_types::Transaction) -> Result { + let signature = tx.signature.ok_or(ConversionError::MissingSignature)?; + + TransactionSigned::from_transaction_and_signature( + tx.try_into()?, + Signature { + r: signature.r, + s: signature.s, + odd_y_parity: signature.y_parity.ok_or(ConversionError::MissingYParity)?.0, + }, + ) + .try_into_ecrecovered() + .map_err(|_| ConversionError::InvalidSignature) + } +} + #[cfg(test)] mod tests { use crate::{ From c0926ba10e6fcbb31ae1a603308b8e7956eb5453 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 22 Apr 2024 11:58:19 +0200 Subject: [PATCH 264/700] feat: enable OP hardforks in DEV chainspec (#7787) --- crates/optimism/node/src/txpool.rs | 12 +++--------- crates/primitives/src/chain/spec.rs | 6 ++++++ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 417b65f628dfa..73097ce27d064 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -56,7 +56,6 @@ where // so that we will accept txs into the pool before the first block if block.number == 0 { this.block_info.timestamp.store(block.timestamp, Ordering::Relaxed); - *this.block_info.l1_block_info.write() = Some(Default::default()) } else { this.update_l1_block_info(&block); } @@ -77,7 +76,7 @@ where fn update_l1_block_info(&self, 
block: &Block) { self.block_info.timestamp.store(block.timestamp, Ordering::Relaxed); if let Ok(cost_addition) = reth_revm::optimism::extract_l1_info(block) { - *self.block_info.l1_block_info.write() = Some(cost_addition); + *self.block_info.l1_block_info.write() = cost_addition; } } @@ -109,12 +108,7 @@ where propagate, } = outcome { - let Some(l1_block_info) = self.block_info.l1_block_info.read().clone() else { - return TransactionValidationOutcome::Error( - *valid_tx.hash(), - "L1BlockInfoError".into(), - ) - }; + let l1_block_info = self.block_info.l1_block_info.read().clone(); let mut encoded = Vec::new(); valid_tx.transaction().to_recovered_transaction().encode_enveloped(&mut encoded); @@ -199,7 +193,7 @@ where #[derive(Debug, Default)] pub struct OpL1BlockInfo { /// The current L1 block info. - l1_block_info: RwLock>, + l1_block_info: RwLock, /// Current block timestamp. timestamp: AtomicU64, } diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 356f871906268..6b2a6a4dc9104 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -243,6 +243,12 @@ pub static DEV: Lazy> = Lazy::new(|| { ), (Hardfork::Shanghai, ForkCondition::Timestamp(0)), (Hardfork::Cancun, ForkCondition::Timestamp(0)), + #[cfg(feature = "optimism")] + (Hardfork::Regolith, ForkCondition::Timestamp(0)), + #[cfg(feature = "optimism")] + (Hardfork::Bedrock, ForkCondition::Block(0)), + #[cfg(feature = "optimism")] + (Hardfork::Ecotone, ForkCondition::Timestamp(0)), ]), base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), deposit_contract: None, // TODO: do we even have? 
From ce2f1602a18970e91868f5b5ad59238d01505510 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Mon, 22 Apr 2024 12:00:46 +0200 Subject: [PATCH 265/700] bench: fix `reth-stages` criterion benchmarks (#7786) --- crates/stages/Cargo.toml | 2 +- crates/stages/benches/criterion.rs | 77 +++++++++++++++++++----------- crates/stages/benches/setup/mod.rs | 16 ++++--- 3 files changed, 61 insertions(+), 34 deletions(-) diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 080b7792d5dcf..df98d1dd7a5f8 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -59,7 +59,7 @@ paste.workspace = true tempfile.workspace = true # Stage benchmarks -criterion = { workspace = true, features = ["async_futures"] } +criterion = { workspace = true, features = ["async_tokio"] } # io serde_json.workspace = true diff --git a/crates/stages/benches/criterion.rs b/crates/stages/benches/criterion.rs index 13f7d5386a3b3..98b97462b9a61 100644 --- a/crates/stages/benches/criterion.rs +++ b/crates/stages/benches/criterion.rs @@ -1,8 +1,5 @@ #![allow(missing_docs)] -use criterion::{ - async_executor::FuturesExecutor, criterion_group, criterion_main, measurement::WallTime, - BenchmarkGroup, Criterion, -}; +use criterion::{criterion_main, measurement::WallTime, BenchmarkGroup, Criterion}; #[cfg(not(target_os = "windows"))] use pprof::criterion::{Output, PProfProfiler}; use reth_config::config::EtlConfig; @@ -15,29 +12,37 @@ use reth_stages::{ }; use reth_stages_api::{ExecInput, Stage, StageExt, UnwindInput}; use std::{ops::RangeInclusive, sync::Arc}; +use tokio::runtime::Runtime; mod setup; use setup::StageRange; -#[cfg(not(target_os = "windows"))] -criterion_group! { - name = benches; - config = Criterion::default().with_profiler(PProfProfiler::new(1000, Output::Flamegraph(None))); - targets = transaction_lookup, account_hashing, senders, merkle -} - -#[cfg(target_os = "windows")] -criterion_group! 
{ - name = benches; - config = Criterion::default(); - targets = transaction_lookup, account_hashing, senders, merkle +// Expanded form of `criterion_group!` +// +// This is currently needed to only instantiate the tokio runtime once. +fn benches() { + #[cfg(not(target_os = "windows"))] + let mut criterion = Criterion::default() + .with_profiler(PProfProfiler::new(1000, Output::Flamegraph(None))) + .configure_from_args(); + + let runtime = Runtime::new().unwrap(); + let _guard = runtime.enter(); + + #[cfg(target_os = "windows")] + let mut criterion = Criterion::default().configure_from_args(); + + transaction_lookup(&mut criterion, &runtime); + account_hashing(&mut criterion, &runtime); + senders(&mut criterion, &runtime); + merkle(&mut criterion, &runtime); } criterion_main!(benches); const DEFAULT_NUM_BLOCKS: u64 = 10_000; -fn account_hashing(c: &mut Criterion) { +fn account_hashing(c: &mut Criterion, runtime: &Runtime) { let mut group = c.benchmark_group("Stages"); // don't need to run each stage for that many times @@ -46,25 +51,39 @@ fn account_hashing(c: &mut Criterion) { let num_blocks = 10_000; let (db, stage, range) = setup::prepare_account_hashing(num_blocks); - measure_stage(&mut group, &db, setup::stage_unwind, stage, range, "AccountHashing".to_string()); + measure_stage( + runtime, + &mut group, + &db, + setup::stage_unwind, + stage, + range, + "AccountHashing".to_string(), + ); } -fn senders(c: &mut Criterion) { +fn senders(c: &mut Criterion, runtime: &Runtime) { let mut group = c.benchmark_group("Stages"); + // don't need to run each stage for that many times group.sample_size(10); let db = setup::txs_testdata(DEFAULT_NUM_BLOCKS); - for batch in [1000usize, 10_000, 100_000, 250_000] { - let stage = SenderRecoveryStage { commit_threshold: DEFAULT_NUM_BLOCKS }; - let label = format!("SendersRecovery-batch-{batch}"); + let stage = SenderRecoveryStage { commit_threshold: DEFAULT_NUM_BLOCKS }; - measure_stage(&mut group, &db, setup::stage_unwind, stage, 
0..=DEFAULT_NUM_BLOCKS, label); - } + measure_stage( + runtime, + &mut group, + &db, + setup::stage_unwind, + stage, + 0..=DEFAULT_NUM_BLOCKS, + "SendersRecovery".to_string(), + ); } -fn transaction_lookup(c: &mut Criterion) { +fn transaction_lookup(c: &mut Criterion, runtime: &Runtime) { let mut group = c.benchmark_group("Stages"); // don't need to run each stage for that many times group.sample_size(10); @@ -73,6 +92,7 @@ fn transaction_lookup(c: &mut Criterion) { let db = setup::txs_testdata(DEFAULT_NUM_BLOCKS); measure_stage( + runtime, &mut group, &db, setup::stage_unwind, @@ -82,7 +102,7 @@ fn transaction_lookup(c: &mut Criterion) { ); } -fn merkle(c: &mut Criterion) { +fn merkle(c: &mut Criterion, runtime: &Runtime) { let mut group = c.benchmark_group("Stages"); // don't need to run each stage for that many times group.sample_size(10); @@ -91,6 +111,7 @@ fn merkle(c: &mut Criterion) { let stage = MerkleStage::Both { clean_threshold: u64::MAX }; measure_stage( + runtime, &mut group, &db, setup::unwind_hashes, @@ -101,6 +122,7 @@ fn merkle(c: &mut Criterion) { let stage = MerkleStage::Both { clean_threshold: 0 }; measure_stage( + runtime, &mut group, &db, setup::unwind_hashes, @@ -111,6 +133,7 @@ fn merkle(c: &mut Criterion) { } fn measure_stage( + runtime: &Runtime, group: &mut BenchmarkGroup<'_, WallTime>, db: &TestStageDB, setup: F, @@ -135,7 +158,7 @@ fn measure_stage( let (input, _) = stage_range; group.bench_function(label, move |b| { - b.to_async(FuturesExecutor).iter_with_setup( + b.to_async(runtime).iter_with_setup( || { // criterion setup does not support async, so we have to use our own runtime setup(stage.clone(), db, stage_range) diff --git a/crates/stages/benches/setup/mod.rs b/crates/stages/benches/setup/mod.rs index e94fb81b3bb9a..2151f26c80714 100644 --- a/crates/stages/benches/setup/mod.rs +++ b/crates/stages/benches/setup/mod.rs @@ -21,6 +21,7 @@ use reth_stages::{ }; use reth_trie::StateRoot; use std::{collections::BTreeMap, path::Path, 
sync::Arc}; +use tokio::runtime::Handle; mod constants; @@ -37,12 +38,14 @@ pub(crate) fn stage_unwind>>>( ) { let (_, unwind) = range; - tokio::runtime::Runtime::new().unwrap().block_on(async { - let mut stage = stage.clone(); - let provider = db.factory.provider_rw().unwrap(); + // NOTE(onbjerg): This is unfortunately needed because Criterion does not support async setup + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + let mut stage = stage.clone(); + let provider = db.factory.provider_rw().unwrap(); - // Clear previous run - stage + // Clear previous run + stage .unwind(&provider, unwind) .map_err(|e| { format!( @@ -52,7 +55,8 @@ pub(crate) fn stage_unwind>>>( }) .unwrap(); - provider.commit().unwrap(); + provider.commit().unwrap(); + }) }); } From fa9f23db980de9573a72a8ed5c24955eec5d6799 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 22 Apr 2024 12:04:25 +0200 Subject: [PATCH 266/700] feat(op): signature encoding (#7769) Co-authored-by: Roman Krasiuk --- crates/primitives/src/transaction/mod.rs | 14 ++++++++++--- .../primitives/src/transaction/signature.rs | 20 +++++++++++++++++-- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 775950eea6020..31cb277f00886 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -997,8 +997,9 @@ impl TransactionSignedNoHash { } // pre bedrock system transactions were sent from the zero address as legacy - // transactions with an empty signature Note: this is very hacky and only - // relevant for op-mainnet pre bedrock + // transactions with an empty signature + // + // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock if self.is_legacy() && self.signature == Signature::optimism_deposit_tx_signature() { return Some(Address::ZERO) } @@ -2213,7 +2214,14 @@ mod tests { ); let encoded = &alloy_rlp::encode(signed_tx); - 
assert_eq!(hex!("c98080808080801b8080"), encoded[..]); + assert_eq!( + if cfg!(feature = "optimism") { + hex!("c9808080808080808080") + } else { + hex!("c98080808080801b8080") + }, + &encoded[..] + ); assert_eq!(MIN_LENGTH_LEGACY_TX_ENCODED, encoded.len()); TransactionSigned::decode(&mut &encoded[..]).unwrap(); diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index 200bf6989abe4..84ae2915f621e 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -82,6 +82,14 @@ impl Signature { // EIP-155: v = {0, 1} + CHAIN_ID * 2 + 35 self.odd_y_parity as u64 + chain_id * 2 + 35 } else { + #[cfg(feature = "optimism")] + // pre bedrock system transactions were sent from the zero address as legacy + // transactions with an empty signature + // + // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock + if *self == Self::optimism_deposit_tx_signature() { + return 0 + } self.odd_y_parity as u64 + 27 } } @@ -92,12 +100,20 @@ impl Signature { buf: &mut &[u8], ) -> alloy_rlp::Result<(Self, Option)> { let v = u64::decode(buf)?; - let r = Decodable::decode(buf)?; - let s = Decodable::decode(buf)?; + let r: U256 = Decodable::decode(buf)?; + let s: U256 = Decodable::decode(buf)?; if v < 35 { // non-EIP-155 legacy scheme, v = 27 for even y-parity, v = 28 for odd y-parity if v != 27 && v != 28 { + #[cfg(feature = "optimism")] + // pre bedrock system transactions were sent from the zero address as legacy + // transactions with an empty signature + // + // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock + if v == 0 && r.is_zero() && s.is_zero() { + return Ok((Signature { r, s, odd_y_parity: false }, None)) + } return Err(RlpError::Custom("invalid Ethereum signature (V is not 27 or 28)")) } let odd_y_parity = v == 28; From adcc0643ba42f0df7067db8a34ad096beeccbbe6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 22 Apr 2024 
13:28:13 +0200 Subject: [PATCH 267/700] chore: relax some bounds (#7789) --- crates/stages/src/sets.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/stages/src/sets.rs b/crates/stages/src/sets.rs index a92988af9a283..833f2af8eef03 100644 --- a/crates/stages/src/sets.rs +++ b/crates/stages/src/sets.rs @@ -254,14 +254,14 @@ where /// - [`HistoryIndexingStages`] #[derive(Debug, Default)] #[non_exhaustive] -pub struct OfflineStages { +pub struct OfflineStages { /// Executor factory needs for execution stage pub executor_factory: EF, /// ETL configuration etl_config: EtlConfig, } -impl OfflineStages { +impl OfflineStages { /// Create a new set of offline stages with default values. pub fn new(executor_factory: EF, etl_config: EtlConfig) -> Self { Self { executor_factory, etl_config } @@ -280,12 +280,12 @@ impl StageSet for OfflineStages { /// A set containing all stages that are required to execute pre-existing block data. #[derive(Debug)] #[non_exhaustive] -pub struct ExecutionStages { +pub struct ExecutionStages { /// Executor factory that will create executors. executor_factory: EF, } -impl ExecutionStages { +impl ExecutionStages { /// Create a new set of execution stages with default values. 
pub fn new(executor_factory: EF) -> Self { Self { executor_factory } From cab4420b41806810160d4b6a56e4514923800d8b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 22 Apr 2024 13:56:11 +0200 Subject: [PATCH 268/700] chore(deps): move http crate deps to workspace (#7791) --- Cargo.toml | 2 ++ crates/node-core/Cargo.toml | 2 +- crates/optimism/node/Cargo.toml | 4 ++-- crates/rpc/rpc/Cargo.toml | 4 ++-- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 96e4108293bd8..cea64d99411b0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -349,6 +349,8 @@ futures-util = "0.3.25" hyper = "0.14.25" tower = "0.4" tower-http = "0.4" +http = "0.2.8" +http-body = "0.4.5" # p2p discv5 = { git = "https://github.com/sigp/discv5", rev = "04ac004" } diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index cdf42cee247dc..690258a55037c 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -88,7 +88,7 @@ procfs = "0.16.0" proptest.workspace = true tempfile.workspace = true jsonrpsee.workspace = true -assert_matches = "1.5.0" +assert_matches.workspace = true [features] optimism = [ diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index d66aabd5d6a05..8f10c00d74545 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -35,8 +35,8 @@ revm-primitives.workspace = true # async async-trait.workspace = true hyper.workspace = true -http = "0.2.8" -http-body = "0.4.5" +http.workspace = true +http-body.workspace = true reqwest = { version = "0.11", default-features = false, features = [ "rustls-tls", ]} diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index f62eeaf93ebf9..357309de7b5ba 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -42,8 +42,8 @@ revm-primitives = { workspace = true, features = ["serde"] } # rpc jsonrpsee.workspace = true -http = "0.2.8" -http-body = "0.4.5" +http.workspace = true 
+http-body.workspace = true hyper.workspace = true jsonwebtoken = "8" From cf789dce7e23580b69f51ce639020dc2b31c4c26 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Mon, 22 Apr 2024 14:05:42 +0200 Subject: [PATCH 269/700] chore: use workspace deps in nippy jar (#7792) --- crates/storage/nippy-jar/Cargo.toml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/crates/storage/nippy-jar/Cargo.toml b/crates/storage/nippy-jar/Cargo.toml index 1fed32e27e497..d979759b4873f 100644 --- a/crates/storage/nippy-jar/Cargo.toml +++ b/crates/storage/nippy-jar/Cargo.toml @@ -20,7 +20,10 @@ reth-primitives.workspace = true # filter ph = "0.8.0" -cuckoofilter = { version = "0.5.0", features = ["serde_support", "serde_bytes"] } +cuckoofilter = { version = "0.5.0", features = [ + "serde_support", + "serde_bytes", +] } # compression zstd = { version = "0.13", features = ["experimental", "zdict_builder"] } @@ -31,14 +34,14 @@ sucds = "~0.8" memmap2 = "0.7.1" bincode = "1.3" -serde = { version = "1.0", features = ["derive"] } -tracing = "0.1.0" +serde = { workspace = true, features = ["derive"] } +tracing.workspace = true anyhow = "1.0" thiserror.workspace = true derive_more.workspace = true [dev-dependencies] -rand = { version = "0.8", features = ["small_rng"] } +rand = { workspace = true, features = ["small_rng"] } tempfile.workspace = true From b0cb2d0df63a56bb8e42076b59235b7147050ab4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 22 Apr 2024 14:07:21 +0200 Subject: [PATCH 270/700] chore: remove consensus setup from node-core (#7793) --- Cargo.lock | 1 - crates/node-builder/src/builder.rs | 24 ++++++++++++++++++---- crates/node-core/Cargo.toml | 2 -- crates/node-core/src/init.rs | 7 ++----- crates/node-core/src/node_config.rs | 31 ++--------------------------- 5 files changed, 24 insertions(+), 41 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6be45296b3048..473ec4583242c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6920,7 +6920,6 @@ 
dependencies = [ "procfs", "proptest", "rand 0.8.5", - "reth-auto-seal-consensus", "reth-beacon-consensus", "reth-config", "reth-consensus-common", diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index c9178708e3051..49be32b33bf15 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -13,9 +13,10 @@ use crate::{ use eyre::Context; use futures::{future, future::Either, stream, stream_select, Future, StreamExt}; use rayon::ThreadPoolBuilder; +use reth_auto_seal_consensus::{AutoSealConsensus, MiningMode}; use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, - BeaconConsensusEngine, + BeaconConsensus, BeaconConsensusEngine, }; use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, @@ -28,7 +29,7 @@ use reth_db::{ DatabaseEnv, }; use reth_exex::{ExExContext, ExExHandle, ExExManager, ExExManagerHandle}; -use reth_interfaces::p2p::either::EitherDownloader; +use reth_interfaces::{consensus::Consensus, p2p::either::EitherDownloader}; use reth_network::{NetworkBuilder, NetworkConfig, NetworkEvents, NetworkHandle}; use reth_node_api::{ FullNodeComponents, FullNodeComponentsAdapter, FullNodeTypes, FullNodeTypesAdapter, NodeTypes, @@ -524,7 +525,12 @@ where info!(target: "reth::cli", "\n{}", config.chain.display_hardforks()); - let consensus = config.consensus(); + // setup the consensus instance + let consensus: Arc = if config.dev.dev { + Arc::new(AutoSealConsensus::new(Arc::clone(&config.chain))) + } else { + Arc::new(BeaconConsensus::new(Arc::clone(&config.chain))) + }; debug!(target: "reth::cli", "Spawning stages metrics listener task"); let (sync_metrics_tx, sync_metrics_rx) = unbounded_channel(); @@ -721,7 +727,17 @@ where info!(target: "reth::cli", "Allocated Genesis Account: {:02}. 
{} ({} ETH)", idx, address.to_string(), format_ether(alloc.balance)); } - let mining_mode = config.mining_mode(transaction_pool.pending_transactions_listener()); + // install auto-seal + let pending_transactions_listener = transaction_pool.pending_transactions_listener(); + + let mining_mode = if let Some(interval) = config.dev.block_time { + MiningMode::interval(interval) + } else if let Some(max_transactions) = config.dev.block_max_transactions { + MiningMode::instant(max_transactions, pending_transactions_listener) + } else { + info!(target: "reth::cli", "No mining mode specified, defaulting to ReadyTransaction"); + MiningMode::instant(1, pending_transactions_listener) + }; let (_, client, mut task) = reth_auto_seal_consensus::AutoSealBuilder::new( Arc::clone(&config.chain), diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index 690258a55037c..d6df37f09c331 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -33,7 +33,6 @@ reth-evm.workspace = true reth-engine-primitives.workspace = true reth-tasks.workspace = true reth-consensus-common.workspace = true -reth-auto-seal-consensus.workspace = true reth-beacon-consensus.workspace = true # ethereum @@ -98,7 +97,6 @@ optimism = [ "reth-rpc-engine-api/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", - "reth-auto-seal-consensus/optimism", "reth-consensus-common/optimism", "reth-beacon-consensus/optimism", ] diff --git a/crates/node-core/src/init.rs b/crates/node-core/src/init.rs index b0a9c9c92080b..7f529c2b0b40a 100644 --- a/crates/node-core/src/init.rs +++ b/crates/node-core/src/init.rs @@ -235,10 +235,7 @@ pub fn insert_genesis_header( #[cfg(test)] mod tests { - use std::sync::Arc; - use super::*; - use reth_db::{ cursor::DbCursorRO, models::{storage_sharded_key::StorageShardedKey, ShardedKey}, @@ -246,8 +243,8 @@ mod tests { DatabaseEnv, }; use reth_primitives::{ - Address, Chain, ChainSpec, ForkTimestamps, Genesis, GenesisAccount, IntegerList, 
GOERLI, - GOERLI_GENESIS_HASH, MAINNET, MAINNET_GENESIS_HASH, SEPOLIA, SEPOLIA_GENESIS_HASH, + Chain, ForkTimestamps, Genesis, IntegerList, GOERLI, GOERLI_GENESIS_HASH, MAINNET, + MAINNET_GENESIS_HASH, SEPOLIA, SEPOLIA_GENESIS_HASH, }; use reth_provider::test_utils::create_test_provider_factory_with_chain_spec; diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 929f2f552018b..2b186b19c3efc 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -12,15 +12,13 @@ use crate::{ use discv5::ListenConfig; use metrics_exporter_prometheus::PrometheusHandle; use once_cell::sync::Lazy; -use reth_auto_seal_consensus::{AutoSealConsensus, MiningMode}; -use reth_beacon_consensus::BeaconConsensus; use reth_config::{config::PruneConfig, Config}; use reth_db::{database::Database, database_metrics::DatabaseMetrics}; -use reth_interfaces::{consensus::Consensus, p2p::headers::client::HeadersClient, RethResult}; +use reth_interfaces::{p2p::headers::client::HeadersClient, RethResult}; use reth_network::{NetworkBuilder, NetworkConfig, NetworkManager}; use reth_primitives::{ constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, kzg::KzgSettings, stage::StageId, - BlockHashOrNumber, BlockNumber, ChainSpec, Head, SealedHeader, TxHash, B256, MAINNET, + BlockHashOrNumber, BlockNumber, ChainSpec, Head, SealedHeader, B256, MAINNET, }; use reth_provider::{ providers::StaticFileProvider, BlockHashReader, BlockNumReader, HeaderProvider, @@ -29,7 +27,6 @@ use reth_provider::{ use reth_tasks::TaskExecutor; use secp256k1::SecretKey; use std::{net::SocketAddr, path::PathBuf, sync::Arc}; -use tokio::sync::mpsc::Receiver; use tracing::*; /// The default prometheus recorder handle. 
We use a global static to ensure that it is only @@ -291,18 +288,6 @@ impl NodeConfig { Ok(max_block) } - /// Get the [MiningMode] from the given dev args - pub fn mining_mode(&self, pending_transactions_listener: Receiver) -> MiningMode { - if let Some(interval) = self.dev.block_time { - MiningMode::interval(interval) - } else if let Some(max_transactions) = self.dev.block_max_transactions { - MiningMode::instant(max_transactions, pending_transactions_listener) - } else { - info!(target: "reth::cli", "No mining mode specified, defaulting to ReadyTransaction"); - MiningMode::instant(1, pending_transactions_listener) - } - } - /// Create the [NetworkConfig] for the node pub fn network_config( &self, @@ -337,18 +322,6 @@ impl NodeConfig { Ok(builder) } - /// Returns the [Consensus] instance to use. - /// - /// By default this will be a [BeaconConsensus] instance, but if the `--dev` flag is set, it - /// will be an [AutoSealConsensus] instance. - pub fn consensus(&self) -> Arc { - if self.dev.dev { - Arc::new(AutoSealConsensus::new(Arc::clone(&self.chain))) - } else { - Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))) - } - } - /// Loads 'MAINNET_KZG_TRUSTED_SETUP' pub fn kzg_settings(&self) -> eyre::Result> { Ok(Arc::clone(&MAINNET_KZG_TRUSTED_SETUP)) From 8382529f7a1794e0516fbe7fe623295bb4e34557 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Mon, 22 Apr 2024 14:09:10 +0200 Subject: [PATCH 271/700] chore: pull async-stream into workspace deps (#7794) --- Cargo.toml | 1 + crates/net/eth-wire-types/Cargo.toml | 15 ++++++++++++--- crates/net/eth-wire/Cargo.toml | 15 ++++++++++++--- 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index cea64d99411b0..1e016ab87f1a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -342,6 +342,7 @@ tokio = { version = "1.21", default-features = false } tokio-util = { version = "0.7.4", features = ["codec"] } # async +async-stream = "0.3" async-trait = "0.1.68" futures = "0.3.26" pin-project = 
"1.0.12" diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index 7e9365d481e7a..d68fbbd1f0c46 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -35,15 +35,24 @@ reth-tracing.workspace = true test-fuzz.workspace = true tokio-util = { workspace = true, features = ["io", "codec"] } rand.workspace = true -secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } +secp256k1 = { workspace = true, features = [ + "global-context", + "rand-std", + "recovery", +] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-derive.workspace = true -async-stream = "0.3" +async-stream.workspace = true [features] default = ["serde"] serde = ["dep:serde"] -arbitrary = ["reth-primitives/arbitrary", "dep:arbitrary", "dep:proptest", "dep:proptest-derive"] +arbitrary = [ + "reth-primitives/arbitrary", + "dep:arbitrary", + "dep:proptest", + "dep:proptest-derive", +] optimism = ["reth-primitives/optimism"] diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index f5b157e7362dc..cddc84cf9d77c 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -49,17 +49,26 @@ reth-tracing.workspace = true test-fuzz.workspace = true tokio-util = { workspace = true, features = ["io", "codec"] } rand.workspace = true -secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } +secp256k1 = { workspace = true, features = [ + "global-context", + "rand-std", + "recovery", +] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-derive.workspace = true -async-stream = "0.3" +async-stream.workspace = true [features] default = ["serde"] serde = ["dep:serde"] -arbitrary = ["reth-primitives/arbitrary", "dep:arbitrary", "dep:proptest", "dep:proptest-derive"] +arbitrary = [ + "reth-primitives/arbitrary", + "dep:arbitrary", + "dep:proptest", + 
"dep:proptest-derive", +] optimism = ["reth-primitives/optimism"] [[test]] From 31801c7bc58b7c211b92da5e0f66a92ccd0921d2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 22 Apr 2024 14:12:31 +0200 Subject: [PATCH 272/700] chore: flatten beacon-consensus test deps (#7796) --- Cargo.lock | 3 ++- crates/consensus/beacon/Cargo.toml | 3 ++- crates/consensus/beacon/src/engine/test_utils.rs | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 473ec4583242c..d4913d7a9c97a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6162,9 +6162,10 @@ dependencies = [ "reth-db", "reth-downloaders", "reth-engine-primitives", + "reth-ethereum-engine-primitives", + "reth-evm-ethereum", "reth-interfaces", "reth-metrics", - "reth-node-ethereum", "reth-payload-builder", "reth-payload-validator", "reth-primitives", diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index f195c98bd9508..439002ec5fa3b 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -54,7 +54,8 @@ reth-rpc-types-compat.workspace = true reth-tracing.workspace = true reth-revm.workspace = true reth-downloaders.workspace = true -reth-node-ethereum.workspace = true +reth-evm-ethereum.workspace = true +reth-ethereum-engine-primitives.workspace = true reth-config.workspace = true assert_matches.workspace = true diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 42f85282c64f7..329ea644666fc 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -8,6 +8,8 @@ use reth_blockchain_tree::{ }; use reth_config::config::EtlConfig; use reth_db::{test_utils::TempDatabase, DatabaseEnv as DE}; +use reth_ethereum_engine_primitives::EthEngineTypes; +use reth_evm_ethereum::EthEvmConfig; type DatabaseEnv = TempDatabase; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, @@ -20,7 
+22,6 @@ use reth_interfaces::{ sync::NoopSyncStateUpdater, test_utils::{NoopFullBlockClient, TestConsensus}, }; -use reth_node_ethereum::{EthEngineTypes, EthEvmConfig}; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::{BlockNumber, ChainSpec, FinishedExExHeight, PruneModes, B256}; use reth_provider::{ From 250da4e449d1872625fa6d8b0a943ceb0f6b8379 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Mon, 22 Apr 2024 14:17:59 +0200 Subject: [PATCH 273/700] chore: ask for debug logs in bug template (#7795) --- .github/ISSUE_TEMPLATE/bug.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index 5f355177b6eaa..bfb81f1b7e5fb 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -36,7 +36,14 @@ body: id: logs attributes: label: Node logs - description: If applicable, please provide the node logs leading up to the bug. + description: | + If applicable, please provide the node logs leading up to the bug. 
+ + **Please also provide debug logs.** By default, these can be found in: + + - `~/.cache/reth/logs` on Linux + - `~/Library/Caches/reth/logs` on macOS + - `%localAppData%/reth/logs` on Windows render: text validations: required: false From 24a82024811dc468ef2050b0a4fc97b0efcf2a4a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 22 Apr 2024 15:34:31 +0200 Subject: [PATCH 274/700] fix(cli): read exact chunk len (#7777) --- crates/net/downloaders/src/file_client.rs | 94 ++++++++++++++++------- 1 file changed, 66 insertions(+), 28 deletions(-) diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index ce830383f0d37..7d29cc577fe14 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -16,9 +16,11 @@ use thiserror::Error; use tokio::{fs::File, io::AsyncReadExt}; use tokio_stream::StreamExt; use tokio_util::codec::FramedRead; -use tracing::{trace, warn}; +use tracing::{debug, trace, warn}; -/// Byte length of chunk to read from chain file. +/// Default byte length of chunk to read from chain file. +/// +/// Default is 1 GB. pub const DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE: u64 = 1_000_000_000; /// Front-end API for fetching chain data from a file. 
@@ -70,7 +72,7 @@ impl FileClient { let file_len = metadata.len(); let mut reader = vec![]; - file.read_to_end(&mut reader).await.unwrap(); + file.read_to_end(&mut reader).await?; Ok(Self::from_reader(&reader[..], file_len).await?.0) } @@ -87,9 +89,15 @@ impl FileClient { let mut hash_to_number = HashMap::new(); let mut bodies = HashMap::new(); - // use with_capacity to make sure the internal buffer contains the entire file + // use with_capacity to make sure the internal buffer contains the entire chunk let mut stream = FramedRead::with_capacity(reader, BlockFileCodec, num_bytes as usize); + trace!(target: "downloaders::file", + target_num_bytes=num_bytes, + capacity=stream.read_buffer().capacity(), + "init decode stream" + ); + let mut remaining_bytes = vec![]; let mut log_interval = 0; @@ -98,7 +106,12 @@ impl FileClient { while let Some(block_res) = stream.next().await { let block = match block_res { Ok(block) => block, - Err(FileClientError::Rlp(_err, bytes)) => { + Err(FileClientError::Rlp(err, bytes)) => { + trace!(target: "downloaders::file", + %err, + bytes_len=bytes.len(), + "partial block returned from decoding chunk" + ); remaining_bytes = bytes; break } @@ -135,7 +148,7 @@ impl FileClient { log_interval += 1; } - trace!(blocks = headers.len(), "Initialized file client"); + trace!(target: "downloaders::file", blocks = headers.len(), "Initialized file client"); Ok((Self { headers, hash_to_number, bodies }, remaining_bytes)) } @@ -204,6 +217,16 @@ impl FileClient { } self } + + /// Returns the current number of headers in the client. + pub fn headers_len(&self) -> usize { + self.headers.len() + } + + /// Returns the current number of bodies in the client. + pub fn bodies_len(&self) -> usize { + self.bodies.len() + } } impl HeadersClient for FileClient { @@ -297,8 +320,8 @@ impl DownloadClient for FileClient { pub struct ChunkedFileReader { /// File to read from. file: File, - /// Current file length. - file_len: u64, + /// Current file byte length. 
+ file_byte_len: u64, /// Bytes that have been read. chunk: Vec, /// Max bytes per chunk. @@ -322,20 +345,20 @@ impl ChunkedFileReader { pub async fn from_file(file: File, chunk_byte_len: u64) -> Result { // get file len from metadata before reading let metadata = file.metadata().await?; - let file_len = metadata.len(); + let file_byte_len = metadata.len(); - Ok(Self { file, file_len, chunk: vec![], chunk_byte_len }) + Ok(Self { file, file_byte_len, chunk: vec![], chunk_byte_len }) } /// Calculates the number of bytes to read from the chain file. Returns a tuple of the chunk /// length and the remaining file length. fn chunk_len(&self) -> u64 { - let Self { chunk_byte_len, file_len, .. } = *self; - let file_len = file_len + self.chunk.len() as u64; + let Self { chunk_byte_len, file_byte_len, .. } = *self; + let file_byte_len = file_byte_len + self.chunk.len() as u64; - if chunk_byte_len > file_len { + if chunk_byte_len > file_byte_len { // last chunk - file_len + file_byte_len } else { chunk_byte_len } @@ -343,37 +366,52 @@ impl ChunkedFileReader { /// Read next chunk from file. Returns [`FileClient`] containing decoded chunk. pub async fn next_chunk(&mut self) -> Result, FileClientError> { - if self.file_len == 0 && self.chunk.is_empty() { + if self.file_byte_len == 0 && self.chunk.is_empty() { // eof return Ok(None) } - let chunk_len = self.chunk_len(); + let chunk_target_len = self.chunk_len(); let old_bytes_len = self.chunk.len() as u64; // calculate reserved space in chunk - let new_bytes_len = chunk_len - old_bytes_len; + let new_read_bytes_target_len = chunk_target_len - old_bytes_len; // read new bytes from file - let mut reader = BytesMut::with_capacity(new_bytes_len as usize); - self.file.read_buf(&mut reader).await?; + let mut reader = BytesMut::zeroed(new_read_bytes_target_len as usize); + // actual bytes that have been read + let new_read_bytes_len = self.file.read_exact(&mut reader).await? 
as u64; // update remaining file length - self.file_len -= new_bytes_len; + self.file_byte_len -= new_read_bytes_len; - trace!(target: "downloaders::file", - max_chunk_byte_len=self.chunk_byte_len, - prev_read_bytes_len=self.chunk.len(), - new_bytes_len, - remaining_file_byte_len=self.file_len, - "new bytes were read from file" - ); + let prev_read_bytes_len = self.chunk.len(); // read new bytes from file into chunk self.chunk.extend_from_slice(&reader[..]); + let next_chunk_byte_len = self.chunk.len(); + + debug!(target: "downloaders::file", + max_chunk_byte_len=self.chunk_byte_len, + prev_read_bytes_len, + new_read_bytes_target_len, + new_read_bytes_len, + reader_capacity=reader.capacity(), + next_chunk_byte_len, + remaining_file_byte_len=self.file_byte_len, + "new bytes were read from file" + ); // make new file client from chunk - let (file_client, bytes) = FileClient::from_reader(&self.chunk[..], chunk_len).await?; + let (file_client, bytes) = + FileClient::from_reader(&self.chunk[..], next_chunk_byte_len as u64).await?; + + debug!(target: "downloaders::file", + headers_len=file_client.headers.len(), + bodies_len=file_client.bodies.len(), + remaining_bytes_len=bytes.len(), + "parsed blocks that were read from file" + ); // save left over bytes self.chunk = bytes; From 9a1d6ea9ca7d6522b7610b65871eb562ff080e67 Mon Sep 17 00:00:00 2001 From: Darshan Kathiriya <8559992+lakshya-sky@users.noreply.github.com> Date: Mon, 22 Apr 2024 11:46:56 -0400 Subject: [PATCH 275/700] feat: base fee param fetching at a specific block (#7783) --- crates/consensus/auto-seal/src/lib.rs | 7 ++- .../ethereum/engine-primitives/src/payload.rs | 5 ++- crates/payload/optimism/src/payload.rs | 2 +- crates/primitives/src/chain/spec.rs | 44 +++++++++++++++---- crates/primitives/src/header.rs | 25 ++++++----- crates/rpc/rpc/src/eth/api/fee_history.rs | 2 +- crates/rpc/rpc/src/eth/api/fees.rs | 2 +- crates/rpc/rpc/src/eth/api/mod.rs | 5 ++- crates/transaction-pool/src/maintain.rs | 10 +++-- 9 
files changed, 69 insertions(+), 33 deletions(-) diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 59efc0d48a5e9..62a2936645a28 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -277,10 +277,9 @@ impl StorageInner { let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); // check previous block for base fee - let base_fee_per_gas = self - .headers - .get(&self.best_block) - .and_then(|parent| parent.next_block_base_fee(chain_spec.base_fee_params(timestamp))); + let base_fee_per_gas = self.headers.get(&self.best_block).and_then(|parent| { + parent.next_block_base_fee(chain_spec.base_fee_params_at_timestamp(timestamp)) + }); let mut header = Header { parent_hash: self.best_hash, diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 6e8c820a7e4d2..a6c47ebde9105 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -245,7 +245,8 @@ impl PayloadBuilderAttributes for EthPayloadBuilderAttributes { }) .map(BlobExcessGasAndPrice::new); - let mut basefee = parent.next_block_base_fee(chain_spec.base_fee_params(self.timestamp())); + let mut basefee = + parent.next_block_base_fee(chain_spec.base_fee_params_at_timestamp(self.timestamp())); let mut gas_limit = U256::from(parent.gas_limit); @@ -253,7 +254,7 @@ impl PayloadBuilderAttributes for EthPayloadBuilderAttributes { // elasticity multiplier to get the new gas limit. 
if chain_spec.fork(Hardfork::London).transitions_at_block(parent.number + 1) { let elasticity_multiplier = - chain_spec.base_fee_params(self.timestamp()).elasticity_multiplier; + chain_spec.base_fee_params_at_timestamp(self.timestamp()).elasticity_multiplier; // multiply the gas limit by the elasticity multiplier gas_limit *= U256::from(elasticity_multiplier); diff --git a/crates/payload/optimism/src/payload.rs b/crates/payload/optimism/src/payload.rs index 0e4b0c82615a5..d753370fd2b73 100644 --- a/crates/payload/optimism/src/payload.rs +++ b/crates/payload/optimism/src/payload.rs @@ -142,7 +142,7 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { // calculate basefee based on parent block's gas usage basefee: U256::from( parent - .next_block_base_fee(chain_spec.base_fee_params(self.timestamp())) + .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(self.timestamp())) .unwrap_or_default(), ), // calculate excess gas based on parent block's blob gas usage diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 6b2a6a4dc9104..ee732a9bcbb43 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -661,10 +661,10 @@ impl ChainSpec { } /// Get the [BaseFeeParams] for the chain at the given timestamp. - pub fn base_fee_params(&self, timestamp: u64) -> BaseFeeParams { + pub fn base_fee_params_at_timestamp(&self, timestamp: u64) -> BaseFeeParams { match self.base_fee_params { BaseFeeParamsKind::Constant(bf_params) => bf_params, - BaseFeeParamsKind::Variable(ForkBaseFeeParams { 0: ref bf_params }) => { + BaseFeeParamsKind::Variable(ForkBaseFeeParams(ref bf_params)) => { // Walk through the base fee params configuration in reverse order, and return the // first one that corresponds to a hardfork that is active at the // given timestamp. 
@@ -679,6 +679,25 @@ impl ChainSpec { } } + /// Get the [BaseFeeParams] for the chain at the given block number + pub fn base_fee_params_at_block(&self, block_number: u64) -> BaseFeeParams { + match self.base_fee_params { + BaseFeeParamsKind::Constant(bf_params) => bf_params, + BaseFeeParamsKind::Variable(ForkBaseFeeParams(ref bf_params)) => { + // Walk through the base fee params configuration in reverse order, and return the + // first one that corresponds to a hardfork that is active at the + // given timestamp. + for (fork, params) in bf_params.iter().rev() { + if self.is_fork_active_at_block(*fork, block_number) { + return *params + } + } + + bf_params.first().map(|(_, params)| *params).unwrap_or(BaseFeeParams::ethereum()) + } + } + } + /// Get the hash of the genesis block. pub fn genesis_hash(&self) -> B256 { self.genesis_hash.unwrap_or_else(|| self.genesis_header().hash_slow()) @@ -770,6 +789,12 @@ impl ChainSpec { self.fork(fork).active_at_timestamp(timestamp) } + /// Convenience method to check if a fork is active at a given block number + #[inline] + pub fn is_fork_active_at_block(&self, fork: Hardfork, block_number: u64) -> bool { + self.fork(fork).active_at_block(block_number) + } + /// Convenience method to check if [Hardfork::Shanghai] is active at a given timestamp. 
#[inline] pub fn is_shanghai_active_at_timestamp(&self, timestamp: u64) -> bool { @@ -3168,8 +3193,9 @@ Post-merge hard forks (timestamp based): genesis.hash_slow(), b256!("f712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd") ); - let base_fee = - genesis.next_block_base_fee(BASE_MAINNET.base_fee_params(genesis.timestamp)).unwrap(); + let base_fee = genesis + .next_block_base_fee(BASE_MAINNET.base_fee_params_at_timestamp(genesis.timestamp)) + .unwrap(); // assert_eq!(base_fee, 980000000); } @@ -3182,8 +3208,9 @@ Post-merge hard forks (timestamp based): genesis.hash_slow(), b256!("0dcc9e089e30b90ddfc55be9a37dd15bc551aeee999d2e2b51414c54eaf934e4") ); - let base_fee = - genesis.next_block_base_fee(BASE_SEPOLIA.base_fee_params(genesis.timestamp)).unwrap(); + let base_fee = genesis + .next_block_base_fee(BASE_SEPOLIA.base_fee_params_at_timestamp(genesis.timestamp)) + .unwrap(); // assert_eq!(base_fee, 980000000); } @@ -3196,8 +3223,9 @@ Post-merge hard forks (timestamp based): genesis.hash_slow(), b256!("102de6ffb001480cc9b8b548fd05c34cd4f46ae4aa91759393db90ea0409887d") ); - let base_fee = - genesis.next_block_base_fee(OP_SEPOLIA.base_fee_params(genesis.timestamp)).unwrap(); + let base_fee = genesis + .next_block_base_fee(OP_SEPOLIA.base_fee_params_at_timestamp(genesis.timestamp)) + .unwrap(); // assert_eq!(base_fee, 980000000); } diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index 3846679dc2598..899fcb368d54e 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -700,7 +700,8 @@ impl SealedHeader { let parent_gas_limit = if chain_spec.fork(Hardfork::London).transitions_at_block(self.number) { parent.gas_limit * - chain_spec.base_fee_params(self.timestamp).elasticity_multiplier as u64 + chain_spec.base_fee_params_at_timestamp(self.timestamp).elasticity_multiplier + as u64 } else { parent.gas_limit }; @@ -801,16 +802,18 @@ impl SealedHeader { if 
chain_spec.fork(Hardfork::London).active_at_block(self.number) { let base_fee = self.base_fee_per_gas.ok_or(HeaderValidationError::BaseFeeMissing)?; - let expected_base_fee = - if chain_spec.fork(Hardfork::London).transitions_at_block(self.number) { - constants::EIP1559_INITIAL_BASE_FEE - } else { - // This BaseFeeMissing will not happen as previous blocks are checked to have - // them. - parent - .next_block_base_fee(chain_spec.base_fee_params(self.timestamp)) - .ok_or(HeaderValidationError::BaseFeeMissing)? - }; + let expected_base_fee = if chain_spec + .fork(Hardfork::London) + .transitions_at_block(self.number) + { + constants::EIP1559_INITIAL_BASE_FEE + } else { + // This BaseFeeMissing will not happen as previous blocks are checked to have + // them. + parent + .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(self.timestamp)) + .ok_or(HeaderValidationError::BaseFeeMissing)? + }; if expected_base_fee != base_fee { return Err(HeaderValidationError::BaseFeeDiff(GotExpected { expected: expected_base_fee, diff --git a/crates/rpc/rpc/src/eth/api/fee_history.rs b/crates/rpc/rpc/src/eth/api/fee_history.rs index 4029dc7f91bf9..487dade17859d 100644 --- a/crates/rpc/rpc/src/eth/api/fee_history.rs +++ b/crates/rpc/rpc/src/eth/api/fee_history.rs @@ -374,7 +374,7 @@ impl FeeHistoryEntry { self.gas_used as u128, self.gas_limit as u128, self.base_fee_per_gas as u128, - chain_spec.base_fee_params(self.timestamp), + chain_spec.base_fee_params_at_timestamp(self.timestamp), ) as u64 } diff --git a/crates/rpc/rpc/src/eth/api/fees.rs b/crates/rpc/rpc/src/eth/api/fees.rs index d93b83d8909ce..da2b846a37427 100644 --- a/crates/rpc/rpc/src/eth/api/fees.rs +++ b/crates/rpc/rpc/src/eth/api/fees.rs @@ -188,7 +188,7 @@ where // The unwrap is safe since we checked earlier that we got at least 1 header. 
let last_header = headers.last().expect("is present"); base_fee_per_gas.push( - self.provider().chain_spec().base_fee_params(last_header.timestamp).next_block_base_fee( + self.provider().chain_spec().base_fee_params_at_timestamp(last_header.timestamp).next_block_base_fee( last_header.gas_used as u128, last_header.gas_limit as u128, last_header.base_fee_per_gas.unwrap_or_default() as u128, diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index 9044d5ccaacce..c23dfe1acd0c5 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -292,8 +292,9 @@ where // base fee of the child block let chain_spec = self.provider().chain_spec(); - latest_header.base_fee_per_gas = latest_header - .next_block_base_fee(chain_spec.base_fee_params(latest_header.timestamp)); + latest_header.base_fee_per_gas = latest_header.next_block_base_fee( + chain_spec.base_fee_params_at_timestamp(latest_header.timestamp), + ); // update excess blob gas consumed above target latest_header.excess_blob_gas = latest_header.next_block_excess_blob_gas(); diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index e2bdd44c22f68..52f39cd360f25 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -109,7 +109,7 @@ pub async fn maintain_transaction_pool( last_seen_block_hash: latest.hash(), last_seen_block_number: latest.number, pending_basefee: latest - .next_block_base_fee(chain_spec.base_fee_params(latest.timestamp + 12)) + .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(latest.timestamp + 12)) .unwrap_or_default(), pending_blob_fee: latest.next_block_blob_fee(), }; @@ -265,7 +265,9 @@ pub async fn maintain_transaction_pool( // fees for the next block: `new_tip+1` let pending_block_base_fee = new_tip - .next_block_base_fee(chain_spec.base_fee_params(new_tip.timestamp + 12)) + .next_block_base_fee( + 
chain_spec.base_fee_params_at_timestamp(new_tip.timestamp + 12), + ) .unwrap_or_default(); let pending_block_blob_fee = new_tip.next_block_blob_fee(); @@ -370,7 +372,9 @@ pub async fn maintain_transaction_pool( // fees for the next block: `tip+1` let pending_block_base_fee = tip - .next_block_base_fee(chain_spec.base_fee_params(tip.timestamp + 12)) + .next_block_base_fee( + chain_spec.base_fee_params_at_timestamp(tip.timestamp + 12), + ) .unwrap_or_default(); let pending_block_blob_fee = tip.next_block_blob_fee(); From 9179e4e8ac1c19442b094f0837b6106f9060783f Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 22 Apr 2024 12:53:39 -0400 Subject: [PATCH 276/700] feat(rpc-engine-api): add engine API response type metrics (#7801) --- crates/rpc/rpc-engine-api/src/engine_api.rs | 32 ++++--- crates/rpc/rpc-engine-api/src/metrics.rs | 100 +++++++++++++++++++- 2 files changed, 116 insertions(+), 16 deletions(-) diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index d84b6ed22e79d..7fc52b21c1478 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -47,7 +47,7 @@ struct EngineApiInner { payload_store: PayloadStore, /// For spawning and executing async tasks task_spawner: Box, - /// The metrics for engine api calls + /// The latency and response type metrics for engine api calls metrics: EngineApiMetrics, } @@ -491,7 +491,8 @@ where trace!(target: "rpc::engine", "Serving engine_newPayloadV1"); let start = Instant::now(); let res = EngineApi::new_payload_v1(self, payload).await; - self.inner.metrics.new_payload_v1.record(start.elapsed()); + self.inner.metrics.latency.new_payload_v1.record(start.elapsed()); + self.inner.metrics.new_payload_response.update_response_metrics(&res); Ok(res?) 
} @@ -501,7 +502,8 @@ where trace!(target: "rpc::engine", "Serving engine_newPayloadV2"); let start = Instant::now(); let res = EngineApi::new_payload_v2(self, payload).await; - self.inner.metrics.new_payload_v2.record(start.elapsed()); + self.inner.metrics.latency.new_payload_v2.record(start.elapsed()); + self.inner.metrics.new_payload_response.update_response_metrics(&res); Ok(res?) } @@ -518,7 +520,8 @@ where let res = EngineApi::new_payload_v3(self, payload, versioned_hashes, parent_beacon_block_root) .await; - self.inner.metrics.new_payload_v3.record(start.elapsed()); + self.inner.metrics.latency.new_payload_v3.record(start.elapsed()); + self.inner.metrics.new_payload_response.update_response_metrics(&res); Ok(res?) } @@ -535,7 +538,8 @@ where let start = Instant::now(); let res = EngineApi::fork_choice_updated_v1(self, fork_choice_state, payload_attributes).await; - self.inner.metrics.fork_choice_updated_v1.record(start.elapsed()); + self.inner.metrics.latency.fork_choice_updated_v1.record(start.elapsed()); + self.inner.metrics.fcu_response.update_response_metrics(&res); Ok(res?) } @@ -550,7 +554,8 @@ where let start = Instant::now(); let res = EngineApi::fork_choice_updated_v2(self, fork_choice_state, payload_attributes).await; - self.inner.metrics.fork_choice_updated_v2.record(start.elapsed()); + self.inner.metrics.latency.fork_choice_updated_v2.record(start.elapsed()); + self.inner.metrics.fcu_response.update_response_metrics(&res); Ok(res?) } @@ -566,7 +571,8 @@ where let start = Instant::now(); let res = EngineApi::fork_choice_updated_v3(self, fork_choice_state, payload_attributes).await; - self.inner.metrics.fork_choice_updated_v3.record(start.elapsed()); + self.inner.metrics.latency.fork_choice_updated_v3.record(start.elapsed()); + self.inner.metrics.fcu_response.update_response_metrics(&res); Ok(res?) 
} @@ -588,7 +594,7 @@ where trace!(target: "rpc::engine", "Serving engine_getPayloadV1"); let start = Instant::now(); let res = EngineApi::get_payload_v1(self, payload_id).await; - self.inner.metrics.get_payload_v1.record(start.elapsed()); + self.inner.metrics.latency.get_payload_v1.record(start.elapsed()); Ok(res?) } @@ -608,7 +614,7 @@ where trace!(target: "rpc::engine", "Serving engine_getPayloadV2"); let start = Instant::now(); let res = EngineApi::get_payload_v2(self, payload_id).await; - self.inner.metrics.get_payload_v2.record(start.elapsed()); + self.inner.metrics.latency.get_payload_v2.record(start.elapsed()); Ok(res?) } @@ -628,7 +634,7 @@ where trace!(target: "rpc::engine", "Serving engine_getPayloadV3"); let start = Instant::now(); let res = EngineApi::get_payload_v3(self, payload_id).await; - self.inner.metrics.get_payload_v3.record(start.elapsed()); + self.inner.metrics.latency.get_payload_v3.record(start.elapsed()); Ok(res?) } @@ -641,7 +647,7 @@ where trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByHashV1"); let start = Instant::now(); let res = EngineApi::get_payload_bodies_by_hash(self, block_hashes); - self.inner.metrics.get_payload_bodies_by_hash_v1.record(start.elapsed()); + self.inner.metrics.latency.get_payload_bodies_by_hash_v1.record(start.elapsed()); Ok(res?) } @@ -669,7 +675,7 @@ where trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByRangeV1"); let start_time = Instant::now(); let res = EngineApi::get_payload_bodies_by_range(self, start.to(), count.to()).await; - self.inner.metrics.get_payload_bodies_by_range_v1.record(start_time.elapsed()); + self.inner.metrics.latency.get_payload_bodies_by_range_v1.record(start_time.elapsed()); Ok(res?) 
} @@ -682,7 +688,7 @@ where trace!(target: "rpc::engine", "Serving engine_exchangeTransitionConfigurationV1"); let start = Instant::now(); let res = EngineApi::exchange_transition_configuration(self, config).await; - self.inner.metrics.exchange_transition_configuration.record(start.elapsed()); + self.inner.metrics.latency.exchange_transition_configuration.record(start.elapsed()); Ok(res?) } diff --git a/crates/rpc/rpc-engine-api/src/metrics.rs b/crates/rpc/rpc-engine-api/src/metrics.rs index 9df8ff7900e97..d63611f7da584 100644 --- a/crates/rpc/rpc-engine-api/src/metrics.rs +++ b/crates/rpc/rpc-engine-api/src/metrics.rs @@ -1,10 +1,23 @@ -use metrics::Histogram; +use crate::EngineApiError; +use metrics::{Counter, Histogram}; use reth_metrics::Metrics; +use reth_rpc_types::engine::{ForkchoiceUpdated, PayloadStatus, PayloadStatusEnum}; -/// Beacon consensus engine metrics. +/// All beacon consensus engine metrics +#[derive(Default)] +pub(crate) struct EngineApiMetrics { + /// Engine API latency metrics + pub(crate) latency: EngineApiLatencyMetrics, + /// Engine API forkchoiceUpdated response type metrics + pub(crate) fcu_response: ForkchoiceUpdatedResponseMetrics, + /// Engine API newPayload response type metrics + pub(crate) new_payload_response: NewPayloadStatusResponseMetrics, +} + +/// Beacon consensus engine latency metrics. #[derive(Metrics)] #[metrics(scope = "engine.rpc")] -pub(crate) struct EngineApiMetrics { +pub(crate) struct EngineApiLatencyMetrics { /// Latency for `engine_newPayloadV1` pub(crate) new_payload_v1: Histogram, /// Latency for `engine_newPayloadV2` @@ -30,3 +43,84 @@ pub(crate) struct EngineApiMetrics { /// Latency for `engine_exchangeTransitionConfigurationV1` pub(crate) exchange_transition_configuration: Histogram, } + +/// Metrics for engine API forkchoiceUpdated responses. 
+#[derive(Metrics)] +#[metrics(scope = "engine.rpc")] +pub(crate) struct ForkchoiceUpdatedResponseMetrics { + /// The total count of forkchoice updated messages received. + pub(crate) forkchoice_updated_messages: Counter, + /// The total count of forkchoice updated messages that we responded to with + /// [Invalid](reth_rpc_types::engine::PayloadStatusEnum#Invalid). + pub(crate) forkchoice_updated_invalid: Counter, + /// The total count of forkchoice updated messages that we responded to with + /// [Valid](reth_rpc_types::engine::PayloadStatusEnum#Valid). + pub(crate) forkchoice_updated_valid: Counter, + /// The total count of forkchoice updated messages that we responded to with + /// [Syncing](reth_rpc_types::engine::PayloadStatusEnum#Syncing). + pub(crate) forkchoice_updated_syncing: Counter, + /// The total count of forkchoice updated messages that we responded to with + /// [Accepted](reth_rpc_types::engine::PayloadStatusEnum#Accepted). + pub(crate) forkchoice_updated_accepted: Counter, + /// The total count of forkchoice updated messages that were unsuccessful, i.e. we responded + /// with an error type that is not a [PayloadStatusEnum]. + pub(crate) forkchoice_updated_error: Counter, +} + +/// Metrics for engine API newPayload responses. +#[derive(Metrics)] +#[metrics(scope = "engine.rpc")] +pub(crate) struct NewPayloadStatusResponseMetrics { + /// The total count of new payload messages received. + pub(crate) new_payload_messages: Counter, + /// The total count of new payload messages that we responded to with + /// [Invalid](reth_rpc_types::engine::PayloadStatusEnum#Invalid). + pub(crate) new_payload_invalid: Counter, + /// The total count of new payload messages that we responded to with + /// [Valid](reth_rpc_types::engine::PayloadStatusEnum#Valid). + pub(crate) new_payload_valid: Counter, + /// The total count of new payload messages that we responded to with + /// [Syncing](reth_rpc_types::engine::PayloadStatusEnum#Syncing). 
+ pub(crate) new_payload_syncing: Counter, + /// The total count of new payload messages that we responded to with + /// [Accepted](reth_rpc_types::engine::PayloadStatusEnum#Accepted). + pub(crate) new_payload_accepted: Counter, + /// The total count of new payload messages that were unsuccessful, i.e. we responded with an + /// error type that is not a [PayloadStatusEnum]. + pub(crate) new_payload_error: Counter, +} + +impl NewPayloadStatusResponseMetrics { + /// Increment the newPayload counter based on the given rpc result + pub(crate) fn update_response_metrics(&self, result: &Result) { + match result { + Ok(status) => match status.status { + PayloadStatusEnum::Valid => self.new_payload_valid.increment(1), + PayloadStatusEnum::Syncing => self.new_payload_syncing.increment(1), + PayloadStatusEnum::Accepted => self.new_payload_accepted.increment(1), + PayloadStatusEnum::Invalid { .. } => self.new_payload_invalid.increment(1), + }, + Err(_) => self.new_payload_error.increment(1), + } + self.new_payload_messages.increment(1); + } +} + +impl ForkchoiceUpdatedResponseMetrics { + /// Increment the forkchoiceUpdated counter based on the given rpc result + pub(crate) fn update_response_metrics( + &self, + result: &Result, + ) { + match result { + Ok(status) => match status.payload_status.status { + PayloadStatusEnum::Valid => self.forkchoice_updated_valid.increment(1), + PayloadStatusEnum::Syncing => self.forkchoice_updated_syncing.increment(1), + PayloadStatusEnum::Accepted => self.forkchoice_updated_accepted.increment(1), + PayloadStatusEnum::Invalid { .. 
} => self.forkchoice_updated_invalid.increment(1), + }, + Err(_) => self.forkchoice_updated_error.increment(1), + } + self.forkchoice_updated_messages.increment(1); + } +} From ac29b4b73be382caf2a2462d426e6bad75e18af9 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 22 Apr 2024 19:29:01 +0200 Subject: [PATCH 277/700] release: v0.2.0-beta.6 (#7802) --- Cargo.lock | 138 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 70 insertions(+), 70 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d4913d7a9c97a..dfb64fdd7fd15 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2529,7 +2529,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", "rayon", @@ -6033,7 +6033,7 @@ dependencies = [ [[package]] name = "reth" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "ahash", "alloy-rlp", @@ -6110,7 +6110,7 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "futures-util", "reth-beacon-consensus", @@ -6129,7 +6129,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", "futures-core", @@ -6151,7 +6151,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "assert_matches", "futures", @@ -6189,7 +6189,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus-core" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "reth-consensus-common", "reth-interfaces", @@ -6198,7 +6198,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "aquamarine", "assert_matches", @@ -6222,7 +6222,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" 
dependencies = [ "futures", "reth-tasks", @@ -6232,7 +6232,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6249,7 +6249,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "convert_case 0.6.0", "proc-macro2", @@ -6260,7 +6260,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "confy", "humantime-serde", @@ -6276,7 +6276,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "mockall", "reth-interfaces", @@ -6286,7 +6286,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "arbitrary", "assert_matches", @@ -6323,7 +6323,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", "discv5", @@ -6346,7 +6346,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", "derive_more", @@ -6370,7 +6370,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", "data-encoding", @@ -6393,7 +6393,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", "assert_matches", @@ -6422,7 +6422,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-consensus", "alloy-network", @@ -6449,7 +6449,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "aes 0.8.4", "alloy-rlp", @@ -6480,7 +6480,7 @@ dependencies = [ 
[[package]] name = "reth-engine-primitives" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "reth-primitives", "reth-rpc-types", @@ -6490,7 +6490,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", "arbitrary", @@ -6524,7 +6524,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", "arbitrary", @@ -6547,7 +6547,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", "reth-engine-primitives", @@ -6562,7 +6562,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-chains", "alloy-primitives", @@ -6577,7 +6577,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "reth-basic-payload-builder", "reth-payload-builder", @@ -6591,7 +6591,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "rayon", "reth-db", @@ -6601,7 +6601,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "reth-interfaces", "reth-primitives", @@ -6611,7 +6611,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "reth-evm", "reth-interfaces", @@ -6624,7 +6624,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "eyre", "metrics", @@ -6642,7 +6642,7 @@ dependencies = [ [[package]] name = "reth-interfaces" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "auto_impl", "clap", @@ -6661,7 +6661,7 @@ dependencies = [ [[package]] name = "reth-ipc" 
-version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "async-trait", "bytes", @@ -6680,7 +6680,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "bitflags 2.5.0", "byteorder", @@ -6702,7 +6702,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "bindgen", "cc", @@ -6711,7 +6711,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "futures", "metrics", @@ -6722,7 +6722,7 @@ dependencies = [ [[package]] name = "reth-metrics-derive" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "metrics", "once_cell", @@ -6736,7 +6736,7 @@ dependencies = [ [[package]] name = "reth-net-common" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "pin-project", "reth-primitives", @@ -6745,7 +6745,7 @@ dependencies = [ [[package]] name = "reth-net-nat" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "igd-next", "pin-project-lite", @@ -6759,7 +6759,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-node-bindings", "alloy-provider", @@ -6814,7 +6814,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "enr", "reth-discv4", @@ -6828,7 +6828,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "anyhow", "bincode", @@ -6849,7 +6849,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "reth-db", "reth-engine-primitives", @@ -6863,7 +6863,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "aquamarine", "confy", @@ -6900,7 
+6900,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "assert_matches", "clap", @@ -6958,7 +6958,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "eyre", "futures", @@ -6986,7 +6986,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "futures", "humantime", @@ -7007,7 +7007,7 @@ dependencies = [ [[package]] name = "reth-node-optimism" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-primitives", "async-trait", @@ -7049,7 +7049,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", "reth-basic-payload-builder", @@ -7070,7 +7070,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "futures-util", "metrics", @@ -7092,7 +7092,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "reth-primitives", "reth-rpc-types", @@ -7101,7 +7101,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-chains", "alloy-eips", @@ -7154,7 +7154,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", "assert_matches", @@ -7184,7 +7184,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "assert_matches", "derive_more", @@ -7208,7 +7208,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "reth-consensus-common", "reth-evm", @@ -7223,7 +7223,7 @@ dependencies = [ [[package]] 
name = "reth-rpc" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-dyn-abi", "alloy-primitives", @@ -7276,7 +7276,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "jsonrpsee", "reth-engine-primitives", @@ -7288,7 +7288,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "futures", "jsonrpsee", @@ -7302,7 +7302,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "hyper 0.14.28", "jsonrpsee", @@ -7339,7 +7339,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", "assert_matches", @@ -7368,7 +7368,7 @@ dependencies = [ [[package]] name = "reth-rpc-types" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -7397,7 +7397,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", "alloy-rpc-types", @@ -7408,7 +7408,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", "assert_matches", @@ -7443,7 +7443,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "aquamarine", "assert_matches", @@ -7465,7 +7465,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "assert_matches", "clap", @@ -7485,7 +7485,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "dyn-clone", "futures-util", @@ -7501,7 +7501,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "0.2.0-beta.5" 
+version = "0.2.0-beta.6" dependencies = [ "tokio", "tokio-stream", @@ -7509,7 +7509,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "clap", "eyre", @@ -7523,7 +7523,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", "aquamarine", @@ -7561,7 +7561,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", "auto_impl", @@ -7587,7 +7587,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", "criterion", diff --git a/Cargo.toml b/Cargo.toml index 1e016ab87f1a7..8cf53ef559456 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -159,7 +159,7 @@ unnecessary_struct_initialization = "allow" use_self = "allow" [workspace.package] -version = "0.2.0-beta.5" +version = "0.2.0-beta.6" edition = "2021" rust-version = "1.76" license = "MIT OR Apache-2.0" From a2027c817f346c35865d82dfe1df26da77cc4a9b Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 22 Apr 2024 13:38:24 -0400 Subject: [PATCH 278/700] chore: add network or chain question to bug template (#7800) --- .github/ISSUE_TEMPLATE/bug.yml | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index bfb81f1b7e5fb..1142a5bf251c6 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -6,7 +6,7 @@ body: attributes: value: | Thanks for taking the time to fill out this bug report! Please provide as much detail as possible. - + If you believe you have found a vulnerability, please provide details [here](mailto:georgios@paradigm.xyz) instead. 
- type: textarea id: what-happened @@ -14,7 +14,7 @@ body: label: Describe the bug description: | A clear and concise description of what the bug is. - + If the bug is in a crate you are using (i.e. you are not running the standard `reth` binary) please mention that as well. validations: required: true @@ -25,7 +25,7 @@ body: description: Please provide any steps you think might be relevant to reproduce the bug. placeholder: | Steps to reproduce: - + 1. Start '...' 2. Then '...' 3. Check '...' @@ -76,6 +76,13 @@ body: description: This can be obtained with `reth db version` validations: required: true + - type: textarea + id: network + attributes: + label: Which chain / network are you on? + description: This is the argument you pass to `reth --chain`. If you are using `--dev`, type in 'dev' here. If you are not running with `--chain` or `--dev` then it is mainnet. + validations: + required: true - type: dropdown id: node-type attributes: From b9db4cb61bbe0540be5a72cbf31dcb0813994113 Mon Sep 17 00:00:00 2001 From: jn Date: Tue, 23 Apr 2024 02:15:53 -0700 Subject: [PATCH 279/700] Implement Compact for Withdrawal (#7604) Co-authored-by: Matthias Seitz --- crates/storage/codecs/Cargo.toml | 9 ++- crates/storage/codecs/src/alloy/mod.rs | 1 + crates/storage/codecs/src/alloy/withdrawal.rs | 62 +++++++++++++++++++ 3 files changed, 69 insertions(+), 3 deletions(-) create mode 100644 crates/storage/codecs/src/alloy/withdrawal.rs diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 31f954f868d7e..ab8f1a3232866 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -11,18 +11,21 @@ repository.workspace = true workspace = true [dependencies] +# reth reth-codecs-derive = { path = "./derive", default-features = false } +# eth alloy-eips = { workspace = true, optional = true } alloy-primitives.workspace = true +# misc bytes.workspace = true +modular-bitfield = { workspace = true, optional = true } +serde.workspace 
= true [dev-dependencies] alloy-eips = { workspace = true, default-features = false, features = ["arbitrary", "serde"] } alloy-primitives = { workspace = true, features = ["arbitrary", "serde"] } -serde.workspace = true -modular-bitfield.workspace = true test-fuzz.workspace = true serde_json.workspace = true @@ -33,5 +36,5 @@ proptest-derive.workspace = true [features] default = ["std", "alloy"] std = ["alloy-primitives/std", "bytes/std"] -alloy = ["alloy-eips"] +alloy = ["dep:alloy-eips", "dep:modular-bitfield"] optimism = ["reth-codecs-derive/optimism"] diff --git a/crates/storage/codecs/src/alloy/mod.rs b/crates/storage/codecs/src/alloy/mod.rs index 7d7a794fe6708..aff164642586b 100644 --- a/crates/storage/codecs/src/alloy/mod.rs +++ b/crates/storage/codecs/src/alloy/mod.rs @@ -1,3 +1,4 @@ mod access_list; mod log; mod txkind; +mod withdrawal; diff --git a/crates/storage/codecs/src/alloy/withdrawal.rs b/crates/storage/codecs/src/alloy/withdrawal.rs new file mode 100644 index 0000000000000..0849b7e4a49fe --- /dev/null +++ b/crates/storage/codecs/src/alloy/withdrawal.rs @@ -0,0 +1,62 @@ +use crate::Compact; +use alloy_eips::eip4895::Withdrawal as AlloyWithdrawal; +use alloy_primitives::Address; +use reth_codecs_derive::main_codec; + +/// Withdrawal acts as bridge which simplifies Compact implementation for AlloyWithdrawal. +/// +/// Notice: Make sure this struct is 1:1 with `alloy_eips::eip4895::Withdrawal` +#[main_codec] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +struct Withdrawal { + /// Monotonically increasing identifier issued by consensus layer. + index: u64, + /// Index of validator associated with withdrawal. + validator_index: u64, + /// Target address for withdrawn ether. + address: Address, + /// Value of the withdrawal in gwei. 
+ amount: u64, +} + +impl Compact for AlloyWithdrawal { + fn to_compact(self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let withdrawal = Withdrawal { + index: self.index, + validator_index: self.validator_index, + address: self.address, + amount: self.amount, + }; + withdrawal.to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (withdrawal, _) = Withdrawal::from_compact(buf, len); + let alloy_withdrawal = AlloyWithdrawal { + index: withdrawal.index, + validator_index: withdrawal.validator_index, + address: withdrawal.address, + amount: withdrawal.amount, + }; + (alloy_withdrawal, buf) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use proptest::proptest; + + proptest! { + #[test] + fn roundtrip(withdrawal: AlloyWithdrawal) { + let mut compacted_withdrawal = Vec::::new(); + let len = withdrawal.to_compact(&mut compacted_withdrawal); + let (decoded, _) = AlloyWithdrawal::from_compact(&compacted_withdrawal, len); + assert_eq!(withdrawal, decoded) + } + } +} From c499797a6c959af2f2519ca97171ea6fd229a5dc Mon Sep 17 00:00:00 2001 From: Vid Kersic <38610409+Vid201@users.noreply.github.com> Date: Tue, 23 Apr 2024 11:25:42 +0200 Subject: [PATCH 280/700] chore: export tables macro (#7807) --- crates/storage/db/src/lib.rs | 2 +- crates/storage/db/src/tables/mod.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index c0737cc42d44b..5425c80743969 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -38,7 +38,7 @@ //! //! # Overview //! -//! An overview of the current data model of reth can be found in the [`tables`] module. +//! An overview of the current data model of reth can be found in the [`mod@tables`] module. //! //! [`Database`]: crate::abstraction::database::Database //! 
[`DbTx`]: crate::abstraction::transaction::DbTx diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 676ed5ebc9d36..b106623259c83 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -101,6 +101,7 @@ pub trait TableViewer { } } +#[macro_export] /// Defines all the tables in the database. macro_rules! tables { (@bool) => { false }; From d6b861ea5d068c17baf160e1fd1ca37cdbe154cc Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 23 Apr 2024 11:05:46 +0100 Subject: [PATCH 281/700] feat(exex): send `ExExNotification` instead of `CanonStateNotification` (#7803) --- Cargo.lock | 2 +- crates/exex/src/context.rs | 10 ++-- crates/exex/src/lib.rs | 3 ++ crates/exex/src/manager.rs | 58 +++++++++++---------- crates/exex/src/notification.rs | 54 +++++++++++++++++++ crates/node-builder/src/builder.rs | 2 +- crates/stages/src/stages/execution.rs | 11 ++-- examples/exex/minimal/Cargo.toml | 2 +- examples/exex/minimal/src/main.rs | 23 +++++---- examples/exex/op-bridge/src/main.rs | 74 ++++++++++++++------------- 10 files changed, 149 insertions(+), 90 deletions(-) create mode 100644 crates/exex/src/notification.rs diff --git a/Cargo.lock b/Cargo.lock index dfb64fdd7fd15..d7effc49cf293 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4723,7 +4723,7 @@ dependencies = [ "reth-node-core", "reth-node-ethereum", "reth-primitives", - "reth-provider", + "reth-tracing", "tokio", ] diff --git a/crates/exex/src/context.rs b/crates/exex/src/context.rs index 619679e85fae7..df2b5137797d4 100644 --- a/crates/exex/src/context.rs +++ b/crates/exex/src/context.rs @@ -4,11 +4,10 @@ use reth_node_core::{ node_config::NodeConfig, }; use reth_primitives::Head; -use reth_provider::CanonStateNotification; use reth_tasks::TaskExecutor; use tokio::sync::mpsc::{Receiver, UnboundedSender}; -use crate::ExExEvent; +use crate::{ExExEvent, ExExNotification}; /// Captures the context that an ExEx has access to. 
#[derive(Debug)] @@ -35,12 +34,11 @@ pub struct ExExContext { /// Additionally, the exex can pre-emptively emit a `FinishedHeight` event to specify what /// blocks to receive notifications for. pub events: UnboundedSender, - /// Channel to receive [`CanonStateNotification`]s on state transitions. + /// Channel to receive [`ExExNotification`]s. /// /// # Important /// - /// Once a `CanonStateNotification` is sent over the channel, it is considered delivered by the + /// Once a an [`ExExNotification`] is sent over the channel, it is considered delivered by the /// node. - pub notifications: Receiver, - // TODO(alexey): add pool, payload builder, anything else? + pub notifications: Receiver, } diff --git a/crates/exex/src/lib.rs b/crates/exex/src/lib.rs index 638d8af79954b..4e2d0dd85177a 100644 --- a/crates/exex/src/lib.rs +++ b/crates/exex/src/lib.rs @@ -42,3 +42,6 @@ pub use event::*; mod manager; pub use manager::*; + +mod notification; +pub use notification::*; diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 59f2bde58bbbc..95b950f328496 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -1,8 +1,7 @@ -use crate::ExExEvent; +use crate::{ExExEvent, ExExNotification}; use metrics::Gauge; use reth_metrics::{metrics::Counter, Metrics}; use reth_primitives::{BlockNumber, FinishedExExHeight}; -use reth_provider::CanonStateNotification; use reth_tracing::tracing::debug; use std::{ collections::VecDeque, @@ -24,7 +23,7 @@ use tokio_util::sync::{PollSendError, PollSender, ReusableBoxFuture}; #[derive(Metrics)] #[metrics(scope = "exex")] struct ExExMetrics { - /// The total number of canonical state notifications sent to an ExEx. + /// The total number of notifications sent to an ExEx. notifications_sent_total: Counter, /// The total number of events an ExEx has sent to the manager. events_sent_total: Counter, @@ -42,8 +41,8 @@ pub struct ExExHandle { /// Metrics for an ExEx. 
metrics: ExExMetrics, - /// Channel to send [`CanonStateNotification`]s to the ExEx. - sender: PollSender, + /// Channel to send [`ExExNotification`]s to the ExEx. + sender: PollSender, /// Channel to receive [`ExExEvent`]s from the ExEx. receiver: UnboundedReceiver, /// The ID of the next notification to send to this ExEx. @@ -59,22 +58,22 @@ impl ExExHandle { /// Create a new handle for the given ExEx. /// /// Returns the handle, as well as a [`UnboundedSender`] for [`ExExEvent`]s and a - /// [`Receiver`] for [`CanonStateNotification`]s that should be given to the ExEx. - pub fn new(id: String) -> (Self, UnboundedSender, Receiver) { - let (canon_tx, canon_rx) = mpsc::channel(1); + /// [`Receiver`] for [`ExExNotification`]s that should be given to the ExEx. + pub fn new(id: String) -> (Self, UnboundedSender, Receiver) { + let (notification_tx, notification_rx) = mpsc::channel(1); let (event_tx, event_rx) = mpsc::unbounded_channel(); ( Self { id: id.clone(), metrics: ExExMetrics::new_with_labels(&[("exex", id)]), - sender: PollSender::new(canon_tx), + sender: PollSender::new(notification_tx), receiver: event_rx, next_notification_id: 0, finished_height: None, }, event_tx, - canon_rx, + notification_rx, ) } @@ -85,14 +84,20 @@ impl ExExHandle { fn send( &mut self, cx: &mut Context<'_>, - (event_id, notification): &(usize, CanonStateNotification), - ) -> Poll>> { + (event_id, notification): &(usize, ExExNotification), + ) -> Poll>> { // check that this notification is above the finished height of the exex if the exex has set // one if let Some(finished_height) = self.finished_height { - if finished_height >= notification.tip().number { - self.next_notification_id = event_id + 1; - return Poll::Ready(Ok(())) + match notification { + ExExNotification::ChainCommitted { new } | + ExExNotification::ChainReorged { old: _, new } + if finished_height >= new.tip().number => + { + self.next_notification_id = event_id + 1; + return Poll::Ready(Ok(())) + } + _ => (), } } @@ 
-142,18 +147,18 @@ pub struct ExExManager { /// Handles to communicate with the ExEx's. exex_handles: Vec, - /// [`CanonStateNotification`] channel from the [`ExExManagerHandle`]s. - handle_rx: UnboundedReceiver, + /// [`ExExNotification`] channel from the [`ExExManagerHandle`]s. + handle_rx: UnboundedReceiver, /// The minimum notification ID currently present in the buffer. min_id: usize, - /// Monotonically increasing ID for [`CanonStateNotification`]s. + /// Monotonically increasing ID for [`ExExNotification`]s. next_id: usize, - /// Internal buffer of [`CanonStateNotification`]s. + /// Internal buffer of [`ExExNotification`]s. /// /// The first element of the tuple is a monotonically increasing ID unique to the notification /// (the second element of the tuple). - buffer: VecDeque<(usize, CanonStateNotification)>, + buffer: VecDeque<(usize, ExExNotification)>, /// Max size of the internal state notifications buffer. max_capacity: usize, /// Current state notifications buffer capacity. @@ -244,7 +249,7 @@ impl ExExManager { /// Pushes a new notification into the managers internal buffer, assigning the notification a /// unique ID. - fn push_notification(&mut self, notification: CanonStateNotification) { + fn push_notification(&mut self, notification: ExExNotification) { let next_id = self.next_id; self.buffer.push_back((next_id, notification)); self.next_id += 1; @@ -334,7 +339,7 @@ impl Future for ExExManager { #[derive(Debug)] pub struct ExExManagerHandle { /// Channel to send notifications to the ExEx manager. - exex_tx: UnboundedSender, + exex_tx: UnboundedSender, /// The number of ExEx's running on the node. num_exexs: usize, /// A watch channel denoting whether the manager is ready for new notifications or not. @@ -376,10 +381,7 @@ impl ExExManagerHandle { /// Synchronously send a notification over the channel to all execution extensions. /// /// Senders should call [`Self::has_capacity`] first. 
- pub fn send( - &self, - notification: CanonStateNotification, - ) -> Result<(), SendError> { + pub fn send(&self, notification: ExExNotification) -> Result<(), SendError> { self.exex_tx.send(notification) } @@ -389,8 +391,8 @@ impl ExExManagerHandle { /// capacity in the channel, the future will wait. pub async fn send_async( &mut self, - notification: CanonStateNotification, - ) -> Result<(), SendError> { + notification: ExExNotification, + ) -> Result<(), SendError> { self.ready().await; self.exex_tx.send(notification) } diff --git a/crates/exex/src/notification.rs b/crates/exex/src/notification.rs new file mode 100644 index 0000000000000..ae8091e0c6b31 --- /dev/null +++ b/crates/exex/src/notification.rs @@ -0,0 +1,54 @@ +use std::sync::Arc; + +use reth_provider::{CanonStateNotification, Chain}; + +/// Notifications sent to an ExEx. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ExExNotification { + /// Chain got committed without a reorg, and only the new chain is returned. + ChainCommitted { + /// The new chain after commit. + new: Arc, + }, + /// Chain got reorged, and both the old and the new chains are returned. + ChainReorged { + /// The old chain before reorg. + old: Arc, + /// The new chain after reorg. + new: Arc, + }, + /// Chain got reverted, and only the old chain is returned. + ChainReverted { + /// The old chain before reversion. + old: Arc, + }, +} + +impl ExExNotification { + /// Returns the committed chain from the [Self::ChainCommitted] and [Self::ChainReorged] + /// variants, if any. + pub fn committed_chain(&self) -> Option> { + match self { + Self::ChainCommitted { new } | Self::ChainReorged { old: _, new } => Some(new.clone()), + Self::ChainReverted { .. } => None, + } + } + + /// Returns the reverted chain from the [Self::ChainReorged] and [Self::ChainReverted] variants, + /// if any. 
+ pub fn reverted_chain(&self) -> Option> { + match self { + Self::ChainReorged { old, new: _ } | Self::ChainReverted { old } => Some(old.clone()), + Self::ChainCommitted { .. } => None, + } + } +} + +impl From for ExExNotification { + fn from(notification: CanonStateNotification) -> Self { + match notification { + CanonStateNotification::Commit { new } => Self::ChainCommitted { new }, + CanonStateNotification::Reorg { old, new } => Self::ChainReorged { old, new }, + } + } +} diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index 49be32b33bf15..c47478047f235 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -661,7 +661,7 @@ where executor.spawn_critical("exex manager blockchain tree notifications", async move { while let Ok(notification) = canon_state_notifications.recv().await { handle - .send_async(notification) + .send_async(notification.into()) .await .expect("blockchain tree notification could not be sent to exex manager"); } diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 7f22ecaef37f4..b581af40363ac 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -3,7 +3,7 @@ use num_traits::Zero; use reth_db::{ cursor::DbCursorRO, database::Database, static_file::HeaderMask, tables, transaction::DbTx, }; -use reth_exex::ExExManagerHandle; +use reth_exex::{ExExManagerHandle, ExExNotification}; use reth_primitives::{ stage::{ CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, StageCheckpoint, StageId, @@ -12,9 +12,8 @@ use reth_primitives::{ }; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, - BlockReader, CanonStateNotification, Chain, DatabaseProviderRW, ExecutorFactory, - HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, StatsReader, - TransactionVariant, + BlockReader, Chain, DatabaseProviderRW, 
ExecutorFactory, HeaderProvider, + LatestStateProviderRef, OriginalValuesKnown, ProviderError, StatsReader, TransactionVariant, }; use reth_stages_api::{ BlockErrorKind, ExecInput, ExecOutput, MetricEvent, MetricEventsSender, Stage, StageError, @@ -265,7 +264,7 @@ impl ExecutionStage { // NOTE: We can ignore the error here, since an error means that the channel is closed, // which means the manager has died, which then in turn means the node is shutting down. - let _ = self.exex_manager_handle.send(CanonStateNotification::Commit { new: chain }); + let _ = self.exex_manager_handle.send(ExExNotification::ChainCommitted { new: chain }); } let time = Instant::now(); @@ -436,7 +435,7 @@ impl Stage for ExecutionStage { // NOTE: We can ignore the error here, since an error means that the channel is closed, // which means the manager has died, which then in turn means the node is shutting down. - let _ = self.exex_manager_handle.send(CanonStateNotification::Reorg { + let _ = self.exex_manager_handle.send(ExExNotification::ChainReorged { old: Arc::new(chain), new: Arc::new(Chain::default()), }); diff --git a/examples/exex/minimal/Cargo.toml b/examples/exex/minimal/Cargo.toml index c1c586fd5c381..fc6eba841a314 100644 --- a/examples/exex/minimal/Cargo.toml +++ b/examples/exex/minimal/Cargo.toml @@ -12,7 +12,7 @@ reth-node-api.workspace = true reth-node-core.workspace = true reth-node-ethereum.workspace = true reth-primitives.workspace = true -reth-provider.workspace = true +reth-tracing.workspace = true eyre.workspace = true tokio.workspace = true diff --git a/examples/exex/minimal/src/main.rs b/examples/exex/minimal/src/main.rs index 1c2463cdaa16a..18d3acd2c38e8 100644 --- a/examples/exex/minimal/src/main.rs +++ b/examples/exex/minimal/src/main.rs @@ -1,8 +1,8 @@ use futures::Future; -use reth_exex::{ExExContext, ExExEvent}; +use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; -use 
reth_provider::CanonStateNotification; +use reth_tracing::tracing::info; /// The initialization logic of the ExEx is just an async function. /// @@ -21,19 +21,20 @@ async fn exex_init( async fn exex(mut ctx: ExExContext) -> eyre::Result<()> { while let Some(notification) = ctx.notifications.recv().await { match ¬ification { - CanonStateNotification::Commit { new } => { - println!("Received commit: {:?}", new.first().number..=new.tip().number); + ExExNotification::ChainCommitted { new } => { + info!(committed_chain = ?new.range(), "Received commit"); } - CanonStateNotification::Reorg { old, new } => { - println!( - "Received reorg: {:?} -> {:?}", - old.first().number..=old.tip().number, - new.first().number..=new.tip().number - ); + ExExNotification::ChainReorged { old, new } => { + info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg"); + } + ExExNotification::ChainReverted { old } => { + info!(reverted_chain = ?old.range(), "Received revert"); } }; - ctx.events.send(ExExEvent::FinishedHeight(notification.tip().number))?; + if let Some(committed_chain) = notification.committed_chain() { + ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + } } Ok(()) } diff --git a/examples/exex/op-bridge/src/main.rs b/examples/exex/op-bridge/src/main.rs index 92e6ef10601bc..0f48b0a5f43ed 100644 --- a/examples/exex/op-bridge/src/main.rs +++ b/examples/exex/op-bridge/src/main.rs @@ -94,7 +94,8 @@ async fn op_bridge_exex( ) -> eyre::Result<()> { // Process all new chain state notifications while let Some(notification) = ctx.notifications.recv().await { - if let Some(reverted_chain) = notification.reverted() { + // Revert all deposits and withdrawals + if let Some(reverted_chain) = notification.reverted_chain() { let events = decode_chain_into_events(&reverted_chain); let mut deposits = 0; @@ -126,22 +127,22 @@ async fn op_bridge_exex( } // Insert all new deposits and withdrawals - let committed_chain = notification.committed(); - let 
events = decode_chain_into_events(&committed_chain); - - let mut deposits = 0; - let mut withdrawals = 0; - - for (block, tx, log, event) in events { - match event { - // L1 -> L2 deposit - L1StandardBridgeEvents::ETHBridgeInitiated(ETHBridgeInitiated { - amount, - from, - to, - .. - }) => { - let inserted = connection.execute( + if let Some(committed_chain) = notification.committed_chain() { + let events = decode_chain_into_events(&committed_chain); + + let mut deposits = 0; + let mut withdrawals = 0; + + for (block, tx, log, event) in events { + match event { + // L1 -> L2 deposit + L1StandardBridgeEvents::ETHBridgeInitiated(ETHBridgeInitiated { + amount, + from, + to, + .. + }) => { + let inserted = connection.execute( r#" INSERT INTO deposits (block_number, tx_hash, contract_address, "from", "to", amount) VALUES (?, ?, ?, ?, ?, ?) @@ -155,16 +156,16 @@ async fn op_bridge_exex( amount.to_string(), ), )?; - deposits += inserted; - } - // L2 -> L1 withdrawal - L1StandardBridgeEvents::ETHBridgeFinalized(ETHBridgeFinalized { - amount, - from, - to, - .. - }) => { - let inserted = connection.execute( + deposits += inserted; + } + // L2 -> L1 withdrawal + L1StandardBridgeEvents::ETHBridgeFinalized(ETHBridgeFinalized { + amount, + from, + to, + .. + }) => { + let inserted = connection.execute( r#" INSERT INTO withdrawals (block_number, tx_hash, contract_address, "from", "to", amount) VALUES (?, ?, ?, ?, ?, ?) 
@@ -178,17 +179,18 @@ async fn op_bridge_exex( amount.to_string(), ), )?; - withdrawals += inserted; - } - _ => continue, - }; - } + withdrawals += inserted; + } + _ => continue, + }; + } - info!(block_range = ?committed_chain.range(), %deposits, %withdrawals, "Committed chain events"); + info!(block_range = ?committed_chain.range(), %deposits, %withdrawals, "Committed chain events"); - // Send a finished height event, signaling the node that we don't need any blocks below - // this height anymore - ctx.events.send(ExExEvent::FinishedHeight(notification.tip().number))?; + // Send a finished height event, signaling the node that we don't need any blocks below + // this height anymore + ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + } } Ok(()) From d1e38966a106d37beacb329cde19b63d09635abf Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 23 Apr 2024 11:36:00 +0100 Subject: [PATCH 282/700] fix(stages): send revert chain notification instead of a reorg on unwind (#7808) --- crates/stages/src/stages/execution.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index b581af40363ac..6fb6f58e7fca2 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -248,7 +248,7 @@ impl ExecutionStage { let state = executor.take_output_state(); let write_preparation_duration = time.elapsed(); - // Check if we should send a [`CanonStateNotification`] to execution extensions. + // Check if we should send a [`ExExNotification`] to execution extensions. // // Note: Since we only write to `blocks` if there are any ExEx's we don't need to perform // the `has_exexs` check here as well @@ -427,18 +427,17 @@ impl Stage for ExecutionStage { // This also updates `PlainStorageState` and `PlainAccountState`. 
let bundle_state_with_receipts = provider.unwind_or_peek_state::(range.clone())?; - // Construct a `CanonStateNotification` if we have ExEx's installed. + // Construct a `ExExNotification` if we have ExEx's installed. if self.exex_manager_handle.has_exexs() { - // Get the blocks for the unwound range. This is needed for `CanonStateNotification`. + // Get the blocks for the unwound range. This is needed for `ExExNotification`. let blocks = provider.get_take_block_range::(range.clone())?; let chain = Chain::new(blocks, bundle_state_with_receipts, None); // NOTE: We can ignore the error here, since an error means that the channel is closed, // which means the manager has died, which then in turn means the node is shutting down. - let _ = self.exex_manager_handle.send(ExExNotification::ChainReorged { - old: Arc::new(chain), - new: Arc::new(Chain::default()), - }); + let _ = self + .exex_manager_handle + .send(ExExNotification::ChainReverted { old: Arc::new(chain) }); } // Unwind all receipts for transactions in the block range From 3224837523dddadf86f4d2f27869e5b0b4fda789 Mon Sep 17 00:00:00 2001 From: Delweng Date: Tue, 23 Apr 2024 19:18:32 +0800 Subject: [PATCH 283/700] fix(args/txpool): duplicate arg of txpool.max-account-slots (#7806) Signed-off-by: jsvisa --- book/cli/reth/node.md | 6 +++--- crates/node-core/src/args/txpool_args.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index dbfe7b1d49178..450180c84e4a3 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -349,7 +349,7 @@ TxPool: [default: 20] - --txpool.max_account_slots + --txpool.max-account-slots Max number of executable transaction slots guaranteed per account [default: 16] @@ -387,7 +387,7 @@ Builder: --builder.extradata Block extra data set by the payload builder - [default: reth/v0.2.0-beta.5/linux] + [default: reth//] --builder.gaslimit Target gas ceiling for built blocks @@ -517,7 +517,7 @@ Logging: 
--log.file.directory The path to put log files in - [default: /root/.cache/reth/logs] + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file diff --git a/crates/node-core/src/args/txpool_args.rs b/crates/node-core/src/args/txpool_args.rs index db9e43d82bd59..12fc6bd79c0f7 100644 --- a/crates/node-core/src/args/txpool_args.rs +++ b/crates/node-core/src/args/txpool_args.rs @@ -35,7 +35,7 @@ pub struct TxPoolArgs { pub queued_max_size: usize, /// Max number of executable transaction slots guaranteed per account - #[arg(long = "txpool.max-account-slots", long = "txpool.max_account_slots", default_value_t = TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER)] + #[arg(long = "txpool.max-account-slots", alias = "txpool.max_account_slots", default_value_t = TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER)] pub max_account_slots: usize, /// Price bump (in %) for the transaction pool underpriced check. From 6d2e20cd8570ed21852db1bce6f3d123dd948283 Mon Sep 17 00:00:00 2001 From: ThreeHrSleep <151536303+ThreeHrSleep@users.noreply.github.com> Date: Tue, 23 Apr 2024 17:20:19 +0530 Subject: [PATCH 284/700] chore: Move consensus trait to standalone crate (#7757) Co-authored-by: Matthias Seitz --- Cargo.lock | 27 +++++++++++++++-- Cargo.toml | 4 ++- bin/reth/Cargo.toml | 1 + .../src/commands/debug_cmd/build_block.rs | 3 +- bin/reth/src/commands/debug_cmd/execution.rs | 6 ++-- bin/reth/src/commands/debug_cmd/merkle.rs | 3 +- .../src/commands/debug_cmd/replay_engine.rs | 2 +- bin/reth/src/commands/import.rs | 10 +++---- bin/reth/src/commands/stage/unwind.rs | 2 +- crates/blockchain-tree/Cargo.toml | 1 + crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/blockchain-tree/src/chain.rs | 2 +- crates/blockchain-tree/src/externals.rs | 3 +- crates/consensus/auto-seal/Cargo.toml | 2 ++ crates/consensus/auto-seal/src/lib.rs | 6 ++-- crates/consensus/auto-seal/src/task.rs | 2 +- crates/consensus/beacon-core/Cargo.toml | 2 +- crates/consensus/beacon-core/src/lib.rs | 2 +- 
crates/consensus/beacon/Cargo.toml | 1 + crates/consensus/beacon/src/engine/event.rs | 2 +- crates/consensus/beacon/src/engine/message.rs | 6 ++-- crates/consensus/beacon/src/engine/mod.rs | 4 +-- .../consensus/beacon/src/engine/test_utils.rs | 9 +++--- crates/consensus/common/Cargo.toml | 1 + crates/consensus/common/src/validation.rs | 3 +- crates/consensus/consensus/Cargo.toml | 18 +++++++++++ .../consensus/src/lib.rs} | 13 ++++++-- crates/interfaces/Cargo.toml | 2 +- .../interfaces/src/blockchain_tree/error.rs | 2 +- crates/interfaces/src/error.rs | 2 +- crates/interfaces/src/lib.rs | 3 -- crates/interfaces/src/p2p/error.rs | 3 +- crates/interfaces/src/p2p/full_block.rs | 12 ++++---- .../interfaces/src/p2p/headers/downloader.rs | 7 ++--- crates/interfaces/src/p2p/headers/error.rs | 2 +- crates/interfaces/src/p2p/headers/mod.rs | 2 +- crates/interfaces/src/p2p/mod.rs | 2 +- crates/interfaces/src/test_utils/headers.rs | 30 +++++++++---------- crates/net/downloaders/Cargo.toml | 1 + crates/net/downloaders/src/bodies/bodies.rs | 16 +++++----- crates/net/downloaders/src/bodies/queue.rs | 10 +++---- crates/net/downloaders/src/bodies/request.rs | 12 ++++---- crates/net/downloaders/src/bodies/task.rs | 3 +- .../src/headers/reverse_headers.rs | 18 +++++------ crates/net/downloaders/src/headers/task.rs | 2 +- crates/net/network/Cargo.toml | 1 + crates/net/network/src/import.rs | 2 +- crates/node-builder/Cargo.toml | 1 + crates/node-builder/src/builder.rs | 3 +- crates/node-builder/src/setup.rs | 10 +++---- crates/node/events/Cargo.toml | 2 +- crates/node/events/src/node.rs | 2 +- crates/rpc/rpc-engine-api/Cargo.toml | 1 - crates/rpc/rpc-engine-api/src/engine_api.rs | 5 ++-- crates/stages-api/Cargo.toml | 1 + crates/stages-api/src/error.rs | 7 +++-- crates/stages-api/src/pipeline/mod.rs | 6 ++-- crates/stages/Cargo.toml | 1 + crates/stages/src/lib.rs | 2 +- crates/stages/src/sets.rs | 6 ++-- crates/stages/src/stages/headers.rs | 2 +- crates/stages/src/stages/merkle.rs | 4 
+-- crates/stages/src/stages/sender_recovery.rs | 4 +-- crates/storage/provider/Cargo.toml | 1 + crates/storage/provider/src/providers/mod.rs | 2 +- .../storage/provider/src/traits/chain_info.rs | 2 +- 66 files changed, 185 insertions(+), 146 deletions(-) create mode 100644 crates/consensus/consensus/Cargo.toml rename crates/{interfaces/src/consensus.rs => consensus/consensus/src/lib.rs} (95%) diff --git a/Cargo.lock b/Cargo.lock index d7effc49cf293..6802524cdc1cf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6064,6 +6064,7 @@ dependencies = [ "reth-blockchain-tree", "reth-cli-runner", "reth-config", + "reth-consensus", "reth-consensus-common", "reth-db", "reth-discv4", @@ -6114,12 +6115,14 @@ version = "0.2.0-beta.6" dependencies = [ "futures-util", "reth-beacon-consensus", + "reth-consensus", "reth-engine-primitives", "reth-evm", "reth-interfaces", "reth-primitives", "reth-provider", "reth-revm", + "reth-rpc-types", "reth-stages-api", "reth-transaction-pool", "tokio", @@ -6159,6 +6162,7 @@ dependencies = [ "reth-beacon-consensus-core", "reth-blockchain-tree", "reth-config", + "reth-consensus", "reth-db", "reth-downloaders", "reth-engine-primitives", @@ -6191,8 +6195,8 @@ dependencies = [ name = "reth-beacon-consensus-core" version = "0.2.0-beta.6" dependencies = [ + "reth-consensus", "reth-consensus-common", - "reth-interfaces", "reth-primitives", ] @@ -6206,6 +6210,7 @@ dependencies = [ "lru", "metrics", "parking_lot 0.12.1", + "reth-consensus", "reth-db", "reth-interfaces", "reth-metrics", @@ -6274,11 +6279,21 @@ dependencies = [ "toml", ] +[[package]] +name = "reth-consensus" +version = "0.2.0-beta.6" +dependencies = [ + "auto_impl", + "reth-primitives", + "thiserror", +] + [[package]] name = "reth-consensus-common" version = "0.2.0-beta.6" dependencies = [ "mockall", + "reth-consensus", "reth-interfaces", "reth-primitives", "reth-provider", @@ -6405,6 +6420,7 @@ dependencies = [ "rand 0.8.5", "rayon", "reth-config", + "reth-consensus", "reth-db", 
"reth-interfaces", "reth-metrics", @@ -6649,10 +6665,10 @@ dependencies = [ "futures", "parking_lot 0.12.1", "rand 0.8.5", + "reth-consensus", "reth-eth-wire-types", "reth-network-api", "reth-primitives", - "reth-rpc-types", "secp256k1 0.27.0", "thiserror", "tokio", @@ -6781,6 +6797,7 @@ dependencies = [ "pin-project", "pprof", "rand 0.8.5", + "reth-consensus", "reth-discv4", "reth-discv5", "reth-dns-discovery", @@ -6875,6 +6892,7 @@ dependencies = [ "reth-beacon-consensus", "reth-blockchain-tree", "reth-config", + "reth-consensus", "reth-db", "reth-downloaders", "reth-exex", @@ -6993,12 +7011,12 @@ dependencies = [ "pin-project", "reth-beacon-consensus", "reth-db", - "reth-interfaces", "reth-network", "reth-network-api", "reth-primitives", "reth-provider", "reth-prune", + "reth-rpc-types", "reth-stages", "reth-static-file", "tokio", @@ -7173,6 +7191,7 @@ dependencies = [ "reth-metrics", "reth-nippy-jar", "reth-primitives", + "reth-rpc-types", "reth-trie", "revm", "strum 0.26.2", @@ -7422,6 +7441,7 @@ dependencies = [ "rayon", "reth-codecs", "reth-config", + "reth-consensus", "reth-db", "reth-downloaders", "reth-etl", @@ -7450,6 +7470,7 @@ dependencies = [ "auto_impl", "futures-util", "metrics", + "reth-consensus", "reth-db", "reth-interfaces", "reth-metrics", diff --git a/Cargo.toml b/Cargo.toml index 8cf53ef559456..1fb403e1ba1fd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,7 @@ members = [ "crates/consensus/beacon/", "crates/consensus/beacon-core/", "crates/consensus/common/", + "crates/consensus/consensus/", "crates/ethereum-forks/", "crates/e2e-test-utils/", "crates/etl/", @@ -84,7 +85,7 @@ members = [ "examples/custom-inspector/", "examples/exex/minimal/", "examples/exex/op-bridge/", - "testing/ef-tests/", + "testing/ef-tests/" ] default-members = ["bin/reth"] @@ -207,6 +208,7 @@ reth-blockchain-tree = { path = "crates/blockchain-tree" } reth-cli-runner = { path = "crates/cli/runner" } reth-codecs = { path = "crates/storage/codecs" } reth-config = { 
path = "crates/config" } +reth-consensus = { path = "crates/consensus/consensus" } reth-consensus-common = { path = "crates/consensus/common" } reth-db = { path = "crates/storage/db" } reth-discv4 = { path = "crates/net/discv4" } diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index ea1ee87f01b2f..ff251546456d0 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -56,6 +56,7 @@ reth-node-optimism = { workspace = true, optional = true, features = [ reth-node-core.workspace = true reth-node-builder.workspace = true reth-node-events.workspace = true +reth-consensus.workspace = true # crypto alloy-rlp.workspace = true diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 566198ec8255f..7aaef00fa2741 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -18,8 +18,9 @@ use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; use reth_cli_runner::CliContext; +use reth_consensus::Consensus; use reth_db::{init_db, DatabaseEnv}; -use reth_interfaces::{consensus::Consensus, RethResult}; +use reth_interfaces::RethResult; use reth_node_api::PayloadBuilderAttributes; #[cfg(not(feature = "optimism"))] use reth_node_ethereum::EthEvmConfig; diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 10f485a735802..2384a9af07580 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -14,16 +14,14 @@ use futures::{stream::select as stream_select, StreamExt}; use reth_beacon_consensus::BeaconConsensus; use reth_cli_runner::CliContext; use reth_config::{config::EtlConfig, Config}; +use reth_consensus::Consensus; use reth_db::{database::Database, init_db, DatabaseEnv}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; 
use reth_exex::ExExManagerHandle; -use reth_interfaces::{ - consensus::Consensus, - p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}, -}; +use reth_interfaces::p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}; use reth_network::{NetworkEvents, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_node_core::init::init_genesis; diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index ed8783e965149..d806306d4160a 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -14,9 +14,10 @@ use clap::Parser; use reth_beacon_consensus::BeaconConsensus; use reth_cli_runner::CliContext; use reth_config::Config; +use reth_consensus::Consensus; use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx, DatabaseEnv}; use reth_exex::ExExManagerHandle; -use reth_interfaces::{consensus::Consensus, p2p::full_block::FullBlockClient}; +use reth_interfaces::p2p::full_block::FullBlockClient; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; use reth_node_ethereum::EthEvmConfig; diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 0ef866396f77a..841b9e3c67675 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -15,8 +15,8 @@ use reth_blockchain_tree::{ }; use reth_cli_runner::CliContext; use reth_config::Config; +use reth_consensus::Consensus; use reth_db::{init_db, DatabaseEnv}; -use reth_interfaces::consensus::Consensus; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; use reth_node_core::engine_api_store::{EngineApiStore, StoredEngineApiMessage}; diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index dc31409245f62..4542f10be28a2 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -13,6 +13,7 @@ use 
eyre::Context; use futures::{Stream, StreamExt}; use reth_beacon_consensus::BeaconConsensus; use reth_config::{config::EtlConfig, Config}; +use reth_consensus::Consensus; use reth_db::{database::Database, init_db}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, @@ -20,12 +21,9 @@ use reth_downloaders::{ headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_exex::ExExManagerHandle; -use reth_interfaces::{ - consensus::Consensus, - p2p::{ - bodies::downloader::BodyDownloader, - headers::downloader::{HeaderDownloader, SyncTarget}, - }, +use reth_interfaces::p2p::{ + bodies::downloader::BodyDownloader, + headers::downloader::{HeaderDownloader, SyncTarget}, }; use reth_node_core::init::init_genesis; use reth_node_ethereum::EthEvmConfig; diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index 7810a4416fb3e..a1fe7d8a07ccc 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -10,13 +10,13 @@ use crate::{ use clap::{Parser, Subcommand}; use reth_beacon_consensus::BeaconConsensus; use reth_config::{Config, PruneConfig}; +use reth_consensus::Consensus; use reth_db::{database::Database, open_db}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_exex::ExExManagerHandle; -use reth_interfaces::consensus::Consensus; use reth_node_core::{ args::{get_secret_key, NetworkArgs}, dirs::ChainPath, diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 3a6ab1439e58f..1757b29391568 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -19,6 +19,7 @@ reth-provider.workspace = true reth-stages-api.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-trie-parallel = { workspace = true, features = ["parallel"] } +reth-consensus.workspace = true # common parking_lot.workspace = true diff --git 
a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 02bae76bb7b7c..799e7e3434f54 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -5,13 +5,13 @@ use crate::{ state::{BlockChainId, TreeState}, AppendableChain, BlockIndices, BlockchainTreeConfig, BundleStateData, TreeExternals, }; +use reth_consensus::{Consensus, ConsensusError}; use reth_db::database::Database; use reth_interfaces::{ blockchain_tree::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, BlockAttachment, BlockStatus, BlockValidationKind, CanonicalOutcome, InsertPayloadOk, }, - consensus::{Consensus, ConsensusError}, executor::{BlockExecutionError, BlockValidationError}, provider::RootMismatch, RethResult, diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 2444cf24a901b..c091b800ab94a 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -5,13 +5,13 @@ use super::externals::TreeExternals; use crate::BundleStateDataRef; +use reth_consensus::{Consensus, ConsensusError}; use reth_db::database::Database; use reth_interfaces::{ blockchain_tree::{ error::{BlockchainTreeError, InsertBlockErrorKind}, BlockAttachment, BlockValidationKind, }, - consensus::{Consensus, ConsensusError}, RethResult, }; use reth_primitives::{ diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index 5a288271e76e6..c3bda1ae21e9e 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -1,9 +1,10 @@ //! Blockchain tree externals. 
+use reth_consensus::Consensus; use reth_db::{ cursor::DbCursorRO, database::Database, static_file::HeaderMask, tables, transaction::DbTx, }; -use reth_interfaces::{consensus::Consensus, RethResult}; +use reth_interfaces::RethResult; use reth_primitives::{BlockHash, BlockNumber, StaticFileSegment}; use reth_provider::{ProviderFactory, StatsReader}; use std::{collections::BTreeMap, sync::Arc}; diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml index 5fbf4f07a5e13..ec96426a40217 100644 --- a/crates/consensus/auto-seal/Cargo.toml +++ b/crates/consensus/auto-seal/Cargo.toml @@ -22,6 +22,8 @@ reth-revm.workspace = true reth-transaction-pool.workspace = true reth-evm.workspace = true reth-engine-primitives.workspace = true +reth-consensus.workspace = true +reth-rpc-types.workspace = true # async futures-util.workspace = true diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 62a2936645a28..f6de63979d3ad 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -16,12 +16,10 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use reth_beacon_consensus::BeaconEngineMessage; +use reth_consensus::{Consensus, ConsensusError}; use reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; -use reth_interfaces::{ - consensus::{Consensus, ConsensusError}, - executor::{BlockExecutionError, BlockValidationError}, -}; +use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; use reth_primitives::{ constants::{EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT}, eip4844::calculate_excess_blob_gas, diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index e76b4333e8cf1..53bfc6356c3ec 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -3,9 +3,9 @@ use futures_util::{future::BoxFuture, FutureExt}; use 
reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; use reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; -use reth_interfaces::consensus::ForkchoiceState; use reth_primitives::{Block, ChainSpec, IntoRecoveredTransaction, SealedBlockWithSenders}; use reth_provider::{CanonChainTracker, CanonStateNotificationSender, Chain, StateProviderFactory}; +use reth_rpc_types::engine::ForkchoiceState; use reth_stages_api::PipelineEvent; use reth_transaction_pool::{TransactionPool, ValidPoolTransaction}; use std::{ diff --git a/crates/consensus/beacon-core/Cargo.toml b/crates/consensus/beacon-core/Cargo.toml index 232631f73b601..b5c778b05ec5b 100644 --- a/crates/consensus/beacon-core/Cargo.toml +++ b/crates/consensus/beacon-core/Cargo.toml @@ -14,7 +14,7 @@ workspace = true # reth reth-consensus-common.workspace = true reth-primitives.workspace = true -reth-interfaces.workspace = true +reth-consensus.workspace = true [features] optimism = ["reth-primitives/optimism"] \ No newline at end of file diff --git a/crates/consensus/beacon-core/src/lib.rs b/crates/consensus/beacon-core/src/lib.rs index 599e010092202..c2a3df6e6814a 100644 --- a/crates/consensus/beacon-core/src/lib.rs +++ b/crates/consensus/beacon-core/src/lib.rs @@ -8,8 +8,8 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use reth_consensus::{Consensus, ConsensusError}; use reth_consensus_common::validation; -use reth_interfaces::consensus::{Consensus, ConsensusError}; use reth_primitives::{ constants::MAXIMUM_EXTRA_DATA_SIZE, Chain, ChainSpec, Hardfork, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256, diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 439002ec5fa3b..0fed125978b62 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -57,6 +57,7 @@ reth-downloaders.workspace = true reth-evm-ethereum.workspace = true 
reth-ethereum-engine-primitives.workspace = true reth-config.workspace = true +reth-consensus.workspace = true assert_matches.workspace = true diff --git a/crates/consensus/beacon/src/engine/event.rs b/crates/consensus/beacon/src/engine/event.rs index 168130de790f0..d5cbdee468301 100644 --- a/crates/consensus/beacon/src/engine/event.rs +++ b/crates/consensus/beacon/src/engine/event.rs @@ -1,6 +1,6 @@ use crate::engine::forkchoice::ForkchoiceStatus; -use reth_interfaces::consensus::ForkchoiceState; use reth_primitives::{SealedBlock, SealedHeader, B256}; +use reth_rpc_types::engine::ForkchoiceState; use std::{sync::Arc, time::Duration}; /// Events emitted by [crate::BeaconConsensusEngine]. diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index 464dcedb295b7..f9f1a84d46f49 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -4,11 +4,11 @@ use crate::{ }; use futures::{future::Either, FutureExt}; use reth_engine_primitives::EngineTypes; -use reth_interfaces::{consensus::ForkchoiceState, RethResult}; +use reth_interfaces::RethResult; use reth_payload_builder::error::PayloadBuilderError; use reth_rpc_types::engine::{ - CancunPayloadFields, ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceUpdateError, - ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, + CancunPayloadFields, ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceState, + ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, }; use std::{ future::Future, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 877e6f4501993..5e22a48605648 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -14,7 +14,6 @@ use reth_interfaces::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, BlockStatus, 
BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, }, - consensus::ForkchoiceState, executor::BlockValidationError, p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}, provider::ProviderResult, @@ -31,7 +30,8 @@ use reth_provider::{ StageCheckpointReader, }; use reth_rpc_types::engine::{ - CancunPayloadFields, ExecutionPayload, PayloadStatus, PayloadStatusEnum, PayloadValidationError, + CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum, + PayloadValidationError, }; use reth_stages_api::{ControlFlow, Pipeline}; use reth_tasks::TaskSpawner; diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 329ea644666fc..297269975a49f 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -7,16 +7,15 @@ use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, }; use reth_config::config::EtlConfig; +use reth_consensus::Consensus; use reth_db::{test_utils::TempDatabase, DatabaseEnv as DE}; -use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_evm_ethereum::EthEvmConfig; -type DatabaseEnv = TempDatabase; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; +use reth_ethereum_engine_primitives::EthEngineTypes; +use reth_evm_ethereum::EthEvmConfig; use reth_interfaces::{ - consensus::Consensus, executor::BlockExecutionError, p2p::{bodies::client::BodiesClient, either::EitherDownloader, headers::client::HeadersClient}, sync::NoopSyncStateUpdater, @@ -40,6 +39,8 @@ use reth_tasks::TokioTaskExecutor; use std::{collections::VecDeque, sync::Arc}; use tokio::sync::{oneshot, watch}; +type DatabaseEnv = TempDatabase; + type TestBeaconConsensusEngine = BeaconConsensusEngine< Arc, BlockchainProvider< diff --git a/crates/consensus/common/Cargo.toml 
b/crates/consensus/common/Cargo.toml index 4659dd3e1d694..5e5a6ef579146 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -15,6 +15,7 @@ workspace = true reth-primitives.workspace = true reth-interfaces.workspace = true reth-provider.workspace = true +reth-consensus.workspace=true [dev-dependencies] reth-interfaces = { workspace = true, features = ["test-utils"] } diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 1ab466c77ecf3..94906188206ff 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,6 +1,7 @@ //! Collection of methods for block validation. -use reth_interfaces::{consensus::ConsensusError, RethResult}; +use reth_consensus::ConsensusError; +use reth_interfaces::RethResult; use reth_primitives::{ constants::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, BlockNumber, ChainSpec, GotExpected, Hardfork, Header, InvalidTransactionError, SealedBlock, diff --git a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml new file mode 100644 index 0000000000000..6e4fc7ee9d8f9 --- /dev/null +++ b/crates/consensus/consensus/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "reth-consensus" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +reth-primitives.workspace = true + +# misc +auto_impl.workspace = true +thiserror.workspace = true \ No newline at end of file diff --git a/crates/interfaces/src/consensus.rs b/crates/consensus/consensus/src/lib.rs similarity index 95% rename from crates/interfaces/src/consensus.rs rename to crates/consensus/consensus/src/lib.rs index b7d03b72ee049..ab2453b74a59e 100644 --- a/crates/interfaces/src/consensus.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -1,12 +1,19 @@ +//! 
Consensus protocol functions + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + use reth_primitives::{ BlockHash, BlockNumber, GotExpected, GotExpectedBoxed, Header, HeaderValidationError, InvalidTransactionError, SealedBlock, SealedHeader, B256, U256, }; use std::fmt::Debug; -/// Re-export fork choice state -pub use reth_rpc_types::engine::ForkchoiceState; - /// Consensus is a protocol that chooses canonical chain. #[auto_impl::auto_impl(&, Arc)] pub trait Consensus: Debug + Send + Sync { diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index 8f4aa494a0ea4..70ac2f94246e6 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -12,9 +12,9 @@ workspace = true [dependencies] reth-primitives.workspace = true -reth-rpc-types.workspace = true reth-network-api.workspace = true reth-eth-wire-types.workspace = true +reth-consensus.workspace = true # async futures.workspace = true diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index e08211a4fe603..34e01883513d6 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -1,11 +1,11 @@ //! Error handling for the blockchain tree use crate::{ - consensus::ConsensusError, executor::{BlockExecutionError, BlockValidationError}, provider::ProviderError, RethError, }; +use reth_consensus::ConsensusError; use reth_primitives::{BlockHash, BlockNumber, SealedBlock}; /// Various error cases that can occur when a block violates tree assumptions. 
diff --git a/crates/interfaces/src/error.rs b/crates/interfaces/src/error.rs index c49323595dceb..df307ae091f75 100644 --- a/crates/interfaces/src/error.rs +++ b/crates/interfaces/src/error.rs @@ -1,10 +1,10 @@ use crate::{ blockchain_tree::error::{BlockchainTreeError, CanonicalError}, - consensus::ConsensusError, db::DatabaseError, executor::BlockExecutionError, provider::ProviderError, }; +use reth_consensus::ConsensusError; use reth_network_api::NetworkError; use reth_primitives::fs::FsPathError; diff --git a/crates/interfaces/src/lib.rs b/crates/interfaces/src/lib.rs index b8cfb7b39a6d5..e60d4a621648a 100644 --- a/crates/interfaces/src/lib.rs +++ b/crates/interfaces/src/lib.rs @@ -12,9 +12,6 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -/// Consensus traits. -pub mod consensus; - /// Database error pub mod db; diff --git a/crates/interfaces/src/p2p/error.rs b/crates/interfaces/src/p2p/error.rs index 3c4e351fc32d8..6d822f44c8c08 100644 --- a/crates/interfaces/src/p2p/error.rs +++ b/crates/interfaces/src/p2p/error.rs @@ -1,5 +1,6 @@ use super::headers::client::HeadersRequest; -use crate::{consensus::ConsensusError, db::DatabaseError, provider::ProviderError}; +use crate::{db::DatabaseError, provider::ProviderError}; +use reth_consensus::ConsensusError; use reth_network_api::ReputationChangeKind; use reth_primitives::{ BlockHashOrNumber, BlockNumber, GotExpected, GotExpectedBoxed, Header, WithPeerId, B256, diff --git a/crates/interfaces/src/p2p/full_block.rs b/crates/interfaces/src/p2p/full_block.rs index 3ab8e7644090d..cb4c665438603 100644 --- a/crates/interfaces/src/p2p/full_block.rs +++ b/crates/interfaces/src/p2p/full_block.rs @@ -1,13 +1,11 @@ use super::headers::client::HeadersRequest; -use crate::{ - consensus::{Consensus, ConsensusError}, - p2p::{ - bodies::client::{BodiesClient, SingleBodyRequest}, - error::PeerRequestResult, - headers::client::{HeadersClient, SingleHeaderRequest}, - }, 
+use crate::p2p::{ + bodies::client::{BodiesClient, SingleBodyRequest}, + error::PeerRequestResult, + headers::client::{HeadersClient, SingleHeaderRequest}, }; use futures::Stream; +use reth_consensus::{Consensus, ConsensusError}; use reth_primitives::{ BlockBody, GotExpected, Header, HeadersDirection, SealedBlock, SealedHeader, WithPeerId, B256, }; diff --git a/crates/interfaces/src/p2p/headers/downloader.rs b/crates/interfaces/src/p2p/headers/downloader.rs index 9eea13aabf82d..500a1a1bc844b 100644 --- a/crates/interfaces/src/p2p/headers/downloader.rs +++ b/crates/interfaces/src/p2p/headers/downloader.rs @@ -1,11 +1,8 @@ use super::error::HeadersDownloaderResult; -use crate::{ - consensus::Consensus, - p2p::error::{DownloadError, DownloadResult}, -}; +use crate::p2p::error::{DownloadError, DownloadResult}; use futures::Stream; +use reth_consensus::Consensus; use reth_primitives::{BlockHashOrNumber, SealedHeader, B256}; - /// A downloader capable of fetching and yielding block headers. /// /// A downloader represents a distinct strategy for submitting requests to download block headers, diff --git a/crates/interfaces/src/p2p/headers/error.rs b/crates/interfaces/src/p2p/headers/error.rs index 12eab9548cc15..f586aaf742512 100644 --- a/crates/interfaces/src/p2p/headers/error.rs +++ b/crates/interfaces/src/p2p/headers/error.rs @@ -1,4 +1,4 @@ -use crate::consensus::ConsensusError; +use reth_consensus::ConsensusError; use reth_primitives::SealedHeader; use thiserror::Error; diff --git a/crates/interfaces/src/p2p/headers/mod.rs b/crates/interfaces/src/p2p/headers/mod.rs index 5746c1b2d93ae..56aabf9d67feb 100644 --- a/crates/interfaces/src/p2p/headers/mod.rs +++ b/crates/interfaces/src/p2p/headers/mod.rs @@ -6,7 +6,7 @@ pub mod client; /// A downloader that receives and verifies block headers, is generic /// over the Consensus and the HeadersClient being used. 
/// -/// [`Consensus`]: crate::consensus::Consensus +/// [`Consensus`]: reth_consensus::Consensus /// [`HeadersClient`]: client::HeadersClient pub mod downloader; diff --git a/crates/interfaces/src/p2p/mod.rs b/crates/interfaces/src/p2p/mod.rs index 8e4d7c84fb2d5..75f3a8fc4c5fb 100644 --- a/crates/interfaces/src/p2p/mod.rs +++ b/crates/interfaces/src/p2p/mod.rs @@ -14,7 +14,7 @@ pub mod full_block; /// of a Linear and a Parallel downloader generic over the [`Consensus`] and /// [`HeadersClient`]. /// -/// [`Consensus`]: crate::consensus::Consensus +/// [`Consensus`]: reth_consensus::Consensus /// [`HeadersClient`]: crate::p2p::headers::client::HeadersClient pub mod headers; diff --git a/crates/interfaces/src/test_utils/headers.rs b/crates/interfaces/src/test_utils/headers.rs index 8262d9ae033d1..c0da9ff16141e 100644 --- a/crates/interfaces/src/test_utils/headers.rs +++ b/crates/interfaces/src/test_utils/headers.rs @@ -1,19 +1,17 @@ //! Testing support for headers related interfaces. -use crate::{ - consensus::{self, Consensus, ConsensusError}, - p2p::{ - download::DownloadClient, - error::{DownloadError, DownloadResult, PeerRequestResult, RequestError}, - headers::{ - client::{HeadersClient, HeadersRequest}, - downloader::{HeaderDownloader, SyncTarget}, - error::HeadersDownloaderResult, - }, - priority::Priority, +use crate::p2p::{ + download::DownloadClient, + error::{DownloadError, DownloadResult, PeerRequestResult, RequestError}, + headers::{ + client::{HeadersClient, HeadersRequest}, + downloader::{HeaderDownloader, SyncTarget}, + error::HeadersDownloaderResult, }, + priority::Priority, }; use futures::{Future, FutureExt, Stream, StreamExt}; +use reth_consensus::{Consensus, ConsensusError}; use reth_primitives::{ Header, HeadersDirection, PeerId, SealedBlock, SealedHeader, WithPeerId, U256, }; @@ -274,7 +272,7 @@ impl TestConsensus { impl Consensus for TestConsensus { fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { if 
self.fail_validation() { - Err(consensus::ConsensusError::BaseFeeMissing) + Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } @@ -286,7 +284,7 @@ impl Consensus for TestConsensus { _parent: &SealedHeader, ) -> Result<(), ConsensusError> { if self.fail_validation() { - Err(consensus::ConsensusError::BaseFeeMissing) + Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } @@ -298,15 +296,15 @@ impl Consensus for TestConsensus { _total_difficulty: U256, ) -> Result<(), ConsensusError> { if self.fail_validation() { - Err(consensus::ConsensusError::BaseFeeMissing) + Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } } - fn validate_block(&self, _block: &SealedBlock) -> Result<(), consensus::ConsensusError> { + fn validate_block(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { if self.fail_validation() { - Err(consensus::ConsensusError::BaseFeeMissing) + Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 7ae6db8e69521..b5de192f2dd4e 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -18,6 +18,7 @@ reth-primitives.workspace = true reth-tasks.workspace = true reth-provider.workspace = true reth-config.workspace = true +reth-consensus.workspace = true # async futures.workspace = true diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index d45c9b191ca3e..28b43359ee1d9 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -3,16 +3,14 @@ use crate::{bodies::task::TaskDownloader, metrics::BodyDownloaderMetrics}; use futures::Stream; use futures_util::StreamExt; use reth_config::BodiesConfig; -use reth_interfaces::{ - consensus::Consensus, - p2p::{ - bodies::{ - client::BodiesClient, - downloader::{BodyDownloader, BodyDownloaderResult}, - response::BlockResponse, - }, - error::{DownloadError, DownloadResult}, +use 
reth_consensus::Consensus; +use reth_interfaces::p2p::{ + bodies::{ + client::BodiesClient, + downloader::{BodyDownloader, BodyDownloaderResult}, + response::BlockResponse, }, + error::{DownloadError, DownloadResult}, }; use reth_primitives::{BlockNumber, SealedHeader}; use reth_provider::HeaderProvider; diff --git a/crates/net/downloaders/src/bodies/queue.rs b/crates/net/downloaders/src/bodies/queue.rs index 0fc9635df3fd2..072e059a481c0 100644 --- a/crates/net/downloaders/src/bodies/queue.rs +++ b/crates/net/downloaders/src/bodies/queue.rs @@ -2,12 +2,10 @@ use super::request::BodiesRequestFuture; use crate::metrics::BodyDownloaderMetrics; use futures::{stream::FuturesUnordered, Stream}; use futures_util::StreamExt; -use reth_interfaces::{ - consensus::Consensus, - p2p::{ - bodies::{client::BodiesClient, response::BlockResponse}, - error::DownloadResult, - }, +use reth_consensus::Consensus; +use reth_interfaces::p2p::{ + bodies::{client::BodiesClient, response::BlockResponse}, + error::DownloadResult, }; use reth_primitives::{BlockNumber, SealedHeader}; use std::{ diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index 302256ef4539b..1a87928d55a87 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -1,12 +1,10 @@ use crate::metrics::{BodyDownloaderMetrics, ResponseMetrics}; use futures::{Future, FutureExt}; -use reth_interfaces::{ - consensus::{Consensus as ConsensusTrait, Consensus}, - p2p::{ - bodies::{client::BodiesClient, response::BlockResponse}, - error::{DownloadError, DownloadResult}, - priority::Priority, - }, +use reth_consensus::Consensus; +use reth_interfaces::p2p::{ + bodies::{client::BodiesClient, response::BlockResponse}, + error::{DownloadError, DownloadResult}, + priority::Priority, }; use reth_primitives::{ BlockBody, GotExpected, PeerId, SealedBlock, SealedHeader, WithPeerId, B256, diff --git 
a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index a57e5e4864185..2d9bb3f96eeaf 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -42,8 +42,9 @@ impl TaskDownloader { /// # Example /// /// ``` + /// use reth_consensus::Consensus; /// use reth_downloaders::bodies::{bodies::BodiesDownloaderBuilder, task::TaskDownloader}; - /// use reth_interfaces::{consensus::Consensus, p2p::bodies::client::BodiesClient}; + /// use reth_interfaces::p2p::bodies::client::BodiesClient; /// use reth_provider::HeaderProvider; /// use std::sync::Arc; /// diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 3af45c17259fc..8d2318507fb19 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -6,17 +6,15 @@ use futures::{stream::Stream, FutureExt}; use futures_util::{stream::FuturesUnordered, StreamExt}; use rayon::prelude::*; use reth_config::config::HeadersConfig; -use reth_interfaces::{ - consensus::Consensus, - p2p::{ - error::{DownloadError, DownloadResult, PeerRequestResult}, - headers::{ - client::{HeadersClient, HeadersRequest}, - downloader::{validate_header_download, HeaderDownloader, SyncTarget}, - error::{HeadersDownloaderError, HeadersDownloaderResult}, - }, - priority::Priority, +use reth_consensus::Consensus; +use reth_interfaces::p2p::{ + error::{DownloadError, DownloadResult, PeerRequestResult}, + headers::{ + client::{HeadersClient, HeadersRequest}, + downloader::{validate_header_download, HeaderDownloader, SyncTarget}, + error::{HeadersDownloaderError, HeadersDownloaderResult}, }, + priority::Priority, }; use reth_primitives::{ BlockHashOrNumber, BlockNumber, GotExpected, Header, HeadersDirection, PeerId, SealedHeader, diff --git a/crates/net/downloaders/src/headers/task.rs b/crates/net/downloaders/src/headers/task.rs index 
83f00d4f124f4..16597342b6c9c 100644 --- a/crates/net/downloaders/src/headers/task.rs +++ b/crates/net/downloaders/src/headers/task.rs @@ -44,7 +44,7 @@ impl TaskDownloader { /// # use std::sync::Arc; /// # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloader; /// # use reth_downloaders::headers::task::TaskDownloader; - /// # use reth_interfaces::consensus::Consensus; + /// # use reth_consensus::Consensus; /// # use reth_interfaces::p2p::headers::client::HeadersClient; /// # fn t(consensus:Arc, client: Arc) { /// let downloader = ReverseHeadersDownloader::::builder().build( diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 8e4c110bb501d..dbf7f5fa2b335 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -27,6 +27,7 @@ reth-transaction-pool.workspace = true reth-provider.workspace = true reth-rpc-types.workspace = true reth-tokio-util.workspace = true +reth-consensus.workspace = true # ethereum enr = { workspace = true, features = ["serde", "rust-secp256k1"] } diff --git a/crates/net/network/src/import.rs b/crates/net/network/src/import.rs index d127dab8fed73..738851f0a6123 100644 --- a/crates/net/network/src/import.rs +++ b/crates/net/network/src/import.rs @@ -50,7 +50,7 @@ pub enum BlockValidation { pub enum BlockImportError { /// Consensus error #[error(transparent)] - Consensus(#[from] reth_interfaces::consensus::ConsensusError), + Consensus(#[from] reth_consensus::ConsensusError), } /// An implementation of `BlockImport` used in Proof-of-Stake consensus that does nothing. 
diff --git a/crates/node-builder/Cargo.toml b/crates/node-builder/Cargo.toml index c245203ca0846..aae73f5a63f93 100644 --- a/crates/node-builder/Cargo.toml +++ b/crates/node-builder/Cargo.toml @@ -37,6 +37,7 @@ reth-stages.workspace = true reth-config.workspace = true reth-downloaders.workspace = true reth-node-events.workspace = true +reth-consensus.workspace = true ## async futures.workspace = true diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index c47478047f235..3d7d3a04462b9 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -22,6 +22,7 @@ use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; use reth_config::config::EtlConfig; +use reth_consensus::Consensus; use reth_db::{ database::Database, database_metrics::{DatabaseMetadata, DatabaseMetrics}, @@ -29,7 +30,7 @@ use reth_db::{ DatabaseEnv, }; use reth_exex::{ExExContext, ExExHandle, ExExManager, ExExManagerHandle}; -use reth_interfaces::{consensus::Consensus, p2p::either::EitherDownloader}; +use reth_interfaces::p2p::either::EitherDownloader; use reth_network::{NetworkBuilder, NetworkConfig, NetworkEvents, NetworkHandle}; use reth_node_api::{ FullNodeComponents, FullNodeComponentsAdapter, FullNodeTypes, FullNodeTypesAdapter, NodeTypes, diff --git a/crates/node-builder/src/setup.rs b/crates/node-builder/src/setup.rs index bb67cad661d30..03bf45893369b 100644 --- a/crates/node-builder/src/setup.rs +++ b/crates/node-builder/src/setup.rs @@ -2,18 +2,16 @@ use crate::ConfigureEvm; use reth_config::{config::StageConfig, PruneConfig}; +use reth_consensus::Consensus; use reth_db::database::Database; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_exex::ExExManagerHandle; -use reth_interfaces::{ - consensus::Consensus, - p2p::{ - bodies::{client::BodiesClient, downloader::BodyDownloader}, - 
headers::{client::HeadersClient, downloader::HeaderDownloader}, - }, +use reth_interfaces::p2p::{ + bodies::{client::BodiesClient, downloader::BodyDownloader}, + headers::{client::HeadersClient, downloader::HeaderDownloader}, }; use reth_node_core::{ node_config::NodeConfig, diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml index 9c66f146931c9..83f2bd13aec03 100644 --- a/crates/node/events/Cargo.toml +++ b/crates/node/events/Cargo.toml @@ -18,9 +18,9 @@ reth-network-api.workspace = true reth-stages.workspace = true reth-prune.workspace = true reth-static-file.workspace = true -reth-interfaces.workspace = true reth-db.workspace = true reth-primitives.workspace = true +reth-rpc-types.workspace = true # async tokio.workspace = true diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index b18cc5f0b6710..32bf66e1b795d 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -6,7 +6,6 @@ use reth_beacon_consensus::{ BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, ForkchoiceStatus, }; use reth_db::{database::Database, database_metrics::DatabaseMetadata}; -use reth_interfaces::consensus::ForkchoiceState; use reth_network::{NetworkEvent, NetworkHandle}; use reth_network_api::PeersInfo; use reth_primitives::{ @@ -15,6 +14,7 @@ use reth_primitives::{ BlockNumber, B256, }; use reth_prune::PrunerEvent; +use reth_rpc_types::engine::ForkchoiceState; use reth_stages::{ExecOutput, PipelineEvent}; use reth_static_file::StaticFileProducerEvent; use std::{ diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 2713639634e84..b7e6eeccb6897 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -14,7 +14,6 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true -reth-interfaces.workspace = true reth-provider.workspace = true reth-rpc-types.workspace = true reth-rpc-api.workspace = true diff --git 
a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 7fc52b21c1478..01db0bb115967 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -6,15 +6,14 @@ use reth_engine_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, EngineTypes, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, }; -use reth_interfaces::consensus::ForkchoiceState; use reth_payload_builder::PayloadStore; use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Hardfork, B256, U64}; use reth_provider::{BlockReader, EvmEnvProvider, HeaderProvider, StateProviderFactory}; use reth_rpc_api::EngineApiServer; use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, - ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceUpdated, PayloadId, PayloadStatus, - TransitionConfiguration, CAPABILITIES, + ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, + PayloadStatus, TransitionConfiguration, CAPABILITIES, }; use reth_rpc_types_compat::engine::payload::{ convert_payload_input_v2_to_payload, convert_to_payload_body_v1, diff --git a/crates/stages-api/Cargo.toml b/crates/stages-api/Cargo.toml index d991a47af55f4..d1e31ba7828fb 100644 --- a/crates/stages-api/Cargo.toml +++ b/crates/stages-api/Cargo.toml @@ -18,6 +18,7 @@ reth-db.workspace = true reth-interfaces.workspace = true reth-static-file.workspace = true reth-tokio-util.workspace = true +reth-consensus.workspace = true # metrics reth-metrics.workspace = true diff --git a/crates/stages-api/src/error.rs b/crates/stages-api/src/error.rs index 3b744e7cbe2c6..3d7ae1d72d022 100644 --- a/crates/stages-api/src/error.rs +++ b/crates/stages-api/src/error.rs @@ -1,5 +1,6 @@ +use reth_consensus::ConsensusError; use reth_interfaces::{ - consensus, db::DatabaseError as DbError, executor, p2p::error::DownloadError, RethError, + 
db::DatabaseError as DbError, executor, p2p::error::DownloadError, RethError, }; use reth_primitives::{BlockNumber, SealedHeader, StaticFileSegment, TxNumber}; use reth_provider::ProviderError; @@ -13,7 +14,7 @@ use tokio::sync::mpsc::error::SendError; pub enum BlockErrorKind { /// The block encountered a validation error. #[error("validation error: {0}")] - Validation(#[from] consensus::ConsensusError), + Validation(#[from] ConsensusError), /// The block encountered an execution error. #[error("execution error: {0}")] Execution(#[from] executor::BlockExecutionError), @@ -49,7 +50,7 @@ pub enum StageError { header: Box, /// The error that occurred when attempting to attach the header. #[source] - error: Box, + error: Box, }, /// The headers stage is missing sync gap. #[error("missing sync gap")] diff --git a/crates/stages-api/src/pipeline/mod.rs b/crates/stages-api/src/pipeline/mod.rs index bb15129589521..1b455a9395c25 100644 --- a/crates/stages-api/src/pipeline/mod.rs +++ b/crates/stages-api/src/pipeline/mod.rs @@ -552,8 +552,8 @@ mod tests { use super::*; use crate::{test_utils::TestStage, UnwindOutput}; use assert_matches::assert_matches; + use reth_consensus::ConsensusError; use reth_interfaces::{ - consensus, provider::ProviderError, test_utils::{generators, generators::random_header}, }; @@ -922,9 +922,7 @@ mod tests { 5, Default::default(), )), - error: BlockErrorKind::Validation( - consensus::ConsensusError::BaseFeeMissing, - ), + error: BlockErrorKind::Validation(ConsensusError::BaseFeeMissing), })) .add_unwind(Ok(UnwindOutput { checkpoint: StageCheckpoint::new(0) })) .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true })), diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index df98d1dd7a5f8..3c4a3d5a104a5 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -23,6 +23,7 @@ reth-trie = { workspace = true, features = ["metrics"] } reth-etl.workspace = true reth-config.workspace = true reth-stages-api 
= { workspace = true, features = ["test-utils"] } +reth-consensus.workspace = true # async tokio = { workspace = true, features = ["sync"] } diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index f8e427763c09b..cf2b8acbe8221 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -15,7 +15,6 @@ //! # use std::sync::Arc; //! # use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; //! # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloaderBuilder; -//! # use reth_interfaces::consensus::Consensus; //! # use reth_interfaces::test_utils::{TestBodiesClient, TestConsensus, TestHeadersClient}; //! # use reth_revm::EvmProcessorFactory; //! # use reth_primitives::{PeerId, MAINNET, B256, PruneModes}; @@ -28,6 +27,7 @@ //! # use reth_provider::test_utils::create_test_provider_factory; //! # use reth_static_file::StaticFileProducer; //! # use reth_config::config::EtlConfig; +//! # use reth_consensus::Consensus; //! # //! # let chain_spec = MAINNET.clone(); //! 
# let consensus: Arc = Arc::new(TestConsensus::default()); diff --git a/crates/stages/src/sets.rs b/crates/stages/src/sets.rs index 833f2af8eef03..4f04e9b10f208 100644 --- a/crates/stages/src/sets.rs +++ b/crates/stages/src/sets.rs @@ -58,10 +58,10 @@ use crate::{ StageSet, StageSetBuilder, }; use reth_config::config::EtlConfig; +use reth_consensus::Consensus; use reth_db::database::Database; -use reth_interfaces::{ - consensus::Consensus, - p2p::{bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader}, +use reth_interfaces::p2p::{ + bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, }; use reth_provider::{ExecutorFactory, HeaderSyncGapProvider, HeaderSyncMode}; use std::sync::Arc; diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index a862d4afcb396..f90149e1a81c6 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -1,6 +1,7 @@ use futures_util::StreamExt; use reth_codecs::Compact; use reth_config::config::EtlConfig; +use reth_consensus::Consensus; use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, database::Database, @@ -10,7 +11,6 @@ use reth_db::{ }; use reth_etl::Collector; use reth_interfaces::{ - consensus::Consensus, p2p::headers::{downloader::HeaderDownloader, error::HeadersDownloaderError}, provider::ProviderError, }; diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index 9b4eec87f8db7..bfdb9782b2deb 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -1,10 +1,10 @@ use reth_codecs::Compact; +use reth_consensus::ConsensusError; use reth_db::{ database::Database, tables, transaction::{DbTx, DbTxMut}, }; -use reth_interfaces::consensus; use reth_primitives::{ stage::{EntitiesCheckpoint, MerkleCheckpoint, StageCheckpoint, StageId}, trie::StoredSubNode, @@ -327,7 +327,7 @@ fn validate_state_root( } else { warn!(target: "sync::stages::merkle", ?target_block, ?got, 
?expected, "Failed to verify block state root"); Err(StageError::Block { - error: BlockErrorKind::Validation(consensus::ConsensusError::BodyStateRootDiff( + error: BlockErrorKind::Validation(ConsensusError::BodyStateRootDiff( GotExpected { got, expected: expected.state_root }.into(), )), block: Box::new(expected), diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index 04a30cb2e7e5e..5ddb2dfc08424 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -1,3 +1,4 @@ +use reth_consensus::ConsensusError; use reth_db::{ cursor::DbCursorRW, database::Database, @@ -6,7 +7,6 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, RawValue, }; -use reth_interfaces::consensus; use reth_primitives::{ stage::{EntitiesCheckpoint, StageCheckpoint, StageId}, Address, PruneSegment, StaticFileSegment, TransactionSignedNoHash, TxNumber, @@ -209,7 +209,7 @@ fn recover_range( Err(StageError::Block { block: Box::new(sealed_header), error: BlockErrorKind::Validation( - consensus::ConsensusError::TransactionSignerRecoveryError, + ConsensusError::TransactionSignerRecoveryError, ), }) } diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 6f9305e88b185..526cbdaa5a93f 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-primitives.workspace = true reth-interfaces.workspace = true +reth-rpc-types.workspace = true reth-db.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index f696c86d7bf6d..b3011a9b02cfd 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -17,7 +17,6 @@ use reth_interfaces::{ BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, 
CanonicalOutcome, InsertPayloadOk, }, - consensus::ForkchoiceState, provider::ProviderResult, RethResult, }; @@ -61,6 +60,7 @@ use chain_info::ChainInfoTracker; mod consistent_view; pub use consistent_view::{ConsistentDbView, ConsistentViewError}; +use reth_rpc_types::engine::ForkchoiceState; /// The main type for interacting with the blockchain. /// diff --git a/crates/storage/provider/src/traits/chain_info.rs b/crates/storage/provider/src/traits/chain_info.rs index 82d879df40ca5..5e6379f01c479 100644 --- a/crates/storage/provider/src/traits/chain_info.rs +++ b/crates/storage/provider/src/traits/chain_info.rs @@ -1,5 +1,5 @@ -use reth_interfaces::consensus::ForkchoiceState; use reth_primitives::SealedHeader; +use reth_rpc_types::engine::ForkchoiceState; use std::time::Instant; /// A type that can track updates related to fork choice updates. From eabd0220c6a27e0b3ad3292a7c9534ee4ea2c365 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 23 Apr 2024 14:24:58 +0200 Subject: [PATCH 285/700] feat: support short issue links in label workflow (#7814) --- .github/scripts/label_pr.js | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/.github/scripts/label_pr.js b/.github/scripts/label_pr.js index c01f4c98aac1a..16ace2db03270 100644 --- a/.github/scripts/label_pr.js +++ b/.github/scripts/label_pr.js @@ -8,6 +8,20 @@ function shouldIncludeLabel (label) { return !isStatus && !isTrackingIssue && !isPreventStale && !isDifficulty; } +// Get the issue number from an issue link in the forms ` ` or ` #`. 
+function getIssueLink (repoUrl, body) { + const urlPattern = new RegExp(`(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved) ${repoUrl}/issues/(?\\d+)`, 'i') + const issuePattern = new RegExp(`(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved) \#(?\\d+)`, 'i') + + const urlRe = body.match(urlPattern); + const issueRe = body.match(issuePattern); + if (urlRe?.groups?.issue_number) { + return urlRe.groups.issue_number + } else { + return issueRe?.groups?.issue_number + } +} + module.exports = async ({ github, context }) => { try { const prNumber = context.payload.pull_request.number; @@ -15,11 +29,7 @@ module.exports = async ({ github, context }) => { const repo = context.repo; const repoUrl = context.payload.repository.html_url; - const pattern = new RegExp(`(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved) ${repoUrl}/issues/(?\\d+)`, 'i') - - const re = prBody.match(pattern); - const issueNumber = re?.groups?.issue_number; - + const issueNumber = getIssueLink(repoUrl, prBody); if (!issueNumber) { console.log('No issue reference found in PR description.'); return; From 672e4c512cdd99b6fcfb8dec9940a5150d5b16a7 Mon Sep 17 00:00:00 2001 From: Delweng Date: Tue, 23 Apr 2024 20:26:57 +0800 Subject: [PATCH 286/700] chore(bin/stage): add explicit doc of the use of to-block and hashing stage (#7805) Signed-off-by: jsvisa Co-authored-by: Oliver Nordbjerg --- bin/reth/src/commands/stage/unwind.rs | 9 +++++++-- crates/node-core/src/args/stage_args.rs | 4 ++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index a1fe7d8a07ccc..2682683d4ceef 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -236,10 +236,12 @@ impl Command { /// `reth stage unwind` subcommand #[derive(Subcommand, Debug, Eq, PartialEq)] enum Subcommands { - /// Unwinds the database until the given block number (range is inclusive). 
+ /// Unwinds the database from the latest block, until the given block number or hash has been + /// reached, that block is not included. #[command(name = "to-block")] ToBlock { target: BlockHashOrNumber }, - /// Unwinds the given number of blocks from the database. + /// Unwinds the database from the latest block, until the given number of blocks have been + /// reached. #[command(name = "num-blocks")] NumBlocks { amount: u64 }, } @@ -263,6 +265,9 @@ impl Subcommands { }, Subcommands::NumBlocks { amount } => last.saturating_sub(*amount), } + 1; + if target > last { + eyre::bail!("Target block number is higher than the latest block number") + } Ok(target..=last) } } diff --git a/crates/node-core/src/args/stage_args.rs b/crates/node-core/src/args/stage_args.rs index d90eabcfc1d05..337f5a4a60b11 100644 --- a/crates/node-core/src/args/stage_args.rs +++ b/crates/node-core/src/args/stage_args.rs @@ -30,11 +30,11 @@ pub enum StageEnum { /// /// Manages operations related to hashing storage data. StorageHashing, - /// The hashing stage within the pipeline. + /// The account and storage hashing stages within the pipeline. /// /// Covers general data hashing operations. Hashing, - /// The Merkle stage within the pipeline. + /// The merkle stage within the pipeline. /// /// Handles Merkle tree-related computations and data processing. 
Merkle, From 9fd35f948c65325447d1d7dced207842d2f2c782 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 23 Apr 2024 14:42:51 +0200 Subject: [PATCH 287/700] chore: move TestConensus to reth-consensus (#7813) --- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/consensus/beacon/Cargo.toml | 2 +- .../consensus/beacon/src/engine/test_utils.rs | 4 +- crates/consensus/consensus/Cargo.toml | 5 +- crates/consensus/consensus/src/lib.rs | 4 + crates/consensus/consensus/src/test_utils.rs | 70 +++++++++++++ crates/interfaces/Cargo.toml | 4 +- crates/interfaces/src/p2p/full_block.rs | 2 +- crates/interfaces/src/test_utils/headers.rs | 97 +++---------------- crates/net/downloaders/Cargo.toml | 3 +- crates/net/downloaders/src/bodies/bodies.rs | 3 +- crates/net/downloaders/src/bodies/request.rs | 3 +- crates/net/downloaders/src/bodies/task.rs | 3 +- crates/net/downloaders/src/file_client.rs | 10 +- .../src/headers/reverse_headers.rs | 3 +- crates/net/downloaders/src/headers/task.rs | 3 +- crates/stages/Cargo.toml | 1 + crates/stages/src/lib.rs | 3 +- crates/stages/src/stages/headers.rs | 4 +- 19 files changed, 122 insertions(+), 104 deletions(-) create mode 100644 crates/consensus/consensus/src/test_utils.rs diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 799e7e3434f54..08f588cd2d595 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1259,8 +1259,8 @@ mod tests { use super::*; use assert_matches::assert_matches; use linked_hash_set::LinkedHashSet; + use reth_consensus::test_utils::TestConsensus; use reth_db::{tables, test_utils::TempDatabase, transaction::DbTxMut, DatabaseEnv}; - use reth_interfaces::test_utils::TestConsensus; use reth_node_ethereum::EthEvmConfig; #[cfg(not(feature = "optimism"))] use reth_primitives::proofs::calculate_receipt_root; diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml 
index 0fed125978b62..9a78414479755 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -45,6 +45,7 @@ schnellru.workspace = true # reth reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true, features = ["test-utils"] } +reth-consensus = { workspace = true, features = ["test-utils"] } reth-interfaces = { workspace = true, features = ["test-utils"] } reth-stages = { workspace = true, features = ["test-utils"] } reth-blockchain-tree = { workspace = true, features = ["test-utils"] } @@ -57,7 +58,6 @@ reth-downloaders.workspace = true reth-evm-ethereum.workspace = true reth-ethereum-engine-primitives.workspace = true reth-config.workspace = true -reth-consensus.workspace = true assert_matches.workspace = true diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 297269975a49f..ff36e871b0283 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -7,7 +7,7 @@ use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, }; use reth_config::config::EtlConfig; -use reth_consensus::Consensus; +use reth_consensus::{test_utils::TestConsensus, Consensus}; use reth_db::{test_utils::TempDatabase, DatabaseEnv as DE}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, @@ -19,7 +19,7 @@ use reth_interfaces::{ executor::BlockExecutionError, p2p::{bodies::client::BodiesClient, either::EitherDownloader, headers::client::HeadersClient}, sync::NoopSyncStateUpdater, - test_utils::{NoopFullBlockClient, TestConsensus}, + test_utils::NoopFullBlockClient, }; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::{BlockNumber, ChainSpec, FinishedExExHeight, PruneModes, B256}; diff --git a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml 
index 6e4fc7ee9d8f9..308a16f2026ec 100644 --- a/crates/consensus/consensus/Cargo.toml +++ b/crates/consensus/consensus/Cargo.toml @@ -15,4 +15,7 @@ reth-primitives.workspace = true # misc auto_impl.workspace = true -thiserror.workspace = true \ No newline at end of file +thiserror.workspace = true + +[features] +test-utils = [] \ No newline at end of file diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index ab2453b74a59e..b434272a414f0 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -14,6 +14,10 @@ use reth_primitives::{ }; use std::fmt::Debug; +#[cfg(any(test, feature = "test-utils"))] +/// test helpers for mocking consensus +pub mod test_utils; + /// Consensus is a protocol that chooses canonical chain. #[auto_impl::auto_impl(&, Arc)] pub trait Consensus: Debug + Send + Sync { diff --git a/crates/consensus/consensus/src/test_utils.rs b/crates/consensus/consensus/src/test_utils.rs new file mode 100644 index 0000000000000..a8655661b8c8b --- /dev/null +++ b/crates/consensus/consensus/src/test_utils.rs @@ -0,0 +1,70 @@ +use crate::{Consensus, ConsensusError}; +use reth_primitives::{Header, SealedBlock, SealedHeader, U256}; +use std::sync::atomic::{AtomicBool, Ordering}; + +/// Consensus engine implementation for testing +#[derive(Debug)] +pub struct TestConsensus { + /// Flag whether the header validation should purposefully fail + fail_validation: AtomicBool, +} + +impl Default for TestConsensus { + fn default() -> Self { + Self { fail_validation: AtomicBool::new(false) } + } +} + +impl TestConsensus { + /// Get the failed validation flag. + pub fn fail_validation(&self) -> bool { + self.fail_validation.load(Ordering::SeqCst) + } + + /// Update the validation flag. 
+ pub fn set_fail_validation(&self, val: bool) { + self.fail_validation.store(val, Ordering::SeqCst) + } +} + +impl Consensus for TestConsensus { + fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { + if self.fail_validation() { + Err(ConsensusError::BaseFeeMissing) + } else { + Ok(()) + } + } + + fn validate_header_against_parent( + &self, + _header: &SealedHeader, + _parent: &SealedHeader, + ) -> Result<(), ConsensusError> { + if self.fail_validation() { + Err(ConsensusError::BaseFeeMissing) + } else { + Ok(()) + } + } + + fn validate_header_with_total_difficulty( + &self, + _header: &Header, + _total_difficulty: U256, + ) -> Result<(), ConsensusError> { + if self.fail_validation() { + Err(ConsensusError::BaseFeeMissing) + } else { + Ok(()) + } + } + + fn validate_block(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { + if self.fail_validation() { + Err(ConsensusError::BaseFeeMissing) + } else { + Ok(()) + } + } +} diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index 70ac2f94246e6..6c066593b1004 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -34,12 +34,14 @@ parking_lot = { workspace = true, optional = true } rand = { workspace = true, optional = true } [dev-dependencies] +reth-consensus = { workspace = true, features = ["test-utils"] } + parking_lot.workspace = true rand.workspace = true tokio = { workspace = true, features = ["full"] } secp256k1 = { workspace = true, features = ["alloc", "recovery", "rand"] } [features] -test-utils = ["secp256k1", "rand", "parking_lot"] +test-utils = ["reth-consensus/test-utils", "secp256k1", "rand", "parking_lot"] cli = ["clap"] optimism = ["reth-eth-wire-types/optimism"] diff --git a/crates/interfaces/src/p2p/full_block.rs b/crates/interfaces/src/p2p/full_block.rs index cb4c665438603..6cf3f2c81e064 100644 --- a/crates/interfaces/src/p2p/full_block.rs +++ b/crates/interfaces/src/p2p/full_block.rs @@ -36,7 +36,7 @@ impl 
FullBlockClient { /// Returns a client with Test consensus #[cfg(any(test, feature = "test-utils"))] pub fn test_client(client: Client) -> Self { - Self::new(client, Arc::new(crate::test_utils::TestConsensus::default())) + Self::new(client, Arc::new(reth_consensus::test_utils::TestConsensus::default())) } } diff --git a/crates/interfaces/src/test_utils/headers.rs b/crates/interfaces/src/test_utils/headers.rs index c0da9ff16141e..304f394c896e6 100644 --- a/crates/interfaces/src/test_utils/headers.rs +++ b/crates/interfaces/src/test_utils/headers.rs @@ -1,5 +1,18 @@ //! Testing support for headers related interfaces. +use std::{ + fmt, + pin::Pin, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + task::{ready, Context, Poll}, +}; + +use futures::{Future, FutureExt, Stream, StreamExt}; +use tokio::sync::Mutex; + use crate::p2p::{ download::DownloadClient, error::{DownloadError, DownloadResult, PeerRequestResult, RequestError}, @@ -10,21 +23,8 @@ use crate::p2p::{ }, priority::Priority, }; -use futures::{Future, FutureExt, Stream, StreamExt}; -use reth_consensus::{Consensus, ConsensusError}; -use reth_primitives::{ - Header, HeadersDirection, PeerId, SealedBlock, SealedHeader, WithPeerId, U256, -}; -use std::{ - fmt, - pin::Pin, - sync::{ - atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, - }, - task::{ready, Context, Poll}, -}; -use tokio::sync::Mutex; +use reth_consensus::{test_utils::TestConsensus, Consensus}; +use reth_primitives::{Header, HeadersDirection, PeerId, SealedHeader, WithPeerId}; /// A test downloader which just returns the values that have been pushed to it. 
#[derive(Debug)] @@ -243,70 +243,3 @@ impl HeadersClient for TestHeadersClient { }) } } - -/// Consensus engine implementation for testing -#[derive(Debug)] -pub struct TestConsensus { - /// Flag whether the header validation should purposefully fail - fail_validation: AtomicBool, -} - -impl Default for TestConsensus { - fn default() -> Self { - Self { fail_validation: AtomicBool::new(false) } - } -} - -impl TestConsensus { - /// Get the failed validation flag. - pub fn fail_validation(&self) -> bool { - self.fail_validation.load(Ordering::SeqCst) - } - - /// Update the validation flag. - pub fn set_fail_validation(&self, val: bool) { - self.fail_validation.store(val, Ordering::SeqCst) - } -} - -impl Consensus for TestConsensus { - fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { - if self.fail_validation() { - Err(ConsensusError::BaseFeeMissing) - } else { - Ok(()) - } - } - - fn validate_header_against_parent( - &self, - _header: &SealedHeader, - _parent: &SealedHeader, - ) -> Result<(), ConsensusError> { - if self.fail_validation() { - Err(ConsensusError::BaseFeeMissing) - } else { - Ok(()) - } - } - - fn validate_header_with_total_difficulty( - &self, - _header: &Header, - _total_difficulty: U256, - ) -> Result<(), ConsensusError> { - if self.fail_validation() { - Err(ConsensusError::BaseFeeMissing) - } else { - Ok(()) - } - } - - fn validate_block(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { - if self.fail_validation() { - Err(ConsensusError::BaseFeeMissing) - } else { - Ok(()) - } - } -} diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index b5de192f2dd4e..f1f14c85c01a4 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -45,6 +45,7 @@ itertools.workspace = true [dev-dependencies] reth-db = { workspace = true, features = ["test-utils"] } +reth-consensus = { workspace = true, features = ["test-utils"] } reth-interfaces = { workspace = true, 
features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true @@ -58,5 +59,5 @@ rand.workspace = true tempfile.workspace = true [features] -test-utils = ["dep:tempfile", "reth-db/test-utils", "reth-interfaces/test-utils"] +test-utils = ["dep:tempfile", "reth-db/test-utils", "reth-consensus/test-utils", "reth-interfaces/test-utils"] diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 28b43359ee1d9..985c545e9b396 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -604,8 +604,9 @@ mod tests { test_utils::{generate_bodies, TestBodiesClient}, }; use assert_matches::assert_matches; + use reth_consensus::test_utils::TestConsensus; use reth_db::test_utils::{create_test_rw_db, create_test_static_files_dir}; - use reth_interfaces::test_utils::{generators, generators::random_block_range, TestConsensus}; + use reth_interfaces::test_utils::{generators, generators::random_block_range}; use reth_primitives::{BlockBody, B256, MAINNET}; use reth_provider::ProviderFactory; use std::collections::HashMap; diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index 1a87928d55a87..d6da2444c49db 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -250,7 +250,8 @@ mod tests { bodies::test_utils::zip_blocks, test_utils::{generate_bodies, TestBodiesClient}, }; - use reth_interfaces::test_utils::{generators, generators::random_header_range, TestConsensus}; + use reth_consensus::test_utils::TestConsensus; + use reth_interfaces::test_utils::{generators, generators::random_header_range}; /// Check if future returns empty bodies without dispatching any requests. 
#[tokio::test] diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index 2d9bb3f96eeaf..f8815bcb05791 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -170,7 +170,8 @@ mod tests { test_utils::{generate_bodies, TestBodiesClient}, }; use assert_matches::assert_matches; - use reth_interfaces::{p2p::error::DownloadError, test_utils::TestConsensus}; + use reth_consensus::test_utils::TestConsensus; + use reth_interfaces::p2p::error::DownloadError; use reth_provider::test_utils::create_test_provider_factory; use std::sync::Arc; diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 7d29cc577fe14..b5b7aceae5c75 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -434,12 +434,10 @@ mod tests { use assert_matches::assert_matches; use futures_util::stream::StreamExt; use rand::Rng; - use reth_interfaces::{ - p2p::{ - bodies::downloader::BodyDownloader, - headers::downloader::{HeaderDownloader, SyncTarget}, - }, - test_utils::TestConsensus, + use reth_consensus::test_utils::TestConsensus; + use reth_interfaces::p2p::{ + bodies::downloader::BodyDownloader, + headers::downloader::{HeaderDownloader, SyncTarget}, }; use reth_provider::test_utils::create_test_provider_factory; use std::{mem, sync::Arc}; diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 8d2318507fb19..5c12a161a00a3 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -1223,7 +1223,8 @@ mod tests { use crate::headers::test_utils::child_header; use assert_matches::assert_matches; - use reth_interfaces::test_utils::{TestConsensus, TestHeadersClient}; + use reth_consensus::test_utils::TestConsensus; + use reth_interfaces::test_utils::TestHeadersClient; /// Tests 
that `replace_number` works the same way as Option::replace #[test] diff --git a/crates/net/downloaders/src/headers/task.rs b/crates/net/downloaders/src/headers/task.rs index 16597342b6c9c..aa079dad26235 100644 --- a/crates/net/downloaders/src/headers/task.rs +++ b/crates/net/downloaders/src/headers/task.rs @@ -183,7 +183,8 @@ mod tests { use crate::headers::{ reverse_headers::ReverseHeadersDownloaderBuilder, test_utils::child_header, }; - use reth_interfaces::test_utils::{TestConsensus, TestHeadersClient}; + use reth_consensus::test_utils::TestConsensus; + use reth_interfaces::test_utils::TestHeadersClient; use std::sync::Arc; #[tokio::test(flavor = "multi_thread")] diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 3c4a3d5a104a5..2692c94103d05 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -45,6 +45,7 @@ reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } reth-db = { workspace = true, features = ["test-utils", "mdbx"] } reth-evm-ethereum.workspace = true reth-interfaces = { workspace = true, features = ["test-utils"] } +reth-consensus = { workspace = true, features = ["test-utils"] } reth-downloaders.workspace = true reth-revm.workspace = true reth-static-file.workspace = true diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index cf2b8acbe8221..4b6df93913306 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -15,7 +15,7 @@ //! # use std::sync::Arc; //! # use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; //! # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloaderBuilder; -//! # use reth_interfaces::test_utils::{TestBodiesClient, TestConsensus, TestHeadersClient}; +//! # use reth_interfaces::test_utils::{TestBodiesClient, TestHeadersClient}; //! # use reth_revm::EvmProcessorFactory; //! # use reth_primitives::{PeerId, MAINNET, B256, PruneModes}; //! # use reth_stages::Pipeline; @@ -28,6 +28,7 @@ //! 
# use reth_static_file::StaticFileProducer; //! # use reth_config::config::EtlConfig; //! # use reth_consensus::Consensus; +//! # use reth_consensus::test_utils::TestConsensus; //! # //! # let chain_spec = MAINNET.clone(); //! # let consensus: Arc = Arc::new(TestConsensus::default()); diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index f90149e1a81c6..83dd710d0fd9c 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -371,13 +371,13 @@ mod tests { mod test_runner { use super::*; use crate::test_utils::{TestRunnerError, TestStageDB}; + use reth_consensus::test_utils::TestConsensus; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_downloaders::headers::reverse_headers::{ ReverseHeadersDownloader, ReverseHeadersDownloaderBuilder, }; use reth_interfaces::test_utils::{ - generators, generators::random_header_range, TestConsensus, TestHeaderDownloader, - TestHeadersClient, + generators, generators::random_header_range, TestHeaderDownloader, TestHeadersClient, }; use reth_provider::BlockNumReader; use tokio::sync::watch; From b3db4cf56d3488cdad3f655c725ccf747aa939bd Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 23 Apr 2024 15:30:38 +0100 Subject: [PATCH 288/700] feat: add UX improvements on e2e testing (#7804) --- Cargo.lock | 5 + crates/consensus/beacon/Cargo.toml | 3 + crates/e2e-test-utils/Cargo.toml | 3 + crates/e2e-test-utils/src/engine_api.rs | 28 ++++- crates/e2e-test-utils/src/lib.rs | 94 ++++++++++++++++ crates/e2e-test-utils/src/node.rs | 121 ++++++++++++++++----- crates/e2e-test-utils/src/payload.rs | 2 +- crates/e2e-test-utils/src/wallet.rs | 9 +- crates/node-ethereum/tests/e2e/dev.rs | 38 ++----- crates/node-ethereum/tests/e2e/eth.rs | 48 +++----- crates/node-ethereum/tests/e2e/p2p.rs | 77 ++++--------- crates/node-ethereum/tests/e2e/utils.rs | 5 + crates/optimism/node/Cargo.toml | 3 +- 
crates/optimism/node/tests/e2e/p2p.rs | 82 +++----------- crates/optimism/node/tests/e2e/utils.rs | 44 +++++++- crates/storage/provider/src/traits/full.rs | 4 +- 16 files changed, 344 insertions(+), 222 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6802524cdc1cf..3bd9ff743a19f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6176,6 +6176,7 @@ dependencies = [ "reth-provider", "reth-prune", "reth-revm", + "reth-rpc", "reth-rpc-types", "reth-rpc-types-compat", "reth-stages", @@ -6451,16 +6452,19 @@ dependencies = [ "rand 0.8.5", "reth", "reth-db", + "reth-node-builder", "reth-node-core", "reth-node-ethereum", "reth-payload-builder", "reth-primitives", + "reth-provider", "reth-rpc", "reth-tracing", "secp256k1 0.27.0", "serde_json", "tokio", "tokio-stream", + "tracing", ] [[package]] @@ -7039,6 +7043,7 @@ dependencies = [ "reqwest 0.11.27", "reth", "reth-basic-payload-builder", + "reth-beacon-consensus", "reth-db", "reth-e2e-test-utils", "reth-evm", diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 9a78414479755..38dd772af2cb2 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -52,6 +52,7 @@ reth-blockchain-tree = { workspace = true, features = ["test-utils"] } reth-db = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-rpc-types-compat.workspace = true +reth-rpc.workspace = true reth-tracing.workspace = true reth-revm.workspace = true reth-downloaders.workspace = true @@ -68,4 +69,6 @@ optimism = [ "reth-provider/optimism", "reth-blockchain-tree/optimism", "reth-beacon-consensus-core/optimism", + "reth-revm/optimism", + "reth-rpc/optimism" ] diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index f32ff029c2ed9..96b4ca2e68cf3 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -17,6 +17,8 @@ reth-tracing.workspace = true reth-db.workspace = true 
reth-rpc.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } +reth-provider.workspace = true +reth-node-builder.workspace = true jsonrpsee.workspace = true @@ -32,3 +34,4 @@ alloy-signer-wallet = { workspace = true, features = ["mnemonic"] } alloy-rpc-types.workspace = true alloy-network.workspace = true alloy-consensus.workspace = true +tracing.workspace = true \ No newline at end of file diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index ec8b058a30077..fe05b0b6893f9 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -3,7 +3,10 @@ use jsonrpsee::http_client::HttpClient; use reth::{ api::{EngineTypes, PayloadBuilderAttributes}, providers::CanonStateNotificationStream, - rpc::{api::EngineApiClient, types::engine::ForkchoiceState}, + rpc::{ + api::EngineApiClient, + types::engine::{ForkchoiceState, PayloadStatusEnum}, + }, }; use reth_payload_builder::PayloadId; use reth_primitives::B256; @@ -30,6 +33,7 @@ impl EngineApiHelper { &self, payload: E::BuiltPayload, payload_builder_attributes: E::PayloadBuilderAttributes, + expected_status: PayloadStatusEnum, ) -> eyre::Result where E::ExecutionPayloadV3: From + PayloadEnvelopeExt, @@ -45,8 +49,10 @@ impl EngineApiHelper { payload_builder_attributes.parent_beacon_block_root().unwrap(), ) .await?; - assert!(submission.is_valid(), "{}", submission); - Ok(submission.latest_valid_hash.unwrap()) + + assert!(submission.status == expected_status); + + Ok(submission.latest_valid_hash.unwrap_or_default()) } /// Sends forkchoice update to the engine api @@ -64,4 +70,20 @@ impl EngineApiHelper { Ok(()) } + + /// Sends forkchoice update to the engine api with a zero finalized hash + pub async fn update_optimistic_forkchoice(&self, hash: B256) -> eyre::Result<()> { + EngineApiClient::::fork_choice_updated_v2( + &self.engine_api_client, + ForkchoiceState { + head_block_hash: hash, + safe_block_hash: 
B256::ZERO, + finalized_block_hash: B256::ZERO, + }, + None, + ) + .await?; + + Ok(()) + } } diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 016fb4d3e21b0..043d1e0c66457 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -1,3 +1,22 @@ +use node::NodeHelper; +use reth::{ + args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, + blockchain_tree::ShareableBlockchainTree, + builder::{NodeBuilder, NodeConfig, NodeHandle}, + revm::EvmProcessorFactory, + tasks::TaskManager, +}; +use reth_db::{test_utils::TempDatabase, DatabaseEnv}; +use reth_node_builder::{ + components::{NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, + FullNodeComponentsAdapter, FullNodeTypesAdapter, NodeTypes, +}; +use reth_primitives::ChainSpec; +use reth_provider::providers::BlockchainProvider; +use std::sync::Arc; +use tracing::{span, Level}; +use wallet::Wallet; + /// Wrapper type to create test nodes pub mod node; @@ -15,3 +34,78 @@ mod engine_api; /// Helper traits mod traits; + +/// Creates the initial setup with `num_nodes` started and interconnected. 
+pub async fn setup( + num_nodes: usize, + chain_spec: Arc, + is_dev: bool, +) -> eyre::Result<(Vec>, TaskManager, Wallet)> +where + N: Default + reth_node_builder::Node>, + N::PoolBuilder: PoolBuilder>, + N::NetworkBuilder: NetworkBuilder, TmpPool>, + N::PayloadBuilder: PayloadServiceBuilder, TmpPool>, +{ + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let network_config = NetworkArgs { + discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, + ..NetworkArgs::default() + }; + + // Create nodes and peer them + let mut nodes: Vec> = Vec::with_capacity(num_nodes); + + for idx in 0..num_nodes { + let mut node_config = NodeConfig::test() + .with_chain(chain_spec.clone()) + .with_network(network_config.clone()) + .with_unused_ports() + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + + if is_dev { + node_config = node_config.dev(); + } + + let span = span!(Level::INFO, "node", idx); + let _enter = span.enter(); + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node(exec.clone()) + .node(Default::default()) + .launch() + .await?; + + let mut node = NodeHelper::new(node).await?; + + // Connect each node in a chain. 
+ if let Some(previous_node) = nodes.last_mut() { + previous_node.connect(&mut node).await; + } + + // Connect last node with the first if there are more than two + if idx + 1 == num_nodes && num_nodes > 2 { + if let Some(first_node) = nodes.first_mut() { + node.connect(first_node).await; + } + } + + nodes.push(node); + } + + Ok((nodes, tasks, Wallet::default().with_chain_id(chain_spec.chain().into()))) +} + +// Type aliases + +type TmpDB = Arc>; +type EvmType = EvmProcessorFactory<::Evm>; +type RethProvider = BlockchainProvider>>; +type TmpPool = <>>::PoolBuilder as PoolBuilder< + TmpNodeAdapter, +>>::Pool; +type TmpNodeAdapter = FullNodeTypesAdapter>; + +/// Type alias for a type of NodeHelper +pub type NodeHelperType = NodeHelper, TmpPool>>; diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index d88a428f05a10..18d147fd913a8 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -4,21 +4,18 @@ use crate::{ }; use alloy_rpc_types::BlockNumberOrTag; use eyre::Ok; +use futures_util::Future; use reth::{ api::{BuiltPayload, EngineTypes, FullNodeComponents, PayloadBuilderAttributes}, builder::FullNode, - providers::{BlockReaderIdExt, CanonStateSubscriptions}, + providers::{BlockReader, BlockReaderIdExt, CanonStateSubscriptions, StageCheckpointReader}, rpc::{ eth::{error::EthResult, EthTransactions}, - types::engine::PayloadAttributes, + types::engine::PayloadStatusEnum, }, }; -use reth_payload_builder::EthPayloadBuilderAttributes; -use reth_primitives::{Address, BlockNumber, Bytes, B256}; -use std::{ - marker::PhantomData, - time::{SystemTime, UNIX_EPOCH}, -}; +use reth_primitives::{stage::StageId, BlockHash, BlockNumber, Bytes, B256}; +use std::{marker::PhantomData, pin::Pin}; use tokio_stream::StreamExt; /// An helper struct to handle node actions @@ -27,7 +24,7 @@ where Node: FullNodeComponents, { pub inner: FullNode, - payload: PayloadHelper, + pub payload: PayloadHelper, pub network: 
NetworkHelper, pub engine_api: EngineApiHelper, } @@ -52,12 +49,53 @@ where }) } - /// Advances the node forward + pub async fn connect(&mut self, node: &mut NodeHelper) { + self.network.add_peer(node.network.record()).await; + node.network.add_peer(self.network.record()).await; + node.network.expect_session().await; + self.network.expect_session().await; + } + + /// Advances the chain `length` blocks. + /// + /// Returns the added chain as a Vec of block hashes. pub async fn advance( + &mut self, + length: u64, + tx_generator: impl Fn() -> Pin>>, + attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes + + Copy, + ) -> eyre::Result< + Vec<( + ::BuiltPayload, + ::PayloadBuilderAttributes, + )>, + > + where + ::ExecutionPayloadV3: + From<::BuiltPayload> + PayloadEnvelopeExt, + { + let mut chain = Vec::with_capacity(length as usize); + for _ in 0..length { + let (payload, _) = + self.advance_block(tx_generator().await, attributes_generator).await?; + chain.push(payload); + } + Ok(chain) + } + + /// Advances the node forward one block + pub async fn advance_block( &mut self, raw_tx: Bytes, attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes, - ) -> eyre::Result<(B256, B256)> + ) -> eyre::Result<( + ( + ::BuiltPayload, + ::PayloadBuilderAttributes, + ), + B256, + )> where ::ExecutionPayloadV3: From<::BuiltPayload> + PayloadEnvelopeExt, @@ -81,15 +119,54 @@ where let payload = self.payload.expect_built_payload().await?; // submit payload via engine api - let block_number = payload.block().number; - let block_hash = self.engine_api.submit_payload(payload, eth_attr.clone()).await?; + let block_hash = self + .engine_api + .submit_payload(payload.clone(), eth_attr.clone(), PayloadStatusEnum::Valid) + .await?; // trigger forkchoice update via engine api to commit the block to the blockchain self.engine_api.update_forkchoice(block_hash).await?; // assert the block has been committed to the blockchain - self.assert_new_block(tx_hash, block_hash, 
block_number).await?; - Ok((block_hash, tx_hash)) + self.assert_new_block(tx_hash, block_hash, payload.block().number).await?; + Ok(((payload, eth_attr), tx_hash)) + } + + /// Waits for block to be available on node. + pub async fn wait_block( + &self, + number: BlockNumber, + expected_block_hash: BlockHash, + wait_finish_checkpoint: bool, + ) -> eyre::Result<()> { + let mut check = !wait_finish_checkpoint; + loop { + tokio::time::sleep(std::time::Duration::from_millis(20)).await; + + if !check && wait_finish_checkpoint { + if let Some(checkpoint) = + self.inner.provider.get_stage_checkpoint(StageId::Finish)? + { + if checkpoint.block_number >= number { + check = true + } + } + } + + if check { + if let Some(latest_block) = self.inner.provider.block_by_number(number)? { + if latest_block.hash_slow() != expected_block_hash { + // TODO: only if its awaiting a reorg + continue + } + break + } + if wait_finish_checkpoint { + panic!("Finish checkpoint matches, but could not fetch block."); + } + } + } + Ok(()) } /// Injects a raw transaction into the node tx pool via RPC server @@ -129,17 +206,3 @@ where Ok(()) } } - -/// Helper function to create a new eth payload attributes -pub fn eth_payload_attributes() -> EthPayloadBuilderAttributes { - let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(); - - let attributes = PayloadAttributes { - timestamp, - prev_randao: B256::ZERO, - suggested_fee_recipient: Address::ZERO, - withdrawals: Some(vec![]), - parent_beacon_block_root: Some(B256::ZERO), - }; - EthPayloadBuilderAttributes::new(B256::ZERO, attributes) -} diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index 37138cdd3ebfb..2d349721b232c 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -7,7 +7,7 @@ use tokio_stream::wrappers::BroadcastStream; pub struct PayloadHelper { pub payload_event_stream: BroadcastStream>, payload_builder: PayloadBuilderHandle, - 
timestamp: u64, + pub timestamp: u64, } impl PayloadHelper { diff --git a/crates/e2e-test-utils/src/wallet.rs b/crates/e2e-test-utils/src/wallet.rs index 43fe7555dc7e6..d064eede99c11 100644 --- a/crates/e2e-test-utils/src/wallet.rs +++ b/crates/e2e-test-utils/src/wallet.rs @@ -1,11 +1,11 @@ use alloy_network::{eip2718::Encodable2718, EthereumSigner, TransactionBuilder}; use alloy_rpc_types::{TransactionInput, TransactionRequest}; use alloy_signer_wallet::{coins_bip39::English, LocalWallet, MnemonicBuilder}; -use reth_primitives::{Address, Bytes, U256}; +use reth_primitives::{hex, Address, Bytes, U256}; /// One of the accounts of the genesis allocations. pub struct Wallet { inner: LocalWallet, - nonce: u64, + pub nonce: u64, chain_id: u64, } @@ -27,6 +27,11 @@ impl Wallet { self.tx(None).await } + pub async fn optimism_l1_block_info_tx(&mut self) -> Bytes { + let l1_block_info = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); + self.tx(Some(l1_block_info)).await + } + /// Creates a transaction with data and signs it pub async fn tx(&mut self, data: Option) -> Bytes { let tx = TransactionRequest { diff --git a/crates/node-ethereum/tests/e2e/dev.rs b/crates/node-ethereum/tests/e2e/dev.rs index b096bda5aab0b..4570a8c0e1220 100644 --- a/crates/node-ethereum/tests/e2e/dev.rs +++ 
b/crates/node-ethereum/tests/e2e/dev.rs @@ -1,43 +1,27 @@ -use futures_util::StreamExt; -use reth::{ - api::FullNodeComponents, - builder::{FullNode, NodeBuilder, NodeHandle}, - providers::CanonStateSubscriptions, - rpc::eth::EthTransactions, - tasks::TaskManager, -}; -use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; -use reth_node_ethereum::EthereumNode; +use crate::utils::EthNode; +use futures::StreamExt; +use reth::rpc::eth::EthTransactions; +use reth_e2e_test_utils::setup; use reth_primitives::{b256, hex, ChainSpec, Genesis}; +use reth_provider::CanonStateSubscriptions; use std::sync::Arc; #[tokio::test] async fn can_run_dev_node() -> eyre::Result<()> { - let tasks = TaskManager::current(); + reth_tracing::init_test_tracing(); + let (mut nodes, _tasks, _) = setup(1, custom_chain(), true).await?; - // create node config - let node_config = NodeConfig::test() - .dev() - .with_rpc(RpcServerArgs::default().with_http().with_unused_ports()) - .with_chain(custom_chain()); - - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) - .testing_node(tasks.executor()) - .node(EthereumNode::default()) - .launch() - .await?; - - assert_chain_advances(node).await; + assert_chain_advances(nodes.pop().unwrap()).await; Ok(()) } -async fn assert_chain_advances(mut node: FullNode) { - let mut notifications = node.provider.canonical_state_stream(); +async fn assert_chain_advances(mut node: EthNode) { + let mut notifications = node.inner.provider.canonical_state_stream(); // submit tx through rpc let raw_tx = hex!("02f876820a28808477359400847735940082520894ab0840c0e43688012c1adb0f5e3fc665188f83d28a029d394a5d630544000080c080a0a044076b7e67b5deecc63f61a8d7913fab86ca365b344b5759d1fe3563b4c39ea019eab979dd000da04dfc72bb0377c092d30fd9e1cab5ae487de49586cc8b0090"); - let eth_api = node.rpc_registry.eth_api(); + let eth_api = node.inner.rpc_registry.eth_api(); let hash = eth_api.send_raw_transaction(raw_tx.into()).await.unwrap(); diff --git 
a/crates/node-ethereum/tests/e2e/eth.rs b/crates/node-ethereum/tests/e2e/eth.rs index 6f9eeb999108a..39ba5e2326995 100644 --- a/crates/node-ethereum/tests/e2e/eth.rs +++ b/crates/node-ethereum/tests/e2e/eth.rs @@ -4,7 +4,7 @@ use reth::{ builder::{NodeBuilder, NodeConfig, NodeHandle}, tasks::TaskManager, }; -use reth_e2e_test_utils::{node::NodeHelper, wallet::Wallet}; +use reth_e2e_test_utils::{node::NodeHelper, setup, wallet::Wallet}; use reth_node_ethereum::EthereumNode; use reth_primitives::{ChainSpecBuilder, Genesis, MAINNET}; use std::sync::Arc; @@ -13,38 +13,24 @@ use std::sync::Arc; async fn can_run_eth_node() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let exec = TaskManager::current(); - let exec = exec.executor(); - - // Chain spec with test allocs - let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(genesis) - .cancun_activated() - .build(), - ); - - // Node setup - let node_config = NodeConfig::test() - .with_chain(chain_spec) - .with_unused_ports() - .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); - - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) - .testing_node(exec) - .node(EthereumNode::default()) - .launch() - .await?; - let mut node = NodeHelper::new(node).await?; - - // Configure wallet from test mnemonic and create dummy transfer tx - let mut wallet = Wallet::default(); + let (mut nodes, _tasks, mut wallet) = setup::( + 1, + Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + ), + false, + ) + .await?; + + let mut node = nodes.pop().unwrap(); let raw_tx = wallet.transfer_tx().await; // make the node advance - node.advance(raw_tx, eth_payload_attributes).await?; + node.advance_block(raw_tx, 
eth_payload_attributes).await?; Ok(()) } @@ -83,7 +69,7 @@ async fn can_run_eth_node_with_auth_engine_api_over_ipc() -> eyre::Result<()> { let raw_tx = wallet.transfer_tx().await; // make the node advance - node.advance(raw_tx, crate::utils::eth_payload_attributes).await?; + node.advance_block(raw_tx, crate::utils::eth_payload_attributes).await?; Ok(()) } diff --git a/crates/node-ethereum/tests/e2e/p2p.rs b/crates/node-ethereum/tests/e2e/p2p.rs index 940096e189b83..c7ce2a7c12d36 100644 --- a/crates/node-ethereum/tests/e2e/p2p.rs +++ b/crates/node-ethereum/tests/e2e/p2p.rs @@ -1,71 +1,34 @@ -use std::sync::Arc; - use crate::utils::eth_payload_attributes; -use reth::{ - args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, - builder::{NodeBuilder, NodeConfig, NodeHandle}, - tasks::TaskManager, -}; -use reth_e2e_test_utils::{node::NodeHelper, wallet::Wallet}; +use reth_e2e_test_utils::setup; use reth_node_ethereum::EthereumNode; -use reth_primitives::{ChainSpecBuilder, Genesis, MAINNET}; +use reth_primitives::{ChainSpecBuilder, MAINNET}; +use std::sync::Arc; #[tokio::test] async fn can_sync() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let tasks = TaskManager::current(); - let exec = tasks.executor(); - - let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(genesis) - .cancun_activated() - .build(), - ); - - let network_config = NetworkArgs { - discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, - ..NetworkArgs::default() - }; - - let node_config = NodeConfig::test() - .with_chain(chain_spec) - .with_network(network_config) - .with_unused_ports() - .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + let (mut nodes, _tasks, mut wallet) = setup::( + 2, + Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + 
.genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + ), + false, + ) + .await?; - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) - .testing_node(exec.clone()) - .node(EthereumNode::default()) - .launch() - .await?; - - let mut first_node = NodeHelper::new(node.clone()).await?; - - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) - .testing_node(exec) - .node(EthereumNode::default()) - .launch() - .await?; - - let mut second_node = NodeHelper::new(node).await?; - - let mut wallet = Wallet::default(); let raw_tx = wallet.transfer_tx().await; - - // Make them peer - first_node.network.add_peer(second_node.network.record()).await; - second_node.network.add_peer(first_node.network.record()).await; - - // Make sure they establish a new session - first_node.network.expect_session().await; - second_node.network.expect_session().await; + let mut second_node = nodes.pop().unwrap(); + let mut first_node = nodes.pop().unwrap(); // Make the first node advance - let (block_hash, tx_hash) = first_node.advance(raw_tx.clone(), eth_payload_attributes).await?; + let ((payload, _), tx_hash) = + first_node.advance_block(raw_tx.clone(), eth_payload_attributes).await?; + let block_hash = payload.block().hash(); // only send forkchoice update to second node second_node.engine_api.update_forkchoice(block_hash).await?; diff --git a/crates/node-ethereum/tests/e2e/utils.rs b/crates/node-ethereum/tests/e2e/utils.rs index 52526c45f3092..2c1dc373b82e1 100644 --- a/crates/node-ethereum/tests/e2e/utils.rs +++ b/crates/node-ethereum/tests/e2e/utils.rs @@ -1,7 +1,12 @@ use reth::rpc::types::engine::PayloadAttributes; +use reth_e2e_test_utils::NodeHelperType; +use reth_node_ethereum::EthereumNode; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_primitives::{Address, B256}; +/// Ethereum Node Helper type +pub(crate) type EthNode = NodeHelperType; + /// Helper 
function to create a new eth payload attributes pub(crate) fn eth_payload_attributes(timestamp: u64) -> EthPayloadBuilderAttributes { let attributes = PayloadAttributes { diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 8f10c00d74545..f242adf5a79ae 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -28,7 +28,7 @@ reth-network.workspace = true reth-interfaces.workspace = true reth-evm.workspace = true reth-revm.workspace = true - +reth-beacon-consensus.workspace = true revm.workspace = true revm-primitives.workspace = true @@ -67,4 +67,5 @@ optimism = [ "reth-rpc/optimism", "reth-revm/optimism", "reth-optimism-payload-builder/optimism", + "reth-beacon-consensus/optimism", ] diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index 5fe4daa7bfd35..da6af2090e03b 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -1,80 +1,28 @@ +use crate::utils::{advance_chain, setup}; use std::sync::Arc; - -use crate::utils::optimism_payload_attributes; -use reth::{ - args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, - builder::{NodeBuilder, NodeConfig, NodeHandle}, - tasks::TaskManager, -}; -use reth_e2e_test_utils::{node::NodeHelper, wallet::Wallet}; -use reth_node_optimism::node::OptimismNode; -use reth_primitives::{hex, Bytes, ChainSpecBuilder, Genesis, BASE_MAINNET}; +use tokio::sync::Mutex; #[tokio::test] async fn can_sync() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let tasks = TaskManager::current(); - let exec = tasks.executor(); - - let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(BASE_MAINNET.chain) - .genesis(genesis) - .ecotone_activated() - .build(), - ); - let mut wallet = Wallet::default().with_chain_id(chain_spec.chain.into()); - - let network_config = NetworkArgs { - discovery: 
DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, - ..NetworkArgs::default() - }; - - let node_config = NodeConfig::test() - .with_chain(chain_spec) - .with_network(network_config) - .with_unused_ports() - .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); - - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) - .testing_node(exec.clone()) - .node(OptimismNode::default()) - .launch() - .await?; - - let mut first_node = NodeHelper::new(node.clone()).await?; - - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) - .testing_node(exec) - .node(OptimismNode::default()) - .launch() - .await?; - - let mut second_node = NodeHelper::new(node).await?; - - // Make them peer - first_node.network.add_peer(second_node.network.record()).await; - second_node.network.add_peer(first_node.network.record()).await; - - // Make sure they establish a new session - first_node.network.expect_session().await; - second_node.network.expect_session().await; + let (mut nodes, _tasks, wallet) = setup(2).await?; + let wallet = Arc::new(Mutex::new(wallet)); - // Taken from optimism tests - let l1_block_info = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); + let second_node = nodes.pop().unwrap(); + let mut first_node = 
nodes.pop().unwrap(); - // Make the first node advance - let raw_tx = wallet.tx(Some(l1_block_info)).await; - let (block_hash, tx_hash) = - first_node.advance(raw_tx.clone(), optimism_payload_attributes).await?; + let tip: usize = 300; + let tip_index: usize = tip - 1; - // only send forkchoice update to second node - second_node.engine_api.update_forkchoice(block_hash).await?; + // On first node, create a chain up to block number 300 + let canonical_payload_chain = advance_chain(tip, &mut first_node, wallet.clone()).await?; + let canonical_chain = + canonical_payload_chain.iter().map(|p| p.0.block().hash()).collect::>(); - // expect second node advanced via p2p gossip - second_node.assert_new_block(tx_hash, block_hash, 1).await?; + // On second node, sync up to block number 300 + second_node.engine_api.update_forkchoice(canonical_chain[tip_index]).await?; + second_node.wait_block(tip as u64, canonical_chain[tip_index], true).await?; Ok(()) } diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs index 1f655502e67b1..5322cad9a6114 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -1,7 +1,45 @@ -use reth::rpc::types::engine::PayloadAttributes; -use reth_node_optimism::OptimismPayloadBuilderAttributes; +use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; +use reth_e2e_test_utils::{wallet::Wallet, NodeHelperType}; +use reth_node_optimism::{OptimismBuiltPayload, OptimismNode, OptimismPayloadBuilderAttributes}; use reth_payload_builder::EthPayloadBuilderAttributes; -use reth_primitives::{Address, B256}; +use reth_primitives::{Address, ChainSpecBuilder, Genesis, B256, BASE_MAINNET}; +use std::sync::Arc; +use tokio::sync::Mutex; + +/// Optimism Node Helper type +pub(crate) type OpNode = NodeHelperType; + +pub(crate) async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskManager, Wallet)> { + let genesis: Genesis = 
serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + reth_e2e_test_utils::setup( + num_nodes, + Arc::new( + ChainSpecBuilder::default() + .chain(BASE_MAINNET.chain) + .genesis(genesis) + .ecotone_activated() + .build(), + ), + false, + ) + .await +} + +pub(crate) async fn advance_chain( + length: usize, + node: &mut OpNode, + wallet: Arc>, +) -> eyre::Result> { + node.advance( + length as u64, + || { + let wallet = wallet.clone(); + Box::pin(async move { wallet.lock().await.optimism_l1_block_info_tx().await }) + }, + optimism_payload_attributes, + ) + .await +} /// Helper function to create a new eth payload attributes pub(crate) fn optimism_payload_attributes(timestamp: u64) -> OptimismPayloadBuilderAttributes { diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index e73357f4a7cd4..78ef740852ff0 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -2,7 +2,7 @@ use crate::{ AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - DatabaseProviderFactory, EvmEnvProvider, StateProviderFactory, + DatabaseProviderFactory, EvmEnvProvider, StageCheckpointReader, StateProviderFactory, }; use reth_db::database::Database; @@ -16,6 +16,7 @@ pub trait FullProvider: + ChainSpecProvider + ChangeSetReader + CanonStateSubscriptions + + StageCheckpointReader + Clone + Unpin + 'static @@ -31,6 +32,7 @@ impl FullProvider for T where + ChainSpecProvider + ChangeSetReader + CanonStateSubscriptions + + StageCheckpointReader + Clone + Unpin + 'static From 00ca9cd0241e639bdf991436d0166ca086694437 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 23 Apr 2024 16:50:14 +0200 Subject: [PATCH 289/700] feat: add prague helpers to spec (#7817) --- crates/primitives/src/chain/spec.rs | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/crates/primitives/src/chain/spec.rs 
b/crates/primitives/src/chain/spec.rs index ee732a9bcbb43..fb657b5f93a36 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -813,6 +813,15 @@ impl ChainSpec { .unwrap_or_else(|| self.is_fork_active_at_timestamp(Hardfork::Cancun, timestamp)) } + /// Convenience method to check if [Hardfork::Prague] is active at a given timestamp. + #[inline] + pub fn is_prague_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork_timestamps + .prague + .map(|prague| timestamp >= prague) + .unwrap_or_else(|| self.is_fork_active_at_timestamp(Hardfork::Prague, timestamp)) + } + /// Convenience method to check if [Hardfork::Byzantium] is active at a given block number. #[inline] pub fn is_byzantium_active_at_block(&self, block_number: u64) -> bool { @@ -1055,10 +1064,12 @@ impl From for ChainSpec { /// Various timestamps of forks #[derive(Debug, Clone, Default, Eq, PartialEq)] pub struct ForkTimestamps { - /// The timestamp of the shanghai fork + /// The timestamp of the Shanghai fork pub shanghai: Option, - /// The timestamp of the cancun fork + /// The timestamp of the Cancun fork pub cancun: Option, + /// The timestamp of the Prague fork + pub prague: Option, /// The timestamp of the Regolith fork #[cfg(feature = "optimism")] pub regolith: Option, @@ -1080,6 +1091,9 @@ impl ForkTimestamps { if let Some(cancun) = forks.get(&Hardfork::Cancun).and_then(|f| f.as_timestamp()) { timestamps = timestamps.cancun(cancun); } + if let Some(prague) = forks.get(&Hardfork::Prague).and_then(|f| f.as_timestamp()) { + timestamps = timestamps.prague(prague); + } #[cfg(feature = "optimism")] { if let Some(regolith) = forks.get(&Hardfork::Regolith).and_then(|f| f.as_timestamp()) { @@ -1095,18 +1109,24 @@ impl ForkTimestamps { timestamps } - /// Sets the given shanghai timestamp + /// Sets the given Shanghai timestamp pub fn shanghai(mut self, shanghai: u64) -> Self { self.shanghai = Some(shanghai); self } - /// Sets the given cancun timestamp + /// 
Sets the given Cancun timestamp pub fn cancun(mut self, cancun: u64) -> Self { self.cancun = Some(cancun); self } + /// Sets the given Prague timestamp + pub fn prague(mut self, prague: u64) -> Self { + self.prague = Some(prague); + self + } + /// Sets the given regolith timestamp #[cfg(feature = "optimism")] pub fn regolith(mut self, regolith: u64) -> Self { From ee1c811c43e9f2dd136f07d28deac6224d6c010d Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Tue, 23 Apr 2024 23:08:32 +0800 Subject: [PATCH 290/700] feat: integrate RpcMiddleware in IPC (#7790) --- Cargo.lock | 2 + crates/ethereum/evm/src/execute.rs | 1 - crates/node-core/src/args/rpc_server_args.rs | 2 +- crates/node-core/src/cli/config.rs | 2 +- crates/optimism/node/src/evm/execute.rs | 1 - crates/rpc/ipc/Cargo.toml | 2 + crates/rpc/ipc/src/server/future.rs | 2 +- crates/rpc/ipc/src/server/ipc.rs | 196 +++--------- crates/rpc/ipc/src/server/mod.rs | 311 ++++++++++++++++--- crates/rpc/ipc/src/server/rpc_service.rs | 138 ++++++++ crates/rpc/rpc-builder/src/auth.rs | 6 +- crates/rpc/rpc-builder/src/lib.rs | 17 +- 12 files changed, 483 insertions(+), 197 deletions(-) create mode 100644 crates/rpc/ipc/src/server/rpc_service.rs diff --git a/Cargo.lock b/Cargo.lock index 3bd9ff743a19f..9e34b6c61282e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6686,9 +6686,11 @@ dependencies = [ "async-trait", "bytes", "futures", + "futures-util", "jsonrpsee", "parity-tokio-ipc", "pin-project", + "reth-tracing", "serde_json", "thiserror", "tokio", diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index b23c35cfd53ab..64b69d1e577c4 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -465,7 +465,6 @@ where #[cfg(test)] mod tests { use super::*; - use crate::EthEvmConfig; use reth_primitives::{ bytes, constants::{BEACON_ROOTS_ADDRESS, SYSTEM_ADDRESS}, diff --git a/crates/node-core/src/args/rpc_server_args.rs b/crates/node-core/src/args/rpc_server_args.rs 
index b12f2740aa68e..da3095815118b 100644 --- a/crates/node-core/src/args/rpc_server_args.rs +++ b/crates/node-core/src/args/rpc_server_args.rs @@ -437,7 +437,7 @@ impl RethRpcConfig for RpcServerArgs { .max_subscriptions_per_connection(self.rpc_max_subscriptions_per_connection.get()) } - fn ipc_server_builder(&self) -> IpcServerBuilder { + fn ipc_server_builder(&self) -> IpcServerBuilder { IpcServerBuilder::default() .max_subscriptions_per_connection(self.rpc_max_subscriptions_per_connection.get()) .max_request_body_size(self.rpc_max_request_size_bytes()) diff --git a/crates/node-core/src/cli/config.rs b/crates/node-core/src/cli/config.rs index 988ef34d5d253..1bce398ef2db1 100644 --- a/crates/node-core/src/cli/config.rs +++ b/crates/node-core/src/cli/config.rs @@ -49,7 +49,7 @@ pub trait RethRpcConfig { fn http_ws_server_builder(&self) -> ServerBuilder; /// Returns the default ipc server builder - fn ipc_server_builder(&self) -> IpcServerBuilder; + fn ipc_server_builder(&self) -> IpcServerBuilder; /// Creates the [RpcServerConfig] from cli args. 
fn rpc_server_config(&self) -> RpcServerConfig; diff --git a/crates/optimism/node/src/evm/execute.rs b/crates/optimism/node/src/evm/execute.rs index cca13fb7d205e..f51c6cd3bb794 100644 --- a/crates/optimism/node/src/evm/execute.rs +++ b/crates/optimism/node/src/evm/execute.rs @@ -545,7 +545,6 @@ mod tests { use revm::L1_BLOCK_CONTRACT; use std::{collections::HashMap, str::FromStr}; - use crate::OptimismEvmConfig; use reth_revm::test_utils::StateProviderTest; fn create_op_state_provider() -> StateProviderTest { diff --git a/crates/rpc/ipc/Cargo.toml b/crates/rpc/ipc/Cargo.toml index 8d93a275cf30f..21b6454094d2f 100644 --- a/crates/rpc/ipc/Cargo.toml +++ b/crates/rpc/ipc/Cargo.toml @@ -29,6 +29,8 @@ serde_json.workspace = true tracing.workspace = true bytes.workspace = true thiserror.workspace = true +futures-util = "0.3.30" [dev-dependencies] tokio-stream = { workspace = true, features = ["sync"] } +reth-tracing.workspace = true diff --git a/crates/rpc/ipc/src/server/future.rs b/crates/rpc/ipc/src/server/future.rs index 84df306a5be06..65aaccc88df73 100644 --- a/crates/rpc/ipc/src/server/future.rs +++ b/crates/rpc/ipc/src/server/future.rs @@ -84,7 +84,7 @@ where while i < self.futures.len() { if self.futures[i].poll_unpin(cx).is_ready() { - // Using `swap_remove` since we don't care about ordering + // Using `swap_remove` since we don't care about ordering, // but we do care about removing being `O(1)`. // // We don't increment `i` in this branch, since we now diff --git a/crates/rpc/ipc/src/server/ipc.rs b/crates/rpc/ipc/src/server/ipc.rs index 8ce4502a28dd9..1fd600c033772 100644 --- a/crates/rpc/ipc/src/server/ipc.rs +++ b/crates/rpc/ipc/src/server/ipc.rs @@ -1,5 +1,7 @@ //! 
IPC request handling adapted from [`jsonrpsee`] http request handling +use std::sync::Arc; + use futures::{stream::FuturesOrdered, StreamExt}; use jsonrpsee::{ batch_response_error, @@ -8,15 +10,10 @@ use jsonrpsee::{ tracing::server::{rx_log_from_json, tx_log_from_str}, JsonRawValue, }, - server::IdProvider, - types::{ - error::{reject_too_many_subscriptions, ErrorCode}, - ErrorObject, Id, InvalidRequest, Notification, Params, Request, - }, - BatchResponseBuilder, BoundedSubscriptions, CallOrSubscription, MethodCallback, MethodResponse, - MethodSink, Methods, ResponsePayload, SubscriptionState, + server::middleware::rpc::RpcServiceT, + types::{error::ErrorCode, ErrorObject, Id, InvalidRequest, Notification, Request}, + BatchResponseBuilder, MethodResponse, ResponsePayload, }; -use std::{sync::Arc, time::Instant}; use tokio::sync::OwnedSemaphorePermit; use tokio_util::either::Either; use tracing::instrument; @@ -24,42 +21,33 @@ use tracing::instrument; type Notif<'a> = Notification<'a, Option<&'a JsonRawValue>>; #[derive(Debug, Clone)] -pub(crate) struct Batch<'a> { +pub(crate) struct Batch { data: Vec, - call: CallData<'a>, -} - -#[derive(Debug, Clone)] -pub(crate) struct CallData<'a> { - conn_id: usize, - methods: &'a Methods, - id_provider: &'a dyn IdProvider, - sink: &'a MethodSink, - max_response_body_size: u32, - max_log_length: u32, - request_start: Instant, - bounded_subscriptions: BoundedSubscriptions, + rpc_service: S, } // Batch responses must be sent back as a single message so we read the results from each // request in the batch and read the results off of a new channel, `rx_batch`, and then send the // complete batch response back to the client over `tx`. 
#[instrument(name = "batch", skip(b), level = "TRACE")] -pub(crate) async fn process_batch_request(b: Batch<'_>) -> Option { - let Batch { data, call } = b; +pub(crate) async fn process_batch_request( + b: Batch, + max_response_body_size: usize, +) -> Option +where + for<'a> S: RpcServiceT<'a> + Send, +{ + let Batch { data, rpc_service } = b; if let Ok(batch) = serde_json::from_slice::>(&data) { let mut got_notif = false; - let mut batch_response = - BatchResponseBuilder::new_with_limit(call.max_response_body_size as usize); + let mut batch_response = BatchResponseBuilder::new_with_limit(max_response_body_size); let mut pending_calls: FuturesOrdered<_> = batch .into_iter() .filter_map(|v| { if let Ok(req) = serde_json::from_str::>(v.get()) { - Some(Either::Right(async { - execute_call(req, call.clone()).await.into_response() - })) + Some(Either::Right(rpc_service.call(req))) } else if let Ok(_notif) = serde_json::from_str::>(v.get()) { // notifications should not be answered. got_notif = true; @@ -95,92 +83,32 @@ pub(crate) async fn process_batch_request(b: Batch<'_>) -> Option { } } -pub(crate) async fn process_single_request( +pub(crate) async fn process_single_request( data: Vec, - call: CallData<'_>, -) -> Option { + rpc_service: &S, +) -> Option +where + for<'a> S: RpcServiceT<'a> + Send, +{ if let Ok(req) = serde_json::from_slice::>(&data) { - Some(execute_call_with_tracing(req, call).await) + Some(execute_call_with_tracing(req, rpc_service).await) } else if serde_json::from_slice::>(&data).is_ok() { None } else { let (id, code) = prepare_error(&data); - Some(CallOrSubscription::Call(MethodResponse::error(id, ErrorObject::from(code)))) + Some(MethodResponse::error(id, ErrorObject::from(code))) } } -#[instrument(name = "method_call", fields(method = req.method.as_ref()), skip(call, req), level = "TRACE")] -pub(crate) async fn execute_call_with_tracing<'a>( +#[instrument(name = "method_call", fields(method = req.method.as_ref()), skip(req, rpc_service), level = 
"TRACE")] +pub(crate) async fn execute_call_with_tracing<'a, S>( req: Request<'a>, - call: CallData<'_>, -) -> CallOrSubscription { - execute_call(req, call).await -} - -pub(crate) async fn execute_call(req: Request<'_>, call: CallData<'_>) -> CallOrSubscription { - let CallData { - methods, - max_response_body_size, - max_log_length, - conn_id, - id_provider, - sink, - request_start, - bounded_subscriptions, - } = call; - - rx_log_from_json(&req, call.max_log_length); - - let params = Params::new(req.params.as_ref().map(|params| params.get())); - let name = &req.method; - let id = req.id; - - let response = match methods.method_with_name(name) { - None => { - let response = MethodResponse::error(id, ErrorObject::from(ErrorCode::MethodNotFound)); - CallOrSubscription::Call(response) - } - Some((_name, method)) => match method { - MethodCallback::Sync(callback) => { - let response = (callback)(id, params, max_response_body_size as usize); - CallOrSubscription::Call(response) - } - MethodCallback::Async(callback) => { - let id = id.into_owned(); - let params = params.into_owned(); - let response = - (callback)(id, params, conn_id, max_response_body_size as usize).await; - CallOrSubscription::Call(response) - } - MethodCallback::AsyncWithDetails(_callback) => { - unimplemented!() - } - MethodCallback::Subscription(callback) => { - if let Some(p) = bounded_subscriptions.acquire() { - let conn_state = - SubscriptionState { conn_id, id_provider, subscription_permit: p }; - let response = callback(id, params, sink.clone(), conn_state).await; - CallOrSubscription::Subscription(response) - } else { - let response = MethodResponse::error( - id, - reject_too_many_subscriptions(bounded_subscriptions.max()), - ); - CallOrSubscription::Call(response) - } - } - MethodCallback::Unsubscription(callback) => { - // Don't adhere to any resource or subscription limits; always let unsubscribing - // happen! 
- let result = callback(id, params, conn_id, max_response_body_size as usize); - CallOrSubscription::Call(result) - } - }, - }; - - tx_log_from_str(response.as_response().as_result(), max_log_length); - let _ = request_start; - response + rpc_service: &S, +) -> MethodResponse +where + for<'b> S: RpcServiceT<'b> + Send, +{ + rpc_service.call(req).await } #[instrument(name = "notification", fields(method = notif.method.as_ref()), skip(notif, max_log_length), level = "TRACE")] @@ -192,31 +120,15 @@ fn execute_notification(notif: &Notif<'_>, max_log_length: u32) -> MethodRespons response } -#[allow(dead_code)] -pub(crate) struct HandleRequest { - pub(crate) methods: Methods, - pub(crate) max_request_body_size: u32, - pub(crate) max_response_body_size: u32, - pub(crate) max_log_length: u32, - pub(crate) batch_requests_supported: bool, - pub(crate) conn: Arc, - pub(crate) bounded_subscriptions: BoundedSubscriptions, - pub(crate) method_sink: MethodSink, - pub(crate) id_provider: Arc, -} - -pub(crate) async fn handle_request(request: String, input: HandleRequest) -> Option { - let HandleRequest { - methods, - max_response_body_size, - max_log_length, - conn, - bounded_subscriptions, - method_sink, - id_provider, - .. 
- } = input; - +pub(crate) async fn call_with_service( + request: String, + rpc_service: S, + max_response_body_size: usize, + conn: Arc, +) -> Option +where + for<'a> S: RpcServiceT<'a> + Send, +{ enum Kind { Single, Batch, @@ -231,31 +143,23 @@ pub(crate) async fn handle_request(request: String, input: HandleRequest) -> Opt }) .unwrap_or(Kind::Single); - let call = CallData { - conn_id: 0, - methods: &methods, - id_provider: &*id_provider, - sink: &method_sink, - max_response_body_size, - max_log_length, - request_start: Instant::now(), - bounded_subscriptions, - }; - // Single request or notification let res = if matches!(request_kind, Kind::Single) { - let response = process_single_request(request.into_bytes(), call).await; + let response = process_single_request(request.into_bytes(), &rpc_service).await; match response { - Some(CallOrSubscription::Call(response)) => Some(response.to_result()), - Some(CallOrSubscription::Subscription(_)) => { + Some(response) if response.is_method_call() => Some(response.to_result()), + _ => { // subscription responses are sent directly over the sink, return a response here // would lead to duplicate responses for the subscription response None } - None => None, } } else { - process_batch_request(Batch { data: request.into_bytes(), call }).await + process_batch_request( + Batch { data: request.into_bytes(), rpc_service }, + max_response_body_size, + ) + .await }; drop(conn); diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index e6d1a6051e35e..7afb6bb7d158e 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -7,7 +7,10 @@ use crate::server::{ use futures::{FutureExt, Stream, StreamExt}; use jsonrpsee::{ core::TEN_MB_SIZE_BYTES, - server::{AlreadyStoppedError, IdProvider, RandomIntegerIdProvider}, + server::{ + middleware::rpc::{either::Either, RpcLoggerLayer, RpcServiceT}, + AlreadyStoppedError, IdProvider, RandomIntegerIdProvider, + }, BoundedSubscriptions, 
MethodSink, Methods, }; use std::{ @@ -21,36 +24,57 @@ use tokio::{ io::{AsyncRead, AsyncWrite}, sync::{oneshot, watch, OwnedSemaphorePermit}, }; -use tower::{layer::util::Identity, Service}; +use tower::{layer::util::Identity, Layer, Service}; use tracing::{debug, trace, warn}; // re-export so can be used during builder setup -use crate::server::connection::IpcConnDriver; +use crate::server::{ + connection::IpcConnDriver, + rpc_service::{RpcService, RpcServiceCfg}, +}; pub use parity_tokio_ipc::Endpoint; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; +use tower::layer::{util::Stack, LayerFn}; mod connection; mod future; mod ipc; +mod rpc_service; /// Ipc Server implementation // This is an adapted `jsonrpsee` Server, but for `Ipc` connections. -pub struct IpcServer { +pub struct IpcServer { /// The endpoint we listen for incoming transactions endpoint: Endpoint, id_provider: Arc, cfg: Settings, - service_builder: tower::ServiceBuilder, + rpc_middleware: RpcServiceBuilder, + http_middleware: tower::ServiceBuilder, } -impl IpcServer { +impl IpcServer { /// Returns the configured [Endpoint] pub fn endpoint(&self) -> &Endpoint { &self.endpoint } +} +impl IpcServer +where + RpcMiddleware: Layer + Clone + Send + 'static, + for<'a> >::Service: RpcServiceT<'a>, + HttpMiddleware: Layer> + Send + 'static, + >>::Service: Send + + Service< + String, + Response = Option, + Error = Box, + >, + <>>::Service as Service>::Future: + Send + Unpin, +{ /// Start responding to connections requests. 
/// /// This will run on the tokio runtime until the server is stopped or the ServerHandle is @@ -123,7 +147,7 @@ impl IpcServer { let incoming = match self.endpoint.incoming() { Ok(connections) => { #[cfg(windows)] - let connections = Box::pin(connections); + let connections = Box::pin(connections); Incoming::new(connections) } Err(err) => { @@ -154,7 +178,7 @@ impl IpcServer { let (tx, rx) = mpsc::channel::(message_buffer_capacity as usize); let method_sink = MethodSink::new_with_limit(tx, max_response_body_size); - let tower_service = TowerService { + let tower_service = TowerServiceNoHttp { inner: ServiceData { methods: methods.clone(), max_request_body_size, @@ -170,9 +194,10 @@ impl IpcServer { ), method_sink, }, + rpc_middleware: self.rpc_middleware.clone(), }; - let service = self.service_builder.service(tower_service); + let service = self.http_middleware.service(tower_service); connections.add(Box::pin(spawn_connection( ipc, service, @@ -244,16 +269,87 @@ pub(crate) struct ServiceData { pub(crate) method_sink: MethodSink, } +/// Similar to [`tower::ServiceBuilder`] but doesn't +/// support any tower middleware implementations. +#[derive(Debug, Clone)] +pub struct RpcServiceBuilder(tower::ServiceBuilder); + +impl Default for RpcServiceBuilder { + fn default() -> Self { + RpcServiceBuilder(tower::ServiceBuilder::new()) + } +} + +impl RpcServiceBuilder { + /// Create a new [`RpcServiceBuilder`]. + pub fn new() -> Self { + Self(tower::ServiceBuilder::new()) + } +} + +impl RpcServiceBuilder { + /// Optionally add a new layer `T` to the [`RpcServiceBuilder`]. + /// + /// See the documentation for [`tower::ServiceBuilder::option_layer`] for more details. + pub fn option_layer( + self, + layer: Option, + ) -> RpcServiceBuilder, L>> { + let layer = if let Some(layer) = layer { + Either::Left(layer) + } else { + Either::Right(Identity::new()) + }; + self.layer(layer) + } + + /// Add a new layer `T` to the [`RpcServiceBuilder`]. 
+ /// + /// See the documentation for [`tower::ServiceBuilder::layer`] for more details. + pub fn layer(self, layer: T) -> RpcServiceBuilder> { + RpcServiceBuilder(self.0.layer(layer)) + } + + /// Add a [`tower::Layer`] built from a function that accepts a service and returns another + /// service. + /// + /// See the documentation for [`tower::ServiceBuilder::layer_fn`] for more details. + pub fn layer_fn(self, f: F) -> RpcServiceBuilder, L>> { + RpcServiceBuilder(self.0.layer_fn(f)) + } + + /// Add a logging layer to [`RpcServiceBuilder`] + /// + /// This logs each request and response for every call. + pub fn rpc_logger(self, max_log_len: u32) -> RpcServiceBuilder> { + RpcServiceBuilder(self.0.layer(RpcLoggerLayer::new(max_log_len))) + } + + /// Wrap the service `S` with the middleware. + pub(crate) fn service(&self, service: S) -> L::Service + where + L: tower::Layer, + { + self.0.service(service) + } +} + /// JsonRPSee service compatible with `tower`. /// /// # Note /// This is similar to [`hyper::service::service_fn`](https://docs.rs/hyper/latest/hyper/service/fn.service_fn.html). -#[derive(Debug)] -pub struct TowerService { +#[derive(Debug, Clone)] +pub struct TowerServiceNoHttp { inner: ServiceData, + rpc_middleware: RpcServiceBuilder, } -impl Service for TowerService { +impl Service for TowerServiceNoHttp +where + RpcMiddleware: for<'a> Layer, + >::Service: Send + Sync + 'static, + for<'a> >::Service: RpcServiceT<'a>, +{ /// The response of a handled RPC call /// /// This is an `Option` because subscriptions and call responses are handled differently. 
@@ -273,26 +369,32 @@ impl Service for TowerService { fn call(&mut self, request: String) -> Self::Future { trace!("{:?}", request); - // handle the request - let data = ipc::HandleRequest { - methods: self.inner.methods.clone(), - max_request_body_size: self.inner.max_request_body_size, - max_response_body_size: self.inner.max_response_body_size, - max_log_length: self.inner.max_log_length, - batch_requests_supported: true, - conn: self.inner.conn.clone(), - bounded_subscriptions: self.inner.bounded_subscriptions.clone(), - method_sink: self.inner.method_sink.clone(), + let cfg = RpcServiceCfg::CallsAndSubscriptions { + bounded_subscriptions: BoundedSubscriptions::new( + self.inner.max_subscriptions_per_connection, + ), id_provider: self.inner.id_provider.clone(), + sink: self.inner.method_sink.clone(), }; + let max_response_body_size = self.inner.max_response_body_size as usize; + let rpc_service = self.rpc_middleware.service(RpcService::new( + self.inner.methods.clone(), + max_response_body_size, + self.inner.conn_id as usize, + cfg, + )); + let conn = self.inner.conn.clone(); // an ipc connection needs to handle read+write concurrently // even if the underlying rpc handler spawns the actual work or is does a lot of async any // additional overhead performed by `handle_request` can result in I/O latencies, for // example tracing calls are relatively CPU expensive on serde::serialize alone, moving this // work to a separate task takes the pressure off the connection so all concurrent responses // are also serialized concurrently and the connection can focus on read+write - let f = tokio::task::spawn(async move { ipc::handle_request(request, data).await }); + let f = tokio::task::spawn(async move { + ipc::call_with_service(request, rpc_service, max_response_body_size, conn).await + }); + Box::pin(async move { f.await.map_err(|err| err.into()) }) } } @@ -413,24 +515,26 @@ impl Default for Settings { /// Builder to configure and create a JSON-RPC server 
#[derive(Debug)] -pub struct Builder { +pub struct Builder { settings: Settings, /// Subscription ID provider. id_provider: Arc, - service_builder: tower::ServiceBuilder, + rpc_middleware: RpcServiceBuilder, + http_middleware: tower::ServiceBuilder, } -impl Default for Builder { +impl Default for Builder { fn default() -> Self { Builder { settings: Settings::default(), id_provider: Arc::new(RandomIntegerIdProvider), - service_builder: tower::ServiceBuilder::new(), + rpc_middleware: RpcServiceBuilder::new(), + http_middleware: tower::ServiceBuilder::new(), } } } -impl Builder { +impl Builder { /// Set the maximum size of a request body in bytes. Default is 10 MiB. pub fn max_request_body_size(mut self, size: u32) -> Self { self.settings.max_request_body_size = size; @@ -529,26 +633,114 @@ impl Builder { /// let builder = tower::ServiceBuilder::new(); /// /// let server = - /// reth_ipc::server::Builder::default().set_middleware(builder).build("/tmp/my-uds"); + /// reth_ipc::server::Builder::default().set_http_middleware(builder).build("/tmp/my-uds"); + /// } + /// ``` + pub fn set_http_middleware( + self, + service_builder: tower::ServiceBuilder, + ) -> Builder { + Builder { + settings: self.settings, + id_provider: self.id_provider, + http_middleware: service_builder, + rpc_middleware: self.rpc_middleware, + } + } + + /// Enable middleware that is invoked on every JSON-RPC call. + /// + /// The middleware itself is very similar to the `tower middleware` but + /// it has a different service trait which takes &self instead &mut self + /// which means that you can't use built-in middleware from tower. + /// + /// Another consequence of `&self` is that you must wrap any of the middleware state in + /// a type which is Send and provides interior mutability such `Arc`. + /// + /// The builder itself exposes a similar API as the [`tower::ServiceBuilder`] + /// where it is possible to compose layers to the middleware. 
+ /// + /// ``` + /// use std::{ + /// net::SocketAddr, + /// sync::{ + /// atomic::{AtomicUsize, Ordering}, + /// Arc, + /// }, + /// time::Instant, + /// }; + /// + /// use futures_util::future::BoxFuture; + /// use jsonrpsee::{ + /// server::{middleware::rpc::RpcServiceT, ServerBuilder}, + /// types::Request, + /// MethodResponse, + /// }; + /// use reth_ipc::server::{Builder, RpcServiceBuilder}; + /// + /// #[derive(Clone)] + /// struct MyMiddleware { + /// service: S, + /// count: Arc, + /// } + /// + /// impl<'a, S> RpcServiceT<'a> for MyMiddleware + /// where + /// S: RpcServiceT<'a> + Send + Sync + Clone + 'static, + /// { + /// type Future = BoxFuture<'a, MethodResponse>; + /// + /// fn call(&self, req: Request<'a>) -> Self::Future { + /// tracing::info!("MyMiddleware processed call {}", req.method); + /// let count = self.count.clone(); + /// let service = self.service.clone(); + /// + /// Box::pin(async move { + /// let rp = service.call(req).await; + /// // Modify the state. + /// count.fetch_add(1, Ordering::Relaxed); + /// rp + /// }) + /// } /// } + /// + /// // Create a state per connection + /// // NOTE: The service type can be omitted once `start` is called on the server. + /// let m = RpcServiceBuilder::new().layer_fn(move |service: ()| MyMiddleware { + /// service, + /// count: Arc::new(AtomicUsize::new(0)), + /// }); + /// let builder = Builder::default().set_rpc_middleware(m); /// ``` - pub fn set_middleware(self, service_builder: tower::ServiceBuilder) -> Builder { - Builder { settings: self.settings, id_provider: self.id_provider, service_builder } + pub fn set_rpc_middleware( + self, + rpc_middleware: RpcServiceBuilder, + ) -> Builder { + Builder { + settings: self.settings, + id_provider: self.id_provider, + rpc_middleware, + http_middleware: self.http_middleware, + } } /// Finalize the configuration of the server. Consumes the [`Builder`]. 
- pub fn build(self, endpoint: impl AsRef) -> IpcServer { + pub fn build(self, endpoint: impl AsRef) -> IpcServer { let endpoint = Endpoint::new(endpoint.as_ref().to_string()); self.build_with_endpoint(endpoint) } /// Finalize the configuration of the server. Consumes the [`Builder`]. - pub fn build_with_endpoint(self, endpoint: Endpoint) -> IpcServer { + pub fn build_with_endpoint( + self, + endpoint: Endpoint, + ) -> IpcServer { IpcServer { endpoint, cfg: self.settings, id_provider: self.id_provider, - service_builder: self.service_builder, + http_middleware: self.http_middleware, + rpc_middleware: self.rpc_middleware, } } } @@ -589,7 +781,9 @@ mod tests { use futures::future::{select, Either}; use jsonrpsee::{ core::client::{ClientT, Subscription, SubscriptionClientT}, - rpc_params, PendingSubscriptionSink, RpcModule, SubscriptionMessage, + rpc_params, + types::Request, + PendingSubscriptionSink, RpcModule, SubscriptionMessage, }; use parity_tokio_ipc::dummy_endpoint; use tokio::sync::broadcast; @@ -657,6 +851,7 @@ mod tests { #[tokio::test] async fn test_ipc_modules() { + reth_tracing::init_test_tracing(); let endpoint = dummy_endpoint(); let server = Builder::default().build(&endpoint); let mut module = RpcModule::new(()); @@ -703,4 +898,50 @@ mod tests { let items = sub.take(16).collect::>().await; assert_eq!(items.len(), 16); } + + #[tokio::test] + async fn test_rpc_middleware() { + #[derive(Clone)] + struct ModifyRequestIf(S); + + impl<'a, S> RpcServiceT<'a> for ModifyRequestIf + where + S: Send + Sync + RpcServiceT<'a>, + { + type Future = S::Future; + + fn call(&self, mut req: Request<'a>) -> Self::Future { + // Re-direct all calls that isn't `say_hello` to `say_goodbye` + if req.method == "say_hello" { + req.method = "say_goodbye".into(); + } else if req.method == "say_goodbye" { + req.method = "say_hello".into(); + } + + self.0.call(req) + } + } + + reth_tracing::init_test_tracing(); + let endpoint = dummy_endpoint(); + + let rpc_middleware = 
RpcServiceBuilder::new().layer_fn(ModifyRequestIf); + let server = Builder::default().set_rpc_middleware(rpc_middleware).build(&endpoint); + + let mut module = RpcModule::new(()); + let goodbye_msg = r#"{"jsonrpc":"2.0","id":1,"result":"goodbye"}"#; + let hello_msg = r#"{"jsonrpc":"2.0","id":2,"result":"hello"}"#; + module.register_method("say_hello", move |_, _| hello_msg).unwrap(); + module.register_method("say_goodbye", move |_, _| goodbye_msg).unwrap(); + let handle = server.start(module).await.unwrap(); + tokio::spawn(handle.stopped()); + + let client = IpcClientBuilder::default().build(endpoint).await.unwrap(); + let say_hello_response: String = client.request("say_hello", rpc_params![]).await.unwrap(); + let say_goodbye_response: String = + client.request("say_goodbye", rpc_params![]).await.unwrap(); + + assert_eq!(say_hello_response, goodbye_msg); + assert_eq!(say_goodbye_response, hello_msg); + } } diff --git a/crates/rpc/ipc/src/server/rpc_service.rs b/crates/rpc/ipc/src/server/rpc_service.rs new file mode 100644 index 0000000000000..94e9ed2aac716 --- /dev/null +++ b/crates/rpc/ipc/src/server/rpc_service.rs @@ -0,0 +1,138 @@ +//! JSON-RPC service middleware. +use futures_util::future::BoxFuture; +use jsonrpsee::{ + server::{ + middleware::rpc::{ResponseFuture, RpcServiceT}, + IdProvider, + }, + types::{error::reject_too_many_subscriptions, ErrorCode, ErrorObject, Request}, + BoundedSubscriptions, ConnectionDetails, MethodCallback, MethodResponse, MethodSink, Methods, + SubscriptionState, +}; +use std::sync::Arc; + +/// JSON-RPC service middleware. +#[derive(Clone, Debug)] +pub struct RpcService { + conn_id: usize, + methods: Methods, + max_response_body_size: usize, + cfg: RpcServiceCfg, +} + +/// Configuration of the RpcService. +#[allow(dead_code)] +#[derive(Clone, Debug)] +pub(crate) enum RpcServiceCfg { + /// The server supports only calls. + OnlyCalls, + /// The server supports both method calls and subscriptions. 
+ CallsAndSubscriptions { + bounded_subscriptions: BoundedSubscriptions, + sink: MethodSink, + id_provider: Arc, + }, +} + +impl RpcService { + /// Create a new service. + pub(crate) fn new( + methods: Methods, + max_response_body_size: usize, + conn_id: usize, + cfg: RpcServiceCfg, + ) -> Self { + Self { methods, max_response_body_size, conn_id, cfg } + } +} + +impl<'a> RpcServiceT<'a> for RpcService { + // The rpc module is already boxing the futures and + // it's used to under the hood by the RpcService. + type Future = ResponseFuture>; + + fn call(&self, req: Request<'a>) -> Self::Future { + let conn_id = self.conn_id; + let max_response_body_size = self.max_response_body_size; + + let params = req.params(); + let name = req.method_name(); + let id = req.id().clone(); + + match self.methods.method_with_name(name) { + None => { + let rp = MethodResponse::error(id, ErrorObject::from(ErrorCode::MethodNotFound)); + ResponseFuture::ready(rp) + } + Some((_name, method)) => match method { + MethodCallback::Async(callback) => { + let params = params.into_owned(); + let id = id.into_owned(); + + let fut = (callback)(id, params, conn_id, max_response_body_size); + ResponseFuture::future(fut) + } + MethodCallback::AsyncWithDetails(callback) => { + let params = params.into_owned(); + let id = id.into_owned(); + + // Note: Add the `Request::extensions` to the connection details when available + // here. 
+ let fut = (callback)( + id, + params, + ConnectionDetails::_new(conn_id), + max_response_body_size, + ); + ResponseFuture::future(fut) + } + MethodCallback::Sync(callback) => { + let rp = (callback)(id, params, max_response_body_size); + ResponseFuture::ready(rp) + } + MethodCallback::Subscription(callback) => { + let RpcServiceCfg::CallsAndSubscriptions { + bounded_subscriptions, + sink, + id_provider, + } = self.cfg.clone() + else { + tracing::warn!("Subscriptions not supported"); + let rp = + MethodResponse::error(id, ErrorObject::from(ErrorCode::InternalError)); + return ResponseFuture::ready(rp); + }; + + if let Some(p) = bounded_subscriptions.acquire() { + let conn_state = SubscriptionState { + conn_id, + id_provider: &*id_provider.clone(), + subscription_permit: p, + }; + + let fut = callback(id.clone(), params, sink, conn_state); + ResponseFuture::future(fut) + } else { + let max = bounded_subscriptions.max(); + let rp = MethodResponse::error(id, reject_too_many_subscriptions(max)); + ResponseFuture::ready(rp) + } + } + MethodCallback::Unsubscription(callback) => { + // Don't adhere to any resource or subscription limits; always let unsubscribing + // happen! + + let RpcServiceCfg::CallsAndSubscriptions { .. } = self.cfg else { + tracing::warn!("Subscriptions not supported"); + let rp = + MethodResponse::error(id, ErrorObject::from(ErrorCode::InternalError)); + return ResponseFuture::ready(rp); + }; + + let rp = callback(id, params, conn_id, max_response_body_size); + ResponseFuture::ready(rp) + } + }, + } + } +} diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index 2349c6e8562cc..cd21be27194eb 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -161,7 +161,7 @@ pub struct AuthServerConfig { /// Configs for JSON-RPC Http. 
pub(crate) server_config: ServerBuilder, /// Configs for IPC server - pub(crate) ipc_server_config: Option, + pub(crate) ipc_server_config: Option>, /// IPC endpoint pub(crate) ipc_endpoint: Option, } @@ -223,7 +223,7 @@ pub struct AuthServerConfigBuilder { socket_addr: Option, secret: JwtSecret, server_config: Option>, - ipc_server_config: Option, + ipc_server_config: Option>, ipc_endpoint: Option, } @@ -289,7 +289,7 @@ impl AuthServerConfigBuilder { /// Configures the IPC server /// /// Note: this always configures an [EthSubscriptionIdProvider] - pub fn with_ipc_config(mut self, config: IpcServerBuilder) -> Self { + pub fn with_ipc_config(mut self, config: IpcServerBuilder) -> Self { self.ipc_server_config = Some(config.set_id_provider(EthSubscriptionIdProvider::default())); self } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index ef5b8868c6f17..62f82b8f8bee4 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -171,7 +171,9 @@ use jsonrpsee::{ use reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; use reth_ipc::server::IpcServer; -pub use reth_ipc::server::{Builder as IpcServerBuilder, Endpoint}; +pub use reth_ipc::server::{ + Builder as IpcServerBuilder, Endpoint, RpcServiceBuilder as IpcRpcServiceBuilder, +}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; use reth_provider::{ AccountReader, BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, @@ -1472,7 +1474,7 @@ pub struct RpcServerConfig { /// Address where to bind the ws server to ws_addr: Option, /// Configs for JSON-RPC IPC server - ipc_server_config: Option, + ipc_server_config: Option>, /// The Endpoint where to launch the ipc server ipc_endpoint: Option, /// JWT secret for authentication @@ -1508,7 +1510,7 @@ impl RpcServerConfig { } /// Creates a new config with only ipc set - pub fn ipc(config: IpcServerBuilder) -> Self { + pub fn ipc(config: IpcServerBuilder) -> Self { 
Self::default().with_ipc(config) } @@ -1568,7 +1570,7 @@ impl RpcServerConfig { /// /// Note: this always configures an [EthSubscriptionIdProvider] [IdProvider] for convenience. /// To set a custom [IdProvider], please use [Self::with_id_provider]. - pub fn with_ipc(mut self, config: IpcServerBuilder) -> Self { + pub fn with_ipc(mut self, config: IpcServerBuilder) -> Self { self.ipc_server_config = Some(config.set_id_provider(EthSubscriptionIdProvider::default())); self } @@ -1756,13 +1758,12 @@ impl RpcServerConfig { server.ws_http = self.build_ws_http(modules).await?; if let Some(builder) = self.ipc_server_config { - // let metrics = modules.ipc.as_ref().map(RpcRequestMetrics::new).unwrap_or_default(); + let metrics = modules.ipc.as_ref().map(RpcRequestMetrics::ipc).unwrap_or_default(); let ipc_path = self .ipc_endpoint .unwrap_or_else(|| Endpoint::new(DEFAULT_IPC_ENDPOINT.to_string())); let ipc = builder - // TODO(mattsse): add metrics middleware for IPC - // .set_middleware(metrics) + .set_rpc_middleware(IpcRpcServiceBuilder::new().layer(metrics)) .build(ipc_path.path()); server.ipc = Some(ipc); } @@ -2127,7 +2128,7 @@ pub struct RpcServer { /// Configured ws,http servers ws_http: WsHttpServer, /// ipc server - ipc: Option, + ipc: Option>>, } // === impl RpcServer === From ce1e401d217215dfdbe6c9de6b87efadf9f804e6 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 23 Apr 2024 17:40:18 +0100 Subject: [PATCH 291/700] docs: add warning regarding `increment_block` necessity on `write_to_storage` (#7816) --- .../src/bundle_state/bundle_state_with_receipts.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index 5e595532c3014..1153464f76fab 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ 
b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -316,7 +316,12 @@ impl BundleStateWithReceipts { let mut bodies_cursor = tx.cursor_read::()?; let mut receipts_cursor = tx.cursor_write::()?; - for (idx, receipts) in self.receipts.into_iter().enumerate() { + // ATTENTION: Any potential future refactor or change to how this loop works should keep in + // mind that the static file producer must always call `increment_block` even if the block + // has no receipts. Keeping track of the exact block range of the segment is needed for + // consistency, querying and file range segmentation. + let blocks = self.receipts.into_iter().enumerate(); + for (idx, receipts) in blocks { let block_number = self.first_block + idx as u64; let first_tx_index = bodies_cursor .seek_exact(block_number)? From a8a434d2c4869c8efb520e6b22eacc9b4d3ae439 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 23 Apr 2024 21:06:12 +0200 Subject: [PATCH 292/700] chore: flatten more deps (#7824) --- Cargo.lock | 3 +-- crates/rpc/rpc-engine-api/Cargo.toml | 7 ++++--- crates/rpc/rpc-engine-api/src/engine_api.rs | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9e34b6c61282e..9a183217c7fb8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7375,10 +7375,9 @@ dependencies = [ "metrics", "reth-beacon-consensus", "reth-engine-primitives", + "reth-ethereum-engine-primitives", "reth-interfaces", "reth-metrics", - "reth-node-ethereum", - "reth-node-optimism", "reth-payload-builder", "reth-primitives", "reth-provider", diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index b7e6eeccb6897..5fe782a6ef539 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -39,12 +39,13 @@ tracing.workspace = true serde.workspace = true [dev-dependencies] -alloy-rlp.workspace = true -reth-node-ethereum.workspace = true -reth-node-optimism.workspace = true 
+reth-ethereum-engine-primitives.workspace = true reth-interfaces = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-payload-builder = { workspace = true, features = ["test-utils"] } + +alloy-rlp.workspace = true + assert_matches.workspace = true [features] diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 01db0bb115967..be9f98832abe1 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -712,8 +712,8 @@ mod tests { use super::*; use assert_matches::assert_matches; use reth_beacon_consensus::BeaconEngineMessage; + use reth_ethereum_engine_primitives::EthEngineTypes; use reth_interfaces::test_utils::generators::random_block; - use reth_node_ethereum::EthEngineTypes; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::{SealedBlock, B256, MAINNET}; use reth_provider::test_utils::MockEthProvider; From c659e28aa0e3ea5e83341b8a9eead0f0c89c296a Mon Sep 17 00:00:00 2001 From: Kyrylo Riabov Date: Tue, 23 Apr 2024 22:10:45 +0300 Subject: [PATCH 293/700] feat(storage): replace Tree generic with Arc (#7810) Co-authored-by: Matthias Seitz --- .../src/commands/debug_cmd/build_block.rs | 2 +- .../src/commands/debug_cmd/replay_engine.rs | 5 +- .../consensus/beacon/src/engine/test_utils.rs | 12 +-- crates/e2e-test-utils/src/lib.rs | 8 +- crates/node-builder/src/builder.rs | 67 ++++++------- crates/storage/provider/src/providers/mod.rs | 94 ++++++++----------- crates/storage/provider/src/traits/mod.rs | 3 + .../provider/src/traits/tree_viewer.rs | 22 +++++ examples/rpc-db/src/main.rs | 2 +- 9 files changed, 102 insertions(+), 113 deletions(-) create mode 100644 crates/storage/provider/src/traits/tree_viewer.rs diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 7aaef00fa2741..aee51ee793da6 100644 --- 
a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -174,7 +174,7 @@ impl Command { EvmProcessorFactory::new(self.chain.clone(), evm_config), ); let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default(), None)?; - let blockchain_tree = ShareableBlockchainTree::new(tree); + let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); // fetch the best block from the database let best_block = diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 841b9e3c67675..d9b6e98659cbd 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -136,11 +136,10 @@ impl Command { EvmProcessorFactory::new(self.chain.clone(), evm_config), ); let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default(), None)?; - let blockchain_tree = ShareableBlockchainTree::new(tree); + let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); // Set up the blockchain provider - let blockchain_db = - BlockchainProvider::new(provider_factory.clone(), blockchain_tree.clone())?; + let blockchain_db = BlockchainProvider::new(provider_factory.clone(), blockchain_tree)?; // Set up network let network_secret_path = diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index ff36e871b0283..67225b7c71ce6 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -43,13 +43,7 @@ type DatabaseEnv = TempDatabase; type TestBeaconConsensusEngine = BeaconConsensusEngine< Arc, - BlockchainProvider< - Arc, - ShareableBlockchainTree< - Arc, - EitherExecutorFactory>, - >, - >, + BlockchainProvider>, Arc>, EthEngineTypes, >; @@ -423,9 +417,9 @@ where // Setup blockchain tree let externals = TreeExternals::new(provider_factory.clone(), consensus, executor_factory); let 
config = BlockchainTreeConfig::new(1, 2, 3, 2); - let tree = ShareableBlockchainTree::new( + let tree = Arc::new(ShareableBlockchainTree::new( BlockchainTree::new(externals, config, None).expect("failed to create tree"), - ); + )); let latest = self.base_config.chain_spec.genesis_header().seal_slow(); let blockchain_provider = BlockchainProvider::with_latest(provider_factory.clone(), tree, latest); diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 043d1e0c66457..3c34f76e57aa5 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -1,15 +1,13 @@ use node::NodeHelper; use reth::{ args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, - blockchain_tree::ShareableBlockchainTree, builder::{NodeBuilder, NodeConfig, NodeHandle}, - revm::EvmProcessorFactory, tasks::TaskManager, }; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_node_builder::{ components::{NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, - FullNodeComponentsAdapter, FullNodeTypesAdapter, NodeTypes, + FullNodeComponentsAdapter, FullNodeTypesAdapter, }; use reth_primitives::ChainSpec; use reth_provider::providers::BlockchainProvider; @@ -100,12 +98,10 @@ where // Type aliases type TmpDB = Arc>; -type EvmType = EvmProcessorFactory<::Evm>; -type RethProvider = BlockchainProvider>>; type TmpPool = <>>::PoolBuilder as PoolBuilder< TmpNodeAdapter, >>::Pool; -type TmpNodeAdapter = FullNodeTypesAdapter>; +type TmpNodeAdapter = FullNodeTypesAdapter>; /// Type alias for a type of NodeHelper pub type NodeHelperType = NodeHelper, TmpPool>>; diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index 3d7d3a04462b9..327d906b34848 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -63,11 +63,9 @@ use tokio::sync::{mpsc::unbounded_channel, oneshot}; /// The builtin provider type of the reth node. 
// Note: we need to hardcode this because custom components might depend on it in associated types. -type RethFullProviderType = - BlockchainProvider>>; +type RethFullProviderType = BlockchainProvider; -type RethFullAdapter = - FullNodeTypesAdapter::Evm>>; +type RethFullAdapter = FullNodeTypesAdapter>; #[cfg_attr(doc, aquamarine::aquamarine)] /// Declaratively construct a node. @@ -278,7 +276,7 @@ where >, > where - N: Node::Evm>>>, + N: Node>>, N::PoolBuilder: PoolBuilder>, N::NetworkBuilder: crate::components::NetworkBuilder< RethFullAdapter, @@ -308,15 +306,14 @@ where Types, Components, FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, > where - Components: NodeComponentsBuilder< - FullNodeTypesAdapter>, - >, + Components: + NodeComponentsBuilder>>, { NodeBuilder { config: self.config, @@ -339,7 +336,7 @@ impl Types, Components, FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -347,9 +344,7 @@ impl where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, Types: NodeTypes, - Components: NodeComponentsBuilder< - FullNodeTypesAdapter>, - >, + Components: NodeComponentsBuilder>>, { /// Apply a function to the components builder. 
pub fn map_components(self, f: impl FnOnce(Components) -> Components) -> Self { @@ -371,7 +366,7 @@ where where F: Fn( FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, ) -> eyre::Result<()> @@ -388,7 +383,7 @@ where F: Fn( FullNode< FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -407,7 +402,7 @@ where RpcContext< '_, FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -427,7 +422,7 @@ where RpcContext< '_, FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -449,7 +444,7 @@ where F: Fn( ExExContext< FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -476,7 +471,7 @@ where ) -> eyre::Result< NodeHandle< FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -556,7 +551,7 @@ where .with_sync_metrics_tx(sync_metrics_tx.clone()); let canon_state_notification_sender = tree.canon_state_notification_sender(); - let blockchain_tree = ShareableBlockchainTree::new(tree); + let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); debug!(target: "reth::cli", "configured blockchain tree"); // fetch the head block from the database @@ -995,7 +990,7 @@ where >, > where - N: Node::Evm>>>, + N: Node>>, N::PoolBuilder: PoolBuilder>, N::NetworkBuilder: crate::components::NetworkBuilder< RethFullAdapter, @@ -1032,7 +1027,7 @@ where >, > where - N: Node::Evm>>>, + N: Node>>, N::PoolBuilder: PoolBuilder>, N::NetworkBuilder: crate::components::NetworkBuilder< RethFullAdapter, @@ -1065,15 +1060,14 @@ where Types, Components, FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, > where - Components: NodeComponentsBuilder< - FullNodeTypesAdapter>, - >, + Components: + NodeComponentsBuilder>>, { WithLaunchContext { builder: 
self.builder.with_components(components_builder), @@ -1090,7 +1084,7 @@ impl Types, Components, FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -1098,9 +1092,7 @@ impl where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, Types: NodeTypes, - Components: NodeComponentsBuilder< - FullNodeTypesAdapter>, - >, + Components: NodeComponentsBuilder>>, { /// Apply a function to the components builder. pub fn map_components(self, f: impl FnOnce(Components) -> Components) -> Self { @@ -1116,7 +1108,7 @@ where where F: Fn( FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, ) -> eyre::Result<()> @@ -1133,7 +1125,7 @@ where F: Fn( FullNode< FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -1152,7 +1144,7 @@ where RpcContext< '_, FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -1172,7 +1164,7 @@ where RpcContext< '_, FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -1190,7 +1182,7 @@ where F: Fn( ExExContext< FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -1210,7 +1202,7 @@ where ) -> eyre::Result< NodeHandle< FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -1391,13 +1383,12 @@ impl std::fmt::Debug for BuilderContext { pub struct InitState; /// The state after all types of the node have been configured. -#[derive(Debug)] pub struct TypesState where DB: Database + Clone + 'static, Types: NodeTypes, { - adapter: FullNodeTypesAdapter>, + adapter: FullNodeTypesAdapter>, } /// The state of the node builder process after the node's components have been configured. 
diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index b3011a9b02cfd..c9ebd042cdd21 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -4,7 +4,8 @@ use crate::{ CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, HeaderProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, - StateProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, + StateProviderFactory, TransactionVariant, TransactionsProvider, TreeViewer, + WithdrawalsProvider, }; use reth_db::{ database::Database, @@ -67,31 +68,36 @@ use reth_rpc_types::engine::ForkchoiceState; /// This type serves as the main entry point for interacting with the blockchain and provides data /// from database storage and from the blockchain tree (pending state etc.) It is a simple wrapper /// type that holds an instance of the database and the blockchain tree. -#[derive(Clone, Debug)] -pub struct BlockchainProvider { +#[derive(Clone)] +#[allow(missing_debug_implementations)] +pub struct BlockchainProvider { /// Provider type used to access the database. database: ProviderFactory, /// The blockchain tree instance. - tree: Tree, + tree: Arc, /// Tracks the chain info wrt forkchoice updates chain_info: ChainInfoTracker, } -impl BlockchainProvider { +impl BlockchainProvider { /// Create new provider instance that wraps the database and the blockchain tree, using the /// provided latest header to initialize the chain info tracker. 
- pub fn with_latest(database: ProviderFactory, tree: Tree, latest: SealedHeader) -> Self { + pub fn with_latest( + database: ProviderFactory, + tree: Arc, + latest: SealedHeader, + ) -> Self { Self { database, tree, chain_info: ChainInfoTracker::new(latest) } } } -impl BlockchainProvider +impl BlockchainProvider where DB: Database, { /// Create a new provider using only the database and the tree, fetching the latest header from /// the database to initialize the provider. - pub fn new(database: ProviderFactory, tree: Tree) -> ProviderResult { + pub fn new(database: ProviderFactory, tree: Arc) -> ProviderResult { let provider = database.provider()?; let best: ChainInfo = provider.chain_info()?; match provider.header_by_number(best.best_number)? { @@ -104,10 +110,9 @@ where } } -impl BlockchainProvider +impl BlockchainProvider where DB: Database, - Tree: BlockchainTreeViewer, { /// Ensures that the given block number is canonical (synced) /// @@ -128,7 +133,7 @@ where } } -impl DatabaseProviderFactory for BlockchainProvider +impl DatabaseProviderFactory for BlockchainProvider where DB: Database, { @@ -137,10 +142,9 @@ where } } -impl HeaderProvider for BlockchainProvider +impl HeaderProvider for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.database.header(block_hash) @@ -182,10 +186,9 @@ where } } -impl BlockHashReader for BlockchainProvider +impl BlockHashReader for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn block_hash(&self, number: u64) -> ProviderResult> { self.database.block_hash(number) @@ -200,10 +203,9 @@ where } } -impl BlockNumReader for BlockchainProvider +impl BlockNumReader for BlockchainProvider where DB: Database, - Tree: BlockchainTreeViewer + Send + Sync, { fn chain_info(&self) -> ProviderResult { Ok(self.chain_info.chain_info()) @@ -222,10 +224,9 @@ where } } -impl BlockIdReader for BlockchainProvider +impl BlockIdReader for BlockchainProvider 
where DB: Database, - Tree: BlockchainTreeViewer + Send + Sync, { fn pending_block_num_hash(&self) -> ProviderResult> { Ok(self.tree.pending_block_num_hash()) @@ -240,10 +241,9 @@ where } } -impl BlockReader for BlockchainProvider +impl BlockReader for BlockchainProvider where DB: Database, - Tree: BlockchainTreeViewer + Send + Sync, { fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { let block = match source { @@ -320,10 +320,9 @@ where } } -impl TransactionsProvider for BlockchainProvider +impl TransactionsProvider for BlockchainProvider where DB: Database, - Tree: BlockchainTreeViewer + Send + Sync, { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.database.transaction_id(tx_hash) @@ -388,10 +387,9 @@ where } } -impl ReceiptProvider for BlockchainProvider +impl ReceiptProvider for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn receipt(&self, id: TxNumber) -> ProviderResult> { self.database.receipt(id) @@ -412,10 +410,10 @@ where self.database.receipts_by_tx_range(range) } } -impl ReceiptProviderIdExt for BlockchainProvider + +impl ReceiptProviderIdExt for BlockchainProvider where DB: Database, - Tree: BlockchainTreeViewer + Send + Sync, { fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { match block { @@ -440,10 +438,9 @@ where } } -impl WithdrawalsProvider for BlockchainProvider +impl WithdrawalsProvider for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn withdrawals_by_block( &self, @@ -458,10 +455,9 @@ where } } -impl StageCheckpointReader for BlockchainProvider +impl StageCheckpointReader for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { self.database.provider()?.get_stage_checkpoint(id) @@ -472,10 +468,9 @@ where } } -impl EvmEnvProvider for BlockchainProvider +impl EvmEnvProvider for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn fill_env_at( &self, @@ 
-544,10 +539,9 @@ where } } -impl PruneCheckpointReader for BlockchainProvider +impl PruneCheckpointReader for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn get_prune_checkpoint( &self, @@ -557,20 +551,18 @@ where } } -impl ChainSpecProvider for BlockchainProvider +impl ChainSpecProvider for BlockchainProvider where DB: Send + Sync, - Tree: Send + Sync, { fn chain_spec(&self) -> Arc { self.database.chain_spec() } } -impl StateProviderFactory for BlockchainProvider +impl StateProviderFactory for BlockchainProvider where DB: Database, - Tree: BlockchainTreePendingStateProvider + BlockchainTreeViewer, { /// Storage provider for latest block fn latest(&self) -> ProviderResult { @@ -644,10 +636,9 @@ where } } -impl BlockchainTreeEngine for BlockchainProvider +impl BlockchainTreeEngine for BlockchainProvider where DB: Send + Sync, - Tree: BlockchainTreeEngine, { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { self.tree.buffer_block(block) @@ -681,10 +672,9 @@ where } } -impl BlockchainTreeViewer for BlockchainProvider +impl BlockchainTreeViewer for BlockchainProvider where DB: Send + Sync, - Tree: BlockchainTreeViewer, { fn blocks(&self) -> BTreeMap> { self.tree.blocks() @@ -743,10 +733,9 @@ where } } -impl CanonChainTracker for BlockchainProvider +impl CanonChainTracker for BlockchainProvider where DB: Send + Sync, - Tree: Send + Sync, Self: BlockReader, { fn on_forkchoice_update_received(&self, _update: &ForkchoiceState) { @@ -779,10 +768,9 @@ where } } -impl BlockReaderIdExt for BlockchainProvider +impl BlockReaderIdExt for BlockchainProvider where Self: BlockReader + BlockIdReader + ReceiptProviderIdExt, - Tree: BlockchainTreeEngine, { fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { @@ -859,10 +847,9 @@ where } } -impl BlockchainTreePendingStateProvider for BlockchainProvider +impl BlockchainTreePendingStateProvider for BlockchainProvider where DB: Send + Sync, - Tree: 
BlockchainTreePendingStateProvider, { fn find_pending_state_provider( &self, @@ -872,20 +859,18 @@ where } } -impl CanonStateSubscriptions for BlockchainProvider +impl CanonStateSubscriptions for BlockchainProvider where DB: Send + Sync, - Tree: CanonStateSubscriptions, { fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.tree.subscribe_to_canonical_state() } } -impl ChangeSetReader for BlockchainProvider +impl ChangeSetReader for BlockchainProvider where DB: Database, - Tree: Sync + Send, { fn account_block_changeset( &self, @@ -895,10 +880,9 @@ where } } -impl AccountReader for BlockchainProvider +impl AccountReader for BlockchainProvider where DB: Database + Sync + Send, - Tree: Sync + Send, { /// Get basic account information. fn basic_account(&self, address: Address) -> ProviderResult> { diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index c9623cb0c21a8..44884acb019a6 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -80,3 +80,6 @@ pub use stats::StatsReader; mod full; pub use full::FullProvider; + +mod tree_viewer; +pub use tree_viewer::TreeViewer; diff --git a/crates/storage/provider/src/traits/tree_viewer.rs b/crates/storage/provider/src/traits/tree_viewer.rs new file mode 100644 index 0000000000000..db3b19c4d611f --- /dev/null +++ b/crates/storage/provider/src/traits/tree_viewer.rs @@ -0,0 +1,22 @@ +use crate::{BlockchainTreePendingStateProvider, CanonStateSubscriptions}; + +use reth_interfaces::blockchain_tree::{BlockchainTreeEngine, BlockchainTreeViewer}; + +/// Helper trait to combine all the traits we need for the BlockchainProvider +/// +/// This is a temporary solution +pub trait TreeViewer: + BlockchainTreeViewer + + BlockchainTreePendingStateProvider + + CanonStateSubscriptions + + BlockchainTreeEngine +{ +} + +impl TreeViewer for T where + T: BlockchainTreeViewer + + BlockchainTreePendingStateProvider + + 
CanonStateSubscriptions + + BlockchainTreeEngine +{ +} diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs index b8286e51be79e..627da093c5910 100644 --- a/examples/rpc-db/src/main.rs +++ b/examples/rpc-db/src/main.rs @@ -49,7 +49,7 @@ async fn main() -> eyre::Result<()> { // 2. Setup the blockchain provider using only the database provider and a noop for the tree to // satisfy trait bounds. Tree is not used in this example since we are only operating on the // disk and don't handle new blocks/live sync etc, which is done by the blockchain tree. - let provider = BlockchainProvider::new(factory, NoopBlockchainTree::default())?; + let provider = BlockchainProvider::new(factory, Arc::new(NoopBlockchainTree::default()))?; let rpc_builder = RpcModuleBuilder::default() .with_provider(provider.clone()) From cfeead75985ebfc1e5cb5fefe4ad51f70cf2d6d2 Mon Sep 17 00:00:00 2001 From: Darshan Kathiriya <8559992+lakshya-sky@users.noreply.github.com> Date: Tue, 23 Apr 2024 16:55:09 -0400 Subject: [PATCH 294/700] add `reth-evm-optimism` (#7821) --- Cargo.lock | 14 + Cargo.toml | 4 +- crates/optimism/evm/Cargo.toml | 37 ++ crates/optimism/evm/src/execute.rs | 744 ++++++++++++++++++++++++++++ crates/optimism/evm/src/lib.rs | 107 ++++ crates/optimism/node/Cargo.toml | 6 +- crates/optimism/node/src/evm/mod.rs | 94 ---- crates/optimism/node/src/lib.rs | 7 +- crates/optimism/node/src/node.rs | 3 +- crates/rpc/rpc/Cargo.toml | 2 +- 10 files changed, 914 insertions(+), 104 deletions(-) create mode 100644 crates/optimism/evm/Cargo.toml create mode 100644 crates/optimism/evm/src/execute.rs create mode 100644 crates/optimism/evm/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 9a183217c7fb8..9c450fe702112 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6642,6 +6642,19 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-evm-optimism" +version = "0.2.0-beta.6" +dependencies = [ + "reth-evm", + "reth-interfaces", + "reth-primitives", + "reth-provider", + "reth-revm", 
+ "revm-primitives", + "tracing", +] + [[package]] name = "reth-exex" version = "0.2.0-beta.6" @@ -7049,6 +7062,7 @@ dependencies = [ "reth-db", "reth-e2e-test-utils", "reth-evm", + "reth-evm-optimism", "reth-interfaces", "reth-network", "reth-node-api", diff --git a/Cargo.toml b/Cargo.toml index 1fb403e1ba1fd..7ef645f39e02b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,6 +51,7 @@ members = [ "crates/node-ethereum/", "crates/node-builder/", "crates/optimism/node/", + "crates/optimism/evm/", "crates/node-core/", "crates/node/api/", "crates/stages/", @@ -85,7 +86,7 @@ members = [ "examples/custom-inspector/", "examples/exex/minimal/", "examples/exex/op-bridge/", - "testing/ef-tests/" + "testing/ef-tests/", ] default-members = ["bin/reth"] @@ -220,6 +221,7 @@ reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } reth-node-builder = { path = "crates/node-builder" } reth-node-ethereum = { path = "crates/node-ethereum" } reth-node-optimism = { path = "crates/optimism/node" } +reth-evm-optimism = { path = "crates/optimism/evm" } reth-node-core = { path = "crates/node-core" } reth-node-api = { path = "crates/node/api" } reth-downloaders = { path = "crates/net/downloaders" } diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml new file mode 100644 index 0000000000000..fbffa12455113 --- /dev/null +++ b/crates/optimism/evm/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "reth-evm-optimism" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +# Reth +reth-evm.workspace = true +reth-primitives.workspace = true +reth-revm.workspace = true +reth-interfaces.workspace = true +reth-provider.workspace = true + +# Optimism +revm-primitives.workspace = true + +# misc +tracing.workspace = true + +[dev-dependencies] +reth-revm = { workspace = true, features = ["test-utils"] } + 
+[features] +optimism = [ + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-revm/optimism", + "reth-interfaces/optimism", + "revm-primitives/optimism", +] diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs new file mode 100644 index 0000000000000..ef87cce1dfd46 --- /dev/null +++ b/crates/optimism/evm/src/execute.rs @@ -0,0 +1,744 @@ +//! Optimism block executor. + +use crate::OptimismEvmConfig; +use reth_evm::{ + execute::{ + BatchBlockOutput, BatchExecutor, EthBlockExecutionInput, EthBlockOutput, Executor, + ExecutorProvider, + }, + ConfigureEvm, ConfigureEvmEnv, +}; +use reth_interfaces::{ + executor::{BlockExecutionError, BlockValidationError, OptimismBlockExecutionError}, + provider::ProviderError, +}; +use reth_primitives::{ + proofs::calculate_receipt_root_optimism, BlockWithSenders, Bloom, Bytes, ChainSpec, + GotExpected, Hardfork, Header, PruneModes, Receipt, ReceiptWithBloom, Receipts, TxType, + Withdrawals, B256, U256, +}; +use reth_provider::BundleStateWithReceipts; +use reth_revm::{ + batch::{BlockBatchRecord, BlockExecutorStats}, + db::states::bundle_state::BundleRetention, + optimism::ensure_create2_deployer, + processor::compare_receipts_root_and_logs_bloom, + stack::InspectorStack, + state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, + Evm, State, +}; +use revm_primitives::{ + db::{Database, DatabaseCommit}, + BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, +}; +use std::sync::Arc; +use tracing::{debug, trace}; + +/// Provides executors to execute regular ethereum blocks +#[derive(Debug, Clone)] +pub struct OpExecutorProvider { + chain_spec: Arc, + evm_config: EvmConfig, + inspector: Option, + prune_modes: PruneModes, +} + +impl OpExecutorProvider { + /// Creates a new default optimism executor provider. 
+ pub fn optimism(chain_spec: Arc) -> Self { + Self::new(chain_spec, Default::default()) + } +} + +impl OpExecutorProvider { + /// Creates a new executor provider. + pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { + Self { chain_spec, evm_config, inspector: None, prune_modes: PruneModes::none() } + } + + /// Configures an optional inspector stack for debugging. + pub fn with_inspector(mut self, inspector: Option) -> Self { + self.inspector = inspector; + self + } + + /// Configures the prune modes for the executor. + pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { + self.prune_modes = prune_modes; + self + } +} + +impl OpExecutorProvider +where + EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvmEnv, +{ + fn op_executor(&self, db: DB) -> OpBlockExecutor + where + DB: Database, + { + OpBlockExecutor::new( + self.chain_spec.clone(), + self.evm_config.clone(), + State::builder().with_database(db).with_bundle_update().without_state_clear().build(), + ) + .with_inspector(self.inspector.clone()) + } +} + +impl ExecutorProvider for OpExecutorProvider +where + EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvmEnv, +{ + type Executor> = OpBlockExecutor; + + type BatchExecutor> = OpBatchExecutor; + fn executor(&self, db: DB) -> Self::Executor + where + DB: Database, + { + self.op_executor(db) + } + + fn batch_executor(&self, db: DB) -> Self::BatchExecutor + where + DB: Database, + { + let executor = self.op_executor(db); + OpBatchExecutor { + executor, + batch_record: BlockBatchRecord::new(self.prune_modes.clone()), + stats: BlockExecutorStats::default(), + } + } +} + +/// Helper container type for EVM with chain spec. +#[derive(Debug, Clone)] +struct OpEvmExecutor { + /// The chainspec + chain_spec: Arc, + /// How to create an EVM. + evm_config: EvmConfig, +} + +impl OpEvmExecutor +where + EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvmEnv, +{ + /// Executes the transactions in the block and returns the receipts. 
+ /// + /// This applies the pre-execution changes, and executes the transactions. + /// + /// # Note + /// + /// It does __not__ apply post-execution changes. + fn execute_pre_and_transactions( + &mut self, + block: &BlockWithSenders, + mut evm: Evm<'_, Ext, &mut State>, + ) -> Result<(Vec, u64), BlockExecutionError> + where + DB: Database, + { + // apply pre execution changes + apply_beacon_root_contract_call( + &self.chain_spec, + block.timestamp, + block.number, + block.parent_beacon_block_root, + &mut evm, + )?; + + // execute transactions + let is_regolith = + self.chain_spec.fork(Hardfork::Regolith).active_at_timestamp(block.timestamp); + + // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism + // blocks will always have at least a single transaction in them (the L1 info transaction), + // so we can safely assume that this will always be triggered upon the transition and that + // the above check for empty blocks will never be hit on OP chains. + ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()).map_err( + |_| { + BlockExecutionError::OptimismBlockExecution( + OptimismBlockExecutionError::ForceCreate2DeployerFail, + ) + }, + )?; + + let mut cumulative_gas_used = 0; + let mut receipts = Vec::with_capacity(block.body.len()); + for (sender, transaction) in block.transactions_with_sender() { + // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, + // must be no greater than the block’s gasLimit. + let block_available_gas = block.header.gas_limit - cumulative_gas_used; + if transaction.gas_limit() > block_available_gas && + (is_regolith || !transaction.is_system_transaction()) + { + return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { + transaction_gas_limit: transaction.gas_limit(), + block_available_gas, + } + .into()); + } + + // An optimism block should never contain blob transactions. 
+ if matches!(transaction.tx_type(), TxType::Eip4844) { + return Err(BlockExecutionError::OptimismBlockExecution( + OptimismBlockExecutionError::BlobTransactionRejected, + )); + } + + // Cache the depositor account prior to the state transition for the deposit nonce. + // + // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces + // were not introduced in Bedrock. In addition, regular transactions don't have deposit + // nonces, so we don't need to touch the DB for those. + let depositor = (is_regolith && transaction.is_deposit()) + .then(|| { + evm.db_mut() + .load_cache_account(*sender) + .map(|acc| acc.account_info().unwrap_or_default()) + }) + .transpose() + .map_err(|_| { + BlockExecutionError::OptimismBlockExecution( + OptimismBlockExecutionError::AccountLoadFailed(*sender), + ) + })?; + + let mut buf = Vec::with_capacity(transaction.length_without_header()); + transaction.encode_enveloped(&mut buf); + EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender, buf.into()); + + // Execute transaction. + let ResultAndState { result, state } = evm.transact().map_err(move |err| { + // Ensure hash is calculated for error log, if not already done + BlockValidationError::EVM { + hash: transaction.recalculate_hash(), + error: err.into(), + } + })?; + + trace!( + target: "evm", + ?transaction, + "Executed transaction" + ); + + evm.db_mut().commit(state); + + // append gas used + cumulative_gas_used += result.gas_used(); + + // Push transaction changeset and calculate header bloom filter for receipt. + receipts.push(Receipt { + tx_type: transaction.tx_type(), + // Success flag was added in `EIP-658: Embedding transaction status code in + // receipts`. + success: result.is_success(), + cumulative_gas_used, + logs: result.into_logs(), + deposit_nonce: depositor.map(|account| account.nonce), + // The deposit receipt version was introduced in Canyon to indicate an update to how + // receipt hashes should be computed when set. 
The state transition process ensures + // this is only set for post-Canyon deposit transactions. + deposit_receipt_version: (transaction.is_deposit() && + self.chain_spec + .is_fork_active_at_timestamp(Hardfork::Canyon, block.timestamp)) + .then_some(1), + }); + } + drop(evm); + + // Check if gas used matches the value set in header. + if block.gas_used != cumulative_gas_used { + let receipts = Receipts::from_block_receipt(receipts); + return Err(BlockValidationError::BlockGasUsed { + gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, + gas_spent_by_tx: receipts.gas_spent_by_tx()?, + } + .into()); + } + + Ok((receipts, cumulative_gas_used)) + } +} + +/// A basic Ethereum block executor. +/// +/// Expected usage: +/// - Create a new instance of the executor. +/// - Execute the block. +#[derive(Debug)] +pub struct OpBlockExecutor { + /// Chain specific evm config that's used to execute a block. + executor: OpEvmExecutor, + /// The state to use for execution + state: State, + /// Optional inspector stack for debugging + inspector: Option, +} + +impl OpBlockExecutor { + /// Creates a new Ethereum block executor. + pub fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { + Self { executor: OpEvmExecutor { chain_spec, evm_config }, state, inspector: None } + } + + /// Sets the inspector stack for debugging. + pub fn with_inspector(mut self, inspector: Option) -> Self { + self.inspector = inspector; + self + } + + #[inline] + fn chain_spec(&self) -> &ChainSpec { + &self.executor.chain_spec + } + + /// Returns mutable reference to the state that wraps the underlying database. + #[allow(unused)] + fn state_mut(&mut self) -> &mut State { + &mut self.state + } +} + +impl OpBlockExecutor +where + EvmConfig: ConfigureEvm, + // TODO(mattsse): get rid of this + EvmConfig: ConfigureEvmEnv, + DB: Database, +{ + /// Configures a new evm configuration and block environment for the given block. 
+ /// + /// Caution: this does not initialize the tx environment. + fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + EvmConfig::fill_cfg_and_block_env( + &mut cfg, + &mut block_env, + self.chain_spec(), + header, + total_difficulty, + ); + + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) + } + + /// Execute a single block and apply the state changes to the internal state. + /// + /// Returns the receipts of the transactions in the block and the total gas used. + /// + /// Returns an error if execution fails or receipt verification fails. + fn execute_and_verify( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result<(Vec, u64), BlockExecutionError> { + // 1. prepare state on new block + self.on_new_block(&block.header); + + // 2. configure the evm and execute + let env = self.evm_env_for_block(&block.header, total_difficulty); + + let (receipts, gas_used) = { + if let Some(inspector) = self.inspector.as_mut() { + let evm = self.executor.evm_config.evm_with_env_and_inspector( + &mut self.state, + env, + inspector, + ); + self.executor.execute_pre_and_transactions(block, evm)? + } else { + let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); + + self.executor.execute_pre_and_transactions(block, evm)? + } + }; + + // 3. apply post execution changes + self.post_execution(block, total_difficulty)?; + + // Before Byzantium, receipts contained state root that would mean that expensive + // operation as hashing that is required for state root got calculated in every + // transaction This was replaced with is_success flag. 
+ // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 + if self.chain_spec().is_byzantium_active_at_block(block.header.number) { + if let Err(error) = verify_receipt_optimism( + block.header.receipts_root, + block.header.logs_bloom, + receipts.iter(), + self.chain_spec(), + block.timestamp, + ) { + debug!(target: "evm", %error, ?receipts, "receipts verification failed"); + return Err(error); + }; + } + + Ok((receipts, gas_used)) + } + + /// Apply settings before a new block is executed. + pub(crate) fn on_new_block(&mut self, header: &Header) { + // Set state clear flag if the block is after the Spurious Dragon hardfork. + let state_clear_flag = self.chain_spec().is_spurious_dragon_active_at_block(header.number); + self.state.set_state_clear_flag(state_clear_flag); + } + + /// Apply post execution state changes, including block rewards, withdrawals, and irregular DAO + /// hardfork state change. + pub fn post_execution( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result<(), BlockExecutionError> { + let balance_increments = post_block_balance_increments( + self.chain_spec(), + block.number, + block.difficulty, + block.beneficiary, + block.timestamp, + total_difficulty, + &block.ommers, + block.withdrawals.as_ref().map(Withdrawals::as_ref), + ); + // increment balances + self.state + .increment_balances(balance_increments) + .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; + + Ok(()) + } +} + +impl Executor for OpBlockExecutor +where + EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvmEnv, + DB: Database, +{ + type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; + type Output = EthBlockOutput; + type Error = BlockExecutionError; + + /// Executes the block and commits the state changes. + /// + /// Returns the receipts of the transactions in the block. + /// + /// Returns an error if the block could not be executed or failed verification. + /// + /// State changes are committed to the database. 
+ fn execute(mut self, input: Self::Input<'_>) -> Result { + let EthBlockExecutionInput { block, total_difficulty } = input; + let (receipts, gas_used) = self.execute_and_verify(block, total_difficulty)?; + + // prepare the state for extraction + self.state.merge_transitions(BundleRetention::PlainState); + + Ok(EthBlockOutput { state: self.state.take_bundle(), receipts, gas_used }) + } +} + +/// An executor for a batch of blocks. +/// +/// State changes are tracked until the executor is finalized. +#[derive(Debug)] +pub struct OpBatchExecutor { + /// The executor used to execute blocks. + executor: OpBlockExecutor, + /// Keeps track of the batch and record receipts based on the configured prune mode + batch_record: BlockBatchRecord, + stats: BlockExecutorStats, +} + +impl OpBatchExecutor { + /// Returns the receipts of the executed blocks. + pub fn receipts(&self) -> &Receipts { + self.batch_record.receipts() + } + + /// Returns mutable reference to the state that wraps the underlying database. 
+ #[allow(unused)] + fn state_mut(&mut self) -> &mut State { + self.executor.state_mut() + } +} + +impl BatchExecutor for OpBatchExecutor +where + EvmConfig: ConfigureEvm, + // TODO: get rid of this + EvmConfig: ConfigureEvmEnv, + DB: Database, +{ + type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; + type Output = BundleStateWithReceipts; + type Error = BlockExecutionError; + + fn execute_one(&mut self, input: Self::Input<'_>) -> Result { + let EthBlockExecutionInput { block, total_difficulty } = input; + let (receipts, _gas_used) = self.executor.execute_and_verify(block, total_difficulty)?; + + // prepare the state according to the prune mode + let retention = self.batch_record.bundle_retention(block.number); + self.executor.state.merge_transitions(retention); + + // store receipts in the set + self.batch_record.save_receipts(receipts)?; + + Ok(BatchBlockOutput { size_hint: Some(self.executor.state.bundle_size_hint()) }) + } + + fn finalize(mut self) -> Self::Output { + // TODO: track stats + self.stats.log_debug(); + + BundleStateWithReceipts::new( + self.executor.state.take_bundle(), + self.batch_record.take_receipts(), + self.batch_record.first_block().unwrap_or_default(), + ) + } +} + +/// Verify the calculated receipts root against the expected receipts root. +pub fn verify_receipt_optimism<'a>( + expected_receipts_root: B256, + expected_logs_bloom: Bloom, + receipts: impl Iterator + Clone, + chain_spec: &ChainSpec, + timestamp: u64, +) -> Result<(), BlockExecutionError> { + // Calculate receipts root. + let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); + let receipts_root = + calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); + + // Create header log bloom. 
+ let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + + compare_receipts_root_and_logs_bloom( + receipts_root, + logs_bloom, + expected_receipts_root, + expected_logs_bloom, + )?; + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_primitives::{ + b256, Account, Address, Block, ChainSpecBuilder, Signature, StorageKey, StorageValue, + Transaction, TransactionKind, TransactionSigned, TxEip1559, BASE_MAINNET, + }; + use reth_revm::{database::StateProviderDatabase, L1_BLOCK_CONTRACT}; + use std::{collections::HashMap, str::FromStr}; + + use crate::OptimismEvmConfig; + use reth_revm::test_utils::StateProviderTest; + + fn create_op_state_provider() -> StateProviderTest { + let mut db = StateProviderTest::default(); + + let l1_block_contract_account = + Account { balance: U256::ZERO, bytecode_hash: None, nonce: 1 }; + + let mut l1_block_storage = HashMap::new(); + // base fee + l1_block_storage.insert(StorageKey::with_last_byte(1), StorageValue::from(1000000000)); + // l1 fee overhead + l1_block_storage.insert(StorageKey::with_last_byte(5), StorageValue::from(188)); + // l1 fee scalar + l1_block_storage.insert(StorageKey::with_last_byte(6), StorageValue::from(684000)); + // l1 free scalars post ecotone + l1_block_storage.insert( + StorageKey::with_last_byte(3), + StorageValue::from_str( + "0x0000000000000000000000000000000000001db0000d27300000000000000005", + ) + .unwrap(), + ); + + db.insert_account(L1_BLOCK_CONTRACT, l1_block_contract_account, None, l1_block_storage); + + db + } + + fn executor_provider(chain_spec: Arc) -> OpExecutorProvider { + OpExecutorProvider { + chain_spec, + evm_config: Default::default(), + inspector: None, + prune_modes: Default::default(), + } + } + + #[test] + fn op_deposit_fields_pre_canyon() { + let header = Header { + timestamp: 1, + number: 1, + gas_limit: 1_000_000, + gas_used: 42_000, + receipts_root: b256!( + "83465d1e7d01578c0d609be33570f91242f013e9e295b0879905346abbd63731" + 
), + ..Default::default() + }; + + let mut db = create_op_state_provider(); + + let addr = Address::ZERO; + let account = Account { balance: U256::MAX, ..Account::default() }; + db.insert_account(addr, account, None, HashMap::new()); + + let chain_spec = + Arc::new(ChainSpecBuilder::from(&*BASE_MAINNET).regolith_activated().build()); + + let tx = TransactionSigned::from_transaction_and_signature( + Transaction::Eip1559(TxEip1559 { + chain_id: chain_spec.chain.id(), + nonce: 0, + gas_limit: 21_000, + to: TransactionKind::Call(addr), + ..Default::default() + }), + Signature::default(), + ); + + let tx_deposit = TransactionSigned::from_transaction_and_signature( + Transaction::Deposit(reth_primitives::TxDeposit { + from: addr, + to: TransactionKind::Call(addr), + gas_limit: 21_000, + ..Default::default() + }), + Signature::default(), + ); + + let provider = executor_provider(chain_spec); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + + // Attempt to execute a block with one deposit and one non-deposit transaction + executor + .execute_one( + ( + &BlockWithSenders { + block: Block { + header, + body: vec![tx, tx_deposit], + ommers: vec![], + withdrawals: None, + }, + senders: vec![addr, addr], + }, + U256::ZERO, + ) + .into(), + ) + .unwrap(); + + let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); + let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); + + // deposit_receipt_version is not present in pre canyon transactions + assert!(deposit_receipt.deposit_receipt_version.is_none()); + assert!(tx_receipt.deposit_receipt_version.is_none()); + + // deposit_nonce is present only in deposit transactions + assert!(deposit_receipt.deposit_nonce.is_some()); + assert!(tx_receipt.deposit_nonce.is_none()); + } + + #[test] + fn op_deposit_fields_post_canyon() { + // ensure_create2_deployer will fail if timestamp is set to less then 2 + let header = 
Header { + timestamp: 2, + number: 1, + gas_limit: 1_000_000, + gas_used: 42_000, + receipts_root: b256!( + "fffc85c4004fd03c7bfbe5491fae98a7473126c099ac11e8286fd0013f15f908" + ), + ..Default::default() + }; + + let mut db = create_op_state_provider(); + let addr = Address::ZERO; + let account = Account { balance: U256::MAX, ..Account::default() }; + + db.insert_account(addr, account, None, HashMap::new()); + + let chain_spec = + Arc::new(ChainSpecBuilder::from(&*BASE_MAINNET).canyon_activated().build()); + + let tx = TransactionSigned::from_transaction_and_signature( + Transaction::Eip1559(TxEip1559 { + chain_id: chain_spec.chain.id(), + nonce: 0, + gas_limit: 21_000, + to: TransactionKind::Call(addr), + ..Default::default() + }), + Signature::default(), + ); + + let tx_deposit = TransactionSigned::from_transaction_and_signature( + Transaction::Deposit(reth_primitives::TxDeposit { + from: addr, + to: TransactionKind::Call(addr), + gas_limit: 21_000, + ..Default::default() + }), + Signature::optimism_deposit_tx_signature(), + ); + + let provider = executor_provider(chain_spec); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + + // attempt to execute an empty block with parent beacon block root, this should not fail + executor + .execute_one( + ( + &BlockWithSenders { + block: Block { + header, + body: vec![tx, tx_deposit], + ommers: vec![], + withdrawals: None, + }, + senders: vec![addr, addr], + }, + U256::ZERO, + ) + .into(), + ) + .expect("Executing a block while canyon is active should not fail"); + + let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); + let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); + + // deposit_receipt_version is set to 1 for post canyon deposit transactions + assert_eq!(deposit_receipt.deposit_receipt_version, Some(1)); + assert!(tx_receipt.deposit_receipt_version.is_none()); + + // deposit_nonce is present 
only in deposit transactions + assert!(deposit_receipt.deposit_nonce.is_some()); + assert!(tx_receipt.deposit_nonce.is_none()); + } +} diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs new file mode 100644 index 0000000000000..6a6324302e10c --- /dev/null +++ b/crates/optimism/evm/src/lib.rs @@ -0,0 +1,107 @@ +//! EVM config for vanilla optimism. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `optimism` feature must be enabled to use this crate. +#![cfg(feature = "optimism")] + +use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; +use reth_primitives::{ + revm::{config::revm_spec, env::fill_op_tx_env}, + revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, + Address, Bytes, ChainSpec, Head, Header, Transaction, U256, +}; +use reth_revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; + +mod execute; +pub use execute::*; + +/// Optimism-related EVM configuration. 
+#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct OptimismEvmConfig; + +impl ConfigureEvmEnv for OptimismEvmConfig { + type TxMeta = Bytes; + + fn fill_tx_env(tx_env: &mut TxEnv, transaction: T, sender: Address, meta: Bytes) + where + T: AsRef, + { + fill_op_tx_env(tx_env, transaction, sender, meta); + } + + fn fill_cfg_env( + cfg_env: &mut CfgEnvWithHandlerCfg, + chain_spec: &ChainSpec, + header: &Header, + total_difficulty: U256, + ) { + let spec_id = revm_spec( + chain_spec, + Head { + number: header.number, + timestamp: header.timestamp, + difficulty: header.difficulty, + total_difficulty, + hash: Default::default(), + }, + ); + + cfg_env.chain_id = chain_spec.chain().id(); + cfg_env.perf_analyse_created_bytecodes = AnalysisKind::Analyse; + + cfg_env.handler_cfg.spec_id = spec_id; + cfg_env.handler_cfg.is_optimism = chain_spec.is_optimism(); + } +} + +impl ConfigureEvm for OptimismEvmConfig { + fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> { + EvmBuilder::default().with_db(db).optimism().build() + } + + fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> + where + DB: Database + 'a, + I: GetInspector, + { + EvmBuilder::default() + .with_db(db) + .with_external_context(inspector) + .optimism() + .append_handler_register(inspector_handle_register) + .build() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_primitives::revm_primitives::{BlockEnv, CfgEnv}; + use reth_revm::primitives::SpecId; + + #[test] + #[ignore] + fn test_fill_cfg_and_block_env() { + let mut cfg_env = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); + let mut block_env = BlockEnv::default(); + let header = Header::default(); + let chain_spec = ChainSpec::default(); + let total_difficulty = U256::ZERO; + + OptimismEvmConfig::fill_cfg_and_block_env( + &mut cfg_env, + &mut block_env, + &chain_spec, + &header, + total_difficulty, + ); + + assert_eq!(cfg_env.chain_id, chain_spec.chain().id()); 
+ } +} diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index f242adf5a79ae..36bfe96b55134 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -28,6 +28,7 @@ reth-network.workspace = true reth-interfaces.workspace = true reth-evm.workspace = true reth-revm.workspace = true +reth-evm-optimism.workspace = true reth-beacon-consensus.workspace = true revm.workspace = true revm-primitives.workspace = true @@ -39,7 +40,7 @@ http.workspace = true http-body.workspace = true reqwest = { version = "0.11", default-features = false, features = [ "rustls-tls", -]} +] } tracing.workspace = true # misc @@ -54,7 +55,7 @@ jsonrpsee.workspace = true [dev-dependencies] reth.workspace = true reth-db.workspace = true -reth-revm = { workspace = true, features = ["test-utils"]} +reth-revm = { workspace = true, features = ["test-utils"] } reth-e2e-test-utils.workspace = true tokio.workspace = true alloy-primitives.workspace = true @@ -66,6 +67,7 @@ optimism = [ "reth-rpc-types-compat/optimism", "reth-rpc/optimism", "reth-revm/optimism", + "reth-evm-optimism/optimism", "reth-optimism-payload-builder/optimism", "reth-beacon-consensus/optimism", ] diff --git a/crates/optimism/node/src/evm/mod.rs b/crates/optimism/node/src/evm/mod.rs index 086253a0d8bb9..139597f9cb07c 100644 --- a/crates/optimism/node/src/evm/mod.rs +++ b/crates/optimism/node/src/evm/mod.rs @@ -1,96 +1,2 @@ -use reth_node_api::{ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::{ - revm::{config::revm_spec, env::fill_op_tx_env}, - revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, - Address, Bytes, ChainSpec, Head, Header, Transaction, U256, -}; -use revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; -mod execute; -pub use execute::*; -/// Optimism-related EVM configuration. 
-#[derive(Debug, Default, Clone, Copy)] -#[non_exhaustive] -pub struct OptimismEvmConfig; - -impl ConfigureEvmEnv for OptimismEvmConfig { - type TxMeta = Bytes; - - fn fill_tx_env(tx_env: &mut TxEnv, transaction: T, sender: Address, meta: Bytes) - where - T: AsRef, - { - fill_op_tx_env(tx_env, transaction, sender, meta); - } - - fn fill_cfg_env( - cfg_env: &mut CfgEnvWithHandlerCfg, - chain_spec: &ChainSpec, - header: &Header, - total_difficulty: U256, - ) { - let spec_id = revm_spec( - chain_spec, - Head { - number: header.number, - timestamp: header.timestamp, - difficulty: header.difficulty, - total_difficulty, - hash: Default::default(), - }, - ); - - cfg_env.chain_id = chain_spec.chain().id(); - cfg_env.perf_analyse_created_bytecodes = AnalysisKind::Analyse; - - cfg_env.handler_cfg.spec_id = spec_id; - cfg_env.handler_cfg.is_optimism = chain_spec.is_optimism(); - } -} - -impl ConfigureEvm for OptimismEvmConfig { - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> { - EvmBuilder::default().with_db(db).optimism().build() - } - - fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> - where - DB: Database + 'a, - I: GetInspector, - { - EvmBuilder::default() - .with_db(db) - .with_external_context(inspector) - .optimism() - .append_handler_register(inspector_handle_register) - .build() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use reth_primitives::revm_primitives::{BlockEnv, CfgEnv}; - use revm::primitives::SpecId; - - #[test] - #[ignore] - fn test_fill_cfg_and_block_env() { - let mut cfg_env = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); - let mut block_env = BlockEnv::default(); - let header = Header::default(); - let chain_spec = ChainSpec::default(); - let total_difficulty = U256::ZERO; - - OptimismEvmConfig::fill_cfg_and_block_env( - &mut cfg_env, - &mut block_env, - &chain_spec, - &header, - total_difficulty, - ); - - assert_eq!(cfg_env.chain_id, chain_spec.chain().id()); - } 
-} diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index e75b038907bbe..7fc1c34b60fe6 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -17,11 +17,6 @@ pub mod args; pub mod engine; pub use engine::OptimismEngineTypes; -/// Exports optimism-specific implementations of the -/// [ConfigureEvmEnv](reth_node_api::ConfigureEvmEnv) trait. -pub mod evm; -pub use evm::OptimismEvmConfig; - pub mod node; pub use node::OptimismNode; @@ -32,3 +27,5 @@ pub mod rpc; pub use reth_optimism_payload_builder::{ OptimismBuiltPayload, OptimismPayloadBuilder, OptimismPayloadBuilderAttributes, }; + +pub use reth_evm_optimism::*; diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 0c365ccc7e3e7..0d6e4996a6010 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -3,9 +3,10 @@ use crate::{ args::RollupArgs, txpool::{OpTransactionPool, OpTransactionValidator}, - OptimismEngineTypes, OptimismEvmConfig, + OptimismEngineTypes, }; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; +use reth_evm_optimism::OptimismEvmConfig; use reth_network::{NetworkHandle, NetworkManager}; use reth_node_builder::{ components::{ComponentsBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 357309de7b5ba..d5bd324ae027c 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -74,7 +74,7 @@ tracing-futures = "0.2" schnellru.workspace = true futures.workspace = true derive_more.workspace = true -dyn-clone.workspace = true +dyn-clone.workspace = true [dev-dependencies] reth-evm-ethereum.workspace = true From a75d6cd753a6988ff6c1033b24e485d65b4c3b48 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 23 Apr 2024 23:10:58 +0200 Subject: [PATCH 295/700] chore: rm dbg (#7827) --- crates/rpc/rpc-engine-api/src/error.rs | 1 - 1 file changed, 1 
deletion(-) diff --git a/crates/rpc/rpc-engine-api/src/error.rs b/crates/rpc/rpc-engine-api/src/error.rs index 8a7790cf03bab..01b99a54f6bc1 100644 --- a/crates/rpc/rpc-engine-api/src/error.rs +++ b/crates/rpc/rpc-engine-api/src/error.rs @@ -222,7 +222,6 @@ mod tests { err: impl Into>, ) { let err = err.into(); - dbg!(&err); assert_eq!(err.code(), code); assert_eq!(err.message(), message); } From 0f9145b728d2079d9c0fd0e6cd997f5641e3a675 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 23 Apr 2024 23:17:36 +0200 Subject: [PATCH 296/700] chore: add CanonStateSubscriptions to NoopTree (#7828) --- crates/blockchain-tree/src/noop.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index eff385fb6ef73..9fa82025511ae 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -22,7 +22,10 @@ use std::collections::{BTreeMap, HashSet}; /// Caution: this is only intended for testing purposes, or for wiring components together. #[derive(Debug, Clone, Default)] #[non_exhaustive] -pub struct NoopBlockchainTree {} +pub struct NoopBlockchainTree { + /// Broadcast channel for canon state changes notifications. 
+ pub canon_state_notification_sender: Option, +} impl BlockchainTreeEngine for NoopBlockchainTree { fn buffer_block(&self, _block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { @@ -127,6 +130,9 @@ impl BlockchainTreePendingStateProvider for NoopBlockchainTree { impl CanonStateSubscriptions for NoopBlockchainTree { fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { - CanonStateNotificationSender::new(1).subscribe() + self.canon_state_notification_sender + .as_ref() + .map(|sender| sender.subscribe()) + .unwrap_or_else(|| CanonStateNotificationSender::new(1).subscribe()) } } From 665e67ec7cfdbaa00dc4d1068c8400d50c6cc2b5 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 23 Apr 2024 23:30:34 +0200 Subject: [PATCH 297/700] ci: use `HOMEBREW` secret (#7829) --- .github/workflows/release-dist.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-dist.yml b/.github/workflows/release-dist.yml index 5989a532c4ad0..2142360e03962 100644 --- a/.github/workflows/release-dist.yml +++ b/.github/workflows/release-dist.yml @@ -14,7 +14,7 @@ jobs: - name: Update Homebrew formula uses: dawidd6/action-homebrew-bump-formula@v3 with: - token: ${{ secrets.GITHUB_TOKEN }} + token: ${{ secrets.HOMEBREW }} no_fork: true tap: paradigmxyz/brew formula: reth From 7a593882e1735d76772cc8685d4ff597f9bbf114 Mon Sep 17 00:00:00 2001 From: Delweng Date: Wed, 24 Apr 2024 05:32:59 +0800 Subject: [PATCH 298/700] chore(github): use codespell to inspect and correct spelling issues (#7775) Signed-off-by: jsvisa Co-authored-by: Matthias Seitz --- .codespellrc | 3 +++ .github/workflows/lint.yml | 8 +++++++- Makefile | 12 +++++++++++- bin/reth/src/commands/stage/dump/hashing_account.rs | 2 +- bin/reth/src/commands/stage/dump/hashing_storage.rs | 2 +- bin/reth/src/commands/stage/dump/merkle.rs | 2 +- bin/reth/src/commands/test_vectors/tables.rs | 4 ++-- crates/engine-primitives/src/error.rs | 6 ++++-- 
crates/ethereum/engine-primitives/src/lib.rs | 2 +- crates/net/ecies/src/error.rs | 4 ++-- crates/net/network/src/transactions/validation.rs | 2 +- crates/node-builder/src/components/builder.rs | 2 +- crates/node-core/src/args/rpc_server_args.rs | 2 +- crates/rpc/rpc-api/src/engine.rs | 2 +- crates/rpc/rpc-api/src/ganache.rs | 2 +- crates/rpc/rpc-builder/src/lib.rs | 2 +- crates/rpc/rpc-engine-api/src/engine_api.rs | 4 ++-- crates/rpc/rpc/src/eth/signer.rs | 2 +- crates/rpc/rpc/src/lib.rs | 2 +- crates/static-file/README.md | 2 +- crates/storage/db/benches/hash_keys.rs | 6 +++--- crates/storage/db/src/abstraction/common.rs | 2 +- crates/storage/libmdbx-rs/src/environment.rs | 2 +- crates/storage/nippy-jar/src/lib.rs | 2 +- .../provider/src/providers/state/historical.rs | 2 +- crates/storage/provider/src/traits/transactions.rs | 2 +- crates/transaction-pool/src/pool/pending.rs | 2 +- crates/transaction-pool/src/pool/txpool.rs | 4 ++-- 28 files changed, 55 insertions(+), 34 deletions(-) create mode 100644 .codespellrc diff --git a/.codespellrc b/.codespellrc new file mode 100644 index 0000000000000..771985af19124 --- /dev/null +++ b/.codespellrc @@ -0,0 +1,3 @@ +[codespell] +skip = .git,target,./crates/storage/libmdbx-rs/mdbx-sys/libmdbx,Cargo.toml,Cargo.lock +ignore-words-list = crate,ser,ratatui diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 1f1f7a13cddb7..ff3dad495eabb 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -110,6 +110,12 @@ jobs: components: rustfmt - run: cargo fmt --all --check + codespell: + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: codespell-project/actions-codespell@v2 + grafana: runs-on: ubuntu-latest timeout-minutes: 30 @@ -124,7 +130,7 @@ jobs: name: lint success runs-on: ubuntu-latest if: always() - needs: [clippy-binaries, clippy, crate-checks, docs, fmt, grafana] + needs: [clippy-binaries, clippy, crate-checks, docs, fmt, codespell, grafana] timeout-minutes: 30 
steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/Makefile b/Makefile index c8adf4ff91333..187de174d0746 100644 --- a/Makefile +++ b/Makefile @@ -303,11 +303,21 @@ lint-other-targets: --all-features \ -- -D warnings +lint-codespell: ensure-codespell + codespell + +ensure-codespell: + @if ! command -v codespell &> /dev/null; then \ + echo "codespell not found. Please install it by running the command `pip install codespell` or refer to the following link for more information: https://github.com/codespell-project/codespell" \ + exit 1; \ + fi + lint: make fmt && \ make lint-reth && \ make lint-op-reth && \ - make lint-other-targets + make lint-other-targets \ + make lint-codespell fix-lint-reth: cargo +nightly clippy \ diff --git a/bin/reth/src/commands/stage/dump/hashing_account.rs b/bin/reth/src/commands/stage/dump/hashing_account.rs index 1888f0e303e7a..35bbfa4d74c94 100644 --- a/bin/reth/src/commands/stage/dump/hashing_account.rs +++ b/bin/reth/src/commands/stage/dump/hashing_account.rs @@ -69,7 +69,7 @@ fn unwind_and_copy( Ok(()) } -/// Try to re-execute the stage straightaway +/// Try to re-execute the stage straight away async fn dry_run( output_provider_factory: ProviderFactory, to: u64, diff --git a/bin/reth/src/commands/stage/dump/hashing_storage.rs b/bin/reth/src/commands/stage/dump/hashing_storage.rs index 7f827b25cd7ad..a7e3878806822 100644 --- a/bin/reth/src/commands/stage/dump/hashing_storage.rs +++ b/bin/reth/src/commands/stage/dump/hashing_storage.rs @@ -65,7 +65,7 @@ fn unwind_and_copy( Ok(()) } -/// Try to re-execute the stage straightaway +/// Try to re-execute the stage straight away async fn dry_run( output_provider_factory: ProviderFactory, to: u64, diff --git a/bin/reth/src/commands/stage/dump/merkle.rs b/bin/reth/src/commands/stage/dump/merkle.rs index 08ac0a3aaefe0..2dfd0172b8c74 100644 --- a/bin/reth/src/commands/stage/dump/merkle.rs +++ b/bin/reth/src/commands/stage/dump/merkle.rs @@ -138,7 +138,7 @@ async fn 
unwind_and_copy( Ok(()) } -/// Try to re-execute the stage straightaway +/// Try to re-execute the stage straight away async fn dry_run( output_provider_factory: ProviderFactory, to: u64, diff --git a/bin/reth/src/commands/test_vectors/tables.rs b/bin/reth/src/commands/test_vectors/tables.rs index 6399c81ac235c..181ed0e3e3a35 100644 --- a/bin/reth/src/commands/test_vectors/tables.rs +++ b/bin/reth/src/commands/test_vectors/tables.rs @@ -81,7 +81,7 @@ where { let mut rows = vec![]; let mut seen_keys = HashSet::new(); - let strat = proptest::collection::vec( + let strategy = proptest::collection::vec( any_with::>(( ::Parameters::default(), ::Parameters::default(), @@ -94,7 +94,7 @@ where while rows.len() < per_table { // Generate all `per_table` rows: (Key, Value) rows.extend( - &mut strat + &mut strategy .new_tree(runner) .map_err(|e| eyre::eyre!("{e}"))? .current() diff --git a/crates/engine-primitives/src/error.rs b/crates/engine-primitives/src/error.rs index d6549a516f384..f6dd3a8b7f400 100644 --- a/crates/engine-primitives/src/error.rs +++ b/crates/engine-primitives/src/error.rs @@ -8,11 +8,13 @@ use thiserror::Error; /// both execution payloads and forkchoice update attributes with respect to a method version. #[derive(Error, Debug)] pub enum EngineObjectValidationError { - /// Thrown when the underlying validation error occured while validating an `ExecutionPayload`. + /// Thrown when the underlying validation error occurred while validating an + /// `ExecutionPayload`. #[error("Payload validation error: {0}")] Payload(VersionSpecificValidationError), - /// Thrown when the underlying validation error occured while validating a `PayloadAttributes`. + /// Thrown when the underlying validation error occurred while validating a + /// `PayloadAttributes`. 
#[error("Payload attributes validation error: {0}")] PayloadAttributes(VersionSpecificValidationError), diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index 6b030a9c553d0..cb6d0231eed42 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -1,4 +1,4 @@ -//! Ethereum specifc +//! Ethereum specific #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", diff --git a/crates/net/ecies/src/error.rs b/crates/net/ecies/src/error.rs index d87545871fe2f..64526f16d6bad 100644 --- a/crates/net/ecies/src/error.rs +++ b/crates/net/ecies/src/error.rs @@ -81,8 +81,8 @@ pub enum ECIESErrorImpl { /// a message from the (partially filled) buffer. #[error("stream closed due to not being readable")] UnreadableStream, - // Error when data is not recieved from peer for a prolonged period. - #[error("never recieved data from remote peer")] + // Error when data is not received from peer for a prolonged period. + #[error("never received data from remote peer")] StreamTimeout, } diff --git a/crates/net/network/src/transactions/validation.rs b/crates/net/network/src/transactions/validation.rs index e508b2b2452cc..9171004bde669 100644 --- a/crates/net/network/src/transactions/validation.rs +++ b/crates/net/network/src/transactions/validation.rs @@ -21,7 +21,7 @@ pub const SIGNATURE_DECODED_SIZE_BYTES: usize = mem::size_of::(); pub trait ValidateTx68 { /// Validates a [`NewPooledTransactionHashes68`](reth_eth_wire::NewPooledTransactionHashes68) /// entry. Returns [`ValidationOutcome`] which signals to the caller whether to fetch the - /// transaction or wether to drop it, and whether the sender of the announcement should be + /// transaction or to drop it, and whether the sender of the announcement should be /// penalized. 
fn should_fetch( &self, diff --git a/crates/node-builder/src/components/builder.rs b/crates/node-builder/src/components/builder.rs index 6abdca96c5c0c..14bdf7a4a589c 100644 --- a/crates/node-builder/src/components/builder.rs +++ b/crates/node-builder/src/components/builder.rs @@ -164,7 +164,7 @@ impl Default for ComponentsBuilder<(), (), (), ()> { /// A type that configures all the customizable components of the node and knows how to build them. /// -/// Implementors of this trait are responsible for building all the components of the node: See +/// Implementers of this trait are responsible for building all the components of the node: See /// [NodeComponents]. /// /// The [ComponentsBuilder] is a generic implementation of this trait that can be used to customize diff --git a/crates/node-core/src/args/rpc_server_args.rs b/crates/node-core/src/args/rpc_server_args.rs index da3095815118b..2ac48e2ba3ab8 100644 --- a/crates/node-core/src/args/rpc_server_args.rs +++ b/crates/node-core/src/args/rpc_server_args.rs @@ -363,7 +363,7 @@ impl RpcServerArgs { impl RethRpcConfig for RpcServerArgs { fn is_ipc_enabled(&self) -> bool { - // By default IPC is enabled therefor it is enabled if the `ipcdisable` is false. + // By default IPC is enabled therefore it is enabled if the `ipcdisable` is false. !self.ipcdisable } diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index 9304bbc5b8b4a..d320c74601d08 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -131,7 +131,7 @@ pub trait EngineApi { /// layer p2p specification, meaning the input should be treated as untrusted or potentially /// adversarial. /// - /// Implementors should take care when acting on the input to this method, specifically + /// Implementers should take care when acting on the input to this method, specifically /// ensuring that the range is limited properly, and that the range boundaries are computed /// correctly and without panics. 
#[method(name = "getPayloadBodiesByRangeV1")] diff --git a/crates/rpc/rpc-api/src/ganache.rs b/crates/rpc/rpc-api/src/ganache.rs index 0156f074acee7..338c914980ebf 100644 --- a/crates/rpc/rpc-api/src/ganache.rs +++ b/crates/rpc/rpc-api/src/ganache.rs @@ -34,7 +34,7 @@ pub trait GanacheApi { /// is the snapshot id to revert to. This deletes the given snapshot, as well as any snapshots /// taken after (e.g.: reverting to id 0x1 will delete snapshots with ids 0x1, 0x2, etc.). /// - /// Reutnrs `true` if a snapshot was reverted, otherwise `false`. + /// Returns `true` if a snapshot was reverted, otherwise `false`. #[method(name = "revert")] async fn evm_revert(&self, snapshot_id: U256) -> RpcResult; diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 62f82b8f8bee4..4b9159e2d0035 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -717,7 +717,7 @@ impl RpcModuleSelection { /// Creates a new [RpcModule] based on the configured reth modules. /// - /// Note: This will always create new instance of the module handlers and is therefor only + /// Note: This will always create new instance of the module handlers and is therefore only /// recommended for launching standalone transports. If multiple transports need to be /// configured it's recommended to use the [RpcModuleBuilder]. #[allow(clippy::too_many_arguments)] diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index be9f98832abe1..eb3b1bfc7bd8d 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -292,7 +292,7 @@ where /// layer p2p specification, meaning the input should be treated as untrusted or potentially /// adversarial. 
/// - /// Implementors should take care when acting on the input to this method, specifically + /// Implementers should take care when acting on the input to this method, specifically /// ensuring that the range is limited properly, and that the range boundaries are computed /// correctly and without panics. pub async fn get_payload_bodies_by_range( @@ -661,7 +661,7 @@ where /// layer p2p specification, meaning the input should be treated as untrusted or potentially /// adversarial. /// - /// Implementors should take care when acting on the input to this method, specifically + /// Implementers should take care when acting on the input to this method, specifically /// ensuring that the range is limited properly, and that the range boundaries are computed /// correctly and without panics. /// diff --git a/crates/rpc/rpc/src/eth/signer.rs b/crates/rpc/rpc/src/eth/signer.rs index b744d83efa55d..578907604ea48 100644 --- a/crates/rpc/rpc/src/eth/signer.rs +++ b/crates/rpc/rpc/src/eth/signer.rs @@ -53,7 +53,7 @@ impl DevSigner { /// Generates a random dev signer which satisfies [EthSigner] trait pub(crate) fn random() -> Box { let mut signers = Self::random_signers(1); - signers.pop().expect("expect to generate at leas one signer") + signers.pop().expect("expect to generate at least one signer") } /// Generates provided number of random dev signers diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index fe5e2a97d733e..c75fa9b6be128 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -12,7 +12,7 @@ //! //! To avoid this, all blocking or CPU intensive handlers must be spawned to a separate task. See //! the [EthApi] handler implementations for examples. The rpc-api traits make no use of the -//! available jsonrpsee `blocking` attribute to give implementors more freedom because the +//! available jsonrpsee `blocking` attribute to give implementers more freedom because the //! 
`blocking` attribute and async handlers are mutually exclusive. However, as mentioned above, a //! lot of handlers make use of async functions, caching for example, but are also using blocking //! disk-io, hence these calls are spawned as futures to a blocking task manually. diff --git a/crates/static-file/README.md b/crates/static-file/README.md index b6eb385dd9ef4..3aab25a97ad96 100644 --- a/crates/static-file/README.md +++ b/crates/static-file/README.md @@ -106,7 +106,7 @@ In descending order of abstraction hierarchy: [`StaticFileProducer`](../../crates/static-file/src/static_file_producer.rs#L25): A `reth` [hook](../../crates/consensus/beacon/src/engine/hooks/static_file.rs) service that when triggered, **copies** finalized data from the database to the latest static file. Upon completion, it updates the internal index at `StaticFileProvider` with the new highest block and transaction on each specific segment. -[`StaticFileProvider`](../../crates/storage/provider/src/providers/static_file/manager.rs#L44) A provider similar to `DatabaseProvider`, **managing all existing static_file files** and selecting the optimal one (by range and segment type) to fulfill a request. **A single instance is shared across all components and should be instantiated only once within `ProviderFactory`**. An immutable reference is given everytime `ProviderFactory` creates a new `DatabaseProvider`. +[`StaticFileProvider`](../../crates/storage/provider/src/providers/static_file/manager.rs#L44) A provider similar to `DatabaseProvider`, **managing all existing static_file files** and selecting the optimal one (by range and segment type) to fulfill a request. **A single instance is shared across all components and should be instantiated only once within `ProviderFactory`**. An immutable reference is given every time `ProviderFactory` creates a new `DatabaseProvider`. 
[`StaticFileJarProvider`](../../crates/storage/provider/src/providers/static_file/jar.rs#L42) A provider similar to `DatabaseProvider` that provides access to a **single static file segment data** one a specific block range. diff --git a/crates/storage/db/benches/hash_keys.rs b/crates/storage/db/benches/hash_keys.rs index 5376bf5040ccc..ee21883fea0e6 100644 --- a/crates/storage/db/benches/hash_keys.rs +++ b/crates/storage/db/benches/hash_keys.rs @@ -136,7 +136,7 @@ where T::Key: std::hash::Hash + Arbitrary, T::Value: Arbitrary, { - let strat = proptest::collection::vec( + let strategy = proptest::collection::vec( any_with::>(( ::Parameters::default(), ::Parameters::default(), @@ -147,8 +147,8 @@ where .boxed(); let mut runner = TestRunner::new(ProptestConfig::default()); - let mut preload = strat.new_tree(&mut runner).unwrap().current(); - let mut input = strat.new_tree(&mut runner).unwrap().current(); + let mut preload = strategy.new_tree(&mut runner).unwrap().current(); + let mut input = strategy.new_tree(&mut runner).unwrap().current(); let mut unique_keys = HashSet::new(); preload.retain(|(k, _)| unique_keys.insert(k.clone())); diff --git a/crates/storage/db/src/abstraction/common.rs b/crates/storage/db/src/abstraction/common.rs index 9bce16e397d2d..eef41293527d4 100644 --- a/crates/storage/db/src/abstraction/common.rs +++ b/crates/storage/db/src/abstraction/common.rs @@ -23,7 +23,7 @@ mod sealed { use crate::{database::Database, mock::DatabaseMock, DatabaseEnv}; use std::sync::Arc; - /// Sealed trait to limit the implementors of the Database trait. + /// Sealed trait to limit the implementers of the Database trait. 
pub trait Sealed: Sized {} impl Sealed for &DB {} diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 31430fb992d16..218196c49e680 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -501,7 +501,7 @@ impl Default for Geometry { /// /// # Arguments /// -/// * `process_id` – A proceess id of the reader process. +/// * `process_id` – A process id of the reader process. /// * `thread_id` – A thread id of the reader thread. /// * `read_txn_id` – An oldest read transaction number on which stalled. /// * `gap` – A lag from the last committed txn. diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index 4d311f2732c3f..cc4f2b0f51471 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -1071,7 +1071,7 @@ mod tests { let num_rows = 2; // (missing_offsets, expected number of rows) - // If a row wasnt fully pruned, then it should clear it up as well + // If a row wasn't fully pruned, then it should clear it up as well let missing_offsets_scenarios = [(1, 1), (2, 1), (3, 0)]; for (missing_offsets, expected_rows) in missing_offsets_scenarios { diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index a2dba78a04130..e87be25c969f7 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -127,7 +127,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { tracing::warn!( target: "provider::historical_sp", target = self.block_number, - "Attempt to calculate state root for an old block might result in OOM, tread carefully" + "Attempt to calculate state root for an old block might result in OOM, treat carefully" ); } diff --git a/crates/storage/provider/src/traits/transactions.rs b/crates/storage/provider/src/traits/transactions.rs 
index 9041593b552ef..3e798bb419c68 100644 --- a/crates/storage/provider/src/traits/transactions.rs +++ b/crates/storage/provider/src/traits/transactions.rs @@ -15,7 +15,7 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { /// Returns None if the transaction is not found. fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult>; - /// Get transaction by id, computes hash everytime so more expensive. + /// Get transaction by id, computes hash every time so more expensive. fn transaction_by_id(&self, id: TxNumber) -> ProviderResult>; /// Get transaction by id without computing the hash. diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 9703213237d17..7e733a6593c9a 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -399,7 +399,7 @@ impl PendingPool { unique_senders = self.highest_nonces.len(); non_local_senders -= unique_removed; - // we can re-use the temp array + // we can reuse the temp array removed.clear(); // loop through the highest nonces set, removing transactions until we reach the limit diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index dfc63c921ebcc..cdd897448e8e8 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1766,8 +1766,8 @@ pub(crate) struct PoolInternalTransaction { pub(crate) transaction: Arc>, /// The `SubPool` that currently contains this transaction. pub(crate) subpool: SubPool, - /// Keeps track of the current state of the transaction and therefor in which subpool it should - /// reside + /// Keeps track of the current state of the transaction and therefore in which subpool it + /// should reside pub(crate) state: TxState, /// The total cost all transactions before this transaction. 
/// From 86980836b8cc7e36ecf47400485f544cd2ffe2eb Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 24 Apr 2024 00:37:24 +0200 Subject: [PATCH 299/700] fix: dont panic on invalid l1 blockinfo calldata (#7834) --- crates/revm/src/optimism/mod.rs | 40 ++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/crates/revm/src/optimism/mod.rs b/crates/revm/src/optimism/mod.rs index 470e7a914468e..0dc6c687704eb 100644 --- a/crates/revm/src/optimism/mod.rs +++ b/crates/revm/src/optimism/mod.rs @@ -32,13 +32,18 @@ pub fn extract_l1_info(block: &Block) -> Result Result Result { - // The setL1BlockValuesEcotone tx calldata must be exactly 160 bytes long, considering that - // we already removed the first 4 bytes (the function selector). Detailed breakdown: - // 8 bytes for the block sequence number - // + 4 bytes for the blob base fee scalar - // + 4 bytes for the base fee scalar - // + 8 bytes for the block number - // + 8 bytes for the block timestamp - // + 32 bytes for the base fee - // + 32 bytes for the blob base fee - // + 32 bytes for the block hash - // + 32 bytes for the batcher hash if data.len() != 160 { return Err(reth_executor::BlockExecutionError::OptimismBlockExecution( reth_executor::OptimismBlockExecutionError::L1BlockInfoError { From dcad03c9b8c5375bc0aaba2ff3b481c31b3b03e1 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Wed, 24 Apr 2024 03:31:04 -0600 Subject: [PATCH 300/700] refactor(reth-basic-payload-builder): remove unnecessary assignment (#7835) --- crates/payload/basic/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 359a0fb16a036..7903dfa8dea54 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -410,7 +410,6 @@ where BuildOutcome::Better { payload, cached_reads } => { this.cached_reads = Some(cached_reads); debug!(target: "payload_builder", value = %payload.fees(), "built better 
payload"); - let payload = payload; this.best_payload = Some(payload); } BuildOutcome::Aborted { fees, cached_reads } => { From 9db17123b42a49fc911abb8fc4e92b26b9749c80 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 24 Apr 2024 11:32:22 +0200 Subject: [PATCH 301/700] refactor: replace `to_primitive_transaction_kind` by `From` impl (#7831) --- crates/primitives/src/transaction/mod.rs | 9 +++++++++ .../rpc-types-compat/src/transaction/typed.rs | 18 ++++-------------- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 31cb277f00886..c2df133053b06 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -868,6 +868,15 @@ impl TransactionKind { } } +impl From for TransactionKind { + fn from(kind: reth_rpc_types::TransactionKind) -> Self { + match kind { + reth_rpc_types::TransactionKind::Call(to) => Self::Call(to), + reth_rpc_types::TransactionKind::Create => Self::Create, + } + } +} + impl Compact for TransactionKind { fn to_compact(self, buf: &mut B) -> usize where diff --git a/crates/rpc/rpc-types-compat/src/transaction/typed.rs b/crates/rpc/rpc-types-compat/src/transaction/typed.rs index cc90c626ec98f..6b0ed52947bb6 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/typed.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/typed.rs @@ -16,7 +16,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, gas_price: tx.gas_price.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind.into(), value: tx.value, input: tx.input, }), @@ -25,7 +25,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, gas_price: tx.gas_price.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind.into(), value: tx.value, input: tx.input, access_list: tx.access_list, @@ -35,7 +35,7 @@ 
pub fn to_primitive_transaction( nonce: tx.nonce, max_fee_per_gas: tx.max_fee_per_gas.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind.into(), value: tx.value, input: tx.input, access_list: tx.access_list, @@ -47,7 +47,7 @@ pub fn to_primitive_transaction( gas_limit: tx.gas_limit.to(), max_fee_per_gas: tx.max_fee_per_gas.to(), max_priority_fee_per_gas: tx.max_priority_fee_per_gas.to(), - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind.into(), value: tx.value, access_list: tx.access_list, blob_versioned_hashes: tx.blob_versioned_hashes, @@ -56,13 +56,3 @@ pub fn to_primitive_transaction( }), }) } - -/// Transforms a [reth_rpc_types::TransactionKind] into a [reth_primitives::TransactionKind] -pub fn to_primitive_transaction_kind( - kind: reth_rpc_types::TransactionKind, -) -> reth_primitives::TransactionKind { - match kind { - reth_rpc_types::TransactionKind::Call(to) => reth_primitives::TransactionKind::Call(to), - reth_rpc_types::TransactionKind::Create => reth_primitives::TransactionKind::Create, - } -} From f372db40c5f6c85f3d1b468180a5233d6ce16e57 Mon Sep 17 00:00:00 2001 From: Luca Provini Date: Wed, 24 Apr 2024 11:36:31 +0200 Subject: [PATCH 302/700] feat: blob e2e test (#7823) --- Cargo.lock | 2 + crates/e2e-test-utils/Cargo.toml | 2 +- crates/e2e-test-utils/src/engine_api.rs | 16 ++-- crates/e2e-test-utils/src/lib.rs | 12 ++- crates/e2e-test-utils/src/network.rs | 4 +- crates/e2e-test-utils/src/node.rs | 106 +++++++++++++---------- crates/e2e-test-utils/src/payload.rs | 4 +- crates/e2e-test-utils/src/rpc.rs | 24 +++++ crates/e2e-test-utils/src/transaction.rs | 80 +++++++++++++++++ crates/e2e-test-utils/src/wallet.rs | 55 +++++------- crates/node-ethereum/tests/e2e/blobs.rs | 96 ++++++++++++++++++++ crates/node-ethereum/tests/e2e/eth.rs | 39 +++++++-- crates/node-ethereum/tests/e2e/main.rs | 1 + crates/node-ethereum/tests/e2e/p2p.rs | 19 ++-- crates/optimism/node/tests/e2e/p2p.rs | 25 ++++-- 
crates/optimism/node/tests/e2e/utils.rs | 17 +--- 16 files changed, 373 insertions(+), 129 deletions(-) create mode 100644 crates/e2e-test-utils/src/rpc.rs create mode 100644 crates/e2e-test-utils/src/transaction.rs create mode 100644 crates/node-ethereum/tests/e2e/blobs.rs diff --git a/Cargo.lock b/Cargo.lock index 9c450fe702112..d96e0fe3bf1a5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -155,6 +155,7 @@ dependencies = [ "c-kzg", "serde", "sha2 0.10.8", + "thiserror", ] [[package]] @@ -185,6 +186,7 @@ dependencies = [ "alloy-serde", "arbitrary", "c-kzg", + "derive_more", "ethereum_ssz", "ethereum_ssz_derive", "once_cell", diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 96b4ca2e68cf3..03e0edb91c386 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -33,5 +33,5 @@ alloy-signer.workspace = true alloy-signer-wallet = { workspace = true, features = ["mnemonic"] } alloy-rpc-types.workspace = true alloy-network.workspace = true -alloy-consensus.workspace = true +alloy-consensus = { workspace = true, features = ["kzg"] } tracing.workspace = true \ No newline at end of file diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index fe05b0b6893f9..9ede69e674414 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -13,13 +13,13 @@ use reth_primitives::B256; use std::marker::PhantomData; /// Helper for engine api operations -pub struct EngineApiHelper { +pub struct EngineApiTestContext { pub canonical_stream: CanonStateNotificationStream, pub engine_api_client: HttpClient, pub _marker: PhantomData, } -impl EngineApiHelper { +impl EngineApiTestContext { /// Retrieves a v3 payload from the engine api pub async fn get_payload_v3( &self, @@ -34,6 +34,7 @@ impl EngineApiHelper { payload: E::BuiltPayload, payload_builder_attributes: E::PayloadBuilderAttributes, expected_status: PayloadStatusEnum, + versioned_hashes: 
Vec, ) -> eyre::Result where E::ExecutionPayloadV3: From + PayloadEnvelopeExt, @@ -45,7 +46,7 @@ impl EngineApiHelper { let submission = EngineApiClient::::new_payload_v3( &self.engine_api_client, envelope_v3.execution_payload(), - vec![], + versioned_hashes, payload_builder_attributes.parent_beacon_block_root().unwrap(), ) .await?; @@ -56,18 +57,17 @@ impl EngineApiHelper { } /// Sends forkchoice update to the engine api - pub async fn update_forkchoice(&self, hash: B256) -> eyre::Result<()> { + pub async fn update_forkchoice(&self, current_head: B256, new_head: B256) -> eyre::Result<()> { EngineApiClient::::fork_choice_updated_v2( &self.engine_api_client, ForkchoiceState { - head_block_hash: hash, - safe_block_hash: hash, - finalized_block_hash: hash, + head_block_hash: new_head, + safe_block_hash: current_head, + finalized_block_hash: current_head, }, None, ) .await?; - Ok(()) } diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 3c34f76e57aa5..8e57eebed729f 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -1,4 +1,4 @@ -use node::NodeHelper; +use node::NodeTestContext; use reth::{ args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, builder::{NodeBuilder, NodeConfig, NodeHandle}, @@ -18,6 +18,9 @@ use wallet::Wallet; /// Wrapper type to create test nodes pub mod node; +/// Helper for transaction operations +pub mod transaction; + /// Helper type to yield accounts from mnemonic pub mod wallet; @@ -29,6 +32,8 @@ mod network; /// Helper for engine api operations mod engine_api; +/// Helper for rpc operations +mod rpc; /// Helper traits mod traits; @@ -75,7 +80,7 @@ where .launch() .await?; - let mut node = NodeHelper::new(node).await?; + let mut node = NodeTestContext::new(node).await?; // Connect each node in a chain. 
if let Some(previous_node) = nodes.last_mut() { @@ -104,4 +109,5 @@ type TmpPool = <>>::PoolBuilde type TmpNodeAdapter = FullNodeTypesAdapter>; /// Type alias for a type of NodeHelper -pub type NodeHelperType = NodeHelper, TmpPool>>; +pub type NodeHelperType = + NodeTestContext, TmpPool>>; diff --git a/crates/e2e-test-utils/src/network.rs b/crates/e2e-test-utils/src/network.rs index 341b0d7d0aea9..92e9b316a9a46 100644 --- a/crates/e2e-test-utils/src/network.rs +++ b/crates/e2e-test-utils/src/network.rs @@ -5,12 +5,12 @@ use reth_tracing::tracing::info; use tokio_stream::wrappers::UnboundedReceiverStream; /// Helper for network operations -pub struct NetworkHelper { +pub struct NetworkTestContext { network_events: UnboundedReceiverStream, network: NetworkHandle, } -impl NetworkHelper { +impl NetworkTestContext { /// Creates a new network helper pub fn new(network: NetworkHandle) -> Self { let network_events = network.event_listener(); diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 18d147fd913a8..b2ccf899e1412 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -1,35 +1,36 @@ use crate::{ - engine_api::EngineApiHelper, network::NetworkHelper, payload::PayloadHelper, - traits::PayloadEnvelopeExt, + engine_api::EngineApiTestContext, network::NetworkTestContext, payload::PayloadTestContext, + rpc::RpcTestContext, traits::PayloadEnvelopeExt, }; + use alloy_rpc_types::BlockNumberOrTag; use eyre::Ok; + use futures_util::Future; use reth::{ api::{BuiltPayload, EngineTypes, FullNodeComponents, PayloadBuilderAttributes}, builder::FullNode, providers::{BlockReader, BlockReaderIdExt, CanonStateSubscriptions, StageCheckpointReader}, - rpc::{ - eth::{error::EthResult, EthTransactions}, - types::engine::PayloadStatusEnum, - }, + rpc::types::engine::PayloadStatusEnum, }; +use reth_node_builder::NodeTypes; use reth_primitives::{stage::StageId, BlockHash, BlockNumber, Bytes, B256}; use 
std::{marker::PhantomData, pin::Pin}; use tokio_stream::StreamExt; /// An helper struct to handle node actions -pub struct NodeHelper +pub struct NodeTestContext where Node: FullNodeComponents, { pub inner: FullNode, - pub payload: PayloadHelper, - pub network: NetworkHelper, - pub engine_api: EngineApiHelper, + pub payload: PayloadTestContext, + pub network: NetworkTestContext, + pub engine_api: EngineApiTestContext, + pub rpc: RpcTestContext, } -impl NodeHelper +impl NodeTestContext where Node: FullNodeComponents, { @@ -39,17 +40,18 @@ where Ok(Self { inner: node.clone(), - network: NetworkHelper::new(node.network.clone()), - payload: PayloadHelper::new(builder).await?, - engine_api: EngineApiHelper { + payload: PayloadTestContext::new(builder).await?, + network: NetworkTestContext::new(node.network.clone()), + engine_api: EngineApiTestContext { engine_api_client: node.auth_server_handle().http_client(), canonical_stream: node.provider.canonical_state_stream(), _marker: PhantomData::, }, + rpc: RpcTestContext { inner: node.rpc_registry }, }) } - pub async fn connect(&mut self, node: &mut NodeHelper) { + pub async fn connect(&mut self, node: &mut NodeTestContext) { self.network.add_peer(node.network.record()).await; node.network.add_peer(self.network.record()).await; node.network.expect_session().await; @@ -62,7 +64,7 @@ where pub async fn advance( &mut self, length: u64, - tx_generator: impl Fn() -> Pin>>, + tx_generator: impl Fn(u64) -> Pin>>, attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes + Copy, ) -> eyre::Result< @@ -76,60 +78,74 @@ where From<::BuiltPayload> + PayloadEnvelopeExt, { let mut chain = Vec::with_capacity(length as usize); - for _ in 0..length { - let (payload, _) = - self.advance_block(tx_generator().await, attributes_generator).await?; - chain.push(payload); + for i in 0..length { + let raw_tx = tx_generator(i).await; + let tx_hash = self.rpc.inject_tx(raw_tx).await?; + let (payload, eth_attr) = self.advance_block(vec![], 
attributes_generator).await?; + let block_hash = payload.block().hash(); + let block_number = payload.block().number; + self.assert_new_block(tx_hash, block_hash, block_number).await?; + chain.push((payload, eth_attr)); } Ok(chain) } - /// Advances the node forward one block - pub async fn advance_block( + /// Creates a new payload from given attributes generator + /// expects a payload attribute event and waits until the payload is built. + /// + /// It triggers the resolve payload via engine api and expects the built payload event. + pub async fn new_payload( &mut self, - raw_tx: Bytes, attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes, ) -> eyre::Result<( - ( - ::BuiltPayload, - ::PayloadBuilderAttributes, - ), - B256, + <::Engine as EngineTypes>::BuiltPayload, + <::Engine as EngineTypes>::PayloadBuilderAttributes, )> where ::ExecutionPayloadV3: From<::BuiltPayload> + PayloadEnvelopeExt, { - // push tx into pool via RPC server - let tx_hash = self.inject_tx(raw_tx).await?; - // trigger new payload building draining the pool let eth_attr = self.payload.new_payload(attributes_generator).await.unwrap(); - // first event is the payload attributes self.payload.expect_attr_event(eth_attr.clone()).await?; - // wait for the payload builder to have finished building self.payload.wait_for_built_payload(eth_attr.payload_id()).await; - // trigger resolve payload via engine api self.engine_api.get_payload_v3(eth_attr.payload_id()).await?; - // ensure we're also receiving the built payload as event - let payload = self.payload.expect_built_payload().await?; + Ok((self.payload.expect_built_payload().await?, eth_attr)) + } + + /// Advances the node forward one block + pub async fn advance_block( + &mut self, + versioned_hashes: Vec, + attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes, + ) -> eyre::Result<( + ::BuiltPayload, + <::Engine as EngineTypes>::PayloadBuilderAttributes, + )> + where + ::ExecutionPayloadV3: + From<::BuiltPayload> + 
PayloadEnvelopeExt, + { + let (payload, eth_attr) = self.new_payload(attributes_generator).await?; - // submit payload via engine api let block_hash = self .engine_api - .submit_payload(payload.clone(), eth_attr.clone(), PayloadStatusEnum::Valid) + .submit_payload( + payload.clone(), + eth_attr.clone(), + PayloadStatusEnum::Valid, + versioned_hashes, + ) .await?; // trigger forkchoice update via engine api to commit the block to the blockchain - self.engine_api.update_forkchoice(block_hash).await?; + self.engine_api.update_forkchoice(block_hash, block_hash).await?; - // assert the block has been committed to the blockchain - self.assert_new_block(tx_hash, block_hash, payload.block().number).await?; - Ok(((payload, eth_attr), tx_hash)) + Ok((payload, eth_attr)) } /// Waits for block to be available on node. @@ -169,12 +185,6 @@ where Ok(()) } - /// Injects a raw transaction into the node tx pool via RPC server - async fn inject_tx(&mut self, raw_tx: Bytes) -> EthResult { - let eth_api = self.inner.rpc_registry.eth_api(); - eth_api.send_raw_transaction(raw_tx).await - } - /// Asserts that a new block has been added to the blockchain /// and the tx has been included in the block pub async fn assert_new_block( diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index 2d349721b232c..47f4134d7fe83 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -4,13 +4,13 @@ use reth_payload_builder::{Events, PayloadBuilderHandle, PayloadId}; use tokio_stream::wrappers::BroadcastStream; /// Helper for payload operations -pub struct PayloadHelper { +pub struct PayloadTestContext { pub payload_event_stream: BroadcastStream>, payload_builder: PayloadBuilderHandle, pub timestamp: u64, } -impl PayloadHelper { +impl PayloadTestContext { /// Creates a new payload helper pub async fn new(payload_builder: PayloadBuilderHandle) -> eyre::Result { let payload_events = payload_builder.subscribe().await?; diff --git 
a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs new file mode 100644 index 0000000000000..09f161a91dc78 --- /dev/null +++ b/crates/e2e-test-utils/src/rpc.rs @@ -0,0 +1,24 @@ +use alloy_consensus::TxEnvelope; +use alloy_network::eip2718::Decodable2718; +use reth::{api::FullNodeComponents, builder::rpc::RpcRegistry, rpc::api::DebugApiServer}; +use reth_primitives::{Bytes, B256}; +use reth_rpc::eth::{error::EthResult, EthTransactions}; + +pub struct RpcTestContext { + pub inner: RpcRegistry, +} + +impl RpcTestContext { + /// Injects a raw transaction into the node tx pool via RPC server + pub async fn inject_tx(&mut self, raw_tx: Bytes) -> EthResult { + let eth_api = self.inner.eth_api(); + eth_api.send_raw_transaction(raw_tx).await + } + + /// Retrieves a transaction envelope by its hash + pub async fn envelope_by_hash(&mut self, hash: B256) -> eyre::Result { + let tx = self.inner.debug_api().raw_transaction(hash).await?.unwrap(); + let tx = tx.to_vec(); + Ok(TxEnvelope::decode_2718(&mut tx.as_ref()).unwrap()) + } +} diff --git a/crates/e2e-test-utils/src/transaction.rs b/crates/e2e-test-utils/src/transaction.rs new file mode 100644 index 0000000000000..a2c40052c47d2 --- /dev/null +++ b/crates/e2e-test-utils/src/transaction.rs @@ -0,0 +1,80 @@ +use alloy_consensus::{ + BlobTransactionSidecar, SidecarBuilder, SimpleCoder, TxEip4844Variant, TxEnvelope, +}; +use alloy_network::{eip2718::Encodable2718, EthereumSigner, TransactionBuilder}; +use alloy_rpc_types::{TransactionInput, TransactionRequest}; +use alloy_signer_wallet::LocalWallet; +use eyre::Ok; +use reth_primitives::{hex, Address, Bytes, U256}; + +use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, B256}; + +pub struct TransactionTestContext; + +impl TransactionTestContext { + /// Creates a static transfer and signs it + pub async fn transfer_tx(chain_id: u64, wallet: LocalWallet) -> Bytes { + let tx = tx(chain_id, None, 0); + let signer = EthereumSigner::from(wallet); + 
tx.build(&signer).await.unwrap().encoded_2718().into() + } + + /// Creates a tx with blob sidecar and sign it + pub async fn tx_with_blobs(chain_id: u64, wallet: LocalWallet) -> eyre::Result { + let mut tx = tx(chain_id, None, 0); + + let mut builder = SidecarBuilder::::new(); + builder.ingest(b"dummy blob"); + let sidecar: BlobTransactionSidecar = builder.build()?; + + tx.set_blob_sidecar(sidecar); + tx.set_max_fee_per_blob_gas(15e9 as u128); + + let signer = EthereumSigner::from(wallet); + let signed = tx.clone().build(&signer).await.unwrap(); + + Ok(signed.encoded_2718().into()) + } + + pub async fn optimism_l1_block_info_tx( + chain_id: u64, + wallet: LocalWallet, + nonce: u64, + ) -> Bytes { + let l1_block_info = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); + let tx = tx(chain_id, Some(l1_block_info), nonce); + let signer = EthereumSigner::from(wallet); + tx.build(&signer).await.unwrap().encoded_2718().into() + } + + /// Validates the sidecar of a given tx envelope and returns the versioned hashes + pub fn validate_sidecar(tx: TxEnvelope) -> Vec { + let proof_setting = MAINNET_KZG_TRUSTED_SETUP.clone(); + + match tx { + TxEnvelope::Eip4844(signed) => match signed.tx() { + TxEip4844Variant::TxEip4844WithSidecar(tx) => { + tx.validate_blob(&proof_setting).unwrap(); + 
tx.sidecar.versioned_hashes().collect() + } + _ => panic!("Expected Eip4844 transaction with sidecar"), + }, + _ => panic!("Expected Eip4844 transaction"), + } + } +} + +/// Creates a type 2 transaction +fn tx(chain_id: u64, data: Option, nonce: u64) -> TransactionRequest { + TransactionRequest { + nonce: Some(nonce), + value: Some(U256::from(100)), + to: Some(Address::random()), + gas: Some(210000), + max_fee_per_gas: Some(20e9 as u128), + max_priority_fee_per_gas: Some(20e9 as u128), + chain_id: Some(chain_id), + input: TransactionInput { input: None, data }, + ..Default::default() + } +} diff --git a/crates/e2e-test-utils/src/wallet.rs b/crates/e2e-test-utils/src/wallet.rs index d064eede99c11..d94dec2a08c3f 100644 --- a/crates/e2e-test-utils/src/wallet.rs +++ b/crates/e2e-test-utils/src/wallet.rs @@ -1,19 +1,19 @@ -use alloy_network::{eip2718::Encodable2718, EthereumSigner, TransactionBuilder}; -use alloy_rpc_types::{TransactionInput, TransactionRequest}; +use alloy_signer::Signer; use alloy_signer_wallet::{coins_bip39::English, LocalWallet, MnemonicBuilder}; -use reth_primitives::{hex, Address, Bytes, U256}; + /// One of the accounts of the genesis allocations. 
pub struct Wallet { - inner: LocalWallet, - pub nonce: u64, + pub inner: LocalWallet, chain_id: u64, + amount: usize, + derivation_path: Option, } impl Wallet { /// Creates a new account from one of the secret/pubkeys of the genesis allocations (test.json) - pub(crate) fn new(phrase: &str) -> Self { - let inner = MnemonicBuilder::::default().phrase(phrase).build().unwrap(); - Self { inner, chain_id: 1, nonce: 0 } + pub fn new(amount: usize) -> Self { + let inner = MnemonicBuilder::::default().phrase(TEST_MNEMONIC).build().unwrap(); + Self { inner, chain_id: 1, amount, derivation_path: None } } /// Sets chain id @@ -22,31 +22,24 @@ impl Wallet { self } - /// Creates a static transfer and signs it - pub async fn transfer_tx(&mut self) -> Bytes { - self.tx(None).await + fn get_derivation_path(&self) -> &str { + self.derivation_path.as_deref().unwrap_or("m/44'/60'/0'/0/") } - pub async fn optimism_l1_block_info_tx(&mut self) -> Bytes { - let l1_block_info = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); - self.tx(Some(l1_block_info)).await - } + pub fn gen(&self) -> Vec { + let builder = MnemonicBuilder::::default().phrase(TEST_MNEMONIC); + + // use the derivation path + let derivation_path = self.get_derivation_path(); - /// Creates a transaction with data and signs it - pub async fn tx(&mut self, 
data: Option) -> Bytes { - let tx = TransactionRequest { - nonce: Some(self.nonce), - value: Some(U256::from(100)), - to: Some(Address::random()), - gas_price: Some(20e9 as u128), - gas: Some(210000), - chain_id: Some(self.chain_id), - input: TransactionInput { input: None, data }, - ..Default::default() - }; - self.nonce += 1; - let signer = EthereumSigner::from(self.inner.clone()); - tx.build(&signer).await.unwrap().encoded_2718().into() + let mut wallets = Vec::with_capacity(self.amount); + for idx in 0..self.amount { + let builder = + builder.clone().derivation_path(&format!("{derivation_path}{idx}")).unwrap(); + let wallet = builder.build().unwrap().with_chain_id(Some(self.chain_id)); + wallets.push(wallet) + } + wallets } } @@ -54,6 +47,6 @@ const TEST_MNEMONIC: &str = "test test test test test test test test test test t impl Default for Wallet { fn default() -> Self { - Wallet::new(TEST_MNEMONIC) + Wallet::new(1) } } diff --git a/crates/node-ethereum/tests/e2e/blobs.rs b/crates/node-ethereum/tests/e2e/blobs.rs new file mode 100644 index 0000000000000..d8fca42d6257d --- /dev/null +++ b/crates/node-ethereum/tests/e2e/blobs.rs @@ -0,0 +1,96 @@ +use std::sync::Arc; + +use reth::{ + args::RpcServerArgs, + builder::{NodeBuilder, NodeConfig, NodeHandle}, + rpc::types::engine::PayloadStatusEnum, + tasks::TaskManager, +}; +use reth_e2e_test_utils::{ + node::NodeTestContext, transaction::TransactionTestContext, wallet::Wallet, +}; +use reth_node_ethereum::EthereumNode; +use reth_primitives::{b256, ChainSpecBuilder, Genesis, MAINNET}; +use reth_transaction_pool::TransactionPool; + +use crate::utils::eth_payload_attributes; + +#[tokio::test] +async fn can_handle_blobs() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + let chain_spec = Arc::new( + ChainSpecBuilder::default() + 
.chain(MAINNET.chain) + .genesis(genesis) + .cancun_activated() + .build(), + ); + let node_config = NodeConfig::test() + .with_chain(chain_spec) + .with_unused_ports() + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node(exec.clone()) + .node(EthereumNode::default()) + .launch() + .await?; + + let mut node = NodeTestContext::new(node).await?; + + let wallets = Wallet::new(2).gen(); + let blob_wallet = wallets.first().unwrap(); + let second_wallet = wallets.last().unwrap(); + + // inject normal tx + let raw_tx = TransactionTestContext::transfer_tx(1, second_wallet.clone()).await; + let tx_hash = node.rpc.inject_tx(raw_tx).await?; + // build payload with normal tx + let (payload, attributes) = node.new_payload(eth_payload_attributes).await?; + + // clean the pool + node.inner.pool.remove_transactions(vec![tx_hash]); + + // build blob tx + let blob_tx = TransactionTestContext::tx_with_blobs(1, blob_wallet.clone()).await?; + + // inject blob tx to the pool + let blob_tx_hash = node.rpc.inject_tx(blob_tx).await?; + // fetch it from rpc + let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; + // validate sidecar + let versioned_hashes = TransactionTestContext::validate_sidecar(envelope); + + // build a payload + let (blob_payload, blob_attr) = node.new_payload(eth_payload_attributes).await?; + + // submit the blob payload + let blob_block_hash = node + .engine_api + .submit_payload(blob_payload, blob_attr, PayloadStatusEnum::Valid, versioned_hashes.clone()) + .await?; + + let genesis_hash = b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"); + + let (_, _) = tokio::join!( + // send fcu with blob hash + node.engine_api.update_forkchoice(genesis_hash, blob_block_hash), + // send fcu with normal hash + node.engine_api.update_forkchoice(genesis_hash, payload.block().hash()) + ); + + // submit normal payload + 
node.engine_api.submit_payload(payload, attributes, PayloadStatusEnum::Valid, vec![]).await?; + + tokio::time::sleep(std::time::Duration::from_secs(3)).await; + + // expects the blob tx to be back in the pool + let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; + // make sure the sidecar is present + TransactionTestContext::validate_sidecar(envelope); + + Ok(()) +} diff --git a/crates/node-ethereum/tests/e2e/eth.rs b/crates/node-ethereum/tests/e2e/eth.rs index 39ba5e2326995..4f566e7c8d4fa 100644 --- a/crates/node-ethereum/tests/e2e/eth.rs +++ b/crates/node-ethereum/tests/e2e/eth.rs @@ -4,7 +4,9 @@ use reth::{ builder::{NodeBuilder, NodeConfig, NodeHandle}, tasks::TaskManager, }; -use reth_e2e_test_utils::{node::NodeHelper, setup, wallet::Wallet}; +use reth_e2e_test_utils::{ + node::NodeTestContext, setup, transaction::TransactionTestContext, wallet::Wallet, +}; use reth_node_ethereum::EthereumNode; use reth_primitives::{ChainSpecBuilder, Genesis, MAINNET}; use std::sync::Arc; @@ -13,7 +15,7 @@ use std::sync::Arc; async fn can_run_eth_node() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let (mut nodes, _tasks, mut wallet) = setup::( + let (mut nodes, _tasks, _wallet) = setup::( 1, Arc::new( ChainSpecBuilder::default() @@ -27,10 +29,20 @@ async fn can_run_eth_node() -> eyre::Result<()> { .await?; let mut node = nodes.pop().unwrap(); - let raw_tx = wallet.transfer_tx().await; + let wallet = Wallet::default(); + let raw_tx = TransactionTestContext::transfer_tx(1, wallet.inner).await; + + // make the node advance + let tx_hash = node.rpc.inject_tx(raw_tx).await?; // make the node advance - node.advance_block(raw_tx, eth_payload_attributes).await?; + let (payload, _) = node.advance_block(vec![], eth_payload_attributes).await?; + + let block_hash = payload.block().hash(); + let block_number = payload.block().number; + + // assert the block has been committed to the blockchain + node.assert_new_block(tx_hash, block_hash, block_number).await?; 
Ok(()) } @@ -62,14 +74,23 @@ async fn can_run_eth_node_with_auth_engine_api_over_ipc() -> eyre::Result<()> { .node(EthereumNode::default()) .launch() .await?; - let mut node = NodeHelper::new(node).await?; + let mut node = NodeTestContext::new(node).await?; // Configure wallet from test mnemonic and create dummy transfer tx - let mut wallet = Wallet::default(); - let raw_tx = wallet.transfer_tx().await; + let wallet = Wallet::default(); + let raw_tx = TransactionTestContext::transfer_tx(1, wallet.inner).await; // make the node advance - node.advance_block(raw_tx, crate::utils::eth_payload_attributes).await?; + let tx_hash = node.rpc.inject_tx(raw_tx).await?; + + // make the node advance + let (payload, _) = node.advance_block(vec![], eth_payload_attributes).await?; + + let block_hash = payload.block().hash(); + let block_number = payload.block().number; + + // assert the block has been committed to the blockchain + node.assert_new_block(tx_hash, block_hash, block_number).await?; Ok(()) } @@ -99,7 +120,7 @@ async fn test_failed_run_eth_node_with_no_auth_engine_api_over_ipc_opts() -> eyr .launch() .await?; - let node = NodeHelper::new(node).await?; + let node = NodeTestContext::new(node).await?; // Ensure that the engine api client is not available let client = node.inner.engine_ipc_client().await; diff --git a/crates/node-ethereum/tests/e2e/main.rs b/crates/node-ethereum/tests/e2e/main.rs index 6a8a010649666..1d0d6db8cddb0 100644 --- a/crates/node-ethereum/tests/e2e/main.rs +++ b/crates/node-ethereum/tests/e2e/main.rs @@ -1,3 +1,4 @@ +mod blobs; mod dev; mod eth; mod p2p; diff --git a/crates/node-ethereum/tests/e2e/p2p.rs b/crates/node-ethereum/tests/e2e/p2p.rs index c7ce2a7c12d36..768d1ac5a11b1 100644 --- a/crates/node-ethereum/tests/e2e/p2p.rs +++ b/crates/node-ethereum/tests/e2e/p2p.rs @@ -1,5 +1,5 @@ use crate::utils::eth_payload_attributes; -use reth_e2e_test_utils::setup; +use reth_e2e_test_utils::{setup, transaction::TransactionTestContext}; use 
reth_node_ethereum::EthereumNode; use reth_primitives::{ChainSpecBuilder, MAINNET}; use std::sync::Arc; @@ -8,7 +8,7 @@ use std::sync::Arc; async fn can_sync() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let (mut nodes, _tasks, mut wallet) = setup::( + let (mut nodes, _tasks, wallet) = setup::( 2, Arc::new( ChainSpecBuilder::default() @@ -21,17 +21,24 @@ async fn can_sync() -> eyre::Result<()> { ) .await?; - let raw_tx = wallet.transfer_tx().await; + let raw_tx = TransactionTestContext::transfer_tx(1, wallet.inner).await; let mut second_node = nodes.pop().unwrap(); let mut first_node = nodes.pop().unwrap(); // Make the first node advance - let ((payload, _), tx_hash) = - first_node.advance_block(raw_tx.clone(), eth_payload_attributes).await?; + let tx_hash = first_node.rpc.inject_tx(raw_tx).await?; + + // make the node advance + let (payload, _) = first_node.advance_block(vec![], eth_payload_attributes).await?; + let block_hash = payload.block().hash(); + let block_number = payload.block().number; + + // assert the block has been committed to the blockchain + first_node.assert_new_block(tx_hash, block_hash, block_number).await?; // only send forkchoice update to second node - second_node.engine_api.update_forkchoice(block_hash).await?; + second_node.engine_api.update_forkchoice(block_hash, block_hash).await?; // expect second node advanced via p2p gossip second_node.assert_new_block(tx_hash, block_hash, 1).await?; diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index da6af2090e03b..a38fadf678438 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -1,13 +1,15 @@ use crate::utils::{advance_chain, setup}; -use std::sync::Arc; -use tokio::sync::Mutex; +use reth::primitives::BASE_MAINNET; +use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet::Wallet}; +use reth_primitives::ChainId; #[tokio::test] async fn can_sync() -> eyre::Result<()> { 
reth_tracing::init_test_tracing(); - let (mut nodes, _tasks, wallet) = setup(2).await?; - let wallet = Arc::new(Mutex::new(wallet)); + let chain_id: ChainId = BASE_MAINNET.chain.into(); + + let (mut nodes, _tasks, _wallet) = setup(2).await?; let second_node = nodes.pop().unwrap(); let mut first_node = nodes.pop().unwrap(); @@ -15,13 +17,24 @@ async fn can_sync() -> eyre::Result<()> { let tip: usize = 300; let tip_index: usize = tip - 1; + let wallet = Wallet::default(); + // On first node, create a chain up to block number 300a - let canonical_payload_chain = advance_chain(tip, &mut first_node, wallet.clone()).await?; + let canonical_payload_chain = advance_chain(tip, &mut first_node, |nonce: u64| { + let wallet = wallet.inner.clone(); + Box::pin(async move { + TransactionTestContext::optimism_l1_block_info_tx(chain_id, wallet, nonce).await + }) + }) + .await?; let canonical_chain = canonical_payload_chain.iter().map(|p| p.0.block().hash()).collect::>(); // On second node, sync up to block number 300a - second_node.engine_api.update_forkchoice(canonical_chain[tip_index]).await?; + second_node + .engine_api + .update_forkchoice(canonical_chain[tip_index], canonical_chain[tip_index]) + .await?; second_node.wait_block(tip as u64, canonical_chain[tip_index], true).await?; Ok(()) diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs index 5322cad9a6114..e86a7c654142f 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -1,10 +1,9 @@ -use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; +use reth::{primitives::Bytes, rpc::types::engine::PayloadAttributes, tasks::TaskManager}; use reth_e2e_test_utils::{wallet::Wallet, NodeHelperType}; use reth_node_optimism::{OptimismBuiltPayload, OptimismNode, OptimismPayloadBuilderAttributes}; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_primitives::{Address, ChainSpecBuilder, Genesis, B256, BASE_MAINNET}; -use 
std::sync::Arc; -use tokio::sync::Mutex; +use std::{future::Future, pin::Pin, sync::Arc}; /// Optimism Node Helper type pub(crate) type OpNode = NodeHelperType; @@ -28,17 +27,9 @@ pub(crate) async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskMa pub(crate) async fn advance_chain( length: usize, node: &mut OpNode, - wallet: Arc>, + tx_generator: impl Fn(u64) -> Pin>>, ) -> eyre::Result> { - node.advance( - length as u64, - || { - let wallet = wallet.clone(); - Box::pin(async move { wallet.lock().await.optimism_l1_block_info_tx().await }) - }, - optimism_payload_attributes, - ) - .await + node.advance(length as u64, tx_generator, optimism_payload_attributes).await } /// Helper function to create a new eth payload attributes From dc6a02ce783b5ec9b821e76bd4a76d6317bca752 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 24 Apr 2024 10:38:38 +0100 Subject: [PATCH 303/700] docs(book): recommend running with Docker Compose (#7637) --- book/installation/installation.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/book/installation/installation.md b/book/installation/installation.md index 9ecf71cc5d8d1..edd8849af4f6f 100644 --- a/book/installation/installation.md +++ b/book/installation/installation.md @@ -8,6 +8,11 @@ There are three core methods to obtain Reth: * [Docker images](./docker.md) * [Building from source.](./source.md) +> **Note** +> +> If you have Docker installed, we recommend using the [Docker Compose](./docker.md#using-docker-compose) configuration +> that will get you Reth, Lighthouse (Consensus Client), Prometheus and Grafana running and syncing with just one command. + ## Hardware Requirements The hardware requirements for running Reth depend on the node configuration and can change over time as the network grows or new features are implemented. 
From 4f81f3acc9fdc56b18444c6754f852b7060c57ee Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 24 Apr 2024 11:53:54 +0200 Subject: [PATCH 304/700] feat(discv5): recycle clean up code (#7727) Co-authored-by: Oliver Nordbjerg --- crates/net/discv5/src/config.rs | 58 ++++---- crates/net/discv5/src/filter.rs | 2 +- crates/net/discv5/src/lib.rs | 210 +++++++++++++++------------ crates/net/discv5/src/metrics.rs | 9 +- crates/net/discv5/src/network_key.rs | 11 ++ crates/net/network/src/config.rs | 15 +- 6 files changed, 173 insertions(+), 132 deletions(-) create mode 100644 crates/net/discv5/src/network_key.rs diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 809f0fa325075..bf15be861b2df 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -2,6 +2,7 @@ use std::{ collections::HashSet, + fmt::Debug, net::{IpAddr, SocketAddr}, }; @@ -10,14 +11,7 @@ use discv5::ListenConfig; use multiaddr::{Multiaddr, Protocol}; use reth_primitives::{Bytes, ForkId, NodeRecord, MAINNET}; -use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys}; - -/// L1 EL -pub const ETH: &[u8] = b"eth"; -/// L1 CL -pub const ETH2: &[u8] = b"eth2"; -/// Optimism -pub const OPSTACK: &[u8] = b"opstack"; +use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys, network_key}; /// Default interval in seconds at which to run a lookup up query. /// @@ -31,14 +25,18 @@ pub struct ConfigBuilder { discv5_config: Option, /// Nodes to boot from. bootstrap_nodes: HashSet, - /// [`ForkId`] to set in local node record. + /// Fork kv-pair to set in local node record. Identifies which network/chain/fork the node + /// belongs, e.g. `(b"opstack", ChainId)` or `(b"eth", ForkId)`. + /// + /// Defaults to L1 mainnet if not set. fork: Option<(&'static [u8], ForkId)>, /// RLPx TCP port to advertise. 
Note: so long as `reth_network` handles [`NodeRecord`]s as /// opposed to [`Enr`](enr::Enr)s, TCP is limited to same IP address as UDP, since /// [`NodeRecord`] doesn't supply an extra field for and alternative TCP address. tcp_port: u16, - /// Additional kv-pairs that should be advertised to peers by including in local node record. - other_enr_data: Vec<(&'static str, Bytes)>, + /// List of `(key, rlp-encoded-value)` tuples that should be advertised in local node record + /// (in addition to tcp port, udp port and fork). + other_enr_kv_pairs: Vec<(&'static [u8], Bytes)>, /// Interval in seconds at which to run a lookup up query to populate kbuckets. lookup_interval: Option, /// Custom filter rules to apply to a discovered peer in order to determine if it should be @@ -52,9 +50,9 @@ impl ConfigBuilder { let Config { discv5_config, bootstrap_nodes, - fork: fork_id, + fork, tcp_port, - other_enr_data, + other_enr_kv_pairs, lookup_interval, discovered_peer_filter, } = discv5_config; @@ -62,9 +60,9 @@ impl ConfigBuilder { Self { discv5_config: Some(discv5_config), bootstrap_nodes, - fork: Some(fork_id), + fork: Some(fork), tcp_port, - other_enr_data, + other_enr_kv_pairs, lookup_interval: Some(lookup_interval), discovered_peer_filter: Some(discovered_peer_filter), } @@ -117,9 +115,10 @@ impl ConfigBuilder { self } - /// Set [`ForkId`], and key used to identify it, to set in local [`Enr`](discv5::enr::Enr). - pub fn fork(mut self, key: &'static [u8], value: ForkId) -> Self { - self.fork = Some((key, value)); + /// Set fork ID kv-pair to set in local [`Enr`](discv5::enr::Enr). This lets peers on discovery + /// network know which chain this node belongs to. + pub fn fork(mut self, network_key: &'static [u8], fork_id: ForkId) -> Self { + self.fork = Some((network_key, fork_id)); self } @@ -129,9 +128,10 @@ impl ConfigBuilder { self } - /// Adds an additional kv-pair to include in the local [`Enr`](discv5::enr::Enr). 
- pub fn add_enr_kv_pair(mut self, kv_pair: (&'static str, Bytes)) -> Self { - self.other_enr_data.push(kv_pair); + /// Adds an additional kv-pair to include in the local [`Enr`](discv5::enr::Enr). Takes the key + /// to use for the kv-pair and the rlp encoded value. + pub fn add_enr_kv_pair(mut self, key: &'static [u8], value: Bytes) -> Self { + self.other_enr_kv_pairs.push((key, value)); self } @@ -152,7 +152,7 @@ impl ConfigBuilder { bootstrap_nodes, fork, tcp_port, - other_enr_data, + other_enr_kv_pairs, lookup_interval, discovered_peer_filter, } = self; @@ -160,19 +160,19 @@ impl ConfigBuilder { let discv5_config = discv5_config .unwrap_or_else(|| discv5::ConfigBuilder::new(ListenConfig::default()).build()); - let fork = fork.unwrap_or((ETH, MAINNET.latest_fork_id())); + let fork = fork.unwrap_or((network_key::ETH, MAINNET.latest_fork_id())); let lookup_interval = lookup_interval.unwrap_or(DEFAULT_SECONDS_LOOKUP_INTERVAL); let discovered_peer_filter = - discovered_peer_filter.unwrap_or_else(|| MustNotIncludeKeys::new(&[ETH2])); + discovered_peer_filter.unwrap_or_else(|| MustNotIncludeKeys::new(&[network_key::ETH2])); Config { discv5_config, bootstrap_nodes, fork, tcp_port, - other_enr_data, + other_enr_kv_pairs, lookup_interval, discovered_peer_filter, } @@ -187,12 +187,14 @@ pub struct Config { pub(super) discv5_config: discv5::Config, /// Nodes to boot from. pub(super) bootstrap_nodes: HashSet, - /// [`ForkId`] to set in local node record. + /// Fork kv-pair to set in local node record. Identifies which network/chain/fork the node + /// belongs, e.g. `(b"opstack", ChainId)` or `(b"eth", ForkId)`. pub(super) fork: (&'static [u8], ForkId), /// RLPx TCP port to advertise. pub(super) tcp_port: u16, - /// Additional kv-pairs to include in local node record. - pub(super) other_enr_data: Vec<(&'static str, Bytes)>, + /// Additional kv-pairs (besides tcp port, udp port and fork) that should be advertised to + /// peers by including in local node record. 
+ pub(super) other_enr_kv_pairs: Vec<(&'static [u8], Bytes)>, /// Interval in seconds at which to run a lookup up query with to populate kbuckets. pub(super) lookup_interval: u64, /// Custom filter rules to apply to a discovered peer in order to determine if it should be diff --git a/crates/net/discv5/src/filter.rs b/crates/net/discv5/src/filter.rs index 5cb7be18c60a4..f2f2f2fd6e196 100644 --- a/crates/net/discv5/src/filter.rs +++ b/crates/net/discv5/src/filter.rs @@ -96,7 +96,7 @@ mod tests { use alloy_rlp::Bytes; use discv5::enr::{CombinedKey, Enr}; - use crate::config::{ETH, ETH2}; + use crate::network_key::{ETH, ETH2}; use super::*; diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 218d4299dc837..7e9fd81b1ba51 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -33,6 +33,7 @@ pub mod enr; pub mod error; pub mod filter; pub mod metrics; +pub mod network_key; pub use discv5::{self, IpMode}; @@ -40,11 +41,13 @@ pub use config::{BootNode, Config, ConfigBuilder}; pub use enr::enr_to_discv4_id; pub use error::Error; pub use filter::{FilterOutcome, MustNotIncludeKeys}; + use metrics::{DiscoveredPeersMetrics, Discv5Metrics}; -/// Default number of times to do pulse lookup queries, at bootstrap (5 second intervals). +/// Default number of times to do pulse lookup queries, at bootstrap (pulse intervals, defaulting +/// to 5 seconds). /// -/// Default is 100 seconds. +/// Default is 100 counts. pub const DEFAULT_COUNT_PULSE_LOOKUPS_AT_BOOTSTRAP: u64 = 100; /// Default duration of look up interval, for pulse look ups at bootstrap. @@ -52,7 +55,7 @@ pub const DEFAULT_COUNT_PULSE_LOOKUPS_AT_BOOTSTRAP: u64 = 100; /// Default is 5 seconds. pub const DEFAULT_SECONDS_PULSE_LOOKUP_INTERVAL: u64 = 5; -/// Max kbucket index. +/// Max kbucket index is 255. /// /// This is the max log2distance for 32 byte [`NodeId`](discv5::enr::NodeId) - 1. See . 
pub const MAX_KBUCKET_INDEX: usize = 255; @@ -71,8 +74,8 @@ pub struct Discv5 { discv5: Arc, /// [`IpMode`] of the the node. ip_mode: IpMode, - /// Key used in kv-pair to ID chain. - fork_id_key: &'static [u8], + /// Key used in kv-pair to ID chain, e.g. 'opstack' or 'eth'. + fork_key: &'static [u8], /// Filter applied to a discovered peers before passing it up to app. discovered_peer_filter: MustNotIncludeKeys, /// Metrics for underlying [`discv5::Discv5`] node and filtered discovered peers. @@ -165,82 +168,21 @@ impl Discv5 { // // 1. make local enr from listen config // - let Config { - discv5_config, - bootstrap_nodes, - fork, - tcp_port, - other_enr_data, - lookup_interval, - discovered_peer_filter, - } = discv5_config; - - let (enr, bc_enr, ip_mode, fork_id_key) = { - let mut builder = discv5::enr::Enr::builder(); - - let (ip_mode, socket) = match discv5_config.listen_config { - ListenConfig::Ipv4 { ip, port } => { - if ip != Ipv4Addr::UNSPECIFIED { - builder.ip4(ip); - } - builder.udp4(port); - builder.tcp4(tcp_port); - - (IpMode::Ip4, (ip, port).into()) - } - ListenConfig::Ipv6 { ip, port } => { - if ip != Ipv6Addr::UNSPECIFIED { - builder.ip6(ip); - } - builder.udp6(port); - builder.tcp6(tcp_port); - - (IpMode::Ip6, (ip, port).into()) - } - ListenConfig::DualStack { ipv4, ipv4_port, ipv6, ipv6_port } => { - if ipv4 != Ipv4Addr::UNSPECIFIED { - builder.ip4(ipv4); - } - builder.udp4(ipv4_port); - builder.tcp4(tcp_port); - - if ipv6 != Ipv6Addr::UNSPECIFIED { - builder.ip6(ipv6); - } - builder.udp6(ipv6_port); - - (IpMode::DualStack, (ipv6, ipv6_port).into()) - } - }; - - // add fork id - let (chain, fork_id) = fork; - builder.add_value_rlp(chain, alloy_rlp::encode(fork_id).into()); - - // add other data - for (key, value) in other_enr_data { - builder.add_value_rlp(key, alloy_rlp::encode(value).into()); - } - - // enr v4 not to get confused with discv4, independent versioning enr and - // discovery - let enr = builder.build(sk).expect("should build enr v4"); 
- let EnrCombinedKeyWrapper(enr) = enr.into(); - - trace!(target: "net::discv5", - ?enr, - "local ENR" - ); + let (enr, bc_enr, fork_key, ip_mode) = Self::build_local_enr(sk, &discv5_config); - // backwards compatible enr - let bc_enr = NodeRecord::from_secret_key(socket, sk); - - (enr, bc_enr, ip_mode, chain) - }; + trace!(target: "net::discv5", + ?enr, + "local ENR" + ); // // 2. start discv5 // + let Config { + discv5_config, bootstrap_nodes, lookup_interval, discovered_peer_filter, .. + } = discv5_config; + + let EnrCombinedKeyWrapper(enr) = enr.into(); let sk = discv5::enr::CombinedKey::secp256k1_from_bytes(&mut sk.secret_bytes()).unwrap(); let mut discv5 = match discv5::Discv5::new(enr, sk, discv5_config) { Ok(discv5) => discv5, @@ -261,17 +203,79 @@ impl Discv5 { let metrics = Discv5Metrics::default(); // - // 4. bg kbuckets maintenance + // 4. start bg kbuckets maintenance // Self::spawn_populate_kbuckets_bg(lookup_interval, metrics.clone(), discv5.clone()); Ok(( - Self { discv5, ip_mode, fork_id_key, discovered_peer_filter, metrics }, + Self { discv5, ip_mode, fork_key, discovered_peer_filter, metrics }, discv5_updates, bc_enr, )) } + fn build_local_enr( + sk: &SecretKey, + config: &Config, + ) -> (Enr, NodeRecord, &'static [u8], IpMode) { + let mut builder = discv5::enr::Enr::builder(); + + let Config { discv5_config, fork, tcp_port, other_enr_kv_pairs, .. 
} = config; + + let (ip_mode, socket) = match discv5_config.listen_config { + ListenConfig::Ipv4 { ip, port } => { + if ip != Ipv4Addr::UNSPECIFIED { + builder.ip4(ip); + } + builder.udp4(port); + builder.tcp4(*tcp_port); + + (IpMode::Ip4, (ip, port).into()) + } + ListenConfig::Ipv6 { ip, port } => { + if ip != Ipv6Addr::UNSPECIFIED { + builder.ip6(ip); + } + builder.udp6(port); + builder.tcp6(*tcp_port); + + (IpMode::Ip6, (ip, port).into()) + } + ListenConfig::DualStack { ipv4, ipv4_port, ipv6, ipv6_port } => { + if ipv4 != Ipv4Addr::UNSPECIFIED { + builder.ip4(ipv4); + } + builder.udp4(ipv4_port); + builder.tcp4(*tcp_port); + + if ipv6 != Ipv6Addr::UNSPECIFIED { + builder.ip6(ipv6); + } + builder.udp6(ipv6_port); + + (IpMode::DualStack, (ipv6, ipv6_port).into()) + } + }; + + // identifies which network node is on + let (network, fork_value) = fork; + builder.add_value_rlp(network, alloy_rlp::encode(fork_value).into()); + + // add other data + for (key, value) in other_enr_kv_pairs { + builder.add_value_rlp(key, value.clone().into()); + } + + // enr v4 not to get confused with discv4, independent versioning enr and + // discovery + let enr = builder.build(sk).expect("should build enr v4"); + + // backwards compatible enr + let bc_enr = NodeRecord::from_secret_key(socket, sk); + + (enr, bc_enr, network, ip_mode) + } + /// Bootstraps underlying [`discv5::Discv5`] node with configured peers. 
async fn bootstrap( bootstrap_nodes: HashSet, @@ -423,20 +427,20 @@ impl Discv5 { return None } }; - let fork_id = match self.filter_discovered_peer(enr) { - FilterOutcome::Ok => self.get_fork_id(enr).ok(), - FilterOutcome::Ignore { reason } => { - trace!(target: "net::discovery::discv5", - ?enr, - reason, - "filtered out discovered peer" - ); + if let FilterOutcome::Ignore { reason } = self.filter_discovered_peer(enr) { + trace!(target: "net::discovery::discv5", + ?enr, + reason, + "filtered out discovered peer" + ); - self.metrics.discovered_peers.increment_established_sessions_filtered(1); + self.metrics.discovered_peers.increment_established_sessions_filtered(1); - return None - } - }; + return None + } + + let fork_id = + (self.fork_key == network_key::ETH).then(|| self.get_fork_id(enr).ok()).flatten(); trace!(target: "net::discovery::discv5", ?fork_id, @@ -485,7 +489,7 @@ impl Discv5 { &self, enr: &discv5::enr::Enr, ) -> Result { - let key = self.fork_id_key; + let key = self.fork_key; let mut fork_id_bytes = enr.get_raw_rlp(key).ok_or(Error::ForkMissing(key))?; Ok(ForkId::decode(&mut fork_id_bytes)?) @@ -513,8 +517,8 @@ impl Discv5 { } /// Returns the key to use to identify the [`ForkId`] kv-pair on the [`Enr`](discv5::Enr). 
- pub fn fork_id_key(&self) -> &[u8] { - self.fork_id_key + pub fn fork_key(&self) -> &[u8] { + self.fork_key } } @@ -603,6 +607,7 @@ pub async fn lookup( mod tests { use super::*; use ::enr::{CombinedKey, EnrKey}; + use reth_primitives::MAINNET; use secp256k1::rand::thread_rng; use tracing::trace; @@ -618,7 +623,7 @@ mod tests { .unwrap(), ), ip_mode: IpMode::Ip4, - fork_id_key: b"noop", + fork_key: b"noop", discovered_peer_filter: MustNotIncludeKeys::default(), metrics: Discv5Metrics::default(), } @@ -818,4 +823,21 @@ mod tests { assert_eq!(local_node_id.log2_distance(&target), Some(bucket_index as u64 + 1)); } } + + #[test] + fn build_enr_from_config() { + const TCP_PORT: u16 = 30303; + let fork_id = MAINNET.latest_fork_id(); + + let config = Config::builder(TCP_PORT).fork(network_key::ETH, fork_id).build(); + + let sk = SecretKey::new(&mut thread_rng()); + let (enr, _, _, _) = Discv5::build_local_enr(&sk, &config); + + let decoded_fork_id = + ForkId::decode(&mut enr.get_raw_rlp(network_key::ETH).unwrap()).unwrap(); + + assert_eq!(fork_id, decoded_fork_id); + assert_eq!(TCP_PORT, enr.tcp4().unwrap()); // listen config is defaulting to ip mode ipv4 + } } diff --git a/crates/net/discv5/src/metrics.rs b/crates/net/discv5/src/metrics.rs index 72ea5fc0e0cb5..12b024a2fbcd3 100644 --- a/crates/net/discv5/src/metrics.rs +++ b/crates/net/discv5/src/metrics.rs @@ -2,7 +2,7 @@ use metrics::{Counter, Gauge}; use reth_metrics::Metrics; -use crate::config::{ETH, ETH2, OPSTACK}; +use crate::network_key::{ETH, ETH2, OPSTACK}; /// Information tracked by [`Discv5`](crate::Discv5). #[derive(Debug, Default, Clone)] @@ -91,13 +91,14 @@ impl DiscoveredPeersMetrics { #[derive(Metrics, Clone)] #[metrics(scope = "discv5")] pub struct AdvertisedChainMetrics { - /// Frequency of node records with a kv-pair with [`OPSTACK`] as key. + /// Frequency of node records with a kv-pair with [`OPSTACK`](crate::network_key) as + /// key. 
opstack: Counter, - /// Frequency of node records with a kv-pair with [`ETH`] as key. + /// Frequency of node records with a kv-pair with [`ETH`](crate::network_key) as key. eth: Counter, - /// Frequency of node records with a kv-pair with [`ETH2`] as key. + /// Frequency of node records with a kv-pair with [`ETH2`](crate::network_key) as key. eth2: Counter, } diff --git a/crates/net/discv5/src/network_key.rs b/crates/net/discv5/src/network_key.rs new file mode 100644 index 0000000000000..47576e5b2384d --- /dev/null +++ b/crates/net/discv5/src/network_key.rs @@ -0,0 +1,11 @@ +//! Keys of ENR [`ForkId`](reth_primitives::ForkId) kv-pair. Identifies which network a node +//! belongs to. + +/// ENR fork ID kv-pair key, for an Ethereum L1 EL node. +pub const ETH: &[u8] = b"eth"; + +/// ENR fork ID kv-pair key, for an Ethereum L1 CL node. +pub const ETH2: &[u8] = b"eth2"; + +/// ENR fork ID kv-pair key, for an Optimism CL node. +pub const OPSTACK: &[u8] = b"opstack"; diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index f9c9212d97ec2..3e89a1f3ae81e 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -9,11 +9,12 @@ use crate::{ NetworkHandle, NetworkManager, }; use reth_discv4::{Discv4Config, Discv4ConfigBuilder, DEFAULT_DISCOVERY_ADDRESS}; -use reth_discv5::config::OPSTACK; +use reth_discv5::network_key; use reth_dns_discovery::DnsDiscoveryConfig; use reth_eth_wire::{HelloMessage, HelloMessageWithProtocols, Status}; use reth_primitives::{ - mainnet_nodes, pk2id, sepolia_nodes, ChainSpec, ForkFilter, Head, NodeRecord, PeerId, MAINNET, + mainnet_nodes, pk2id, sepolia_nodes, ChainSpec, ForkFilter, Head, NamedChain, NodeRecord, + PeerId, MAINNET, }; use reth_provider::{BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; @@ -121,15 +122,19 @@ impl NetworkConfig { ) -> Self { let rlpx_port = self.listener_addr.port(); let chain = self.chain_spec.chain; - let fork_id = 
self.status.forkid; + let fork_id = self.chain_spec.latest_fork_id(); let boot_nodes = self.boot_nodes.clone(); let mut builder = reth_discv5::Config::builder(rlpx_port).add_unsigned_boot_nodes(boot_nodes.into_iter()); - if chain.is_optimism() { - builder = builder.fork(OPSTACK, fork_id) + if chain.named() == Some(NamedChain::Mainnet) { + builder = builder.fork(network_key::ETH, fork_id) } + // todo: set op EL fork id + /*if chain.is_optimism() { + builder = builder.fork(network_key::, fork_id) + }*/ self.set_discovery_v5(f(builder)) } From 1f84c27c35b63a4cd44558292f992e4995da40d7 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 24 Apr 2024 12:35:21 -0400 Subject: [PATCH 305/700] chore: delete empty files (#7843) --- crates/optimism/node/src/evm/execute.rs | 744 ------------------------ crates/optimism/node/src/evm/mod.rs | 2 - 2 files changed, 746 deletions(-) delete mode 100644 crates/optimism/node/src/evm/execute.rs delete mode 100644 crates/optimism/node/src/evm/mod.rs diff --git a/crates/optimism/node/src/evm/execute.rs b/crates/optimism/node/src/evm/execute.rs deleted file mode 100644 index f51c6cd3bb794..0000000000000 --- a/crates/optimism/node/src/evm/execute.rs +++ /dev/null @@ -1,744 +0,0 @@ -//! Optimism block executor. 
- -use crate::OptimismEvmConfig; -use reth_evm::{ - execute::{ - BatchBlockOutput, BatchExecutor, EthBlockExecutionInput, EthBlockOutput, Executor, - ExecutorProvider, - }, - ConfigureEvm, ConfigureEvmEnv, -}; -use reth_interfaces::{ - executor::{BlockExecutionError, BlockValidationError, OptimismBlockExecutionError}, - provider::ProviderError, -}; -use reth_primitives::{ - proofs::calculate_receipt_root_optimism, BlockWithSenders, Bloom, Bytes, ChainSpec, - GotExpected, Hardfork, Header, PruneModes, Receipt, ReceiptWithBloom, Receipts, TxType, - Withdrawals, B256, U256, -}; -use reth_provider::BundleStateWithReceipts; -use reth_revm::{ - batch::{BlockBatchRecord, BlockExecutorStats}, - db::states::bundle_state::BundleRetention, - optimism::ensure_create2_deployer, - processor::compare_receipts_root_and_logs_bloom, - stack::InspectorStack, - state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, - Evm, State, -}; -use revm_primitives::{ - db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, -}; -use std::sync::Arc; -use tracing::{debug, trace}; - -/// Provides executors to execute regular ethereum blocks -#[derive(Debug, Clone)] -pub struct OpExecutorProvider { - chain_spec: Arc, - evm_config: EvmConfig, - inspector: Option, - prune_modes: PruneModes, -} - -impl OpExecutorProvider { - /// Creates a new default optimism executor provider. - pub fn optimism(chain_spec: Arc) -> Self { - Self::new(chain_spec, Default::default()) - } -} - -impl OpExecutorProvider { - /// Creates a new executor provider. - pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, evm_config, inspector: None, prune_modes: PruneModes::none() } - } - - /// Configures an optional inspector stack for debugging. - pub fn with_inspector(mut self, inspector: Option) -> Self { - self.inspector = inspector; - self - } - - /// Configures the prune modes for the executor. 
- pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { - self.prune_modes = prune_modes; - self - } -} - -impl OpExecutorProvider -where - EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, -{ - fn op_executor(&self, db: DB) -> OpBlockExecutor - where - DB: Database, - { - OpBlockExecutor::new( - self.chain_spec.clone(), - self.evm_config.clone(), - State::builder().with_database(db).with_bundle_update().without_state_clear().build(), - ) - .with_inspector(self.inspector.clone()) - } -} - -impl ExecutorProvider for OpExecutorProvider -where - EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, -{ - type Executor> = OpBlockExecutor; - - type BatchExecutor> = OpBatchExecutor; - fn executor(&self, db: DB) -> Self::Executor - where - DB: Database, - { - self.op_executor(db) - } - - fn batch_executor(&self, db: DB) -> Self::BatchExecutor - where - DB: Database, - { - let executor = self.op_executor(db); - OpBatchExecutor { - executor, - batch_record: BlockBatchRecord::new(self.prune_modes.clone()), - stats: BlockExecutorStats::default(), - } - } -} - -/// Helper container type for EVM with chain spec. -#[derive(Debug, Clone)] -struct OpEvmExecutor { - /// The chainspec - chain_spec: Arc, - /// How to create an EVM. - evm_config: EvmConfig, -} - -impl OpEvmExecutor -where - EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, -{ - /// Executes the transactions in the block and returns the receipts. - /// - /// This applies the pre-execution changes, and executes the transactions. - /// - /// # Note - /// - /// It does __not__ apply post-execution changes. 
- fn execute_pre_and_transactions( - &mut self, - block: &BlockWithSenders, - mut evm: Evm<'_, Ext, &mut State>, - ) -> Result<(Vec, u64), BlockExecutionError> - where - DB: Database, - { - // apply pre execution changes - apply_beacon_root_contract_call( - &self.chain_spec, - block.timestamp, - block.number, - block.parent_beacon_block_root, - &mut evm, - )?; - - // execute transactions - let is_regolith = - self.chain_spec.fork(Hardfork::Regolith).active_at_timestamp(block.timestamp); - - // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism - // blocks will always have at least a single transaction in them (the L1 info transaction), - // so we can safely assume that this will always be triggered upon the transition and that - // the above check for empty blocks will never be hit on OP chains. - ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()).map_err( - |_| { - BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::ForceCreate2DeployerFail, - ) - }, - )?; - - let mut cumulative_gas_used = 0; - let mut receipts = Vec::with_capacity(block.body.len()); - for (sender, transaction) in block.transactions_with_sender() { - // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, - // must be no greater than the block’s gasLimit. - let block_available_gas = block.header.gas_limit - cumulative_gas_used; - if transaction.gas_limit() > block_available_gas && - (is_regolith || !transaction.is_system_transaction()) - { - return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: transaction.gas_limit(), - block_available_gas, - } - .into()) - } - - // An optimism block should never contain blob transactions. 
- if matches!(transaction.tx_type(), TxType::Eip4844) { - return Err(BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::BlobTransactionRejected, - )) - } - - // Cache the depositor account prior to the state transition for the deposit nonce. - // - // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces - // were not introduced in Bedrock. In addition, regular transactions don't have deposit - // nonces, so we don't need to touch the DB for those. - let depositor = (is_regolith && transaction.is_deposit()) - .then(|| { - evm.db_mut() - .load_cache_account(*sender) - .map(|acc| acc.account_info().unwrap_or_default()) - }) - .transpose() - .map_err(|_| { - BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::AccountLoadFailed(*sender), - ) - })?; - - let mut buf = Vec::with_capacity(transaction.length_without_header()); - transaction.encode_enveloped(&mut buf); - EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender, buf.into()); - - // Execute transaction. - let ResultAndState { result, state } = evm.transact().map_err(move |err| { - // Ensure hash is calculated for error log, if not already done - BlockValidationError::EVM { - hash: transaction.recalculate_hash(), - error: err.into(), - } - })?; - - trace!( - target: "evm", - ?transaction, - "Executed transaction" - ); - - evm.db_mut().commit(state); - - // append gas used - cumulative_gas_used += result.gas_used(); - - // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push(Receipt { - tx_type: transaction.tx_type(), - // Success flag was added in `EIP-658: Embedding transaction status code in - // receipts`. - success: result.is_success(), - cumulative_gas_used, - logs: result.into_logs(), - deposit_nonce: depositor.map(|account| account.nonce), - // The deposit receipt version was introduced in Canyon to indicate an update to how - // receipt hashes should be computed when set. 
The state transition process ensures - // this is only set for post-Canyon deposit transactions. - deposit_receipt_version: (transaction.is_deposit() && - self.chain_spec - .is_fork_active_at_timestamp(Hardfork::Canyon, block.timestamp)) - .then_some(1), - }); - } - drop(evm); - - // Check if gas used matches the value set in header. - if block.gas_used != cumulative_gas_used { - let receipts = Receipts::from_block_receipt(receipts); - return Err(BlockValidationError::BlockGasUsed { - gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, - gas_spent_by_tx: receipts.gas_spent_by_tx()?, - } - .into()) - } - - Ok((receipts, cumulative_gas_used)) - } -} - -/// A basic Ethereum block executor. -/// -/// Expected usage: -/// - Create a new instance of the executor. -/// - Execute the block. -#[derive(Debug)] -pub struct OpBlockExecutor { - /// Chain specific evm config that's used to execute a block. - executor: OpEvmExecutor, - /// The state to use for execution - state: State, - /// Optional inspector stack for debugging - inspector: Option, -} - -impl OpBlockExecutor { - /// Creates a new Ethereum block executor. - pub fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { - Self { executor: OpEvmExecutor { chain_spec, evm_config }, state, inspector: None } - } - - /// Sets the inspector stack for debugging. - pub fn with_inspector(mut self, inspector: Option) -> Self { - self.inspector = inspector; - self - } - - #[inline] - fn chain_spec(&self) -> &ChainSpec { - &self.executor.chain_spec - } - - /// Returns mutable reference to the state that wraps the underlying database. - #[allow(unused)] - fn state_mut(&mut self) -> &mut State { - &mut self.state - } -} - -impl OpBlockExecutor -where - EvmConfig: ConfigureEvm, - // TODO(mattsse): get rid of this - EvmConfig: ConfigureEvmEnv, - DB: Database, -{ - /// Configures a new evm configuration and block environment for the given block. 
- /// - /// Caution: this does not initialize the tx environment. - fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - EvmConfig::fill_cfg_and_block_env( - &mut cfg, - &mut block_env, - self.chain_spec(), - header, - total_difficulty, - ); - - EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) - } - - /// Execute a single block and apply the state changes to the internal state. - /// - /// Returns the receipts of the transactions in the block and the total gas used. - /// - /// Returns an error if execution fails or receipt verification fails. - fn execute_and_verify( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec, u64), BlockExecutionError> { - // 1. prepare state on new block - self.on_new_block(&block.header); - - // 2. configure the evm and execute - let env = self.evm_env_for_block(&block.header, total_difficulty); - - let (receipts, gas_used) = { - if let Some(inspector) = self.inspector.as_mut() { - let evm = self.executor.evm_config.evm_with_env_and_inspector( - &mut self.state, - env, - inspector, - ); - self.executor.execute_pre_and_transactions(block, evm)? - } else { - let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - - self.executor.execute_pre_and_transactions(block, evm)? - } - }; - - // 3. apply post execution changes - self.post_execution(block, total_difficulty)?; - - // Before Byzantium, receipts contained state root that would mean that expensive - // operation as hashing that is required for state root got calculated in every - // transaction This was replaced with is_success flag. 
- // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 - if self.chain_spec().is_byzantium_active_at_block(block.header.number) { - if let Err(error) = verify_receipt_optimism( - block.header.receipts_root, - block.header.logs_bloom, - receipts.iter(), - self.chain_spec(), - block.timestamp, - ) { - debug!(target: "evm", %error, ?receipts, "receipts verification failed"); - return Err(error) - }; - } - - Ok((receipts, gas_used)) - } - - /// Apply settings before a new block is executed. - pub(crate) fn on_new_block(&mut self, header: &Header) { - // Set state clear flag if the block is after the Spurious Dragon hardfork. - let state_clear_flag = self.chain_spec().is_spurious_dragon_active_at_block(header.number); - self.state.set_state_clear_flag(state_clear_flag); - } - - /// Apply post execution state changes, including block rewards, withdrawals, and irregular DAO - /// hardfork state change. - pub fn post_execution( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { - let balance_increments = post_block_balance_increments( - self.chain_spec(), - block.number, - block.difficulty, - block.beneficiary, - block.timestamp, - total_difficulty, - &block.ommers, - block.withdrawals.as_ref().map(Withdrawals::as_ref), - ); - // increment balances - self.state - .increment_balances(balance_increments) - .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; - - Ok(()) - } -} - -impl Executor for OpBlockExecutor -where - EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, - DB: Database, -{ - type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; - type Output = EthBlockOutput; - type Error = BlockExecutionError; - - /// Executes the block and commits the state changes. - /// - /// Returns the receipts of the transactions in the block. - /// - /// Returns an error if the block could not be executed or failed verification. - /// - /// State changes are committed to the database. 
- fn execute(mut self, input: Self::Input<'_>) -> Result { - let EthBlockExecutionInput { block, total_difficulty } = input; - let (receipts, gas_used) = self.execute_and_verify(block, total_difficulty)?; - - // prepare the state for extraction - self.state.merge_transitions(BundleRetention::PlainState); - - Ok(EthBlockOutput { state: self.state.take_bundle(), receipts, gas_used }) - } -} - -/// An executor for a batch of blocks. -/// -/// State changes are tracked until the executor is finalized. -#[derive(Debug)] -pub struct OpBatchExecutor { - /// The executor used to execute blocks. - executor: OpBlockExecutor, - /// Keeps track of the batch and record receipts based on the configured prune mode - batch_record: BlockBatchRecord, - stats: BlockExecutorStats, -} - -impl OpBatchExecutor { - /// Returns the receipts of the executed blocks. - pub fn receipts(&self) -> &Receipts { - self.batch_record.receipts() - } - - /// Returns mutable reference to the state that wraps the underlying database. 
- #[allow(unused)] - fn state_mut(&mut self) -> &mut State { - self.executor.state_mut() - } -} - -impl BatchExecutor for OpBatchExecutor -where - EvmConfig: ConfigureEvm, - // TODO: get rid of this - EvmConfig: ConfigureEvmEnv, - DB: Database, -{ - type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; - type Output = BundleStateWithReceipts; - type Error = BlockExecutionError; - - fn execute_one(&mut self, input: Self::Input<'_>) -> Result { - let EthBlockExecutionInput { block, total_difficulty } = input; - let (receipts, _gas_used) = self.executor.execute_and_verify(block, total_difficulty)?; - - // prepare the state according to the prune mode - let retention = self.batch_record.bundle_retention(block.number); - self.executor.state.merge_transitions(retention); - - // store receipts in the set - self.batch_record.save_receipts(receipts)?; - - Ok(BatchBlockOutput { size_hint: Some(self.executor.state.bundle_size_hint()) }) - } - - fn finalize(mut self) -> Self::Output { - // TODO: track stats - self.stats.log_debug(); - - BundleStateWithReceipts::new( - self.executor.state.take_bundle(), - self.batch_record.take_receipts(), - self.batch_record.first_block().unwrap_or_default(), - ) - } -} - -/// Verify the calculated receipts root against the expected receipts root. -pub fn verify_receipt_optimism<'a>( - expected_receipts_root: B256, - expected_logs_bloom: Bloom, - receipts: impl Iterator + Clone, - chain_spec: &ChainSpec, - timestamp: u64, -) -> Result<(), BlockExecutionError> { - // Calculate receipts root. - let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); - let receipts_root = - calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); - - // Create header log bloom. 
- let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); - - compare_receipts_root_and_logs_bloom( - receipts_root, - logs_bloom, - expected_receipts_root, - expected_logs_bloom, - )?; - - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - use reth_primitives::{ - b256, Account, Address, Block, ChainSpecBuilder, Signature, StorageKey, StorageValue, - Transaction, TransactionKind, TransactionSigned, TxEip1559, BASE_MAINNET, - }; - use reth_revm::database::StateProviderDatabase; - use revm::L1_BLOCK_CONTRACT; - use std::{collections::HashMap, str::FromStr}; - - use reth_revm::test_utils::StateProviderTest; - - fn create_op_state_provider() -> StateProviderTest { - let mut db = StateProviderTest::default(); - - let l1_block_contract_account = - Account { balance: U256::ZERO, bytecode_hash: None, nonce: 1 }; - - let mut l1_block_storage = HashMap::new(); - // base fee - l1_block_storage.insert(StorageKey::with_last_byte(1), StorageValue::from(1000000000)); - // l1 fee overhead - l1_block_storage.insert(StorageKey::with_last_byte(5), StorageValue::from(188)); - // l1 fee scalar - l1_block_storage.insert(StorageKey::with_last_byte(6), StorageValue::from(684000)); - // l1 free scalars post ecotone - l1_block_storage.insert( - StorageKey::with_last_byte(3), - StorageValue::from_str( - "0x0000000000000000000000000000000000001db0000d27300000000000000005", - ) - .unwrap(), - ); - - db.insert_account(L1_BLOCK_CONTRACT, l1_block_contract_account, None, l1_block_storage); - - db - } - - fn executor_provider(chain_spec: Arc) -> OpExecutorProvider { - OpExecutorProvider { - chain_spec, - evm_config: Default::default(), - inspector: None, - prune_modes: Default::default(), - } - } - - #[test] - fn op_deposit_fields_pre_canyon() { - let header = Header { - timestamp: 1, - number: 1, - gas_limit: 1_000_000, - gas_used: 42_000, - receipts_root: b256!( - "83465d1e7d01578c0d609be33570f91242f013e9e295b0879905346abbd63731" - ), - 
..Default::default() - }; - - let mut db = create_op_state_provider(); - - let addr = Address::ZERO; - let account = Account { balance: U256::MAX, ..Account::default() }; - db.insert_account(addr, account, None, HashMap::new()); - - let chain_spec = - Arc::new(ChainSpecBuilder::from(&*BASE_MAINNET).regolith_activated().build()); - - let tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(TxEip1559 { - chain_id: chain_spec.chain.id(), - nonce: 0, - gas_limit: 21_000, - to: TransactionKind::Call(addr), - ..Default::default() - }), - Signature::default(), - ); - - let tx_deposit = TransactionSigned::from_transaction_and_signature( - Transaction::Deposit(reth_primitives::TxDeposit { - from: addr, - to: TransactionKind::Call(addr), - gas_limit: 21_000, - ..Default::default() - }), - Signature::default(), - ); - - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); - - // Attempt to execute a block with one deposit and one non-deposit transaction - executor - .execute_one( - ( - &BlockWithSenders { - block: Block { - header, - body: vec![tx, tx_deposit], - ommers: vec![], - withdrawals: None, - }, - senders: vec![addr, addr], - }, - U256::ZERO, - ) - .into(), - ) - .unwrap(); - - let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); - let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); - - // deposit_receipt_version is not present in pre canyon transactions - assert!(deposit_receipt.deposit_receipt_version.is_none()); - assert!(tx_receipt.deposit_receipt_version.is_none()); - - // deposit_nonce is present only in deposit transactions - assert!(deposit_receipt.deposit_nonce.is_some()); - assert!(tx_receipt.deposit_nonce.is_none()); - } - - #[test] - fn op_deposit_fields_post_canyon() { - // ensure_create2_deployer will fail if timestamp is set to less then 2 - let header = Header 
{ - timestamp: 2, - number: 1, - gas_limit: 1_000_000, - gas_used: 42_000, - receipts_root: b256!( - "fffc85c4004fd03c7bfbe5491fae98a7473126c099ac11e8286fd0013f15f908" - ), - ..Default::default() - }; - - let mut db = create_op_state_provider(); - let addr = Address::ZERO; - let account = Account { balance: U256::MAX, ..Account::default() }; - - db.insert_account(addr, account, None, HashMap::new()); - - let chain_spec = - Arc::new(ChainSpecBuilder::from(&*BASE_MAINNET).canyon_activated().build()); - - let tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(TxEip1559 { - chain_id: chain_spec.chain.id(), - nonce: 0, - gas_limit: 21_000, - to: TransactionKind::Call(addr), - ..Default::default() - }), - Signature::default(), - ); - - let tx_deposit = TransactionSigned::from_transaction_and_signature( - Transaction::Deposit(reth_primitives::TxDeposit { - from: addr, - to: TransactionKind::Call(addr), - gas_limit: 21_000, - ..Default::default() - }), - Signature::optimism_deposit_tx_signature(), - ); - - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); - - // attempt to execute an empty block with parent beacon block root, this should not fail - executor - .execute_one( - ( - &BlockWithSenders { - block: Block { - header, - body: vec![tx, tx_deposit], - ommers: vec![], - withdrawals: None, - }, - senders: vec![addr, addr], - }, - U256::ZERO, - ) - .into(), - ) - .expect("Executing a block while canyon is active should not fail"); - - let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); - let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); - - // deposit_receipt_version is set to 1 for post canyon deposit transactions - assert_eq!(deposit_receipt.deposit_receipt_version, Some(1)); - assert!(tx_receipt.deposit_receipt_version.is_none()); - - // deposit_nonce is present only 
in deposit transactions - assert!(deposit_receipt.deposit_nonce.is_some()); - assert!(tx_receipt.deposit_nonce.is_none()); - } -} diff --git a/crates/optimism/node/src/evm/mod.rs b/crates/optimism/node/src/evm/mod.rs deleted file mode 100644 index 139597f9cb07c..0000000000000 --- a/crates/optimism/node/src/evm/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ - - From 784d8dc597fa8eebf1ef45b7dcfa9374ea94e669 Mon Sep 17 00:00:00 2001 From: Darshan Kathiriya <8559992+lakshya-sky@users.noreply.github.com> Date: Wed, 24 Apr 2024 14:19:33 -0400 Subject: [PATCH 306/700] refactor: replace OP error variant with general purpose error (#7844) Co-authored-by: Oliver Nordbjerg --- crates/optimism/node/src/rpc.rs | 3 ++- crates/rpc/rpc-engine-api/src/error.rs | 26 ++++++++++++-------------- crates/rpc/rpc-types/src/eth/error.rs | 9 +++++++++ crates/rpc/rpc-types/src/eth/mod.rs | 1 + crates/rpc/rpc-types/src/lib.rs | 1 + crates/rpc/rpc-types/src/mev.rs | 2 +- crates/rpc/rpc/src/eth/error.rs | 10 +++------- crates/rpc/rpc/src/eth/optimism.rs | 6 ++---- 8 files changed, 31 insertions(+), 27 deletions(-) create mode 100644 crates/rpc/rpc-types/src/eth/error.rs diff --git a/crates/optimism/node/src/rpc.rs b/crates/optimism/node/src/rpc.rs index 66eb824505e3e..25a399e1859e2 100644 --- a/crates/optimism/node/src/rpc.rs +++ b/crates/optimism/node/src/rpc.rs @@ -3,9 +3,10 @@ use jsonrpsee::types::ErrorObject; use reqwest::Client; use reth_rpc::eth::{ - error::{EthApiError, EthResult, ToRpcError}, + error::{EthApiError, EthResult}, traits::RawTransactionForwarder, }; +use reth_rpc_types::ToRpcError; use std::sync::{atomic::AtomicUsize, Arc}; /// Error type when interacting with the Sequencer diff --git a/crates/rpc/rpc-engine-api/src/error.rs b/crates/rpc/rpc-engine-api/src/error.rs index 01b99a54f6bc1..57318d0d665e8 100644 --- a/crates/rpc/rpc-engine-api/src/error.rs +++ b/crates/rpc/rpc-engine-api/src/error.rs @@ -5,6 +5,7 @@ use reth_beacon_consensus::{BeaconForkChoiceUpdateError, 
BeaconOnNewPayloadError use reth_engine_primitives::EngineObjectValidationError; use reth_payload_builder::error::PayloadBuilderError; use reth_primitives::{B256, U256}; +use reth_rpc_types::ToRpcError; use thiserror::Error; /// The Engine API result type @@ -86,11 +87,16 @@ pub enum EngineApiError { /// The payload or attributes are known to be malformed before processing. #[error(transparent)] EngineObjectValidationError(#[from] EngineObjectValidationError), - /// If the optimism feature flag is enabled, the payload attributes must have a present - /// gas limit for the forkchoice updated method. - #[cfg(feature = "optimism")] - #[error("Missing gas limit in payload attributes")] - MissingGasLimitInPayloadAttributes, + /// Any other error + #[error("{0}")] + Other(Box), +} + +impl EngineApiError { + /// Crates a new [EngineApiError::Other] variant. + pub fn other(err: E) -> Self { + Self::Other(Box::new(err)) + } } /// Helper type to represent the `error` field in the error response: @@ -188,15 +194,6 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { ) } }, - // Optimism errors - #[cfg(feature = "optimism")] - EngineApiError::MissingGasLimitInPayloadAttributes => { - jsonrpsee_types::error::ErrorObject::owned( - INVALID_PARAMS_CODE, - INVALID_PARAMS_MSG, - Some(ErrorData::new(error)), - ) - } // Any other server error EngineApiError::TerminalTD { .. } | EngineApiError::TerminalBlockHash { .. } | @@ -206,6 +203,7 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { SERVER_ERROR_MSG, Some(ErrorData::new(error)), ), + EngineApiError::Other(err) => err.to_rpc_error(), } } } diff --git a/crates/rpc/rpc-types/src/eth/error.rs b/crates/rpc/rpc-types/src/eth/error.rs new file mode 100644 index 0000000000000..e8d55b0874c2f --- /dev/null +++ b/crates/rpc/rpc-types/src/eth/error.rs @@ -0,0 +1,9 @@ +//! Implementation specific Errors for the `eth_` namespace. + +use jsonrpsee_types::ErrorObject; + +/// A tait to convert an error to an RPC error. 
+pub trait ToRpcError: std::error::Error + Send + Sync + 'static { + /// Converts the error to a JSON-RPC error object. + fn to_rpc_error(&self) -> ErrorObject<'static>; +} diff --git a/crates/rpc/rpc-types/src/eth/mod.rs b/crates/rpc/rpc-types/src/eth/mod.rs index dd36e7fd5cdd3..6313dbeedb29e 100644 --- a/crates/rpc/rpc-types/src/eth/mod.rs +++ b/crates/rpc/rpc-types/src/eth/mod.rs @@ -1,5 +1,6 @@ //! Ethereum related types +pub(crate) mod error; pub mod transaction; // re-export diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs index 964144ed65bfc..68ad11c6ebdfe 100644 --- a/crates/rpc/rpc-types/src/lib.rs +++ b/crates/rpc/rpc-types/src/lib.rs @@ -37,6 +37,7 @@ pub use eth::{ engine::{ ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, PayloadError, }, + error::ToRpcError, transaction::{self, TransactionKind, TransactionRequest, TypedTransactionRequest}, }; diff --git a/crates/rpc/rpc-types/src/mev.rs b/crates/rpc/rpc-types/src/mev.rs index 2137e1ecf246d..ae94375dbc296 100644 --- a/crates/rpc/rpc-types/src/mev.rs +++ b/crates/rpc/rpc-types/src/mev.rs @@ -706,7 +706,7 @@ mod u256_numeric_string { match val { serde_json::Value::String(s) => { if let Ok(val) = s.parse::() { - return Ok(U256::from(val)) + return Ok(U256::from(val)); } U256::from_str(&s).map_err(de::Error::custom) } diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index d8add639726ab..75fbcc220bb1c 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -6,7 +6,9 @@ use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObject}; use reth_interfaces::RethError; use reth_primitives::{revm_primitives::InvalidHeader, Address, Bytes, U256}; use reth_revm::tracing::{js::JsInspectorError, MuxError}; -use reth_rpc_types::{error::EthRpcErrorCode, request::TransactionInputError, BlockError}; +use reth_rpc_types::{ + error::EthRpcErrorCode, request::TransactionInputError, BlockError, 
ToRpcError, +}; use reth_transaction_pool::error::{ Eip4844PoolTransactionError, InvalidPoolTransactionError, PoolError, PoolErrorKind, PoolTransactionError, @@ -17,12 +19,6 @@ use std::time::Duration; /// Result alias pub type EthResult = Result; -/// A tait for custom rpc errors used by [EthApiError::Other]. -pub trait ToRpcError: std::error::Error + Send + Sync + 'static { - /// Converts the error to a JSON-RPC error object. - fn to_rpc_error(&self) -> ErrorObject<'static>; -} - /// Errors that can occur when interacting with the `eth_` namespace #[derive(Debug, thiserror::Error)] pub enum EthApiError { diff --git a/crates/rpc/rpc/src/eth/optimism.rs b/crates/rpc/rpc/src/eth/optimism.rs index 2871058f80dd6..24f6f36ff4635 100644 --- a/crates/rpc/rpc/src/eth/optimism.rs +++ b/crates/rpc/rpc/src/eth/optimism.rs @@ -1,11 +1,9 @@ //! Optimism specific types. use jsonrpsee::types::ErrorObject; +use reth_rpc_types::ToRpcError; -use crate::{ - eth::error::{EthApiError, ToRpcError}, - result::internal_rpc_err, -}; +use crate::{eth::error::EthApiError, result::internal_rpc_err}; /// Eth Optimism Api Error #[cfg(feature = "optimism")] From 90da3205de4e7353848bd21cfe7832a0b56d3a50 Mon Sep 17 00:00:00 2001 From: greged93 <82421016+greged93@users.noreply.github.com> Date: Wed, 24 Apr 2024 20:29:19 +0200 Subject: [PATCH 307/700] refactor: remove unused map_err (#7837) Co-authored-by: Oliver Nordbjerg --- crates/rpc/rpc/src/debug.rs | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 500f786d35137..b212d1636df6d 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -101,24 +101,16 @@ where env: Env::boxed(cfg.cfg_env.clone(), block_env.clone(), tx), handler_cfg: cfg.handler_cfg, }; - let (result, state_changes) = this - .trace_transaction( - opts.clone(), - env, - &mut db, - Some(TransactionContext { - block_hash, - tx_hash: Some(tx_hash), - 
tx_index: Some(index), - }), - ) - .map_err(|err| { - results.push(TraceResult::Error { - error: err.to_string(), - tx_hash: Some(tx_hash), - }); - err - })?; + let (result, state_changes) = this.trace_transaction( + opts.clone(), + env, + &mut db, + Some(TransactionContext { + block_hash, + tx_hash: Some(tx_hash), + tx_index: Some(index), + }), + )?; results.push(TraceResult::Success { result, tx_hash: Some(tx_hash) }); if transactions.peek().is_some() { From 66c6cbc573505d7d6dd55cc54068e89e8fc97898 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 24 Apr 2024 19:34:11 +0100 Subject: [PATCH 308/700] feat(exex): do not log ID on ExEx start (#7846) --- crates/node-builder/src/builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index 327d906b34848..28e447775ee7b 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -630,7 +630,7 @@ where // spawn it as a crit task executor.spawn_critical("exex", async move { - info!(target: "reth::cli", id, "ExEx started"); + info!(target: "reth::cli", "ExEx started"); match exex.await { Ok(_) => panic!("ExEx {id} finished. 
ExEx's should run indefinitely"), Err(err) => panic!("ExEx {id} crashed: {err}"), From ddc5ed326390ba30fd3382bff7df0da77d99da20 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Wed, 24 Apr 2024 20:36:19 +0200 Subject: [PATCH 309/700] fix(rpc): correct `Other` error msg (#7845) --- crates/rpc/rpc/src/eth/error.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 75fbcc220bb1c..203b5bbd70e5a 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -115,7 +115,7 @@ pub enum EthApiError { #[error(transparent)] MuxTracerError(#[from] MuxError), /// Any other error - #[error("0")] + #[error("{0}")] Other(Box), } From 659059c67fc8c3bd562b0815d5e12af4c29adad0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 24 Apr 2024 22:18:07 +0200 Subject: [PATCH 310/700] feat: split nodebuilder generics into separate states (#7847) --- bin/reth/src/cli/mod.rs | 4 +- bin/reth/src/commands/node/mod.rs | 4 +- crates/e2e-test-utils/src/lib.rs | 8 +- crates/node-builder/src/builder.rs | 1425 ----------------- crates/node-builder/src/builder/mod.rs | 619 +++++++ crates/node-builder/src/builder/states.rs | 237 +++ crates/node-builder/src/components/builder.rs | 30 +- crates/node-builder/src/components/mod.rs | 56 +- crates/node-builder/src/launch.rs | 558 +++++++ crates/node-builder/src/lib.rs | 3 + crates/node-core/src/node_config.rs | 2 +- crates/node/api/src/node.rs | 84 - 12 files changed, 1492 insertions(+), 1538 deletions(-) delete mode 100644 crates/node-builder/src/builder.rs create mode 100644 crates/node-builder/src/builder/mod.rs create mode 100644 crates/node-builder/src/builder/states.rs create mode 100644 crates/node-builder/src/launch.rs diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index d511d7182ab66..34fd09456beb0 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -14,7 +14,7 @@ use crate::{ use 
clap::{value_parser, Parser, Subcommand}; use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; -use reth_node_builder::{InitState, WithLaunchContext}; +use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_primitives::ChainSpec; use reth_tracing::FileWorkerGuard; use std::{ffi::OsString, fmt, future::Future, sync::Arc}; @@ -130,7 +130,7 @@ impl Cli { /// ```` pub fn run(mut self, launcher: L) -> eyre::Result<()> where - L: FnOnce(WithLaunchContext, InitState>, Ext) -> Fut, + L: FnOnce(WithLaunchContext>>, Ext) -> Fut, Fut: Future>, { // add network name to logs dir diff --git a/bin/reth/src/commands/node/mod.rs b/bin/reth/src/commands/node/mod.rs index 3491304865342..5f95c534d2a7d 100644 --- a/bin/reth/src/commands/node/mod.rs +++ b/bin/reth/src/commands/node/mod.rs @@ -11,7 +11,7 @@ use crate::{ use clap::{value_parser, Args, Parser}; use reth_cli_runner::CliContext; use reth_db::{init_db, DatabaseEnv}; -use reth_node_builder::{InitState, NodeBuilder, WithLaunchContext}; +use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{node_config::NodeConfig, version}; use reth_primitives::ChainSpec; use std::{ffi::OsString, fmt, future::Future, net::SocketAddr, path::PathBuf, sync::Arc}; @@ -136,7 +136,7 @@ impl NodeCommand { /// closure. 
pub async fn execute(self, ctx: CliContext, launcher: L) -> eyre::Result<()> where - L: FnOnce(WithLaunchContext, InitState>, Ext) -> Fut, + L: FnOnce(WithLaunchContext>>, Ext) -> Fut, Fut: Future>, { tracing::info!(target: "reth::cli", version = ?version::SHORT_VERSION, "Starting reth"); diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 8e57eebed729f..8fdaa044b9012 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -6,8 +6,8 @@ use reth::{ }; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_node_builder::{ - components::{NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, - FullNodeComponentsAdapter, FullNodeTypesAdapter, + components::{Components, NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, + FullNodeTypesAdapter, NodeAdapter, }; use reth_primitives::ChainSpec; use reth_provider::providers::BlockchainProvider; @@ -59,7 +59,7 @@ where }; // Create nodes and peer them - let mut nodes: Vec> = Vec::with_capacity(num_nodes); + let mut nodes: Vec> = Vec::with_capacity(num_nodes); for idx in 0..num_nodes { let mut node_config = NodeConfig::test() @@ -110,4 +110,4 @@ type TmpNodeAdapter = FullNodeTypesAdapter = - NodeTestContext, TmpPool>>; + NodeTestContext, Components, TmpPool>>>; diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs deleted file mode 100644 index 28e447775ee7b..0000000000000 --- a/crates/node-builder/src/builder.rs +++ /dev/null @@ -1,1425 +0,0 @@ -//! Customizable node builder. 
- -#![allow(clippy::type_complexity, missing_debug_implementations)] - -use crate::{ - components::{ComponentsBuilder, NodeComponents, NodeComponentsBuilder, PoolBuilder}, - exex::BoxedLaunchExEx, - hooks::NodeHooks, - node::FullNode, - rpc::{RethRpcServerHandles, RpcContext, RpcHooks}, - Node, NodeHandle, -}; -use eyre::Context; -use futures::{future, future::Either, stream, stream_select, Future, StreamExt}; -use rayon::ThreadPoolBuilder; -use reth_auto_seal_consensus::{AutoSealConsensus, MiningMode}; -use reth_beacon_consensus::{ - hooks::{EngineHooks, PruneHook, StaticFileHook}, - BeaconConsensus, BeaconConsensusEngine, -}; -use reth_blockchain_tree::{ - BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, -}; -use reth_config::config::EtlConfig; -use reth_consensus::Consensus; -use reth_db::{ - database::Database, - database_metrics::{DatabaseMetadata, DatabaseMetrics}, - test_utils::{create_test_rw_db, TempDatabase}, - DatabaseEnv, -}; -use reth_exex::{ExExContext, ExExHandle, ExExManager, ExExManagerHandle}; -use reth_interfaces::p2p::either::EitherDownloader; -use reth_network::{NetworkBuilder, NetworkConfig, NetworkEvents, NetworkHandle}; -use reth_node_api::{ - FullNodeComponents, FullNodeComponentsAdapter, FullNodeTypes, FullNodeTypesAdapter, NodeTypes, -}; -use reth_node_core::{ - cli::config::{PayloadBuilderConfig, RethRpcConfig, RethTransactionPoolConfig}, - dirs::{ChainPath, DataDirPath, MaybePlatformPath}, - engine_api_store::EngineApiStore, - engine_skip_fcu::EngineApiSkipFcu, - exit::NodeExitFuture, - init::init_genesis, - node_config::NodeConfig, - primitives::{kzg::KzgSettings, Head}, - utils::write_peers_to_file, -}; -use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; -use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, format_ether, ChainSpec}; -use reth_provider::{ - providers::BlockchainProvider, CanonStateSubscriptions, ChainSpecProvider, ProviderFactory, -}; -use 
reth_prune::PrunerBuilder; -use reth_revm::EvmProcessorFactory; -use reth_rpc_engine_api::EngineApi; -use reth_static_file::StaticFileProducer; -use reth_tasks::TaskExecutor; -use reth_tracing::tracing::{debug, error, info}; -use reth_transaction_pool::{PoolConfig, TransactionPool}; -use std::{cmp::max, str::FromStr, sync::Arc, thread::available_parallelism}; -use tokio::sync::{mpsc::unbounded_channel, oneshot}; - -/// The builtin provider type of the reth node. -// Note: we need to hardcode this because custom components might depend on it in associated types. -type RethFullProviderType = BlockchainProvider; - -type RethFullAdapter = FullNodeTypesAdapter>; - -#[cfg_attr(doc, aquamarine::aquamarine)] -/// Declaratively construct a node. -/// -/// [`NodeBuilder`] provides a [builder-like interface][builder] for composing -/// components of a node. -/// -/// ## Order -/// -/// Configuring a node starts out with a [`NodeConfig`] (this can be obtained from cli arguments for -/// example) and then proceeds to configure the core static types of the node: [NodeTypes], these -/// include the node's primitive types and the node's engine types. -/// -/// Next all stateful components of the node are configured, these include the -/// [ConfigureEvm](reth_node_api::evm::ConfigureEvm), the database [Database] and all the -/// components of the node that are downstream of those types, these include: -/// -/// - The transaction pool: [PoolBuilder] -/// - The network: [NetworkBuilder](crate::components::NetworkBuilder) -/// - The payload builder: [PayloadBuilder](crate::components::PayloadServiceBuilder) -/// -/// Once all the components are configured, the node is ready to be launched. -/// -/// On launch the builder returns a fully type aware [NodeHandle] that has access to all the -/// configured components and can interact with the node. 
-/// -/// There are convenience functions for networks that come with a preset of types and components via -/// the [Node] trait, see `reth_node_ethereum::EthereumNode` or `reth_node_optimism::OptimismNode`. -/// -/// The [NodeBuilder::node] function configures the node's types and components in one step. -/// -/// ## Components -/// -/// All components are configured with a [NodeComponentsBuilder] that is responsible for actually -/// creating the node components during the launch process. The [ComponentsBuilder] is a general -/// purpose implementation of the [NodeComponentsBuilder] trait that can be used to configure the -/// network, transaction pool and payload builder of the node. It enforces the correct order of -/// configuration, for example the network and the payload builder depend on the transaction pool -/// type that is configured first. -/// -/// All builder traits are generic over the node types and are invoked with the [BuilderContext] -/// that gives access to internals of the that are needed to configure the components. This include -/// the original config, chain spec, the database provider and the task executor, -/// -/// ## Hooks -/// -/// Once all the components are configured, the builder can be used to set hooks that are run at -/// specific points in the node's lifecycle. This way custom services can be spawned before the node -/// is launched [NodeBuilder::on_component_initialized], or once the rpc server(s) are launched -/// [NodeBuilder::on_rpc_started]. The [NodeBuilder::extend_rpc_modules] can be used to inject -/// custom rpc modules into the rpc server before it is launched. See also [RpcContext] -/// All hooks accept a closure that is then invoked at the appropriate time in the node's launch -/// process. 
-/// -/// ## Flow -/// -/// The [NodeBuilder] is intended to sit behind a CLI that provides the necessary [NodeConfig] -/// input: [NodeBuilder::new] -/// -/// From there the builder is configured with the node's types, components, and hooks, then launched -/// with the [NodeBuilder::launch] method. On launch all the builtin internals, such as the -/// `Database` and its providers [BlockchainProvider] are initialized before the configured -/// [NodeComponentsBuilder] is invoked with the [BuilderContext] to create the transaction pool, -/// network, and payload builder components. When the RPC is configured, the corresponding hooks are -/// invoked to allow for custom rpc modules to be injected into the rpc server: -/// [NodeBuilder::extend_rpc_modules] -/// -/// Finally all components are created and all services are launched and a [NodeHandle] is returned -/// that can be used to interact with the node: [FullNode] -/// -/// The following diagram shows the flow of the node builder from CLI to a launched node. -/// -/// include_mmd!("docs/mermaid/builder.mmd") -/// -/// ## Internals -/// -/// The node builder is fully type safe, it uses the [NodeTypes] trait to enforce that all -/// components are configured with the correct types. However the database types and with that the -/// provider trait implementations are currently created by the builder itself during the launch -/// process, hence the database type is not part of the [NodeTypes] trait and the node's components, -/// that depend on the database, are configured separately. In order to have a nice trait that -/// encapsulates the entire node the [FullNodeComponents] trait was introduced. This trait has -/// convenient associated types for all the components of the node. After [NodeBuilder::launch] the -/// [NodeHandle] contains an instance of [FullNode] that implements the [FullNodeComponents] trait -/// and has access to all the components of the node. 
Internally the node builder uses several -/// generic adapter types that are then map to traits with associated types for ease of use. -/// -/// ### Limitations -/// -/// Currently the launch process is limited to ethereum nodes and requires all the components -/// specified above. It also expect beacon consensus with the ethereum engine API that is configured -/// by the builder itself during launch. This might change in the future. -/// -/// [builder]: https://doc.rust-lang.org/1.0.0/style/ownership/builders.html -pub struct NodeBuilder { - /// All settings for how the node should be configured. - config: NodeConfig, - /// State of the node builder process. - state: State, - /// The configured database for the node. - database: DB, -} - -impl NodeBuilder { - /// Returns a reference to the node builder's config. - pub fn config(&self) -> &NodeConfig { - &self.config - } - - /// Loads the reth config with the given datadir root - fn load_config(&self, data_dir: &ChainPath) -> eyre::Result { - let config_path = self.config.config.clone().unwrap_or_else(|| data_dir.config_path()); - - let mut config = confy::load_path::(&config_path) - .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?; - - info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); - - // Update the config with the command line arguments - config.peers.trusted_nodes_only = self.config.network.trusted_only; - - if !self.config.network.trusted_peers.is_empty() { - info!(target: "reth::cli", "Adding trusted nodes"); - self.config.network.trusted_peers.iter().for_each(|peer| { - config.peers.trusted_nodes.insert(*peer); - }); - } - - Ok(config) - } -} - -impl NodeBuilder<(), InitState> { - /// Create a new [`NodeBuilder`]. - pub fn new(config: NodeConfig) -> Self { - Self { config, database: (), state: InitState::default() } - } -} - -impl NodeBuilder { - /// Configures the underlying database that the node will use. 
- pub fn with_database(self, database: D) -> NodeBuilder { - NodeBuilder { config: self.config, state: self.state, database } - } - - /// Preconfigure the builder with the context to launch the node. - /// - /// This provides the task executor and the data directory for the node. - pub fn with_launch_context( - self, - task_executor: TaskExecutor, - data_dir: ChainPath, - ) -> WithLaunchContext { - WithLaunchContext { builder: self, task_executor, data_dir } - } - - /// Creates an _ephemeral_ preconfigured node for testing purposes. - pub fn testing_node( - self, - task_executor: TaskExecutor, - ) -> WithLaunchContext>, InitState> { - let db = create_test_rw_db(); - let db_path_str = db.path().to_str().expect("Path is not valid unicode"); - let path = - MaybePlatformPath::::from_str(db_path_str).expect("Path is not valid"); - let data_dir = path.unwrap_or_chain_default(self.config.chain.chain); - - WithLaunchContext { builder: self.with_database(db), task_executor, data_dir } - } -} - -impl NodeBuilder -where - DB: Database + Unpin + Clone + 'static, -{ - /// Configures the types of the node. - pub fn with_types(self, types: T) -> NodeBuilder> - where - T: NodeTypes, - { - NodeBuilder { - config: self.config, - state: TypesState { adapter: FullNodeTypesAdapter::new(types) }, - database: self.database, - } - } - - /// Preconfigures the node with a specific node implementation. - /// - /// This is a convenience method that sets the node's types and components in one call. 
- pub fn node( - self, - node: N, - ) -> NodeBuilder< - DB, - ComponentsState< - N, - ComponentsBuilder< - RethFullAdapter, - N::PoolBuilder, - N::PayloadBuilder, - N::NetworkBuilder, - >, - FullNodeComponentsAdapter< - RethFullAdapter, - >>::Pool, - >, - >, - > - where - N: Node>>, - N::PoolBuilder: PoolBuilder>, - N::NetworkBuilder: crate::components::NetworkBuilder< - RethFullAdapter, - >>::Pool, - >, - N::PayloadBuilder: crate::components::PayloadServiceBuilder< - RethFullAdapter, - >>::Pool, - >, - { - self.with_types(node.clone()).with_components(node.components()) - } -} - -impl NodeBuilder> -where - Types: NodeTypes, - DB: Database + Clone + Unpin + 'static, -{ - /// Configures the node's components. - pub fn with_components( - self, - components_builder: Components, - ) -> NodeBuilder< - DB, - ComponentsState< - Types, - Components, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - > - where - Components: - NodeComponentsBuilder>>, - { - NodeBuilder { - config: self.config, - database: self.database, - state: ComponentsState { - types: self.state.adapter.types, - components_builder, - hooks: NodeHooks::new(), - rpc: RpcHooks::new(), - exexs: Vec::new(), - }, - } - } -} - -impl - NodeBuilder< - DB, - ComponentsState< - Types, - Components, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - > -where - DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - Types: NodeTypes, - Components: NodeComponentsBuilder>>, -{ - /// Apply a function to the components builder. 
- pub fn map_components(self, f: impl FnOnce(Components) -> Components) -> Self { - Self { - config: self.config, - database: self.database, - state: ComponentsState { - types: self.state.types, - components_builder: f(self.state.components_builder), - hooks: self.state.hooks, - rpc: self.state.rpc, - exexs: self.state.exexs, - }, - } - } - - /// Sets the hook that is run once the node's components are initialized. - pub fn on_component_initialized(mut self, hook: F) -> Self - where - F: Fn( - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.state.hooks.set_on_component_initialized(hook); - self - } - - /// Sets the hook that is run once the node has started. - pub fn on_node_started(mut self, hook: F) -> Self - where - F: Fn( - FullNode< - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.state.hooks.set_on_node_started(hook); - self - } - - /// Sets the hook that is run once the rpc server is started. - pub fn on_rpc_started(mut self, hook: F) -> Self - where - F: Fn( - RpcContext< - '_, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - RethRpcServerHandles, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.state.rpc.set_on_rpc_started(hook); - self - } - - /// Sets the hook that is run to configure the rpc modules. - pub fn extend_rpc_modules(mut self, hook: F) -> Self - where - F: Fn( - RpcContext< - '_, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.state.rpc.set_extend_rpc_modules(hook); - self - } - - /// Installs an ExEx (Execution Extension) in the node. - /// - /// # Note - /// - /// The ExEx ID must be unique. 
- pub fn install_exex(mut self, exex_id: impl Into, exex: F) -> Self - where - F: Fn( - ExExContext< - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - ) -> R - + Send - + 'static, - R: Future> + Send, - E: Future> + Send, - { - self.state.exexs.push((exex_id.into(), Box::new(exex))); - self - } - - /// Launches the node and returns a handle to it. - /// - /// This bootstraps the node internals, creates all the components with the provider - /// [NodeComponentsBuilder] and launches the node. - /// - /// Returns a [NodeHandle] that can be used to interact with the node. - pub async fn launch( - self, - executor: TaskExecutor, - data_dir: ChainPath, - ) -> eyre::Result< - NodeHandle< - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - > { - // get config from file - let reth_config = self.load_config(&data_dir)?; - - let Self { - config, - state: ComponentsState { types, components_builder, hooks, rpc, exexs: _ }, - database, - } = self; - - // Raise the fd limit of the process. - // Does not do anything on windows. - fdlimit::raise_fd_limit()?; - - // Limit the global rayon thread pool, reserving 2 cores for the rest of the system - let _ = ThreadPoolBuilder::new() - .num_threads( - available_parallelism().map_or(25, |cpus| max(cpus.get().saturating_sub(2), 2)), - ) - .build_global() - .map_err(|e| error!("Failed to build global thread pool: {:?}", e)); - - let provider_factory = ProviderFactory::new( - database.clone(), - Arc::clone(&config.chain), - data_dir.static_files_path(), - )? 
- .with_static_files_metrics(); - info!(target: "reth::cli", "Database opened"); - - let prometheus_handle = config.install_prometheus_recorder()?; - config - .start_metrics_endpoint( - prometheus_handle, - database.clone(), - provider_factory.static_file_provider(), - executor.clone(), - ) - .await?; - - debug!(target: "reth::cli", chain=%config.chain.chain, genesis=?config.chain.genesis_hash(), "Initializing genesis"); - - let genesis_hash = init_genesis(provider_factory.clone())?; - - info!(target: "reth::cli", "\n{}", config.chain.display_hardforks()); - - // setup the consensus instance - let consensus: Arc = if config.dev.dev { - Arc::new(AutoSealConsensus::new(Arc::clone(&config.chain))) - } else { - Arc::new(BeaconConsensus::new(Arc::clone(&config.chain))) - }; - - debug!(target: "reth::cli", "Spawning stages metrics listener task"); - let (sync_metrics_tx, sync_metrics_rx) = unbounded_channel(); - let sync_metrics_listener = reth_stages::MetricsListener::new(sync_metrics_rx); - executor.spawn_critical("stages metrics listener task", sync_metrics_listener); - - let prune_config = config.prune_config()?.or_else(|| reth_config.prune.clone()); - - // Configure the blockchain tree for the node - let evm_config = types.evm_config(); - let tree_config = BlockchainTreeConfig::default(); - let tree_externals = TreeExternals::new( - provider_factory.clone(), - consensus.clone(), - EvmProcessorFactory::new(config.chain.clone(), evm_config.clone()), - ); - let tree = BlockchainTree::new( - tree_externals, - tree_config, - prune_config.as_ref().map(|config| config.segments.clone()), - )? 
- .with_sync_metrics_tx(sync_metrics_tx.clone()); - - let canon_state_notification_sender = tree.canon_state_notification_sender(); - let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); - debug!(target: "reth::cli", "configured blockchain tree"); - - // fetch the head block from the database - let head = - config.lookup_head(provider_factory.clone()).wrap_err("the head block is missing")?; - - // setup the blockchain provider - let blockchain_db = - BlockchainProvider::new(provider_factory.clone(), blockchain_tree.clone())?; - - let ctx = BuilderContext::new( - head, - blockchain_db, - executor, - data_dir, - config, - reth_config, - evm_config.clone(), - ); - - debug!(target: "reth::cli", "creating components"); - let NodeComponents { transaction_pool, network, payload_builder } = - components_builder.build_components(&ctx).await?; - - let BuilderContext { - provider: blockchain_db, - executor, - data_dir, - mut config, - mut reth_config, - .. - } = ctx; - - let NodeHooks { on_component_initialized, on_node_started, .. 
} = hooks; - - let node_components = FullNodeComponentsAdapter { - evm_config: evm_config.clone(), - pool: transaction_pool.clone(), - network: network.clone(), - provider: blockchain_db.clone(), - payload_builder: payload_builder.clone(), - executor: executor.clone(), - }; - debug!(target: "reth::cli", "calling on_component_initialized hook"); - on_component_initialized.on_event(node_components.clone())?; - - // spawn exexs - let mut exex_handles = Vec::with_capacity(self.state.exexs.len()); - let mut exexs = Vec::with_capacity(self.state.exexs.len()); - for (id, exex) in self.state.exexs { - // create a new exex handle - let (handle, events, notifications) = ExExHandle::new(id.clone()); - exex_handles.push(handle); - - // create the launch context for the exex - let context = ExExContext { - head, - provider: blockchain_db.clone(), - task_executor: executor.clone(), - data_dir: data_dir.clone(), - config: config.clone(), - reth_config: reth_config.clone(), - pool: transaction_pool.clone(), - events, - notifications, - }; - - let executor = executor.clone(); - exexs.push(async move { - debug!(target: "reth::cli", id, "spawning exex"); - let span = reth_tracing::tracing::info_span!("exex", id); - let _enter = span.enter(); - - // init the exex - let exex = exex.launch(context).await.unwrap(); - - // spawn it as a crit task - executor.spawn_critical("exex", async move { - info!(target: "reth::cli", "ExEx started"); - match exex.await { - Ok(_) => panic!("ExEx {id} finished. 
ExEx's should run indefinitely"), - Err(err) => panic!("ExEx {id} crashed: {err}"), - } - }); - }); - } - - future::join_all(exexs).await; - - // spawn exex manager - let exex_manager_handle = if !exex_handles.is_empty() { - debug!(target: "reth::cli", "spawning exex manager"); - // todo(onbjerg): rm magic number - let exex_manager = ExExManager::new(exex_handles, 1024); - let exex_manager_handle = exex_manager.handle(); - executor.spawn_critical("exex manager", async move { - exex_manager.await.expect("exex manager crashed"); - }); - - // send notifications from the blockchain tree to exex manager - let mut canon_state_notifications = blockchain_tree.subscribe_to_canonical_state(); - let mut handle = exex_manager_handle.clone(); - executor.spawn_critical("exex manager blockchain tree notifications", async move { - while let Ok(notification) = canon_state_notifications.recv().await { - handle - .send_async(notification.into()) - .await - .expect("blockchain tree notification could not be sent to exex manager"); - } - }); - - info!(target: "reth::cli", "ExEx Manager started"); - - Some(exex_manager_handle) - } else { - None - }; - - // create pipeline - let network_client = network.fetch_client().await?; - let (consensus_engine_tx, mut consensus_engine_rx) = unbounded_channel(); - - if let Some(skip_fcu_threshold) = config.debug.skip_fcu { - debug!(target: "reth::cli", "spawning skip FCU task"); - let (skip_fcu_tx, skip_fcu_rx) = unbounded_channel(); - let engine_skip_fcu = EngineApiSkipFcu::new(skip_fcu_threshold); - executor.spawn_critical( - "skip FCU interceptor", - engine_skip_fcu.intercept(consensus_engine_rx, skip_fcu_tx), - ); - consensus_engine_rx = skip_fcu_rx; - } - - if let Some(store_path) = config.debug.engine_api_store.clone() { - debug!(target: "reth::cli", "spawning engine API store"); - let (engine_intercept_tx, engine_intercept_rx) = unbounded_channel(); - let engine_api_store = EngineApiStore::new(store_path); - executor.spawn_critical( - "engine 
api interceptor", - engine_api_store.intercept(consensus_engine_rx, engine_intercept_tx), - ); - consensus_engine_rx = engine_intercept_rx; - }; - - let max_block = config.max_block(&network_client, provider_factory.clone()).await?; - let mut hooks = EngineHooks::new(); - - let static_file_producer = StaticFileProducer::new( - provider_factory.clone(), - provider_factory.static_file_provider(), - prune_config.clone().unwrap_or_default().segments, - ); - let static_file_producer_events = static_file_producer.lock().events(); - hooks.add(StaticFileHook::new(static_file_producer.clone(), Box::new(executor.clone()))); - info!(target: "reth::cli", "StaticFileProducer initialized"); - - // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to - if reth_config.stages.etl.dir.is_none() { - reth_config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); - } - - // Configure the pipeline - let pipeline_exex_handle = - exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); - let (mut pipeline, client) = if config.dev.dev { - info!(target: "reth::cli", "Starting Reth in dev mode"); - - for (idx, (address, alloc)) in config.chain.genesis.alloc.iter().enumerate() { - info!(target: "reth::cli", "Allocated Genesis Account: {:02}. 
{} ({} ETH)", idx, address.to_string(), format_ether(alloc.balance)); - } - - // install auto-seal - let pending_transactions_listener = transaction_pool.pending_transactions_listener(); - - let mining_mode = if let Some(interval) = config.dev.block_time { - MiningMode::interval(interval) - } else if let Some(max_transactions) = config.dev.block_max_transactions { - MiningMode::instant(max_transactions, pending_transactions_listener) - } else { - info!(target: "reth::cli", "No mining mode specified, defaulting to ReadyTransaction"); - MiningMode::instant(1, pending_transactions_listener) - }; - - let (_, client, mut task) = reth_auto_seal_consensus::AutoSealBuilder::new( - Arc::clone(&config.chain), - blockchain_db.clone(), - transaction_pool.clone(), - consensus_engine_tx.clone(), - canon_state_notification_sender, - mining_mode, - evm_config.clone(), - ) - .build(); - - let mut pipeline = crate::setup::build_networked_pipeline( - &config, - &reth_config.stages, - client.clone(), - Arc::clone(&consensus), - provider_factory.clone(), - &executor, - sync_metrics_tx, - prune_config.clone(), - max_block, - static_file_producer, - evm_config, - pipeline_exex_handle, - ) - .await?; - - let pipeline_events = pipeline.events(); - task.set_pipeline_events(pipeline_events); - debug!(target: "reth::cli", "Spawning auto mine task"); - executor.spawn(Box::pin(task)); - - (pipeline, EitherDownloader::Left(client)) - } else { - let pipeline = crate::setup::build_networked_pipeline( - &config, - &reth_config.stages, - network_client.clone(), - Arc::clone(&consensus), - provider_factory.clone(), - &executor, - sync_metrics_tx, - prune_config.clone(), - max_block, - static_file_producer, - evm_config, - pipeline_exex_handle, - ) - .await?; - - (pipeline, EitherDownloader::Right(network_client)) - }; - - let pipeline_events = pipeline.events(); - - let initial_target = config.initial_pipeline_target(genesis_hash); - - let prune_config = prune_config.unwrap_or_default(); - let mut 
pruner_builder = PrunerBuilder::new(prune_config.clone()) - .max_reorg_depth(tree_config.max_reorg_depth() as usize) - .prune_delete_limit(config.chain.prune_delete_limit) - .timeout(PrunerBuilder::DEFAULT_TIMEOUT); - if let Some(exex_manager_handle) = &exex_manager_handle { - pruner_builder = - pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); - } - - let mut pruner = pruner_builder.build(provider_factory.clone()); - - let pruner_events = pruner.events(); - hooks.add(PruneHook::new(pruner, Box::new(executor.clone()))); - info!(target: "reth::cli", ?prune_config, "Pruner initialized"); - - // Configure the consensus engine - let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel( - client, - pipeline, - blockchain_db.clone(), - Box::new(executor.clone()), - Box::new(network.clone()), - max_block, - config.debug.continuous, - payload_builder.clone(), - initial_target, - reth_beacon_consensus::MIN_BLOCKS_FOR_PIPELINE_RUN, - consensus_engine_tx, - consensus_engine_rx, - hooks, - )?; - info!(target: "reth::cli", "Consensus engine initialized"); - - let events = stream_select!( - network.event_listener().map(Into::into), - beacon_engine_handle.event_listener().map(Into::into), - pipeline_events.map(Into::into), - if config.debug.tip.is_none() && !config.dev.dev { - Either::Left( - ConsensusLayerHealthEvents::new(Box::new(blockchain_db.clone())) - .map(Into::into), - ) - } else { - Either::Right(stream::empty()) - }, - pruner_events.map(Into::into), - static_file_producer_events.map(Into::into) - ); - executor.spawn_critical( - "events task", - node::handle_events(Some(network.clone()), Some(head.number), events, database.clone()), - ); - - let engine_api = EngineApi::new( - blockchain_db.clone(), - config.chain.clone(), - beacon_engine_handle, - payload_builder.into(), - Box::new(executor.clone()), - ); - info!(target: "reth::cli", "Engine API handler initialized"); - - // extract the jwt secret from the args 
if possible - let default_jwt_path = data_dir.jwt_path(); - let jwt_secret = config.rpc.auth_jwt_secret(default_jwt_path)?; - - // adjust rpc port numbers based on instance number - config.adjust_instance_ports(); - - // Start RPC servers - - let (rpc_server_handles, mut rpc_registry) = crate::rpc::launch_rpc_servers( - node_components.clone(), - engine_api, - &config, - jwt_secret, - rpc, - ) - .await?; - - // in dev mode we generate 20 random dev-signer accounts - if config.dev.dev { - rpc_registry.eth_api().with_dev_accounts(); - } - - // Run consensus engine to completion - let (tx, rx) = oneshot::channel(); - info!(target: "reth::cli", "Starting consensus engine"); - executor.spawn_critical_blocking("consensus engine", async move { - let res = beacon_consensus_engine.await; - let _ = tx.send(res); - }); - - let FullNodeComponentsAdapter { - evm_config, - pool, - network, - provider, - payload_builder, - executor, - } = node_components; - - let full_node = FullNode { - evm_config, - pool, - network, - provider, - payload_builder, - task_executor: executor, - rpc_server_handles, - rpc_registry, - config, - data_dir, - }; - // Notify on node started - on_node_started.on_event(full_node.clone())?; - - let handle = NodeHandle { - node_exit_future: NodeExitFuture::new(rx, full_node.config.debug.terminate), - node: full_node, - }; - - Ok(handle) - } - - /// Check that the builder can be launched - /// - /// This is useful when writing tests to ensure that the builder is configured correctly. - pub fn check_launch(self) -> Self { - self - } -} - -/// A [NodeBuilder] with it's launch context already configured. -/// -/// This exposes the same methods as [NodeBuilder] but with the launch context already configured, -/// See [WithLaunchContext::launch] -pub struct WithLaunchContext { - builder: NodeBuilder, - task_executor: TaskExecutor, - data_dir: ChainPath, -} - -impl WithLaunchContext { - /// Returns a reference to the node builder's config. 
- pub fn config(&self) -> &NodeConfig { - self.builder.config() - } - - /// Returns a reference to the task executor. - pub fn task_executor(&self) -> &TaskExecutor { - &self.task_executor - } - - /// Returns a reference to the data directory. - pub fn data_dir(&self) -> &ChainPath { - &self.data_dir - } -} - -impl WithLaunchContext -where - DB: Database + Clone + Unpin + 'static, -{ - /// Configures the types of the node. - pub fn with_types(self, types: T) -> WithLaunchContext> - where - T: NodeTypes, - { - WithLaunchContext { - builder: self.builder.with_types(types), - task_executor: self.task_executor, - data_dir: self.data_dir, - } - } - - /// Preconfigures the node with a specific node implementation. - pub fn node( - self, - node: N, - ) -> WithLaunchContext< - DB, - ComponentsState< - N, - ComponentsBuilder< - RethFullAdapter, - N::PoolBuilder, - N::PayloadBuilder, - N::NetworkBuilder, - >, - FullNodeComponentsAdapter< - RethFullAdapter, - >>::Pool, - >, - >, - > - where - N: Node>>, - N::PoolBuilder: PoolBuilder>, - N::NetworkBuilder: crate::components::NetworkBuilder< - RethFullAdapter, - >>::Pool, - >, - N::PayloadBuilder: crate::components::PayloadServiceBuilder< - RethFullAdapter, - >>::Pool, - >, - { - self.with_types(node.clone()).with_components(node.components()) - } -} - -impl WithLaunchContext -where - DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, -{ - /// Launches a preconfigured [Node] - /// - /// This bootstraps the node internals, creates all the components with the given [Node] type - /// and launches the node. - /// - /// Returns a [NodeHandle] that can be used to interact with the node. 
- pub async fn launch_node( - self, - node: N, - ) -> eyre::Result< - NodeHandle< - FullNodeComponentsAdapter< - RethFullAdapter, - >>::Pool, - >, - >, - > - where - N: Node>>, - N::PoolBuilder: PoolBuilder>, - N::NetworkBuilder: crate::components::NetworkBuilder< - RethFullAdapter, - >>::Pool, - >, - N::PayloadBuilder: crate::components::PayloadServiceBuilder< - RethFullAdapter, - >>::Pool, - >, - { - self.node(node).launch().await - } -} - -impl WithLaunchContext> -where - Types: NodeTypes, - DB: Database + Clone + Unpin + 'static, -{ - /// Configures the node's components. - /// - /// The given components builder is used to create the components of the node when it is - /// launched. - pub fn with_components( - self, - components_builder: Components, - ) -> WithLaunchContext< - DB, - ComponentsState< - Types, - Components, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - > - where - Components: - NodeComponentsBuilder>>, - { - WithLaunchContext { - builder: self.builder.with_components(components_builder), - task_executor: self.task_executor, - data_dir: self.data_dir, - } - } -} - -impl - WithLaunchContext< - DB, - ComponentsState< - Types, - Components, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - > -where - DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - Types: NodeTypes, - Components: NodeComponentsBuilder>>, -{ - /// Apply a function to the components builder. - pub fn map_components(self, f: impl FnOnce(Components) -> Components) -> Self { - Self { - builder: self.builder.map_components(f), - task_executor: self.task_executor, - data_dir: self.data_dir, - } - } - - /// Sets the hook that is run once the node's components are initialized. 
- pub fn on_component_initialized(mut self, hook: F) -> Self - where - F: Fn( - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.builder.state.hooks.set_on_component_initialized(hook); - self - } - - /// Sets the hook that is run once the node has started. - pub fn on_node_started(mut self, hook: F) -> Self - where - F: Fn( - FullNode< - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.builder.state.hooks.set_on_node_started(hook); - self - } - - /// Sets the hook that is run once the rpc server is started. - pub fn on_rpc_started(mut self, hook: F) -> Self - where - F: Fn( - RpcContext< - '_, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - RethRpcServerHandles, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.builder.state.rpc.set_on_rpc_started(hook); - self - } - - /// Sets the hook that is run to configure the rpc modules. - pub fn extend_rpc_modules(mut self, hook: F) -> Self - where - F: Fn( - RpcContext< - '_, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.builder.state.rpc.set_extend_rpc_modules(hook); - self - } - - /// Installs an ExEx (Execution Extension) in the node. - pub fn install_exex(mut self, exex_id: impl Into, exex: F) -> Self - where - F: Fn( - ExExContext< - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - ) -> R - + Send - + 'static, - R: Future> + Send, - E: Future> + Send, - { - self.builder.state.exexs.push((exex_id.into(), Box::new(exex))); - self - } - - /// Launches the node and returns a handle to it. 
- pub async fn launch( - self, - ) -> eyre::Result< - NodeHandle< - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - > { - let Self { builder, task_executor, data_dir } = self; - - builder.launch(task_executor, data_dir).await - } - - /// Check that the builder can be launched - /// - /// This is useful when writing tests to ensure that the builder is configured correctly. - pub fn check_launch(self) -> Self { - self - } -} - -/// Captures the necessary context for building the components of the node. -pub struct BuilderContext { - /// The current head of the blockchain at launch. - head: Head, - /// The configured provider to interact with the blockchain. - provider: Node::Provider, - /// The executor of the node. - executor: TaskExecutor, - /// The data dir of the node. - data_dir: ChainPath, - /// The config of the node - config: NodeConfig, - /// loaded config - reth_config: reth_config::Config, - /// EVM config of the node - evm_config: Node::Evm, -} - -impl BuilderContext { - /// Create a new instance of [BuilderContext] - pub fn new( - head: Head, - provider: Node::Provider, - executor: TaskExecutor, - data_dir: ChainPath, - config: NodeConfig, - reth_config: reth_config::Config, - evm_config: Node::Evm, - ) -> Self { - Self { head, provider, executor, data_dir, config, reth_config, evm_config } - } - - /// Returns the configured provider to interact with the blockchain. - pub fn provider(&self) -> &Node::Provider { - &self.provider - } - - /// Returns the configured evm. - pub fn evm_config(&self) -> &Node::Evm { - &self.evm_config - } - - /// Returns the current head of the blockchain at launch. - pub fn head(&self) -> Head { - self.head - } - - /// Returns the config of the node. - pub fn config(&self) -> &NodeConfig { - &self.config - } - - /// Returns the data dir of the node. - /// - /// This gives access to all relevant files and directories of the node's datadir. 
- pub fn data_dir(&self) -> &ChainPath { - &self.data_dir - } - - /// Returns the executor of the node. - /// - /// This can be used to execute async tasks or functions during the setup. - pub fn task_executor(&self) -> &TaskExecutor { - &self.executor - } - - /// Returns the chain spec of the node. - pub fn chain_spec(&self) -> Arc { - self.provider().chain_spec() - } - - /// Returns the transaction pool config of the node. - pub fn pool_config(&self) -> PoolConfig { - self.config().txpool.pool_config() - } - - /// Loads `MAINNET_KZG_TRUSTED_SETUP`. - pub fn kzg_settings(&self) -> eyre::Result> { - Ok(Arc::clone(&MAINNET_KZG_TRUSTED_SETUP)) - } - - /// Returns the config for payload building. - pub fn payload_builder_config(&self) -> impl PayloadBuilderConfig { - self.config.builder.clone() - } - - /// Returns the default network config for the node. - pub fn network_config(&self) -> eyre::Result> { - self.config.network_config( - &self.reth_config, - self.provider.clone(), - self.executor.clone(), - self.head, - self.data_dir(), - ) - } - - /// Creates the [NetworkBuilder] for the node. - pub async fn network_builder(&self) -> eyre::Result> { - self.config - .build_network( - &self.reth_config, - self.provider.clone(), - self.executor.clone(), - self.head, - self.data_dir(), - ) - .await - } - - /// Convenience function to start the network. - /// - /// Spawns the configured network and associated tasks and returns the [NetworkHandle] connected - /// to that network. 
- pub fn start_network( - &self, - builder: NetworkBuilder, - pool: Pool, - ) -> NetworkHandle - where - Pool: TransactionPool + Unpin + 'static, - { - let (handle, network, txpool, eth) = builder - .transactions(pool, Default::default()) - .request_handler(self.provider().clone()) - .split_with_handle(); - - self.executor.spawn_critical("p2p txpool", txpool); - self.executor.spawn_critical("p2p eth request handler", eth); - - let default_peers_path = self.data_dir().known_peers_path(); - let known_peers_file = self.config.network.persistent_peers_file(default_peers_path); - self.executor.spawn_critical_with_graceful_shutdown_signal( - "p2p network task", - |shutdown| { - network.run_until_graceful_shutdown(shutdown, |network| { - write_peers_to_file(network, known_peers_file) - }) - }, - ); - - handle - } -} - -impl std::fmt::Debug for BuilderContext { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("BuilderContext") - .field("head", &self.head) - .field("provider", &std::any::type_name::()) - .field("executor", &self.executor) - .field("data_dir", &self.data_dir) - .field("config", &self.config) - .finish() - } -} - -/// The initial state of the node builder process. -#[derive(Debug, Default)] -#[non_exhaustive] -pub struct InitState; - -/// The state after all types of the node have been configured. -pub struct TypesState -where - DB: Database + Clone + 'static, - Types: NodeTypes, -{ - adapter: FullNodeTypesAdapter>, -} - -/// The state of the node builder process after the node's components have been configured. -/// -/// With this state all types and components of the node are known and the node can be launched. -/// -/// Additionally, this state captures additional hooks that are called at specific points in the -/// node's launch lifecycle. -pub struct ComponentsState { - /// The types of the node. - types: Types, - /// Type that builds the components of the node. 
- components_builder: Components, - /// Additional NodeHooks that are called at specific points in the node's launch lifecycle. - hooks: NodeHooks, - /// Additional RPC hooks. - rpc: RpcHooks, - /// The ExExs (execution extensions) of the node. - exexs: Vec<(String, Box>)>, -} - -impl std::fmt::Debug - for ComponentsState -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("ComponentsState") - .field("types", &std::any::type_name::()) - .field("components_builder", &std::any::type_name::()) - .field("hooks", &self.hooks) - .field("rpc", &self.rpc) - .field("exexs", &self.exexs.len()) - .finish() - } -} diff --git a/crates/node-builder/src/builder/mod.rs b/crates/node-builder/src/builder/mod.rs new file mode 100644 index 0000000000000..44bb60588872b --- /dev/null +++ b/crates/node-builder/src/builder/mod.rs @@ -0,0 +1,619 @@ +//! Customizable node builder. + +#![allow(clippy::type_complexity, missing_debug_implementations)] + +use crate::{ + components::{Components, ComponentsBuilder, NodeComponentsBuilder, PoolBuilder}, + node::FullNode, + rpc::{RethRpcServerHandles, RpcContext}, + DefaultNodeLauncher, Node, NodeHandle, +}; +use futures::Future; +use reth_db::{ + database::Database, + database_metrics::{DatabaseMetadata, DatabaseMetrics}, + test_utils::{create_test_rw_db, TempDatabase}, + DatabaseEnv, +}; +use reth_exex::ExExContext; +use reth_network::{NetworkBuilder, NetworkConfig, NetworkHandle}; +use reth_node_api::{FullNodeTypes, FullNodeTypesAdapter, NodeTypes}; +use reth_node_core::{ + cli::config::{PayloadBuilderConfig, RethTransactionPoolConfig}, + dirs::{ChainPath, DataDirPath, MaybePlatformPath}, + node_config::NodeConfig, + primitives::{kzg::KzgSettings, Head}, + utils::write_peers_to_file, +}; +use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, ChainSpec}; +use reth_provider::{providers::BlockchainProvider, ChainSpecProvider}; +use reth_tasks::TaskExecutor; +use 
reth_transaction_pool::{PoolConfig, TransactionPool}; +pub use states::*; +use std::{str::FromStr, sync::Arc}; + +mod states; + +/// The builtin provider type of the reth node. +// Note: we need to hardcode this because custom components might depend on it in associated types. +pub type RethFullProviderType = BlockchainProvider; + +/// The adapter type for a reth node with the given types +pub type RethFullAdapter = FullNodeTypesAdapter>; + +#[cfg_attr(doc, aquamarine::aquamarine)] +/// Declaratively construct a node. +/// +/// [`NodeBuilder`] provides a [builder-like interface][builder] for composing +/// components of a node. +/// +/// ## Order +/// +/// Configuring a node starts out with a [`NodeConfig`] (this can be obtained from cli arguments for +/// example) and then proceeds to configure the core static types of the node: [NodeTypes], these +/// include the node's primitive types and the node's engine types. +/// +/// Next all stateful components of the node are configured, these include the +/// [ConfigureEvm](reth_node_api::evm::ConfigureEvm), the database [Database] and all the +/// components of the node that are downstream of those types, these include: +/// +/// - The transaction pool: [PoolBuilder] +/// - The network: [NetworkBuilder](crate::components::NetworkBuilder) +/// - The payload builder: [PayloadBuilder](crate::components::PayloadServiceBuilder) +/// +/// Once all the components are configured, the node is ready to be launched. +/// +/// On launch the builder returns a fully type aware [NodeHandle] that has access to all the +/// configured components and can interact with the node. +/// +/// There are convenience functions for networks that come with a preset of types and components via +/// the [Node] trait, see `reth_node_ethereum::EthereumNode` or `reth_node_optimism::OptimismNode`. +/// +/// The [NodeBuilder::node] function configures the node's types and components in one step. 
+/// +/// ## Components +/// +/// All components are configured with a [NodeComponentsBuilder] that is responsible for actually +/// creating the node components during the launch process. The [ComponentsBuilder] is a general +/// purpose implementation of the [NodeComponentsBuilder] trait that can be used to configure the +/// network, transaction pool and payload builder of the node. It enforces the correct order of +/// configuration, for example the network and the payload builder depend on the transaction pool +/// type that is configured first. +/// +/// All builder traits are generic over the node types and are invoked with the [BuilderContext] +/// that gives access to internals of the node that are needed to configure the components. This includes +/// the original config, chain spec, the database provider and the task executor. +/// +/// ## Hooks +/// +/// Once all the components are configured, the builder can be used to set hooks that are run at +/// specific points in the node's lifecycle. This way custom services can be spawned before the node +/// is launched [NodeBuilder::on_component_initialized], or once the rpc server(s) are launched +/// [NodeBuilder::on_rpc_started]. The [NodeBuilder::extend_rpc_modules] can be used to inject +/// custom rpc modules into the rpc server before it is launched. See also [RpcContext]. +/// All hooks accept a closure that is then invoked at the appropriate time in the node's launch +/// process. +/// +/// ## Flow +/// +/// The [NodeBuilder] is intended to sit behind a CLI that provides the necessary [NodeConfig] +/// input: [NodeBuilder::new] +/// +/// From there the builder is configured with the node's types, components, and hooks, then launched +/// with the [NodeBuilder::launch] method.
On launch all the builtin internals, such as the +/// `Database` and its providers [BlockchainProvider] are initialized before the configured +/// [NodeComponentsBuilder] is invoked with the [BuilderContext] to create the transaction pool, +/// network, and payload builder components. When the RPC is configured, the corresponding hooks are +/// invoked to allow for custom rpc modules to be injected into the rpc server: +/// [NodeBuilder::extend_rpc_modules] +/// +/// Finally all components are created and all services are launched and a [NodeHandle] is returned +/// that can be used to interact with the node: [FullNode] +/// +/// The following diagram shows the flow of the node builder from CLI to a launched node. +/// +/// include_mmd!("docs/mermaid/builder.mmd") +/// +/// ## Internals +/// +/// The node builder is fully type safe; it uses the [NodeTypes] trait to enforce that all +/// components are configured with the correct types. However the database types and with that the +/// provider trait implementations are currently created by the builder itself during the launch +/// process, hence the database type is not part of the [NodeTypes] trait and the node's components, +/// that depend on the database, are configured separately. In order to have a nice trait that +/// encapsulates the entire node the [FullNodeComponents] trait was introduced. This trait has +/// convenient associated types for all the components of the node. After [NodeBuilder::launch] the +/// [NodeHandle] contains an instance of [FullNode] that implements the [FullNodeComponents] trait +/// and has access to all the components of the node. Internally the node builder uses several +/// generic adapter types that are then mapped to traits with associated types for ease of use. +/// +/// ### Limitations +/// +/// Currently the launch process is limited to ethereum nodes and requires all the components +/// specified above.
It also expects beacon consensus with the ethereum engine API that is configured +/// by the builder itself during launch. This might change in the future. +/// +/// [builder]: https://doc.rust-lang.org/1.0.0/style/ownership/builders.html +pub struct NodeBuilder { + /// All settings for how the node should be configured. + config: NodeConfig, + /// The configured database for the node. + database: DB, +} + +impl NodeBuilder<()> { + /// Create a new [`NodeBuilder`]. + pub fn new(config: NodeConfig) -> Self { + Self { config, database: () } + } +} + +impl NodeBuilder { + /// Returns a reference to the node builder's config. + pub fn config(&self) -> &NodeConfig { + &self.config + } + + /// Configures the underlying database that the node will use. + pub fn with_database(self, database: D) -> NodeBuilder { + NodeBuilder { config: self.config, database } + } + + /// Preconfigure the builder with the context to launch the node. + /// + /// This provides the task executor and the data directory for the node. + pub fn with_launch_context( + self, + task_executor: TaskExecutor, + data_dir: ChainPath, + ) -> WithLaunchContext> { + WithLaunchContext { builder: self, task_executor, data_dir } + } + + /// Creates an _ephemeral_ preconfigured node for testing purposes. + pub fn testing_node( + self, + task_executor: TaskExecutor, + ) -> WithLaunchContext>>> { + let db = create_test_rw_db(); + let db_path_str = db.path().to_str().expect("Path is not valid unicode"); + let path = + MaybePlatformPath::::from_str(db_path_str).expect("Path is not valid"); + let data_dir = path.unwrap_or_chain_default(self.config.chain.chain); + + WithLaunchContext { builder: self.with_database(db), task_executor, data_dir } + } +} + +impl NodeBuilder +where + DB: Database + Unpin + Clone + 'static, +{ + /// Configures the types of the node.
+ pub fn with_types(self, types: T) -> NodeBuilderWithTypes> + where + T: NodeTypes, + { + let types = FullNodeTypesAdapter::new(types); + NodeBuilderWithTypes::new(self.config, types, self.database) + } + + /// Preconfigures the node with a specific node implementation. + /// + /// This is a convenience method that sets the node's types and components in one call. + pub fn node( + self, + node: N, + ) -> NodeBuilderWithComponents< + RethFullAdapter, + ComponentsBuilder< + RethFullAdapter, + N::PoolBuilder, + N::PayloadBuilder, + N::NetworkBuilder, + >, + > + where + N: Node>, + N::PoolBuilder: PoolBuilder>, + N::NetworkBuilder: crate::components::NetworkBuilder< + RethFullAdapter, + >>::Pool, + >, + N::PayloadBuilder: crate::components::PayloadServiceBuilder< + RethFullAdapter, + >>::Pool, + >, + { + self.with_types(node.clone()).with_components(node.components()) + } +} + +/// A [NodeBuilder] with its launch context already configured. +/// +/// This exposes the same methods as [NodeBuilder] but with the launch context already configured, +/// See [WithLaunchContext::launch] +pub struct WithLaunchContext { + builder: Builder, + task_executor: TaskExecutor, + data_dir: ChainPath, +} + +impl WithLaunchContext { + /// Returns a reference to the task executor. + pub fn task_executor(&self) -> &TaskExecutor { + &self.task_executor + } + + /// Returns a reference to the data directory. + pub fn data_dir(&self) -> &ChainPath { + &self.data_dir + } +} + +impl WithLaunchContext> +where + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, +{ + /// Configures the types of the node. + pub fn with_types( + self, + types: T, + ) -> WithLaunchContext>> + where + T: NodeTypes, + { + WithLaunchContext { + builder: self.builder.with_types(types), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } + + /// Preconfigures the node with a specific node implementation. 
+ pub fn node( + self, + node: N, + ) -> WithLaunchContext< + NodeBuilderWithComponents< + RethFullAdapter, + ComponentsBuilder< + RethFullAdapter, + N::PoolBuilder, + N::PayloadBuilder, + N::NetworkBuilder, + >, + >, + > + where + N: Node>, + N::PoolBuilder: PoolBuilder>, + N::NetworkBuilder: crate::components::NetworkBuilder< + RethFullAdapter, + >>::Pool, + >, + N::PayloadBuilder: crate::components::PayloadServiceBuilder< + RethFullAdapter, + >>::Pool, + >, + { + self.with_types(node.clone()).with_components(node.components()) + } + + /// Launches a preconfigured [Node] + /// + /// This bootstraps the node internals, creates all the components with the given [Node] + /// + /// Returns a [NodeHandle] that can be used to interact with the node. + pub async fn launch_node( + self, + node: N, + ) -> eyre::Result< + NodeHandle< + NodeAdapter< + RethFullAdapter, + Components< + RethFullAdapter, + >>::Pool, + >, + >, + >, + > + where + N: Node>, + N::PoolBuilder: PoolBuilder>, + N::NetworkBuilder: crate::components::NetworkBuilder< + RethFullAdapter, + >>::Pool, + >, + N::PayloadBuilder: crate::components::PayloadServiceBuilder< + RethFullAdapter, + >>::Pool, + >, + { + self.node(node).launch().await + } +} + +impl WithLaunchContext>> +where + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, + T: NodeTypes, +{ + /// Advances the state of the node builder to the next state where all components are configured + pub fn with_components( + self, + components_builder: CB, + ) -> WithLaunchContext, CB>> + where + CB: NodeComponentsBuilder>, + { + WithLaunchContext { + builder: self.builder.with_components(components_builder), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } +} + +impl WithLaunchContext, CB>> +where + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, + T: NodeTypes, + CB: NodeComponentsBuilder>, +{ + /// Sets the hook that is run once the node's components are initialized. 
+ pub fn on_component_initialized(self, hook: F) -> Self + where + F: Fn(NodeAdapter, CB::Components>) -> eyre::Result<()> + + Send + + 'static, + { + Self { + builder: self.builder.on_component_initialized(hook), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } + + /// Sets the hook that is run once the node has started. + pub fn on_node_started(self, hook: F) -> Self + where + F: Fn(FullNode, CB::Components>>) -> eyre::Result<()> + + Send + + 'static, + { + Self { + builder: self.builder.on_node_started(hook), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } + + /// Sets the hook that is run once the rpc server is started. + pub fn on_rpc_started(self, hook: F) -> Self + where + F: Fn( + RpcContext<'_, NodeAdapter, CB::Components>>, + RethRpcServerHandles, + ) -> eyre::Result<()> + + Send + + 'static, + { + Self { + builder: self.builder.on_rpc_started(hook), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } + + /// Sets the hook that is run to configure the rpc modules. + pub fn extend_rpc_modules(self, hook: F) -> Self + where + F: Fn( + RpcContext<'_, NodeAdapter, CB::Components>>, + ) -> eyre::Result<()> + + Send + + 'static, + { + Self { + builder: self.builder.extend_rpc_modules(hook), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } + + /// Installs an ExEx (Execution Extension) in the node. + /// + /// # Note + /// + /// The ExEx ID must be unique. + pub fn install_exex(self, exex_id: impl Into, exex: F) -> Self + where + F: Fn(ExExContext, CB::Components>>) -> R + + Send + + 'static, + R: Future> + Send, + E: Future> + Send, + { + Self { + builder: self.builder.install_exex(exex_id, exex), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } + + /// Launches the node and returns a handle to it. 
+ pub async fn launch( + self, + ) -> eyre::Result, CB::Components>>> { + let Self { builder, task_executor, data_dir } = self; + + let launcher = DefaultNodeLauncher { task_executor, data_dir }; + builder.launch_with(launcher).await + } + + /// Check that the builder can be launched + /// + /// This is useful when writing tests to ensure that the builder is configured correctly. + pub fn check_launch(self) -> Self { + self + } +} + +/// Captures the necessary context for building the components of the node. +pub struct BuilderContext { + /// The current head of the blockchain at launch. + pub(crate) head: Head, + /// The configured provider to interact with the blockchain. + pub(crate) provider: Node::Provider, + /// The executor of the node. + pub(crate) executor: TaskExecutor, + /// The data dir of the node. + pub(crate) data_dir: ChainPath, + /// The config of the node + pub(crate) config: NodeConfig, + /// loaded config + pub(crate) reth_config: reth_config::Config, + /// EVM config of the node + pub(crate) evm_config: Node::Evm, +} + +impl BuilderContext { + /// Create a new instance of [BuilderContext] + pub fn new( + head: Head, + provider: Node::Provider, + executor: TaskExecutor, + data_dir: ChainPath, + config: NodeConfig, + reth_config: reth_config::Config, + evm_config: Node::Evm, + ) -> Self { + Self { head, provider, executor, data_dir, config, reth_config, evm_config } + } + + /// Returns the configured provider to interact with the blockchain. + pub fn provider(&self) -> &Node::Provider { + &self.provider + } + + /// Returns the configured evm. + pub fn evm_config(&self) -> &Node::Evm { + &self.evm_config + } + + /// Returns the current head of the blockchain at launch. + pub fn head(&self) -> Head { + self.head + } + + /// Returns the config of the node. + pub fn config(&self) -> &NodeConfig { + &self.config + } + + /// Returns the data dir of the node. + /// + /// This gives access to all relevant files and directories of the node's datadir. 
+ pub fn data_dir(&self) -> &ChainPath { + &self.data_dir + } + + /// Returns the executor of the node. + /// + /// This can be used to execute async tasks or functions during the setup. + pub fn task_executor(&self) -> &TaskExecutor { + &self.executor + } + + /// Returns the chain spec of the node. + pub fn chain_spec(&self) -> Arc { + self.provider().chain_spec() + } + + /// Returns the transaction pool config of the node. + pub fn pool_config(&self) -> PoolConfig { + self.config().txpool.pool_config() + } + + /// Loads `MAINNET_KZG_TRUSTED_SETUP`. + pub fn kzg_settings(&self) -> eyre::Result> { + Ok(Arc::clone(&MAINNET_KZG_TRUSTED_SETUP)) + } + + /// Returns the config for payload building. + pub fn payload_builder_config(&self) -> impl PayloadBuilderConfig { + self.config.builder.clone() + } + + /// Returns the default network config for the node. + pub fn network_config(&self) -> eyre::Result> { + self.config.network_config( + &self.reth_config, + self.provider.clone(), + self.executor.clone(), + self.head, + self.data_dir(), + ) + } + + /// Creates the [NetworkBuilder] for the node. + pub async fn network_builder(&self) -> eyre::Result> { + self.config + .build_network( + &self.reth_config, + self.provider.clone(), + self.executor.clone(), + self.head, + self.data_dir(), + ) + .await + } + + /// Convenience function to start the network. + /// + /// Spawns the configured network and associated tasks and returns the [NetworkHandle] connected + /// to that network. 
+ pub fn start_network( + &self, + builder: NetworkBuilder, + pool: Pool, + ) -> NetworkHandle + where + Pool: TransactionPool + Unpin + 'static, + { + let (handle, network, txpool, eth) = builder + .transactions(pool, Default::default()) + .request_handler(self.provider().clone()) + .split_with_handle(); + + self.executor.spawn_critical("p2p txpool", txpool); + self.executor.spawn_critical("p2p eth request handler", eth); + + let default_peers_path = self.data_dir().known_peers_path(); + let known_peers_file = self.config.network.persistent_peers_file(default_peers_path); + self.executor.spawn_critical_with_graceful_shutdown_signal( + "p2p network task", + |shutdown| { + network.run_until_graceful_shutdown(shutdown, |network| { + write_peers_to_file(network, known_peers_file) + }) + }, + ); + + handle + } +} + +impl std::fmt::Debug for BuilderContext { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("BuilderContext") + .field("head", &self.head) + .field("provider", &std::any::type_name::()) + .field("executor", &self.executor) + .field("data_dir", &self.data_dir) + .field("config", &self.config) + .finish() + } +} diff --git a/crates/node-builder/src/builder/states.rs b/crates/node-builder/src/builder/states.rs new file mode 100644 index 0000000000000..975590c5fe900 --- /dev/null +++ b/crates/node-builder/src/builder/states.rs @@ -0,0 +1,237 @@ +//! Node builder states and helper traits. +//! +//! Keeps track of the current state of the node builder. +//! +//! The node builder process is essentially a state machine that transitions through various states +//! before the node can be launched. 
+ +use crate::{ + components::{NodeComponents, NodeComponentsBuilder}, + exex::BoxedLaunchExEx, + hooks::NodeHooks, + launch::LaunchNode, + rpc::{RethRpcServerHandles, RpcContext, RpcHooks}, + FullNode, +}; +use reth_exex::ExExContext; +use reth_network::NetworkHandle; +use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeTypes}; +use reth_node_core::node_config::NodeConfig; +use reth_payload_builder::PayloadBuilderHandle; +use reth_tasks::TaskExecutor; +use std::{fmt, future::Future}; + +/// A node builder that also has the configured types. +pub struct NodeBuilderWithTypes { + /// All settings for how the node should be configured. + config: NodeConfig, + /// The configured database for the node. + adapter: NodeTypesAdapter, +} + +impl NodeBuilderWithTypes { + /// Creates a new instance of the node builder with the given configuration and types. + pub fn new(config: NodeConfig, types: T, database: T::DB) -> Self { + Self { config, adapter: NodeTypesAdapter::new(types, database) } + } + + /// Advances the state of the node builder to the next state where all components are configured + pub fn with_components(self, components_builder: CB) -> NodeBuilderWithComponents + where + CB: NodeComponentsBuilder, + { + let Self { config, adapter } = self; + + NodeBuilderWithComponents { + config, + adapter, + components_builder, + add_ons: NodeAddOns { + hooks: NodeHooks::default(), + rpc: RpcHooks::new(), + exexs: Vec::new(), + }, + } + } +} + +/// Container for the node's types and the database the node uses. +pub(crate) struct NodeTypesAdapter { + /// The database type used by the node. + pub(crate) database: T::DB, + // TODO(mattsse): make this stateless + pub(crate) types: T, +} + +impl NodeTypesAdapter { + /// Create a new adapter from the given node types. 
+ pub(crate) fn new(types: T, database: T::DB) -> Self { + Self { types, database } + } +} + +impl fmt::Debug for NodeTypesAdapter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("NodeTypesAdapter").field("db", &"...").field("types", &"...").finish() + } +} + +/// Container for the node's types and the components and other internals that can be used by addons +/// of the node. +pub struct NodeAdapter> { + /// The components of the node. + pub components: C, + /// The task executor for the node. + pub task_executor: TaskExecutor, + /// The provider of the node. + pub provider: T::Provider, + /// EVM config + pub evm: T::Evm, +} + +impl> NodeTypes for NodeAdapter { + type Primitives = T::Primitives; + type Engine = T::Engine; + type Evm = T::Evm; + + fn evm_config(&self) -> Self::Evm { + self.evm.clone() + } +} + +impl> FullNodeTypes for NodeAdapter { + type DB = T::DB; + type Provider = T::Provider; +} + +impl> FullNodeComponents for NodeAdapter { + type Pool = C::Pool; + + fn pool(&self) -> &Self::Pool { + self.components.pool() + } + + fn provider(&self) -> &Self::Provider { + &self.provider + } + + fn network(&self) -> &NetworkHandle { + self.components.network() + } + + fn payload_builder(&self) -> &PayloadBuilderHandle { + self.components.payload_builder() + } + + fn task_executor(&self) -> &TaskExecutor { + &self.task_executor + } +} + +impl> Clone for NodeAdapter { + fn clone(&self) -> Self { + Self { + components: self.components.clone(), + task_executor: self.task_executor.clone(), + provider: self.provider.clone(), + evm: self.evm.clone(), + } + } +} + +/// A fully type configured node builder. +/// +/// Supports adding additional addons to the node. +pub struct NodeBuilderWithComponents> { + /// All settings for how the node should be configured. 
+ pub(crate) config: NodeConfig, + /// Adapter for the underlying node types and database + pub(crate) adapter: NodeTypesAdapter, + /// container for type specific components + pub(crate) components_builder: CB, + /// Additional node extensions. + pub(crate) add_ons: NodeAddOns>, +} + +impl> NodeBuilderWithComponents { + /// Sets the hook that is run once the node's components are initialized. + pub fn on_component_initialized(mut self, hook: F) -> Self + where + F: Fn(NodeAdapter) -> eyre::Result<()> + Send + 'static, + { + self.add_ons.hooks.set_on_component_initialized(hook); + self + } + + /// Sets the hook that is run once the node has started. + pub fn on_node_started(mut self, hook: F) -> Self + where + F: Fn(FullNode>) -> eyre::Result<()> + Send + 'static, + { + self.add_ons.hooks.set_on_node_started(hook); + self + } + + /// Sets the hook that is run once the rpc server is started. + pub fn on_rpc_started(mut self, hook: F) -> Self + where + F: Fn( + RpcContext<'_, NodeAdapter>, + RethRpcServerHandles, + ) -> eyre::Result<()> + + Send + + 'static, + { + self.add_ons.rpc.set_on_rpc_started(hook); + self + } + + /// Sets the hook that is run to configure the rpc modules. + pub fn extend_rpc_modules(mut self, hook: F) -> Self + where + F: Fn(RpcContext<'_, NodeAdapter>) -> eyre::Result<()> + Send + 'static, + { + self.add_ons.rpc.set_extend_rpc_modules(hook); + self + } + + /// Installs an ExEx (Execution Extension) in the node. + /// + /// # Note + /// + /// The ExEx ID must be unique. + pub fn install_exex(mut self, exex_id: impl Into, exex: F) -> Self + where + F: Fn(ExExContext>) -> R + Send + 'static, + R: Future> + Send, + E: Future> + Send, + { + self.add_ons.exexs.push((exex_id.into(), Box::new(exex))); + self + } + + /// Launches the node with the given launcher. 
+ pub async fn launch_with(self, launcher: L) -> eyre::Result + where + L: LaunchNode, + { + launcher.launch_node(self).await + } + + /// Check that the builder can be launched + /// + /// This is useful when writing tests to ensure that the builder is configured correctly. + pub fn check_launch(self) -> Self { + self + } +} + +/// Additional node extensions. +pub(crate) struct NodeAddOns { + /// Additional NodeHooks that are called at specific points in the node's launch lifecycle. + pub(crate) hooks: NodeHooks, + /// Additional RPC hooks. + pub(crate) rpc: RpcHooks, + /// The ExExs (execution extensions) of the node. + pub(crate) exexs: Vec<(String, Box>)>, +} diff --git a/crates/node-builder/src/components/builder.rs b/crates/node-builder/src/components/builder.rs index 14bdf7a4a589c..1c963f0241449 100644 --- a/crates/node-builder/src/components/builder.rs +++ b/crates/node-builder/src/components/builder.rs @@ -1,11 +1,11 @@ //! A generic [NodeComponentsBuilder] use crate::{ - components::{NetworkBuilder, NodeComponents, PayloadServiceBuilder, PoolBuilder}, + components::{Components, NetworkBuilder, NodeComponents, PayloadServiceBuilder, PoolBuilder}, BuilderContext, FullNodeTypes, }; use reth_transaction_pool::TransactionPool; -use std::marker::PhantomData; +use std::{future::Future, marker::PhantomData}; /// A generic, customizable [`NodeComponentsBuilder`]. 
/// @@ -135,19 +135,19 @@ where NetworkB: NetworkBuilder, PayloadB: PayloadServiceBuilder, { - type Pool = PoolB::Pool; + type Components = Components; async fn build_components( self, context: &BuilderContext, - ) -> eyre::Result> { + ) -> eyre::Result { let Self { pool_builder, payload_builder, network_builder, _marker } = self; let pool = pool_builder.build_pool(context).await?; let network = network_builder.build_network(context, pool.clone()).await?; let payload_builder = payload_builder.spawn_payload_service(context, pool.clone()).await?; - Ok(NodeComponents { transaction_pool: pool, network, payload_builder }) + Ok(Components { transaction_pool: pool, network, payload_builder }) } } @@ -170,31 +170,31 @@ impl Default for ComponentsBuilder<(), (), (), ()> { /// The [ComponentsBuilder] is a generic implementation of this trait that can be used to customize /// certain components of the node using the builder pattern and defaults, e.g. Ethereum and /// Optimism. -pub trait NodeComponentsBuilder { - /// The transaction pool to use. - type Pool: TransactionPool + Unpin + 'static; +/// A type that's responsible for building the components of the node. +pub trait NodeComponentsBuilder: Send { + /// The components for the node with the given types + type Components: NodeComponents; - /// Builds the components of the node. + /// Consumes the type and returns the created components. 
fn build_components( self, - context: &BuilderContext, - ) -> impl std::future::Future>> + Send; + ctx: &BuilderContext, + ) -> impl Future> + Send; } impl NodeComponentsBuilder for F where Node: FullNodeTypes, F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: std::future::Future>> + Send, + Fut: Future>> + Send, Pool: TransactionPool + Unpin + 'static, { - type Pool = Pool; + type Components = Components; fn build_components( self, ctx: &BuilderContext, - ) -> impl std::future::Future>> + Send - { + ) -> impl Future> + Send { self(ctx) } } diff --git a/crates/node-builder/src/components/mod.rs b/crates/node-builder/src/components/mod.rs index 4aa73f0fffcde..ea087ece23b28 100644 --- a/crates/node-builder/src/components/mod.rs +++ b/crates/node-builder/src/components/mod.rs @@ -14,17 +14,36 @@ pub use payload::*; pub use pool::*; use reth_network::NetworkHandle; use reth_payload_builder::PayloadBuilderHandle; +use reth_transaction_pool::TransactionPool; mod builder; mod network; mod payload; mod pool; +/// An abstraction over the components of a node, consisting of: +/// - transaction pool +/// - network +/// - payload builder. +pub trait NodeComponents: Clone + Send + Sync + 'static { + /// The transaction pool of the node. + type Pool: TransactionPool + Unpin; + + /// Returns the transaction pool of the node. + fn pool(&self) -> &Self::Pool; + + /// Returns the handle to the network + fn network(&self) -> &NetworkHandle; + + /// Returns the handle to the payload builder service. + fn payload_builder(&self) -> &PayloadBuilderHandle; +} + /// All the components of the node. /// /// This provides access to all the components of the node. #[derive(Debug)] -pub struct NodeComponents { +pub struct Components { /// The transaction pool of the node. pub transaction_pool: Pool, /// The network implementation of the node. 
@@ -33,9 +52,36 @@ pub struct NodeComponents { pub payload_builder: PayloadBuilderHandle, } -impl NodeComponents { - /// Returns the handle to the payload builder service. - pub fn payload_builder(&self) -> PayloadBuilderHandle { - self.payload_builder.clone() +impl NodeComponents for Components +where + Node: FullNodeTypes, + Pool: TransactionPool + Unpin + 'static, +{ + type Pool = Pool; + + fn pool(&self) -> &Self::Pool { + &self.transaction_pool + } + + fn network(&self) -> &NetworkHandle { + &self.network + } + + fn payload_builder(&self) -> &PayloadBuilderHandle { + &self.payload_builder + } +} + +impl Clone for Components +where + Node: FullNodeTypes, + Pool: TransactionPool, +{ + fn clone(&self) -> Self { + Self { + transaction_pool: self.transaction_pool.clone(), + network: self.network.clone(), + payload_builder: self.payload_builder.clone(), + } } } diff --git a/crates/node-builder/src/launch.rs b/crates/node-builder/src/launch.rs new file mode 100644 index 0000000000000..645598adafd37 --- /dev/null +++ b/crates/node-builder/src/launch.rs @@ -0,0 +1,558 @@ +//! Abstraction for launching a node. 
+ +use crate::{ + builder::{NodeAdapter, NodeAddOns, NodeTypesAdapter}, + components::{NodeComponents, NodeComponentsBuilder}, + hooks::NodeHooks, + node::FullNode, + BuilderContext, NodeBuilderWithComponents, NodeHandle, RethFullAdapter, +}; +use eyre::Context; +use futures::{future, future::Either, stream, stream_select, StreamExt}; +use rayon::ThreadPoolBuilder; +use reth_auto_seal_consensus::{AutoSealConsensus, MiningMode}; +use reth_beacon_consensus::{ + hooks::{EngineHooks, PruneHook, StaticFileHook}, + BeaconConsensus, BeaconConsensusEngine, +}; +use reth_blockchain_tree::{ + BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, +}; +use reth_config::config::EtlConfig; +use reth_consensus::Consensus; +use reth_db::{ + database::Database, + database_metrics::{DatabaseMetadata, DatabaseMetrics}, +}; +use reth_exex::{ExExContext, ExExHandle, ExExManager, ExExManagerHandle}; +use reth_interfaces::p2p::either::EitherDownloader; +use reth_network::NetworkEvents; +use reth_node_api::{FullNodeComponents, NodeTypes}; +use reth_node_core::{ + cli::config::RethRpcConfig, + dirs::{ChainPath, DataDirPath}, + engine_api_store::EngineApiStore, + engine_skip_fcu::EngineApiSkipFcu, + exit::NodeExitFuture, + init::init_genesis, + node_config::NodeConfig, +}; +use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; +use reth_primitives::format_ether; +use reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions, ProviderFactory}; +use reth_prune::PrunerBuilder; +use reth_revm::EvmProcessorFactory; +use reth_rpc_engine_api::EngineApi; +use reth_static_file::StaticFileProducer; +use reth_tasks::TaskExecutor; +use reth_tracing::tracing::{debug, error, info}; +use reth_transaction_pool::TransactionPool; +use std::{cmp::max, future::Future, sync::Arc, thread::available_parallelism}; +use tokio::sync::{mpsc::unbounded_channel, oneshot}; + +/// Launches a new node. +/// +/// Acts as a node factory. 
+/// +/// This is essentially the launch logic for a node. +pub trait LaunchNode { + /// The node type that is created. + type Node; + + /// Create and return a new node asynchronously. + fn launch_node(self, target: Target) -> impl Future> + Send; +} + +/// The default launcher for a node. +#[derive(Debug)] +pub struct DefaultNodeLauncher { + /// The task executor for the node. + pub task_executor: TaskExecutor, + /// The data directory for the node. + pub data_dir: ChainPath, +} + +impl DefaultNodeLauncher { + /// Create a new instance of the default node launcher. + pub fn new(task_executor: TaskExecutor, data_dir: ChainPath) -> Self { + Self { task_executor, data_dir } + } + + /// Loads the reth config with the given datadir root + fn load_toml_config(&self, config: &NodeConfig) -> eyre::Result { + let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config_path()); + + let mut toml_config = confy::load_path::(&config_path) + .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?; + + info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); + + // Update the config with the command line arguments + toml_config.peers.trusted_nodes_only = config.network.trusted_only; + + if !config.network.trusted_peers.is_empty() { + info!(target: "reth::cli", "Adding trusted nodes"); + config.network.trusted_peers.iter().for_each(|peer| { + toml_config.peers.trusted_nodes.insert(*peer); + }); + } + + Ok(toml_config) + } +} + +impl LaunchNode, CB>> + for DefaultNodeLauncher +where + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, + T: NodeTypes, + CB: NodeComponentsBuilder>, +{ + type Node = NodeHandle, CB::Components>>; + + async fn launch_node( + self, + target: NodeBuilderWithComponents, CB>, + ) -> eyre::Result { + let NodeBuilderWithComponents { + adapter: NodeTypesAdapter { types, database }, + components_builder, + add_ons: NodeAddOns { hooks, rpc, exexs: installed_exex }, + config, + } = 
target; + + // get config from file + let reth_config = self.load_toml_config(&config)?; + + let Self { task_executor, data_dir } = self; + + // Raise the fd limit of the process. + // Does not do anything on windows. + fdlimit::raise_fd_limit()?; + + // Limit the global rayon thread pool, reserving 2 cores for the rest of the system + let _ = ThreadPoolBuilder::new() + .num_threads( + available_parallelism().map_or(25, |cpus| max(cpus.get().saturating_sub(2), 2)), + ) + .build_global() + .map_err(|e| error!("Failed to build global thread pool: {:?}", e)); + + let provider_factory = ProviderFactory::new( + database.clone(), + Arc::clone(&config.chain), + data_dir.static_files_path(), + )? + .with_static_files_metrics(); + info!(target: "reth::cli", "Database opened"); + + let prometheus_handle = config.install_prometheus_recorder()?; + config + .start_metrics_endpoint( + prometheus_handle, + database.clone(), + provider_factory.static_file_provider(), + task_executor.clone(), + ) + .await?; + + debug!(target: "reth::cli", chain=%config.chain.chain, +genesis=?config.chain.genesis_hash(), "Initializing genesis"); + + let genesis_hash = init_genesis(provider_factory.clone())?; + + info!(target: "reth::cli", "\n{}", config.chain.display_hardforks()); + + // setup the consensus instance + let consensus: Arc = if config.dev.dev { + Arc::new(AutoSealConsensus::new(Arc::clone(&config.chain))) + } else { + Arc::new(BeaconConsensus::new(Arc::clone(&config.chain))) + }; + + debug!(target: "reth::cli", "Spawning stages metrics listener task"); + let (sync_metrics_tx, sync_metrics_rx) = unbounded_channel(); + let sync_metrics_listener = reth_stages::MetricsListener::new(sync_metrics_rx); + task_executor.spawn_critical("stages metrics listener task", sync_metrics_listener); + + let prune_config = config.prune_config()?.or_else(|| reth_config.prune.clone()); + + // Configure the blockchain tree for the node + let evm_config = types.evm_config(); + let tree_config = 
BlockchainTreeConfig::default(); + let tree_externals = TreeExternals::new( + provider_factory.clone(), + consensus.clone(), + EvmProcessorFactory::new(config.chain.clone(), evm_config.clone()), + ); + let tree = BlockchainTree::new( + tree_externals, + tree_config, + prune_config.as_ref().map(|config| config.segments.clone()), + )? + .with_sync_metrics_tx(sync_metrics_tx.clone()); + + let canon_state_notification_sender = tree.canon_state_notification_sender(); + let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); + debug!(target: "reth::cli", "configured blockchain tree"); + + // fetch the head block from the database + let head = + config.lookup_head(provider_factory.clone()).wrap_err("the head block is missing")?; + + // setup the blockchain provider + let blockchain_db = + BlockchainProvider::new(provider_factory.clone(), blockchain_tree.clone())?; + + let ctx = BuilderContext::new( + head, + blockchain_db, + task_executor, + data_dir, + config, + reth_config, + evm_config.clone(), + ); + + debug!(target: "reth::cli", "creating components"); + let components = components_builder.build_components(&ctx).await?; + + let BuilderContext { + provider: blockchain_db, + executor, + data_dir, + mut config, + mut reth_config, + .. + } = ctx; + + let NodeHooks { on_component_initialized, on_node_started, .. 
} = hooks; + + let node_adapter = NodeAdapter { + components, + task_executor: executor.clone(), + provider: blockchain_db.clone(), + evm: evm_config.clone(), + }; + + debug!(target: "reth::cli", "calling on_component_initialized hook"); + on_component_initialized.on_event(node_adapter.clone())?; + + // spawn exexs + let mut exex_handles = Vec::with_capacity(installed_exex.len()); + let mut exexs = Vec::with_capacity(installed_exex.len()); + for (id, exex) in installed_exex { + // create a new exex handle + let (handle, events, notifications) = ExExHandle::new(id.clone()); + exex_handles.push(handle); + + // create the launch context for the exex + let context = ExExContext { + head, + provider: blockchain_db.clone(), + task_executor: executor.clone(), + data_dir: data_dir.clone(), + config: config.clone(), + reth_config: reth_config.clone(), + pool: node_adapter.components.pool().clone(), + events, + notifications, + }; + + let executor = executor.clone(); + exexs.push(async move { + debug!(target: "reth::cli", id, "spawning exex"); + let span = reth_tracing::tracing::info_span!("exex", id); + let _enter = span.enter(); + + // init the exex + let exex = exex.launch(context).await.unwrap(); + + // spawn it as a crit task + executor.spawn_critical("exex", async move { + info!(target: "reth::cli", "ExEx started"); + match exex.await { + Ok(_) => panic!("ExEx {id} finished. 
ExEx's should run indefinitely"), + Err(err) => panic!("ExEx {id} crashed: {err}"), + } + }); + }); + } + + future::join_all(exexs).await; + + // spawn exex manager + let exex_manager_handle = if !exex_handles.is_empty() { + debug!(target: "reth::cli", "spawning exex manager"); + // todo(onbjerg): rm magic number + let exex_manager = ExExManager::new(exex_handles, 1024); + let exex_manager_handle = exex_manager.handle(); + executor.spawn_critical("exex manager", async move { + exex_manager.await.expect("exex manager crashed"); + }); + + // send notifications from the blockchain tree to exex manager + let mut canon_state_notifications = blockchain_tree.subscribe_to_canonical_state(); + let mut handle = exex_manager_handle.clone(); + executor.spawn_critical("exex manager blockchain tree notifications", async move { + while let Ok(notification) = canon_state_notifications.recv().await { + handle.send_async(notification.into()).await.expect( + "blockchain tree notification could not be sent to exex +manager", + ); + } + }); + + info!(target: "reth::cli", "ExEx Manager started"); + + Some(exex_manager_handle) + } else { + None + }; + + // create pipeline + let network_client = node_adapter.network().fetch_client().await?; + let (consensus_engine_tx, mut consensus_engine_rx) = unbounded_channel(); + + if let Some(skip_fcu_threshold) = config.debug.skip_fcu { + debug!(target: "reth::cli", "spawning skip FCU task"); + let (skip_fcu_tx, skip_fcu_rx) = unbounded_channel(); + let engine_skip_fcu = EngineApiSkipFcu::new(skip_fcu_threshold); + executor.spawn_critical( + "skip FCU interceptor", + engine_skip_fcu.intercept(consensus_engine_rx, skip_fcu_tx), + ); + consensus_engine_rx = skip_fcu_rx; + } + + if let Some(store_path) = config.debug.engine_api_store.clone() { + debug!(target: "reth::cli", "spawning engine API store"); + let (engine_intercept_tx, engine_intercept_rx) = unbounded_channel(); + let engine_api_store = EngineApiStore::new(store_path); + 
executor.spawn_critical( + "engine api interceptor", + engine_api_store.intercept(consensus_engine_rx, engine_intercept_tx), + ); + consensus_engine_rx = engine_intercept_rx; + }; + + let max_block = config.max_block(network_client.clone(), provider_factory.clone()).await?; + let mut hooks = EngineHooks::new(); + + let static_file_producer = StaticFileProducer::new( + provider_factory.clone(), + provider_factory.static_file_provider(), + prune_config.clone().unwrap_or_default().segments, + ); + let static_file_producer_events = static_file_producer.lock().events(); + hooks.add(StaticFileHook::new(static_file_producer.clone(), Box::new(executor.clone()))); + info!(target: "reth::cli", "StaticFileProducer initialized"); + + // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to + if reth_config.stages.etl.dir.is_none() { + reth_config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); + } + + // Configure the pipeline + let pipeline_exex_handle = + exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); + let (mut pipeline, client) = if config.dev.dev { + info!(target: "reth::cli", "Starting Reth in dev mode"); + + for (idx, (address, alloc)) in config.chain.genesis.alloc.iter().enumerate() { + info!(target: "reth::cli", "Allocated Genesis Account: {:02}. 
{} ({} ETH)", idx, +address.to_string(), format_ether(alloc.balance)); + } + + // install auto-seal + let pending_transactions_listener = + node_adapter.components.pool().pending_transactions_listener(); + + let mining_mode = if let Some(interval) = config.dev.block_time { + MiningMode::interval(interval) + } else if let Some(max_transactions) = config.dev.block_max_transactions { + MiningMode::instant(max_transactions, pending_transactions_listener) + } else { + info!(target: "reth::cli", "No mining mode specified, defaulting to +ReadyTransaction"); + MiningMode::instant(1, pending_transactions_listener) + }; + + let (_, client, mut task) = reth_auto_seal_consensus::AutoSealBuilder::new( + Arc::clone(&config.chain), + blockchain_db.clone(), + node_adapter.components.pool().clone(), + consensus_engine_tx.clone(), + canon_state_notification_sender, + mining_mode, + evm_config.clone(), + ) + .build(); + + let mut pipeline = crate::setup::build_networked_pipeline( + &config, + &reth_config.stages, + client.clone(), + Arc::clone(&consensus), + provider_factory.clone(), + &executor, + sync_metrics_tx, + prune_config.clone(), + max_block, + static_file_producer, + evm_config, + pipeline_exex_handle, + ) + .await?; + + let pipeline_events = pipeline.events(); + task.set_pipeline_events(pipeline_events); + debug!(target: "reth::cli", "Spawning auto mine task"); + executor.spawn(Box::pin(task)); + + (pipeline, EitherDownloader::Left(client)) + } else { + let pipeline = crate::setup::build_networked_pipeline( + &config, + &reth_config.stages, + network_client.clone(), + Arc::clone(&consensus), + provider_factory.clone(), + &executor, + sync_metrics_tx, + prune_config.clone(), + max_block, + static_file_producer, + evm_config, + pipeline_exex_handle, + ) + .await?; + + (pipeline, EitherDownloader::Right(network_client.clone())) + }; + + let pipeline_events = pipeline.events(); + + let initial_target = config.initial_pipeline_target(genesis_hash); + + let prune_config = 
prune_config.unwrap_or_default(); + let mut pruner_builder = PrunerBuilder::new(prune_config.clone()) + .max_reorg_depth(tree_config.max_reorg_depth() as usize) + .prune_delete_limit(config.chain.prune_delete_limit) + .timeout(PrunerBuilder::DEFAULT_TIMEOUT); + if let Some(exex_manager_handle) = &exex_manager_handle { + pruner_builder = + pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); + } + + let mut pruner = pruner_builder.build(provider_factory.clone()); + + let pruner_events = pruner.events(); + hooks.add(PruneHook::new(pruner, Box::new(executor.clone()))); + info!(target: "reth::cli", ?prune_config, "Pruner initialized"); + + // Configure the consensus engine + let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel( + client, + pipeline, + blockchain_db.clone(), + Box::new(executor.clone()), + Box::new(node_adapter.components.network().clone()), + max_block, + config.debug.continuous, + node_adapter.components.payload_builder().clone(), + initial_target, + reth_beacon_consensus::MIN_BLOCKS_FOR_PIPELINE_RUN, + consensus_engine_tx, + consensus_engine_rx, + hooks, + )?; + info!(target: "reth::cli", "Consensus engine initialized"); + + let events = stream_select!( + node_adapter.components.network().event_listener().map(Into::into), + beacon_engine_handle.event_listener().map(Into::into), + pipeline_events.map(Into::into), + if config.debug.tip.is_none() && !config.dev.dev { + Either::Left( + ConsensusLayerHealthEvents::new(Box::new(blockchain_db.clone())) + .map(Into::into), + ) + } else { + Either::Right(stream::empty()) + }, + pruner_events.map(Into::into), + static_file_producer_events.map(Into::into) + ); + executor.spawn_critical( + "events task", + node::handle_events( + Some(node_adapter.components.network().clone()), + Some(head.number), + events, + database.clone(), + ), + ); + + let engine_api = EngineApi::new( + blockchain_db.clone(), + config.chain.clone(), + beacon_engine_handle, + 
node_adapter.components.payload_builder().clone().into(), + Box::new(executor.clone()), + ); + info!(target: "reth::cli", "Engine API handler initialized"); + + // extract the jwt secret from the args if possible + let default_jwt_path = data_dir.jwt_path(); + let jwt_secret = config.rpc.auth_jwt_secret(default_jwt_path)?; + + // adjust rpc port numbers based on instance number + config.adjust_instance_ports(); + + // Start RPC servers + let (rpc_server_handles, mut rpc_registry) = crate::rpc::launch_rpc_servers( + node_adapter.clone(), + engine_api, + &config, + jwt_secret, + rpc, + ) + .await?; + + // in dev mode we generate 20 random dev-signer accounts + if config.dev.dev { + rpc_registry.eth_api().with_dev_accounts(); + } + + // Run consensus engine to completion + let (tx, rx) = oneshot::channel(); + info!(target: "reth::cli", "Starting consensus engine"); + executor.spawn_critical_blocking("consensus engine", async move { + let res = beacon_consensus_engine.await; + let _ = tx.send(res); + }); + + let full_node = FullNode { + evm_config: node_adapter.evm.clone(), + pool: node_adapter.components.pool().clone(), + network: node_adapter.components.network().clone(), + provider: node_adapter.provider.clone(), + payload_builder: node_adapter.components.payload_builder().clone(), + task_executor: executor, + rpc_server_handles, + rpc_registry, + config, + data_dir, + }; + // Notify on node started + on_node_started.on_event(full_node.clone())?; + + let handle = NodeHandle { + node_exit_future: NodeExitFuture::new(rx, full_node.config.debug.terminate), + node: full_node, + }; + + Ok(handle) + } +} diff --git a/crates/node-builder/src/lib.rs b/crates/node-builder/src/lib.rs index f5d7012d112e0..11b56ba242f08 100644 --- a/crates/node-builder/src/lib.rs +++ b/crates/node-builder/src/lib.rs @@ -21,6 +21,9 @@ pub mod components; mod builder; pub use builder::*; +mod launch; +pub use launch::*; + mod handle; pub use handle::NodeHandle; diff --git 
a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 2b186b19c3efc..38f6ee4d92dd5 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -270,7 +270,7 @@ impl NodeConfig { /// necessary pub async fn max_block( &self, - network_client: &Client, + network_client: Client, provider: Provider, ) -> eyre::Result> where diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 1304d77d183fe..3fd158b6a442c 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -97,87 +97,3 @@ pub trait FullNodeComponents: FullNodeTypes + 'static { /// Returns the task executor. fn task_executor(&self) -> &TaskExecutor; } - -/// A type that encapsulates all the components of the node. -#[derive(Debug)] -pub struct FullNodeComponentsAdapter { - /// The EVM configuration of the node. - pub evm_config: Node::Evm, - /// The transaction pool of the node. - pub pool: Pool, - /// The network handle of the node. - pub network: NetworkHandle, - /// The provider of the node. - pub provider: Node::Provider, - /// The payload builder service handle of the node. - pub payload_builder: PayloadBuilderHandle, - /// The task executor of the node. 
- pub executor: TaskExecutor, -} - -impl FullNodeTypes for FullNodeComponentsAdapter -where - Node: FullNodeTypes, - Pool: TransactionPool + 'static, -{ - type DB = Node::DB; - type Provider = Node::Provider; -} - -impl NodeTypes for FullNodeComponentsAdapter -where - Node: FullNodeTypes, - Pool: TransactionPool + 'static, -{ - type Primitives = Node::Primitives; - type Engine = Node::Engine; - type Evm = Node::Evm; - - fn evm_config(&self) -> Self::Evm { - self.evm_config.clone() - } -} - -impl FullNodeComponents for FullNodeComponentsAdapter -where - Node: FullNodeTypes, - Pool: TransactionPool + 'static, -{ - type Pool = Pool; - - fn pool(&self) -> &Self::Pool { - &self.pool - } - - fn provider(&self) -> &Self::Provider { - &self.provider - } - - fn network(&self) -> &NetworkHandle { - &self.network - } - - fn payload_builder(&self) -> &PayloadBuilderHandle { - &self.payload_builder - } - - fn task_executor(&self) -> &TaskExecutor { - &self.executor - } -} - -impl Clone for FullNodeComponentsAdapter -where - Pool: Clone, -{ - fn clone(&self) -> Self { - Self { - evm_config: self.evm_config.clone(), - pool: self.pool.clone(), - network: self.network.clone(), - provider: self.provider.clone(), - payload_builder: self.payload_builder.clone(), - executor: self.executor.clone(), - } - } -} From 76a3d8278ac96fc55f918fdb8d8fc9561b75c211 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 24 Apr 2024 16:23:45 -0400 Subject: [PATCH 311/700] feat: add thorough error message to state root error (#7607) --- crates/consensus/consensus/src/lib.rs | 7 +++++ .../interfaces/src/blockchain_tree/error.rs | 30 +++++++++++++++++++ crates/interfaces/src/executor.rs | 5 ++++ crates/stages-api/src/error.rs | 10 +++++++ crates/stages/src/stages/merkle.rs | 30 +++++++++++++++++-- 5 files changed, 79 insertions(+), 3 deletions(-) diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index 
b434272a414f0..2dee6b1245e20 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -259,6 +259,13 @@ pub enum ConsensusError { HeaderValidationError(#[from] HeaderValidationError), } +impl ConsensusError { + /// Returns `true` if the error is a state root error. + pub fn is_state_root_error(&self) -> bool { + matches!(self, ConsensusError::BodyStateRootDiff(_)) + } +} + /// `HeaderConsensusError` combines a `ConsensusError` with the `SealedHeader` it relates to. #[derive(thiserror::Error, Debug)] #[error("Consensus error: {0}, Invalid header: {1:?}")] diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index 34e01883513d6..44f1f50bcda7c 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -243,6 +243,36 @@ impl InsertBlockErrorKind { matches!(self, InsertBlockErrorKind::Consensus(_)) } + /// Returns true if this error is a state root error + pub fn is_state_root_error(&self) -> bool { + // we need to get the state root errors inside of the different variant branches + match self { + InsertBlockErrorKind::Execution(err) => { + matches!( + err, + BlockExecutionError::Validation(BlockValidationError::StateRoot { .. }) + ) + } + InsertBlockErrorKind::Canonical(err) => { + matches!( + err, + CanonicalError::Validation(BlockValidationError::StateRoot { .. }) | + CanonicalError::Provider( + ProviderError::StateRootMismatch(_) | + ProviderError::UnwindStateRootMismatch(_) + ) + ) + } + InsertBlockErrorKind::Provider(err) => { + matches!( + err, + ProviderError::StateRootMismatch(_) | ProviderError::UnwindStateRootMismatch(_) + ) + } + _ => false, + } + } + /// Returns true if the error is caused by an invalid block /// /// This is intended to be used to determine if the block should be marked as invalid. 
diff --git a/crates/interfaces/src/executor.rs b/crates/interfaces/src/executor.rs index 25e2f5710e47b..e8f7f40b152ed 100644 --- a/crates/interfaces/src/executor.rs +++ b/crates/interfaces/src/executor.rs @@ -153,4 +153,9 @@ impl BlockExecutionError { pub fn is_fatal(&self) -> bool { matches!(self, Self::CanonicalCommit { .. } | Self::CanonicalRevert { .. }) } + + /// Returns `true` if the error is a state root error. + pub fn is_state_root_error(&self) -> bool { + matches!(self, Self::Validation(BlockValidationError::StateRoot(_))) + } } diff --git a/crates/stages-api/src/error.rs b/crates/stages-api/src/error.rs index 3d7ae1d72d022..37fe2b3fdbc2d 100644 --- a/crates/stages-api/src/error.rs +++ b/crates/stages-api/src/error.rs @@ -20,6 +20,16 @@ pub enum BlockErrorKind { Execution(#[from] executor::BlockExecutionError), } +impl BlockErrorKind { + /// Returns `true` if the error is a state root error. + pub fn is_state_root_error(&self) -> bool { + match self { + BlockErrorKind::Validation(err) => err.is_state_root_error(), + BlockErrorKind::Execution(err) => err.is_state_root_error(), + } + } +} + /// A stage execution error. #[derive(Error, Debug)] pub enum StageError { diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index bfdb9782b2deb..562cff1830b75 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -21,6 +21,24 @@ use reth_trie::{IntermediateStateRootState, StateRoot, StateRootProgress}; use std::fmt::Debug; use tracing::*; +// TODO: automate the process outlined below so the user can just send in a debugging package +/// The error message that we include in invalid state root errors to tell users what information +/// they should include in a bug report, since true state root errors can be impossible to debug +/// with just basic logs. +pub const INVALID_STATE_ROOT_ERROR_MESSAGE: &str = r#" +Invalid state root error on new payload! 
+This is an error that likely requires a report to the reth team with additional information. +Please include the following information in your report: + * This error message + * The state root of the block that was rejected + * The output of `reth db stats --checksum` from the database that was being used. This will take a long time to run! + * 50-100 lines of logs before and after the first occurrence of this log message. Please search your log output for the first observed occurrence of MAGIC_STATE_ROOT. + * The debug logs from __the same time period__. To find the default location for these logs, run: + `reth --help | grep -A 4 'log.file.directory'` + +Once you have this information, please submit a github issue at https://github.com/paradigmxyz/reth/issues/new +"#; + /// The default threshold (in number of blocks) for switching from incremental trie building /// of changes to whole rebuild. pub const MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD: u64 = 5_000; @@ -196,7 +214,10 @@ impl Stage for MerkleStage { let progress = StateRoot::from_tx(tx) .with_intermediate_state(checkpoint.map(IntermediateStateRootState::from)) .root_with_progress() - .map_err(|e| StageError::Fatal(Box::new(e)))?; + .map_err(|e| { + error!(target: "sync::stages::merkle", %e, ?current_block_number, ?to_block, "State root with progress failed! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); + StageError::Fatal(Box::new(e)) + })?; match progress { StateRootProgress::Progress(state, hashed_entries_walked, updates) => { updates.flush(tx)?; @@ -230,7 +251,10 @@ impl Stage for MerkleStage { debug!(target: "sync::stages::merkle::exec", current = ?current_block_number, target = ?to_block, "Updating trie"); let (root, updates) = StateRoot::incremental_root_with_updates(provider.tx_ref(), range) - .map_err(|e| StageError::Fatal(Box::new(e)))?; + .map_err(|e| { + error!(target: "sync::stages::merkle", %e, ?current_block_number, ?to_block, "Incremental state root failed! 
{INVALID_STATE_ROOT_ERROR_MESSAGE}"); + StageError::Fatal(Box::new(e)) + })?; updates.flush(provider.tx_ref())?; let total_hashed_entries = (provider.count_entries::()? + @@ -325,7 +349,7 @@ fn validate_state_root( if got == expected.state_root { Ok(()) } else { - warn!(target: "sync::stages::merkle", ?target_block, ?got, ?expected, "Failed to verify block state root"); + error!(target: "sync::stages::merkle", ?target_block, ?got, ?expected, "Failed to verify block state root! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); Err(StageError::Block { error: BlockErrorKind::Validation(ConsensusError::BodyStateRootDiff( GotExpected { got, expected: expected.state_root }.into(), From a22cf2189f51db26f6566be21cebc0336312b1f1 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 24 Apr 2024 16:24:10 -0400 Subject: [PATCH 312/700] fix: still delete headers from db in headers unwind (#7579) --- crates/stages/src/stages/headers.rs | 138 ++++++++++++++++++++++++---- 1 file changed, 122 insertions(+), 16 deletions(-) diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index 83dd710d0fd9c..fd14841284ec2 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -321,28 +321,48 @@ where ) -> Result { self.sync_gap.take(); + // First unwind the db tables, until the unwind_to block number. 
use the walker to unwind + // HeaderNumbers based on the index in CanonicalHeaders + provider.unwind_table_by_walker::( + input.unwind_to, + )?; + provider.unwind_table_by_num::(input.unwind_to)?; + provider.unwind_table_by_num::(input.unwind_to)?; + let unfinalized_headers_unwound = + provider.unwind_table_by_num::(input.unwind_to)?; + + // determine how many headers to unwind from the static files based on the highest block and + // the unwind_to block let static_file_provider = provider.static_file_provider(); let highest_block = static_file_provider .get_highest_static_file_block(StaticFileSegment::Headers) .unwrap_or_default(); - let unwound_headers = highest_block - input.unwind_to; - - for block in (input.unwind_to + 1)..=highest_block { - let header_hash = static_file_provider - .block_hash(block)? - .ok_or(ProviderError::HeaderNotFound(block.into()))?; - - provider.tx_ref().delete::(header_hash, None)?; + let static_file_headers_to_unwind = highest_block - input.unwind_to; + for block_number in (input.unwind_to + 1)..=highest_block { + let hash = static_file_provider.block_hash(block_number)?; + // we have to delete from HeaderNumbers here as well as in the above unwind, since that + // mapping contains entries for both headers in the db and headers in static files + // + // so if we are unwinding past the lowest block in the db, we have to iterate through + // the HeaderNumbers entries that we'll delete in static files below + if let Some(header_hash) = hash { + provider.tx_ref().delete::(header_hash, None)?; + } } + // Now unwind the static files until the unwind_to block number let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; - writer.prune_headers(unwound_headers)?; + writer.prune_headers(static_file_headers_to_unwind)?; + // Set the stage checkpoin entities processed based on how much we unwound - we add the + // headers unwound from static files and db let stage_checkpoint = 
input.checkpoint.headers_stage_checkpoint().map(|stage_checkpoint| HeadersCheckpoint { block_range: stage_checkpoint.block_range, progress: EntitiesCheckpoint { - processed: stage_checkpoint.progress.processed.saturating_sub(unwound_headers), + processed: stage_checkpoint.progress.processed.saturating_sub( + static_file_headers_to_unwind + unfinalized_headers_unwound as u64, + ), total: stage_checkpoint.progress.total, }, }); @@ -363,9 +383,12 @@ mod tests { stage_test_suite, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, }; use assert_matches::assert_matches; - use reth_interfaces::test_utils::generators::random_header; - use reth_primitives::{stage::StageUnitCheckpoint, B256}; - use reth_provider::ProviderFactory; + use reth_interfaces::test_utils::generators::{self, random_header, random_header_range}; + use reth_primitives::{ + stage::StageUnitCheckpoint, BlockBody, SealedBlock, SealedBlockWithSenders, B256, + }; + use reth_provider::{BlockHashReader, BlockWriter, BundleStateWithReceipts, ProviderFactory}; + use reth_trie::{updates::TrieUpdates, HashedPostState}; use test_runner::HeadersTestRunner; mod test_runner { @@ -376,9 +399,7 @@ mod tests { use reth_downloaders::headers::reverse_headers::{ ReverseHeadersDownloader, ReverseHeadersDownloaderBuilder, }; - use reth_interfaces::test_utils::{ - generators, generators::random_header_range, TestHeaderDownloader, TestHeadersClient, - }; + use reth_interfaces::test_utils::{TestHeaderDownloader, TestHeadersClient}; use reth_provider::BlockNumReader; use tokio::sync::watch; @@ -551,6 +572,91 @@ mod tests { stage_test_suite!(HeadersTestRunner, headers); + /// Execute the stage with linear downloader, unwinds, and ensures that the database tables + /// along with the static files are cleaned up. 
+ #[tokio::test] + async fn execute_with_linear_downloader_unwind() { + let mut runner = HeadersTestRunner::with_linear_downloader(); + let (checkpoint, previous_stage) = (1000, 1200); + let input = ExecInput { + target: Some(previous_stage), + checkpoint: Some(StageCheckpoint::new(checkpoint)), + }; + let headers = runner.seed_execution(input).expect("failed to seed execution"); + let rx = runner.execute(input); + + runner.client.extend(headers.iter().rev().map(|h| h.clone().unseal())).await; + + // skip `after_execution` hook for linear downloader + let tip = headers.last().unwrap(); + runner.send_tip(tip.hash()); + + let result = rx.await.unwrap(); + runner.db().factory.static_file_provider().commit().unwrap(); + assert_matches!(result, Ok(ExecOutput { checkpoint: StageCheckpoint { + block_number, + stage_checkpoint: Some(StageUnitCheckpoint::Headers(HeadersCheckpoint { + block_range: CheckpointBlockRange { + from, + to + }, + progress: EntitiesCheckpoint { + processed, + total, + } + })) + }, done: true }) if block_number == tip.number && + from == checkpoint && to == previous_stage && + // -1 because we don't need to download the local head + processed == checkpoint + headers.len() as u64 - 1 && total == tip.number + ); + assert!(runner.validate_execution(input, result.ok()).is_ok(), "validation failed"); + assert!(runner.stage().hash_collector.is_empty()); + assert!(runner.stage().header_collector.is_empty()); + + // let's insert some blocks using append_blocks_with_state + let sealed_headers = + random_header_range(&mut generators::rng(), tip.number..tip.number + 10, tip.hash()); + + // make them sealed blocks with senders by converting them to empty blocks + let sealed_blocks = sealed_headers + .iter() + .map(|header| { + SealedBlockWithSenders::new( + SealedBlock::new(header.clone(), BlockBody::default()), + vec![], + ) + .unwrap() + }) + .collect(); + + // append the blocks + let provider = runner.db().factory.provider_rw().unwrap(); + provider + 
.append_blocks_with_state( + sealed_blocks, + BundleStateWithReceipts::default(), + HashedPostState::default(), + TrieUpdates::default(), + None, + ) + .unwrap(); + provider.commit().unwrap(); + + // now we can unwind 10 blocks + let unwind_input = UnwindInput { + checkpoint: StageCheckpoint::new(tip.number + 10), + unwind_to: tip.number, + bad_block: None, + }; + + let unwind_output = runner.unwind(unwind_input).await.unwrap(); + assert_eq!(unwind_output.checkpoint.block_number, tip.number); + + // validate the unwind, ensure that the tables are cleaned up + assert!(runner.validate_unwind(unwind_input).is_ok()); + } + /// Execute the stage with linear downloader #[tokio::test] async fn execute_with_linear_downloader() { From 766e77a8113eb8b852c3fc9a028bdce881133c82 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 24 Apr 2024 17:00:25 -0400 Subject: [PATCH 313/700] chore: log failed tip fetch only after 20 tries (#7850) --- crates/node-core/src/node_config.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 38f6ee4d92dd5..608f12cad66f3 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -425,6 +425,7 @@ impl NodeConfig { Client: HeadersClient, { info!(target: "reth::cli", ?tip, "Fetching tip block from the network."); + let mut fetch_failures = 0; loop { match get_single_header(&client, tip).await { Ok(tip_header) => { @@ -432,7 +433,10 @@ impl NodeConfig { return Ok(tip_header); } Err(error) => { - error!(target: "reth::cli", %error, "Failed to fetch the tip. Retrying..."); + fetch_failures += 1; + if fetch_failures % 20 == 0 { + error!(target: "reth::cli", ?fetch_failures, %error, "Failed to fetch the tip. 
Retrying..."); + } } } } From 0e8e57318041afe129d24b150b414abf0ea3e625 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Wed, 24 Apr 2024 23:34:41 +0200 Subject: [PATCH 314/700] chore: rename exex example crate names (#7851) --- Cargo.lock | 72 +++++++++++++++--------------- examples/exex/minimal/Cargo.toml | 2 +- examples/exex/op-bridge/Cargo.toml | 2 +- 3 files changed, 38 insertions(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d96e0fe3bf1a5..291638c49f82d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2758,6 +2758,42 @@ dependencies = [ "tokio", ] +[[package]] +name = "exex-minimal" +version = "0.0.0" +dependencies = [ + "eyre", + "futures", + "reth", + "reth-exex", + "reth-node-api", + "reth-node-core", + "reth-node-ethereum", + "reth-primitives", + "reth-tracing", + "tokio", +] + +[[package]] +name = "exex-op-bridge" +version = "0.0.0" +dependencies = [ + "alloy-sol-types", + "eyre", + "futures", + "itertools 0.12.1", + "reth", + "reth-exex", + "reth-node-api", + "reth-node-core", + "reth-node-ethereum", + "reth-primitives", + "reth-provider", + "reth-tracing", + "rusqlite", + "tokio", +] + [[package]] name = "eyre" version = "0.6.12" @@ -4713,22 +4749,6 @@ dependencies = [ "unicase", ] -[[package]] -name = "minimal" -version = "0.0.0" -dependencies = [ - "eyre", - "futures", - "reth", - "reth-exex", - "reth-node-api", - "reth-node-core", - "reth-node-ethereum", - "reth-primitives", - "reth-tracing", - "tokio", -] - [[package]] name = "minimal-lexical" version = "0.2.1" @@ -5072,26 +5092,6 @@ version = "11.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" -[[package]] -name = "op-bridge" -version = "0.0.0" -dependencies = [ - "alloy-sol-types", - "eyre", - "futures", - "itertools 0.12.1", - "reth", - "reth-exex", - "reth-node-api", - "reth-node-core", - "reth-node-ethereum", - "reth-primitives", - "reth-provider", - "reth-tracing", - 
"rusqlite", - "tokio", -] - [[package]] name = "opaque-debug" version = "0.3.1" diff --git a/examples/exex/minimal/Cargo.toml b/examples/exex/minimal/Cargo.toml index fc6eba841a314..a7bcc327ac163 100644 --- a/examples/exex/minimal/Cargo.toml +++ b/examples/exex/minimal/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "minimal" +name = "exex-minimal" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/exex/op-bridge/Cargo.toml b/examples/exex/op-bridge/Cargo.toml index 3d87b2801765b..d8669e9147377 100644 --- a/examples/exex/op-bridge/Cargo.toml +++ b/examples/exex/op-bridge/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "op-bridge" +name = "exex-op-bridge" version = "0.0.0" publish = false edition.workspace = true From 4cef3809e499dfc23a242a3b9b617b36c01ec221 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Wed, 24 Apr 2024 23:58:26 +0200 Subject: [PATCH 315/700] docs: update examples readme (#7852) --- examples/README.md | 72 +++++++++++++++++++++++++++++++++++++++------- 1 file changed, 61 insertions(+), 11 deletions(-) diff --git a/examples/README.md b/examples/README.md index 847325f9345eb..791851a46e547 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,17 +1,67 @@ -## Examples of how to use the Reth SDK +# Examples -This directory contains a number of examples showcasing various capabilities of -the `reth-*` crates. +These examples demonstrate the main features of some of Reth's crates and how to use them. -All examples can be executed with: - -``` -cargo run --example $name -``` - -A good starting point for the examples would be [`db-access`](db-access.rs) -and [`rpc-db`](rpc-db). +To run an example, use the command `cargo run -p `. If you've got an example you'd like to see here, please feel free to open an issue. Otherwise if you've got an example you'd like to add, please feel free to make a PR! 
+ +## Node Builder + +| Example | Description | +| ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------ | +| [Additional RPC namespace](./additional-rpc-namespace-in-cli) | Illustrates how to add custom CLI parameters and set up a custom RPC namespace | +| [Custom event hooks](./cli-extension-event-hooks) | Illustrates how to hook to various node lifecycle events | +| [Custom dev node](./custom-dev-node) | Illustrates how to run a custom dev node programmatically and submit a transaction to it via RPC | +| [Custom EVM](./custom-evm) | Illustrates how to implement a node with a custom EVM | +| [Custom inspector](./custom-inspector) | Illustrates how to use a custom EVM inspector to trace new transactions | +| [Custom node](./custom-node) | Illustrates how to create a node with custom engine types | +| [Custom node components](./custom-node-components) | Illustrates how to configure custom node components | +| [Custom payload builder](./custom-payload-builder) | Illustrates how to use a custom payload builder | + +## ExEx + +| Example | Description | +| ---------------------------------- | --------------------------------------------------------------------------------- | +| [Minimal ExEx](./exex/minimal) | Illustrates how to build a simple ExEx | +| [OP Bridge ExEx](./exex/op-bridge) | Illustrates an ExEx that decodes Optimism deposit and withdrawal receipts from L1 | + +## RPC + +| Example | Description | +| ----------------------- | --------------------------------------------------------------------------- | +| [DB over RPC](./rpc-db) | Illustrates how to run a standalone RPC server over a Rethdatabase instance | + +## Database + +| Example | Description | +| --------------------------- | --------------------------------------------------------------- | +| [DB access](./db-access.rs) | Illustrates how to access Reth's database in a separate process | + +## 
Network + +| Example | Description | +| ---------------------------------- | ------------------------------------------------------------ | +| [Standalone network](./network.rs) | Illustrates how to use the network as a standalone component | + +## Mempool + +| Example | Description | +| ----------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- | +| [Trace pending transactions](./trace-transaction-cli) | Illustrates how to trace pending transactions as they arrive in the mempool | +| [Standalone txpool](./network-txpool.rs) | Illustrates how to use the network as a standalone component together with a transaction pool with a custom pool validator | + +## P2P + +| Example | Description | +| --------------------------- | ----------------------------------------------------------------- | +| [Manual P2P](./manual-p2p) | Illustrates how to connect and communicate with a peer | +| [Polygon P2P](./manual-p2p) | Illustrates how to connect and communicate with a peer on Polygon | + +## Misc + +| Example | Description | +| ---------------------------------- | ----------------------------------------------------------- | +| [Beacon API SSE](./beacon-api-sse) | Illustrates how to subscribe to beacon chain events via SSE | From 652be135c795e7a8518078de31f8b51e219f331e Mon Sep 17 00:00:00 2001 From: Mihir Wadekar Date: Thu, 25 Apr 2024 03:00:34 -0700 Subject: [PATCH 316/700] feat: adds panels for execution extension metrics (#7605) --- etc/grafana/dashboards/overview.json | 485 +++++++++++++++++++++++++++ 1 file changed, 485 insertions(+) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index e9b322f1b22c9..40c120e0fbcbf 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -7942,6 +7942,491 @@ ], "title": "RPC Throughput", "type": "timeseries" + }, + { + "collapsed": false, + 
"gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 273 + }, + "id": 214, + "panels": [], + "title": "Execution Extensions", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The total number of canonical state notifications sent to an ExEx.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 282 + }, + "id": 215, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_exex_notifications_sent_total{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Total Notifications Sent", + "range": true, + "refId": "B" + } + ], + "title": "Total Notifications Sent", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The total number of events an ExEx has sent to the manager.", + "fieldConfig": 
{ + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 282 + }, + "id": 216, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_exex_events_sent_total{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Total Events Sent", + "range": true, + "refId": "B" + } + ], + "title": "Total Events Sent", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Current and Max capacity of the internal state notifications buffer.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + 
"tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 290 + }, + "id": 218, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_exex_manager_current_capacity{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Current size", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "max_over_time(reth_exex_manager_max_capacity{instance=~\"$instance\"}[1h])", + "hide": false, + "legendFormat": "Max size", + "range": true, + "refId": "C" + } + ], + "title": "Current and Max Capacity", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Current size of the internal state notifications buffer.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + 
"lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 290 + }, + "id": 219, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_exex_manager_buffer_size{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Max size", + "range": true, + "refId": "B" + } + ], + "title": "Buffer Size", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Number of ExExs on the node", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "align": "auto", + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 298 + }, + "id": 220, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "8.0.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + 
"expr": "reth_exex_manager_num_exexs{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Number of ExExs", + "range": true, + "refId": "A" + } + ], + "title": "Number of ExExs", + "type": "stat" } ], "refresh": "30s", From 9f0874d59f7010cd553f0a150ea7370464e82f9e Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Thu, 25 Apr 2024 12:53:36 +0200 Subject: [PATCH 317/700] chore: add `reth-stages-api` to `CODEOWNERS` (#7865) --- CODEOWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/CODEOWNERS b/CODEOWNERS index 8efa8da85062f..b7bd14f8e56e4 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -15,6 +15,7 @@ crates/rpc/ @mattsse @Rjected crates/rpc/rpc-types @mattsse @Rjected @Evalir crates/rpc/rpc-types-compat @mattsse @Rjected @Evalir crates/stages/ @onbjerg @rkrasiuk @shekhirin +crates/stages-api/ @onbjerg @rkrasiuk @shekhirin crates/static-file @joshieDo @shekhirin crates/storage/ @rakita @joshieDo @shekhirin crates/tasks @mattsse From d0382fb88d90e8d7cf30eb3a0bc4392e92e62d1f Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 25 Apr 2024 13:01:34 +0200 Subject: [PATCH 318/700] chore: add `trie-parallel` to `CODEOWNERS` (#7866) --- CODEOWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/CODEOWNERS b/CODEOWNERS index b7bd14f8e56e4..d3d20e50104d5 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -22,4 +22,5 @@ crates/tasks @mattsse crates/tracing @onbjerg crates/transaction-pool/ @mattsse crates/trie @rkrasiuk +crates/trie-parallel @rkrasiuk .github/ @onbjerg @gakonst @DaniPopes From 90bf4005e0f43d4eb8d64f1cfd4887edef68080a Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 25 Apr 2024 13:05:03 +0200 Subject: [PATCH 319/700] chore: merge consensus and revm owners (#7867) --- CODEOWNERS | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index d3d20e50104d5..f6ff05a6e0aac 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,16 +1,14 @@ * @gakonst bin/ @onbjerg crates/blockchain-tree @rakita @rkrasiuk 
-crates/consensus/auto-seal @mattsse -crates/consensus/beacon @rkrasiuk @mattsse @Rjected +crates/consensus @rkrasiuk @mattsse @Rjected crates/exex @onbjerg @shekhirin crates/metrics @onbjerg crates/net/ @emhane @mattsse @Rjected crates/net/downloaders/ @onbjerg @rkrasiuk crates/payload/ @mattsse @Rjected crates/prune @shekhirin @joshieDo -crates/revm/src/ @rakita -crates/revm/ @mattsse +crates/revm/ @mattsse @rakita crates/rpc/ @mattsse @Rjected crates/rpc/rpc-types @mattsse @Rjected @Evalir crates/rpc/rpc-types-compat @mattsse @Rjected @Evalir From 132f7fbd80ba74da50734bd43b2ea9ab3f151155 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 25 Apr 2024 13:12:12 +0200 Subject: [PATCH 320/700] chore: remove `evalir` from `CODEOWNERS` (#7868) --- CODEOWNERS | 2 -- 1 file changed, 2 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index f6ff05a6e0aac..155d8581d79f6 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -10,8 +10,6 @@ crates/payload/ @mattsse @Rjected crates/prune @shekhirin @joshieDo crates/revm/ @mattsse @rakita crates/rpc/ @mattsse @Rjected -crates/rpc/rpc-types @mattsse @Rjected @Evalir -crates/rpc/rpc-types-compat @mattsse @Rjected @Evalir crates/stages/ @onbjerg @rkrasiuk @shekhirin crates/stages-api/ @onbjerg @rkrasiuk @shekhirin crates/static-file @joshieDo @shekhirin From 0f7e3541b17e3ca70e33146d9b05b8f024ce6497 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Thu, 25 Apr 2024 13:36:53 +0200 Subject: [PATCH 321/700] chore: add `cli/*` to `CODEOWNERS` (#7870) --- CODEOWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/CODEOWNERS b/CODEOWNERS index 155d8581d79f6..3ea162bf6ae75 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,6 +1,7 @@ * @gakonst bin/ @onbjerg crates/blockchain-tree @rakita @rkrasiuk +crates/cli/ @onbjerg @mattsse crates/consensus @rkrasiuk @mattsse @Rjected crates/exex @onbjerg @shekhirin crates/metrics @onbjerg From c7008deef8d7a86706673c25255cab795e6e3108 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 25 Apr 2024 
13:47:27 +0200 Subject: [PATCH 322/700] fix(cli): set start header (#7725) Co-authored-by: Roman Krasiuk --- Cargo.lock | 8 ++++---- Makefile | 2 +- bin/reth/src/commands/import.rs | 20 ++++++++++++++++---- crates/net/downloaders/src/file_client.rs | 6 ------ 4 files changed, 21 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 291638c49f82d..90dbf1fa3555a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8934,18 +8934,18 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.59" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" +checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.59" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" +checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", diff --git a/Makefile b/Makefile index 187de174d0746..82994b3c282c3 100644 --- a/Makefile +++ b/Makefile @@ -316,7 +316,7 @@ lint: make fmt && \ make lint-reth && \ make lint-op-reth && \ - make lint-other-targets \ + make lint-other-targets && \ make lint-codespell fix-lint-reth: diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 4542f10be28a2..0136e0e5e67f0 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -29,7 +29,10 @@ use reth_node_core::init::init_genesis; use reth_node_ethereum::EthEvmConfig; use reth_node_events::node::NodeEvent; use reth_primitives::{stage::StageId, ChainSpec, PruneModes, B256}; -use reth_provider::{HeaderSyncMode, ProviderFactory, StageCheckpointReader}; +use reth_provider::{ + BlockNumReader, 
HeaderProvider, HeaderSyncMode, ProviderError, ProviderFactory, + StageCheckpointReader, +}; use reth_stages::{ prelude::*, stages::{ExecutionStage, ExecutionStageThresholds, SenderRecoveryStage}, @@ -158,8 +161,7 @@ impl ImportCommand { "Importing chain file chunk" ); - // override the tip - let tip = file_client.tip().expect("file client has no tip"); + let tip = file_client.tip().ok_or(eyre::eyre!("file client has no tip"))?; info!(target: "reth::cli", "Chain file chunk read"); let (mut pipeline, events) = self @@ -221,15 +223,25 @@ impl ImportCommand { eyre::bail!("unable to import non canonical blocks"); } + // Retrieve latest header found in the database. + let last_block_number = provider_factory.last_block_number()?; + let local_head = provider_factory + .sealed_header(last_block_number)? + .ok_or(ProviderError::HeaderNotFound(last_block_number.into()))?; + let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) .build(file_client.clone(), consensus.clone()) .into_task(); - header_downloader.update_local_head(file_client.start_header().unwrap()); + // TODO: The pipeline should correctly configure the downloader on its own. + // Find the possibility to remove unnecessary pre-configuration. + header_downloader.update_local_head(local_head); header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap())); let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) .build(file_client.clone(), consensus.clone(), provider_factory.clone()) .into_task(); + // TODO: The pipeline should correctly configure the downloader on its own. + // Find the possibility to remove unnecessary pre-configuration. 
body_downloader .set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap()) .expect("failed to set download range"); diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index b5b7aceae5c75..ce8f3898bc8fd 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -179,12 +179,6 @@ impl FileClient { self.headers.get(&self.max_block()?).map(|h| h.clone().seal_slow()) } - /// Clones and returns the lowest header of this client has or `None` if empty. Seals header - /// before returning. - pub fn start_header(&self) -> Option { - self.headers.get(&self.min_block()?).map(|h| h.clone().seal_slow()) - } - /// Returns true if all blocks are canonical (no gaps) pub fn has_canonical_blocks(&self) -> bool { if self.headers.is_empty() { From f6e68e28eb0bea3d30b6722b2a8eb9b52d34c95e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 25 Apr 2024 14:23:51 +0200 Subject: [PATCH 323/700] feat(op): timestamp below bedrock (#7768) --- Cargo.lock | 1 - crates/primitives/Cargo.toml | 1 - crates/primitives/src/header.rs | 25 +++++++++++++++++-------- 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 90dbf1fa3555a..1695adafd3c96 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7156,7 +7156,6 @@ dependencies = [ "byteorder", "bytes", "c-kzg", - "cfg-if", "clap", "criterion", "derive_more", diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 3e08655dbb852..e3828c913b6be 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -51,7 +51,6 @@ tempfile = { workspace = true, optional = true } thiserror.workspace = true zstd = { version = "0.13", features = ["experimental"], optional = true } roaring = "0.10.2" -cfg-if = "1.0.0" # `test-utils` feature hash-db = { version = "~0.15", optional = true } diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index 
899fcb368d54e..a06be26256ba1 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -776,6 +776,17 @@ impl SealedHeader { } // timestamp in past check + #[cfg(feature = "optimism")] + if chain_spec.is_bedrock_active_at_block(self.header.number) && + self.header.is_timestamp_in_past(parent.timestamp) + { + return Err(HeaderValidationError::TimestampIsInPast { + parent_timestamp: parent.timestamp, + timestamp: self.timestamp, + }) + } + + #[cfg(not(feature = "optimism"))] if self.header.is_timestamp_in_past(parent.timestamp) { return Err(HeaderValidationError::TimestampIsInPast { parent_timestamp: parent.timestamp, @@ -786,16 +797,14 @@ impl SealedHeader { // TODO Check difficulty increment between parent and self // Ace age did increment it by some formula that we need to follow. - cfg_if::cfg_if! { - if #[cfg(feature = "optimism")] { - // On Optimism, the gas limit can adjust instantly, so we skip this check - // if the optimism feature is enabled in the chain spec. - if !chain_spec.is_optimism() { - self.validate_gas_limit(parent, chain_spec)?; - } - } else { + if cfg!(feature = "optimism") { + // On Optimism, the gas limit can adjust instantly, so we skip this check + // if the optimism feature is enabled in the chain spec. 
+ if !chain_spec.is_optimism() { self.validate_gas_limit(parent, chain_spec)?; } + } else { + self.validate_gas_limit(parent, chain_spec)?; } // EIP-1559 check base fee From 33e7e0208f25ef8d171a2a42adbceff73197bfd0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 25 Apr 2024 14:25:54 +0200 Subject: [PATCH 324/700] fix: derank peers that responded with bad data (#7854) Co-authored-by: Oliver Nordbjerg --- crates/net/network/src/fetch/mod.rs | 76 +++++++++++++++++++++-------- 1 file changed, 57 insertions(+), 19 deletions(-) diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index 19c605fb9a54e..1f85f242da188 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -77,8 +77,16 @@ impl StateFetcher { best_number: u64, timeout: Arc, ) { - self.peers - .insert(peer_id, Peer { state: PeerState::Idle, best_hash, best_number, timeout }); + self.peers.insert( + peer_id, + Peer { + state: PeerState::Idle, + best_hash, + best_number, + timeout, + last_response_likely_bad: false, + }, + ); } /// Removes the peer from the peer list, after which it is no longer available for future @@ -119,14 +127,29 @@ impl StateFetcher { } /// Returns the _next_ idle peer that's ready to accept a request, - /// prioritizing those with the lowest timeout/latency. - /// Once a peer has been yielded, it will be moved to the end of the map - fn next_peer(&mut self) -> Option { - self.peers - .iter() - .filter(|(_, peer)| peer.state.is_idle()) - .min_by_key(|(_, peer)| peer.timeout()) - .map(|(id, _)| *id) + /// prioritizing those with the lowest timeout/latency and those that recently responded with + /// adequate data. 
+ fn next_best_peer(&mut self) -> Option { + let mut idle = self.peers.iter().filter(|(_, peer)| peer.state.is_idle()); + + let mut best_peer = idle.next()?; + + for maybe_better in idle { + // replace best peer if our current best peer sent us a bad response last time + if best_peer.1.last_response_likely_bad && !maybe_better.1.last_response_likely_bad { + best_peer = maybe_better; + continue + } + + // replace best peer if this peer has better rtt + if maybe_better.1.timeout() < best_peer.1.timeout() && + !maybe_better.1.last_response_likely_bad + { + best_peer = maybe_better; + } + } + + Some(*best_peer.0) } /// Returns the next action to return @@ -136,7 +159,7 @@ impl StateFetcher { return PollAction::NoRequests } - let Some(peer_id) = self.next_peer() else { return PollAction::NoPeersAvailable }; + let Some(peer_id) = self.next_best_peer() else { return PollAction::NoPeersAvailable }; let request = self.queued_requests.pop_front().expect("not empty"); let request = self.prepare_block_request(peer_id, request); @@ -249,6 +272,9 @@ impl StateFetcher { } if let Some(peer) = self.peers.get_mut(&peer_id) { + // update the peer's response state + peer.last_response_likely_bad = is_likely_bad_response; + // If the peer is still ready to accept new requests, we try to send a followup // request immediately. 
if peer.state.on_request_finished() && !is_error && !is_likely_bad_response { @@ -268,11 +294,16 @@ impl StateFetcher { peer_id: PeerId, res: RequestResult>, ) -> Option { + let is_likely_bad_response = res.as_ref().map_or(true, |bodies| bodies.is_empty()); + if let Some(resp) = self.inflight_bodies_requests.remove(&peer_id) { let _ = resp.response.send(res.map(|b| (peer_id, b).into())); } if let Some(peer) = self.peers.get_mut(&peer_id) { - if peer.state.on_request_finished() { + // update the peer's response state + peer.last_response_likely_bad = is_likely_bad_response; + + if peer.state.on_request_finished() && !is_likely_bad_response { return self.followup_request(peer_id) } } @@ -307,6 +338,13 @@ struct Peer { best_number: u64, /// Tracks the current timeout value we use for the peer. timeout: Arc, + /// Tracks whether the peer has recently responded with a likely bad response. + /// + /// This is used to de-rank the peer if there are other peers available. + /// This exists because empty responses may not be penalized (e.g. when blocks near the tip are + /// downloaded), but we still want to avoid requesting from the same peer again if it has the + /// lowest timeout. 
+ last_response_likely_bad: bool, } impl Peer { @@ -462,17 +500,17 @@ mod tests { fetcher.new_active_peer(peer1, B256::random(), 1, Arc::new(AtomicU64::new(1))); fetcher.new_active_peer(peer2, B256::random(), 2, Arc::new(AtomicU64::new(1))); - let first_peer = fetcher.next_peer().unwrap(); + let first_peer = fetcher.next_best_peer().unwrap(); assert!(first_peer == peer1 || first_peer == peer2); // Pending disconnect for first_peer fetcher.on_pending_disconnect(&first_peer); // first_peer now isn't idle, so we should get other peer - let second_peer = fetcher.next_peer().unwrap(); + let second_peer = fetcher.next_best_peer().unwrap(); assert!(first_peer == peer1 || first_peer == peer2); assert_ne!(first_peer, second_peer); // without idle peers, returns None fetcher.on_pending_disconnect(&second_peer); - assert_eq!(fetcher.next_peer(), None); + assert_eq!(fetcher.next_best_peer(), None); } #[tokio::test] @@ -491,13 +529,13 @@ mod tests { fetcher.new_active_peer(peer3, B256::random(), 3, Arc::new(AtomicU64::new(50))); // Must always get peer1 (lowest timeout) - assert_eq!(fetcher.next_peer(), Some(peer1)); - assert_eq!(fetcher.next_peer(), Some(peer1)); + assert_eq!(fetcher.next_best_peer(), Some(peer1)); + assert_eq!(fetcher.next_best_peer(), Some(peer1)); // peer2's timeout changes below peer1's peer2_timeout.store(10, Ordering::Relaxed); // Then we get peer 2 always (now lowest) - assert_eq!(fetcher.next_peer(), Some(peer2)); - assert_eq!(fetcher.next_peer(), Some(peer2)); + assert_eq!(fetcher.next_best_peer(), Some(peer2)); + assert_eq!(fetcher.next_best_peer(), Some(peer2)); } #[tokio::test] From 08cdd67cb00776047d6260ff1ba48260a71940b4 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Thu, 25 Apr 2024 19:28:17 +0700 Subject: [PATCH 325/700] fix(auto-seal): build dev blocks with withdrawals (#7857) --- crates/consensus/auto-seal/src/lib.rs | 32 ++++++++++++++++++-------- crates/consensus/auto-seal/src/task.rs | 12 
+++++++--- 2 files changed, 32 insertions(+), 12 deletions(-) diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index f6de63979d3ad..c09dcbcc816e1 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -24,8 +24,8 @@ use reth_primitives::{ constants::{EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT}, eip4844::calculate_excess_blob_gas, proofs, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, Bloom, - ChainSpec, Header, ReceiptWithBloom, SealedBlock, SealedHeader, TransactionSigned, B256, - EMPTY_OMMER_ROOT_HASH, U256, + ChainSpec, Header, ReceiptWithBloom, SealedBlock, SealedHeader, TransactionSigned, Withdrawals, + B256, U256, }; use reth_provider::{ BlockExecutor, BlockReaderIdExt, BundleStateWithReceipts, CanonStateNotificationSender, @@ -270,6 +270,8 @@ impl StorageInner { pub(crate) fn build_header_template( &self, transactions: &[TransactionSigned], + ommers: &[Header], + withdrawals: Option<&Withdrawals>, chain_spec: Arc, ) -> Header { let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); @@ -281,12 +283,12 @@ impl StorageInner { let mut header = Header { parent_hash: self.best_hash, - ommers_hash: EMPTY_OMMER_ROOT_HASH, + ommers_hash: proofs::calculate_ommers_root(ommers), beneficiary: Default::default(), state_root: Default::default(), transactions_root: Default::default(), receipts_root: Default::default(), - withdrawals_root: None, + withdrawals_root: withdrawals.map(|w| proofs::calculate_withdrawals_root(w)), logs_bloom: Default::default(), difficulty: U256::from(2), number: self.best_block + 1, @@ -420,6 +422,8 @@ impl StorageInner { pub(crate) fn build_and_execute( &mut self, transactions: Vec, + ommers: Vec
, + withdrawals: Option, client: &impl StateProviderFactory, chain_spec: Arc, evm_config: EvmConfig, @@ -427,11 +431,21 @@ impl StorageInner { where EvmConfig: ConfigureEvm, { - let header = self.build_header_template(&transactions, chain_spec.clone()); + let header = self.build_header_template( + &transactions, + &ommers, + withdrawals.as_ref(), + chain_spec.clone(), + ); - let block = Block { header, body: transactions, ommers: vec![], withdrawals: None } - .with_recovered_senders() - .ok_or(BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError))?; + let block = Block { + header, + body: transactions, + ommers: ommers.clone(), + withdrawals: withdrawals.clone(), + } + .with_recovered_senders() + .ok_or(BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError))?; trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); @@ -447,7 +461,7 @@ impl StorageInner { let (bundle_state, gas_used) = self.execute(&block, &mut executor)?; let Block { header, body, .. 
} = block.block; - let body = BlockBody { transactions: body, ommers: vec![], withdrawals: None }; + let body = BlockBody { transactions: body, ommers, withdrawals }; let blob_gas_used = if chain_spec.is_cancun_active_at_timestamp(header.timestamp) { let mut sum_blob_gas_used = 0; diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index 53bfc6356c3ec..6009cd810a06d 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -3,7 +3,9 @@ use futures_util::{future::BoxFuture, FutureExt}; use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; use reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; -use reth_primitives::{Block, ChainSpec, IntoRecoveredTransaction, SealedBlockWithSenders}; +use reth_primitives::{ + Block, ChainSpec, IntoRecoveredTransaction, SealedBlockWithSenders, Withdrawals, +}; use reth_provider::{CanonChainTracker, CanonStateNotificationSender, Chain, StateProviderFactory}; use reth_rpc_types::engine::ForkchoiceState; use reth_stages_api::PipelineEvent; @@ -134,9 +136,13 @@ where (recovered.into_signed(), signer) }) .unzip(); + let ommers = vec![]; + let withdrawals = Some(Withdrawals::default()); match storage.build_and_execute( transactions.clone(), + ommers.clone(), + withdrawals.clone(), &client, chain_spec, evm_config, @@ -193,8 +199,8 @@ where let block = Block { header: new_header.clone().unseal(), body: transactions, - ommers: vec![], - withdrawals: None, + ommers, + withdrawals, }; let sealed_block = block.seal_slow(); From 1c17f08ad209a1ae120436461ceb2f7f5391811c Mon Sep 17 00:00:00 2001 From: Rodrigo Herrera Date: Thu, 25 Apr 2024 06:50:04 -0600 Subject: [PATCH 326/700] Replace TransactionKind with alloy_primitives::TxKind (#7859) --- Cargo.lock | 4 +- Cargo.toml | 2 +- crates/blockchain-tree/src/blockchain_tree.rs | 4 +- crates/consensus/common/src/validation.rs | 8 +- .../interfaces/src/test_utils/generators.rs | 9 +- 
crates/net/eth-wire-types/src/blocks.rs | 12 +- crates/net/eth-wire-types/src/transactions.rs | 60 ++----- crates/net/network/tests/it/requests.rs | 6 +- crates/optimism/evm/src/execute.rs | 10 +- crates/optimism/node/src/txpool.rs | 6 +- crates/primitives/src/lib.rs | 4 +- crates/primitives/src/revm/compat.rs | 4 +- crates/primitives/src/revm/env.rs | 24 +-- crates/primitives/src/transaction/eip1559.rs | 8 +- crates/primitives/src/transaction/eip2930.rs | 10 +- crates/primitives/src/transaction/eip4844.rs | 6 +- crates/primitives/src/transaction/legacy.rs | 8 +- crates/primitives/src/transaction/mod.rs | 158 +++--------------- crates/primitives/src/transaction/optimism.rs | 10 +- crates/revm/src/optimism/processor.rs | 10 +- crates/revm/src/processor.rs | 4 +- .../rpc-types-compat/src/transaction/mod.rs | 4 +- .../rpc-types-compat/src/transaction/typed.rs | 18 +- crates/rpc/rpc/src/eth/api/transactions.rs | 7 +- .../codecs/derive/src/compact/generator.rs | 7 +- .../storage/codecs/derive/src/compact/mod.rs | 2 +- crates/transaction-pool/src/test_utils/gen.rs | 10 +- .../transaction-pool/src/test_utils/mock.rs | 22 +-- crates/transaction-pool/src/traits.rs | 22 +-- 29 files changed, 162 insertions(+), 297 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1695adafd3c96..6b2e0d01727b5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -262,9 +262,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99bbad0a6b588ef4aec1b5ddbbfdacd9ef04e00b979617765b03174318ee1f3a" +checksum = "50c715249705afa1e32be79dabfd35e2ef0f1cc02ad2cf48c9d1e20026ee637b" dependencies = [ "alloy-rlp", "arbitrary", diff --git a/Cargo.toml b/Cargo.toml index 7ef645f39e02b..73597b311a46d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -282,7 +282,7 @@ revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = # eth alloy-chains = "0.1.15" -alloy-primitives = 
"0.7.0" +alloy-primitives = "0.7.1" alloy-dyn-abi = "0.7.0" alloy-sol-types = "0.7.0" alloy-rlp = "0.3.4" diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 08f588cd2d595..b1688fda91fd1 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1273,7 +1273,7 @@ mod tests { revm_primitives::AccountInfo, stage::StageCheckpoint, Account, Address, ChainSpecBuilder, Genesis, GenesisAccount, Header, Signature, - Transaction, TransactionKind, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, + Transaction, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, TxKind, Withdrawals, B256, MAINNET, }; use reth_provider::{ @@ -1453,7 +1453,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce, gas_limit: 21_000, - to: TransactionKind::Call(Address::ZERO), + to: TxKind::Call(Address::ZERO), max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, ..Default::default() }), diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 94906188206ff..3ed01f63753e1 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -331,8 +331,8 @@ mod tests { }; use reth_primitives::{ hex_literal::hex, proofs, Account, Address, BlockBody, BlockHash, BlockHashOrNumber, Bytes, - ChainSpecBuilder, Signature, TransactionKind, TransactionSigned, Withdrawal, Withdrawals, - MAINNET, U256, + ChainSpecBuilder, Signature, TransactionSigned, TxKind, Withdrawal, Withdrawals, MAINNET, + U256, }; use std::ops::RangeBounds; @@ -448,7 +448,7 @@ mod tests { nonce, gas_price: 0x28f000fff, gas_limit: 10, - to: TransactionKind::Call(Address::default()), + to: TxKind::Call(Address::default()), value: U256::from(3_u64), input: Bytes::from(vec![1, 2]), access_list: Default::default(), @@ -470,7 +470,7 @@ mod tests { max_priority_fee_per_gas: 0x28f000fff, max_fee_per_blob_gas: 0x7, gas_limit: 10, 
- to: TransactionKind::Call(Address::default()), + to: TxKind::Call(Address::default()), value: U256::from(3_u64), input: Bytes::from(vec![1, 2]), access_list: Default::default(), diff --git a/crates/interfaces/src/test_utils/generators.rs b/crates/interfaces/src/test_utils/generators.rs index e601d96291055..0f1930b6005fe 100644 --- a/crates/interfaces/src/test_utils/generators.rs +++ b/crates/interfaces/src/test_utils/generators.rs @@ -4,8 +4,7 @@ use rand::{ }; use reth_primitives::{ proofs, sign_message, Account, Address, BlockNumber, Bytes, Header, Log, Receipt, SealedBlock, - SealedHeader, StorageEntry, Transaction, TransactionKind, TransactionSigned, TxLegacy, B256, - U256, + SealedHeader, StorageEntry, Transaction, TransactionSigned, TxKind, TxLegacy, B256, U256, }; use secp256k1::{KeyPair, Secp256k1}; use std::{ @@ -79,7 +78,7 @@ pub fn random_tx(rng: &mut R) -> Transaction { nonce: rng.gen::().into(), gas_price: rng.gen::().into(), gas_limit: rng.gen::().into(), - to: TransactionKind::Call(rng.gen()), + to: TxKind::Call(rng.gen()), value: U256::from(rng.gen::()), input: Bytes::default(), }) @@ -395,7 +394,7 @@ mod tests { chain_id: 1, nonce: 0x42, gas_limit: 44386, - to: TransactionKind::Call(hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), + to: TxKind::Call(hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), value: U256::from(0_u64), input: hex!("a22cb4650000000000000000000000005eee75727d804a2b13038928d36f8b188945a57a0000000000000000000000000000000000000000000000000000000000000000").into(), max_fee_per_gas: 0x4a817c800, @@ -427,7 +426,7 @@ mod tests { nonce: 9, gas_price: 20 * 10_u128.pow(9), gas_limit: 21000, - to: TransactionKind::Call(hex!("3535353535353535353535353535353535353535").into()), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(10_u128.pow(18)), input: Bytes::default(), }); diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index 
fa6365c206230..36b8e6e8ca9eb 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -131,8 +131,8 @@ mod tests { use crate::{message::RequestPair, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders}; use alloy_rlp::{Decodable, Encodable}; use reth_primitives::{ - hex, BlockHashOrNumber, Header, HeadersDirection, Signature, Transaction, TransactionKind, - TransactionSigned, TxLegacy, U256, + hex, BlockHashOrNumber, Header, HeadersDirection, Signature, Transaction, + TransactionSigned, TxKind, TxLegacy, U256, }; use std::str::FromStr; @@ -383,7 +383,7 @@ mod tests { nonce: 0x8u64, gas_price: 0x4a817c808, gas_limit: 0x2e248u64, - to: TransactionKind::Call(hex!("3535353535353535353535353535353535353535").into()), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x200u64), input: Default::default(), }), @@ -398,7 +398,7 @@ mod tests { nonce: 0x9u64, gas_price: 0x4a817c809, gas_limit: 0x33450u64, - to: TransactionKind::Call(hex!("3535353535353535353535353535353535353535").into()), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x2d9u64), input: Default::default(), }), Signature { @@ -455,7 +455,7 @@ mod tests { nonce: 0x8u64, gas_price: 0x4a817c808, gas_limit: 0x2e248u64, - to: TransactionKind::Call(hex!("3535353535353535353535353535353535353535").into()), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x200u64), input: Default::default(), }), @@ -471,7 +471,7 @@ mod tests { nonce: 0x9u64, gas_price: 0x4a817c809, gas_limit: 0x33450u64, - to: TransactionKind::Call(hex!("3535353535353535353535353535353535353535").into()), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x2d9u64), input: Default::default(), }), diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index 
5d48211be5bd6..f19bbdcc74431 100644 --- a/crates/net/eth-wire-types/src/transactions.rs +++ b/crates/net/eth-wire-types/src/transactions.rs @@ -80,8 +80,8 @@ mod tests { use crate::{message::RequestPair, GetPooledTransactions, PooledTransactions}; use alloy_rlp::{Decodable, Encodable}; use reth_primitives::{ - hex, PooledTransactionsElement, Signature, Transaction, TransactionKind, TransactionSigned, - TxEip1559, TxLegacy, U256, + hex, PooledTransactionsElement, Signature, Transaction, TransactionSigned, TxEip1559, + TxKind, TxLegacy, U256, }; use std::str::FromStr; @@ -130,9 +130,7 @@ mod tests { nonce: 0x8u64, gas_price: 0x4a817c808, gas_limit: 0x2e248u64, - to: TransactionKind::Call( - hex!("3535353535353535353535353535353535353535").into(), - ), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x200u64), input: Default::default(), }), @@ -154,9 +152,7 @@ mod tests { nonce: 0x09u64, gas_price: 0x4a817c809, gas_limit: 0x33450u64, - to: TransactionKind::Call( - hex!("3535353535353535353535353535353535353535").into(), - ), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x2d9u64), input: Default::default(), }), @@ -200,9 +196,7 @@ mod tests { nonce: 0x8u64, gas_price: 0x4a817c808, gas_limit: 0x2e248u64, - to: TransactionKind::Call( - hex!("3535353535353535353535353535353535353535").into(), - ), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x200u64), input: Default::default(), }), @@ -224,9 +218,7 @@ mod tests { nonce: 0x09u64, gas_price: 0x4a817c809, gas_limit: 0x33450u64, - to: TransactionKind::Call( - hex!("3535353535353535353535353535353535353535").into(), - ), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x2d9u64), input: Default::default(), }), @@ -271,9 +263,7 @@ mod tests { nonce: 15u64, gas_price: 2200000000, gas_limit: 34811u64, - to: TransactionKind::Call( - 
hex!("cf7f9e66af820a19257a2108375b180b0ec49167").into(), - ), + to: TxKind::Call(hex!("cf7f9e66af820a19257a2108375b180b0ec49167").into()), value: U256::from(1234u64), input: Default::default(), }), @@ -296,9 +286,7 @@ mod tests { max_priority_fee_per_gas: 1500000000, max_fee_per_gas: 1500000013, gas_limit: 21000u64, - to: TransactionKind::Call( - hex!("61815774383099e24810ab832a5b2a5425c154d5").into(), - ), + to: TxKind::Call(hex!("61815774383099e24810ab832a5b2a5425c154d5").into()), value: U256::from(3000000000000000000u64), input: Default::default(), access_list: Default::default(), @@ -321,9 +309,7 @@ mod tests { nonce: 3u64, gas_price: 2000000000, gas_limit: 10000000u64, - to: TransactionKind::Call( - hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into(), - ), + to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(1000000000000000u64), input: Default::default(), }), @@ -345,9 +331,7 @@ mod tests { nonce: 1u64, gas_price: 1000000000, gas_limit: 100000u64, - to: TransactionKind::Call( - hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into(), - ), + to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(693361000000000u64), input: Default::default(), }), @@ -369,9 +353,7 @@ mod tests { nonce: 2u64, gas_price: 1000000000, gas_limit: 100000u64, - to: TransactionKind::Call( - hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into(), - ), + to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(1000000000000000u64), input: Default::default(), }), @@ -420,9 +402,7 @@ mod tests { nonce: 15u64, gas_price: 2200000000, gas_limit: 34811u64, - to: TransactionKind::Call( - hex!("cf7f9e66af820a19257a2108375b180b0ec49167").into(), - ), + to: TxKind::Call(hex!("cf7f9e66af820a19257a2108375b180b0ec49167").into()), value: U256::from(1234u64), input: Default::default(), }), @@ -445,9 +425,7 @@ mod tests { max_priority_fee_per_gas: 1500000000, max_fee_per_gas: 
1500000013, gas_limit: 21000u64, - to: TransactionKind::Call( - hex!("61815774383099e24810ab832a5b2a5425c154d5").into(), - ), + to: TxKind::Call(hex!("61815774383099e24810ab832a5b2a5425c154d5").into()), value: U256::from(3000000000000000000u64), input: Default::default(), access_list: Default::default(), @@ -470,9 +448,7 @@ mod tests { nonce: 3u64, gas_price: 2000000000, gas_limit: 10000000u64, - to: TransactionKind::Call( - hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into(), - ), + to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(1000000000000000u64), input: Default::default(), }), @@ -494,9 +470,7 @@ mod tests { nonce: 1u64, gas_price: 1000000000, gas_limit: 100000u64, - to: TransactionKind::Call( - hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into(), - ), + to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(693361000000000u64), input: Default::default(), }), @@ -518,9 +492,7 @@ mod tests { nonce: 2u64, gas_price: 1000000000, gas_limit: 100000u64, - to: TransactionKind::Call( - hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into(), - ), + to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(1000000000000000u64), input: Default::default(), }), diff --git a/crates/net/network/tests/it/requests.rs b/crates/net/network/tests/it/requests.rs index decc9ee2507e6..4e36f191c81ed 100644 --- a/crates/net/network/tests/it/requests.rs +++ b/crates/net/network/tests/it/requests.rs @@ -12,8 +12,8 @@ use reth_network::{ }; use reth_network_api::{NetworkInfo, Peers}; use reth_primitives::{ - Block, BlockBody, Bytes, Header, HeadersDirection, Signature, Transaction, TransactionKind, - TransactionSigned, TxEip2930, U256, + Block, BlockBody, Bytes, Header, HeadersDirection, Signature, Transaction, TransactionSigned, + TxEip2930, TxKind, U256, }; use reth_provider::test_utils::MockEthProvider; use std::sync::Arc; @@ -25,7 +25,7 @@ pub fn 
rng_transaction(rng: &mut impl rand::RngCore) -> TransactionSigned { nonce: rng.gen(), gas_price: rng.gen(), gas_limit: rng.gen(), - to: TransactionKind::Create, + to: TxKind::Create, value: U256::from(rng.gen::()), input: Bytes::from(vec![1, 2]), access_list: Default::default(), diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index ef87cce1dfd46..5b7d797da9395 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -539,7 +539,7 @@ mod tests { use super::*; use reth_primitives::{ b256, Account, Address, Block, ChainSpecBuilder, Signature, StorageKey, StorageValue, - Transaction, TransactionKind, TransactionSigned, TxEip1559, BASE_MAINNET, + Transaction, TransactionSigned, TxEip1559, TxKind, BASE_MAINNET, }; use reth_revm::{database::StateProviderDatabase, L1_BLOCK_CONTRACT}; use std::{collections::HashMap, str::FromStr}; @@ -610,7 +610,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce: 0, gas_limit: 21_000, - to: TransactionKind::Call(addr), + to: TxKind::Call(addr), ..Default::default() }), Signature::default(), @@ -619,7 +619,7 @@ mod tests { let tx_deposit = TransactionSigned::from_transaction_and_signature( Transaction::Deposit(reth_primitives::TxDeposit { from: addr, - to: TransactionKind::Call(addr), + to: TxKind::Call(addr), gas_limit: 21_000, ..Default::default() }), @@ -690,7 +690,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce: 0, gas_limit: 21_000, - to: TransactionKind::Call(addr), + to: TxKind::Call(addr), ..Default::default() }), Signature::default(), @@ -699,7 +699,7 @@ mod tests { let tx_deposit = TransactionSigned::from_transaction_and_signature( Transaction::Deposit(reth_primitives::TxDeposit { from: addr, - to: TransactionKind::Call(addr), + to: TxKind::Call(addr), gas_limit: 21_000, ..Default::default() }), diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 73097ce27d064..7ee1bb9ece3e2 100644 --- 
a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -202,8 +202,8 @@ pub struct OpL1BlockInfo { mod tests { use crate::txpool::OpTransactionValidator; use reth_primitives::{ - Signature, Transaction, TransactionKind, TransactionSigned, TransactionSignedEcRecovered, - TxDeposit, MAINNET, U256, + Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxDeposit, TxKind, + MAINNET, U256, }; use reth_provider::test_utils::MockEthProvider; use reth_transaction_pool::{ @@ -225,7 +225,7 @@ mod tests { let deposit_tx = Transaction::Deposit(TxDeposit { source_hash: Default::default(), from: signer, - to: TransactionKind::Create, + to: TxKind::Create, mint: None, value: U256::ZERO, gas_limit: 0u64, diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 167a645451397..9e77b4c15dbc4 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -97,7 +97,7 @@ pub use transaction::{ pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, AccessList, AccessListItem, IntoRecoveredTransaction, InvalidTransactionError, Signature, - Transaction, TransactionKind, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, + Transaction, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TryFromRecoveredTransaction, TxEip1559, TxEip2930, TxEip4844, TxHashOrNumber, TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, @@ -113,7 +113,7 @@ pub use alloy_primitives::{ eip191_hash_message, hex, hex_literal, keccak256, ruint, utils::format_ether, Address, BlockHash, BlockNumber, Bloom, BloomInput, Bytes, ChainId, Selector, StorageKey, - StorageValue, TxHash, TxIndex, TxNumber, B128, B256, B512, B64, U128, U256, U64, U8, + StorageValue, TxHash, TxIndex, TxKind, TxNumber, B128, B256, B512, B64, U128, U256, U64, U8, }; pub use reth_ethereum_forks::*; pub use 
revm_primitives::{self, JumpMap}; diff --git a/crates/primitives/src/revm/compat.rs b/crates/primitives/src/revm/compat.rs index 6c9474f7cf283..9727708823f7b 100644 --- a/crates/primitives/src/revm/compat.rs +++ b/crates/primitives/src/revm/compat.rs @@ -1,4 +1,4 @@ -use crate::{revm_primitives::AccountInfo, Account, Address, TransactionKind, KECCAK_EMPTY, U256}; +use crate::{revm_primitives::AccountInfo, Account, Address, TxKind, KECCAK_EMPTY, U256}; use revm::{ interpreter::gas::validate_initial_tx_gas, primitives::{MergeSpec, ShanghaiSpec}, @@ -34,7 +34,7 @@ pub fn into_revm_acc(reth_acc: Account) -> AccountInfo { #[inline] pub fn calculate_intrinsic_gas_after_merge( input: &[u8], - kind: &TransactionKind, + kind: &TxKind, access_list: &[(Address, Vec)], is_shanghai: bool, ) -> u64 { diff --git a/crates/primitives/src/revm/env.rs b/crates/primitives/src/revm/env.rs index edfc07f80a11a..b13a7018f7ce8 100644 --- a/crates/primitives/src/revm/env.rs +++ b/crates/primitives/src/revm/env.rs @@ -2,8 +2,8 @@ use crate::{ constants::{BEACON_ROOTS_ADDRESS, SYSTEM_ADDRESS}, recover_signer_unchecked, revm_primitives::{BlockEnv, Env, TransactTo, TxEnv}, - Address, Bytes, Chain, ChainSpec, Header, Transaction, TransactionKind, - TransactionSignedEcRecovered, B256, U256, + Address, Bytes, Chain, ChainSpec, Header, Transaction, TransactionSignedEcRecovered, TxKind, + B256, U256, }; #[cfg(feature = "optimism")] @@ -208,8 +208,8 @@ where tx_env.gas_price = U256::from(tx.gas_price); tx_env.gas_priority_fee = None; tx_env.transact_to = match tx.to { - TransactionKind::Call(to) => TransactTo::Call(to), - TransactionKind::Create => TransactTo::create(), + TxKind::Call(to) => TransactTo::Call(to), + TxKind::Create => TransactTo::create(), }; tx_env.value = tx.value; tx_env.data = tx.input.clone(); @@ -224,8 +224,8 @@ where tx_env.gas_price = U256::from(tx.gas_price); tx_env.gas_priority_fee = None; tx_env.transact_to = match tx.to { - TransactionKind::Call(to) => TransactTo::Call(to), 
- TransactionKind::Create => TransactTo::create(), + TxKind::Call(to) => TransactTo::Call(to), + TxKind::Create => TransactTo::create(), }; tx_env.value = tx.value; tx_env.data = tx.input.clone(); @@ -247,8 +247,8 @@ where tx_env.gas_price = U256::from(tx.max_fee_per_gas); tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); tx_env.transact_to = match tx.to { - TransactionKind::Call(to) => TransactTo::Call(to), - TransactionKind::Create => TransactTo::create(), + TxKind::Call(to) => TransactTo::Call(to), + TxKind::Create => TransactTo::create(), }; tx_env.value = tx.value; tx_env.data = tx.input.clone(); @@ -270,8 +270,8 @@ where tx_env.gas_price = U256::from(tx.max_fee_per_gas); tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); tx_env.transact_to = match tx.to { - TransactionKind::Call(to) => TransactTo::Call(to), - TransactionKind::Create => TransactTo::create(), + TxKind::Call(to) => TransactTo::Call(to), + TxKind::Create => TransactTo::create(), }; tx_env.value = tx.value; tx_env.data = tx.input.clone(); @@ -295,8 +295,8 @@ where tx_env.gas_price = U256::ZERO; tx_env.gas_priority_fee = None; match tx.to { - TransactionKind::Call(to) => tx_env.transact_to = TransactTo::Call(to), - TransactionKind::Create => tx_env.transact_to = TransactTo::create(), + TxKind::Call(to) => tx_env.transact_to = TransactTo::Call(to), + TxKind::Create => tx_env.transact_to = TransactTo::create(), } tx_env.value = tx.value; tx_env.data = tx.input.clone(); diff --git a/crates/primitives/src/transaction/eip1559.rs b/crates/primitives/src/transaction/eip1559.rs index 229da9983c247..68da7d8d927df 100644 --- a/crates/primitives/src/transaction/eip1559.rs +++ b/crates/primitives/src/transaction/eip1559.rs @@ -1,5 +1,5 @@ use super::access_list::AccessList; -use crate::{keccak256, Bytes, ChainId, Signature, TransactionKind, TxType, B256, U256}; +use crate::{keccak256, Bytes, ChainId, Signature, TxKind, TxType, B256, U256}; use 
alloy_rlp::{length_of_length, Decodable, Encodable, Header}; use bytes::BytesMut; use reth_codecs::{main_codec, Compact}; @@ -41,7 +41,7 @@ pub struct TxEip1559 { pub max_priority_fee_per_gas: u128, /// The 160-bit address of the message call’s recipient or, for a contract creation /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. - pub to: TransactionKind, + pub to: TxKind, /// A scalar value equal to the number of Wei to /// be transferred to the message call’s recipient or, /// in the case of contract creation, as an endowment @@ -226,7 +226,7 @@ impl TxEip1559 { mod tests { use super::TxEip1559; use crate::{ - transaction::{signature::Signature, TransactionKind}, + transaction::{signature::Signature, TxKind}, AccessList, Address, Transaction, TransactionSigned, B256, U256, }; use std::str::FromStr; @@ -243,7 +243,7 @@ mod tests { chain_id: 1, nonce: 0x42, gas_limit: 44386, - to: TransactionKind::Call( hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), + to: TxKind::Call( hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), value: U256::ZERO, input: hex!("a22cb4650000000000000000000000005eee75727d804a2b13038928d36f8b188945a57a0000000000000000000000000000000000000000000000000000000000000000").into(), max_fee_per_gas: 0x4a817c800, diff --git a/crates/primitives/src/transaction/eip2930.rs b/crates/primitives/src/transaction/eip2930.rs index fde594d7b3e91..86794a5126df0 100644 --- a/crates/primitives/src/transaction/eip2930.rs +++ b/crates/primitives/src/transaction/eip2930.rs @@ -1,5 +1,5 @@ use super::access_list::AccessList; -use crate::{keccak256, Bytes, ChainId, Signature, TransactionKind, TxType, B256, U256}; +use crate::{keccak256, Bytes, ChainId, Signature, TxKind, TxType, B256, U256}; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; use bytes::BytesMut; use reth_codecs::{main_codec, Compact}; @@ -29,7 +29,7 @@ pub struct TxEip2930 { pub gas_limit: u64, /// The 160-bit address of the message call’s 
recipient or, for a contract creation /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. - pub to: TransactionKind, + pub to: TxKind, /// A scalar value equal to the number of Wei to /// be transferred to the message call’s recipient or, /// in the case of contract creation, as an endowment @@ -189,7 +189,7 @@ impl TxEip2930 { mod tests { use super::TxEip2930; use crate::{ - transaction::{signature::Signature, TransactionKind}, + transaction::{signature::Signature, TxKind}, Address, Bytes, Transaction, TransactionSigned, U256, }; use alloy_rlp::{Decodable, Encodable}; @@ -202,7 +202,7 @@ mod tests { nonce: 0, gas_price: 1, gas_limit: 2, - to: TransactionKind::Create, + to: TxKind::Create, value: U256::from(3), input: Bytes::from(vec![1, 2]), access_list: Default::default(), @@ -225,7 +225,7 @@ mod tests { nonce: 0, gas_price: 1, gas_limit: 2, - to: TransactionKind::Call(Address::default()), + to: TxKind::Call(Address::default()), value: U256::from(3), input: Bytes::from(vec![1, 2]), access_list: Default::default(), diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs index a24a87b112a8d..0a3790abeca2b 100644 --- a/crates/primitives/src/transaction/eip4844.rs +++ b/crates/primitives/src/transaction/eip4844.rs @@ -1,7 +1,7 @@ use super::access_list::AccessList; use crate::{ - constants::eip4844::DATA_GAS_PER_BLOB, keccak256, Bytes, ChainId, Signature, TransactionKind, - TxType, B256, U256, + constants::eip4844::DATA_GAS_PER_BLOB, keccak256, Bytes, ChainId, Signature, TxKind, TxType, + B256, U256, }; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; use reth_codecs::{main_codec, Compact}; @@ -54,7 +54,7 @@ pub struct TxEip4844 { pub max_priority_fee_per_gas: u128, /// The 160-bit address of the message call’s recipient or, for a contract creation /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. 
- pub to: TransactionKind, + pub to: TxKind, /// A scalar value equal to the number of Wei to /// be transferred to the message call’s recipient or, /// in the case of contract creation, as an endowment diff --git a/crates/primitives/src/transaction/legacy.rs b/crates/primitives/src/transaction/legacy.rs index eba89f93dcbe7..f2440e13cd48c 100644 --- a/crates/primitives/src/transaction/legacy.rs +++ b/crates/primitives/src/transaction/legacy.rs @@ -1,4 +1,4 @@ -use crate::{keccak256, Bytes, ChainId, Signature, TransactionKind, TxType, B256, U256}; +use crate::{keccak256, Bytes, ChainId, Signature, TxKind, TxType, B256, U256}; use alloy_rlp::{length_of_length, Encodable, Header}; use bytes::BytesMut; use reth_codecs::{main_codec, Compact}; @@ -28,7 +28,7 @@ pub struct TxLegacy { pub gas_limit: u64, /// The 160-bit address of the message call’s recipient or, for a contract creation /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. - pub to: TransactionKind, + pub to: TxKind, /// A scalar value equal to the number of Wei to /// be transferred to the message call’s recipient or, /// in the case of contract creation, as an endowment @@ -173,7 +173,7 @@ impl TxLegacy { mod tests { use super::TxLegacy; use crate::{ - transaction::{signature::Signature, TransactionKind}, + transaction::{signature::Signature, TxKind}, Address, Transaction, TransactionSigned, B256, U256, }; @@ -190,7 +190,7 @@ mod tests { nonce: 0x18, gas_price: 0xfa56ea00, gas_limit: 119902, - to: TransactionKind::Call( hex!("06012c8cf97bead5deae237070f9587f8e7a266d").into()), + to: TxKind::Call( hex!("06012c8cf97bead5deae237070f9587f8e7a266d").into()), value: U256::from(0x1c6bf526340000u64), input: hex!("f7d8c88300000000000000000000000000000000000000000000000000000000000cee6100000000000000000000000000000000000000000000000000000000000ac3e1").into(), }); diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 
c2df133053b06..dae6ab0768405 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,6 +1,6 @@ #[cfg(any(feature = "arbitrary", feature = "zstd-codec"))] use crate::compression::{TRANSACTION_COMPRESSOR, TRANSACTION_DECOMPRESSOR}; -use crate::{keccak256, Address, BlockHashOrNumber, Bytes, TxHash, B256, U256}; +use crate::{keccak256, Address, BlockHashOrNumber, Bytes, TxHash, TxKind, B256, U256}; use alloy_eips::eip2718::Eip2718Error; use alloy_rlp::{ @@ -176,9 +176,9 @@ impl Transaction { } } - /// Gets the transaction's [`TransactionKind`], which is the address of the recipient or - /// [`TransactionKind::Create`] if the transaction is a contract creation. - pub fn kind(&self) -> &TransactionKind { + /// Gets the transaction's [`TxKind`], which is the address of the recipient or + /// [`TxKind::Create`] if the transaction is a contract creation. + pub fn kind(&self) -> &TxKind { match self { Transaction::Legacy(TxLegacy { to, .. }) | Transaction::Eip2930(TxEip2930 { to, .. }) | @@ -194,7 +194,7 @@ impl Transaction { /// /// Returns `None` if this is a `CREATE` transaction. pub fn to(&self) -> Option
{ - self.kind().to() + self.kind().to().copied() } /// Get the transaction's type @@ -641,7 +641,7 @@ impl TryFrom for Transaction { .gas .try_into() .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), + to: tx.to.map_or(TxKind::Create, TxKind::Call), value: tx.value, input: tx.input, })) @@ -655,7 +655,7 @@ impl TryFrom for Transaction { .gas .try_into() .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), + to: tx.to.map_or(TxKind::Create, TxKind::Call), value: tx.value, input: tx.input, access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, @@ -677,7 +677,7 @@ impl TryFrom for Transaction { .gas .try_into() .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), + to: tx.to.map_or(TxKind::Create, TxKind::Call), value: tx.value, access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, input: tx.input, @@ -698,7 +698,7 @@ impl TryFrom for Transaction { .gas .try_into() .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), + to: tx.to.map_or(TxKind::Create, TxKind::Call), value: tx.value, access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, input: tx.input, @@ -829,118 +829,6 @@ impl Encodable for Transaction { } } -/// Whether or not the transaction is a contract creation. -#[derive_arbitrary(compact, rlp)] -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] -pub enum TransactionKind { - /// A transaction that creates a contract. - #[default] - Create, - /// A transaction that calls a contract or transfer. 
- Call(Address), -} - -impl TransactionKind { - /// Returns the address of the contract that will be called or will receive the transfer. - pub fn to(self) -> Option
{ - match self { - TransactionKind::Create => None, - TransactionKind::Call(to) => Some(to), - } - } - - /// Returns true if the transaction is a contract creation. - #[inline] - pub fn is_create(self) -> bool { - matches!(self, TransactionKind::Create) - } - - /// Returns true if the transaction is a contract call. - #[inline] - pub fn is_call(self) -> bool { - matches!(self, TransactionKind::Call(_)) - } - - /// Calculates a heuristic for the in-memory size of the [TransactionKind]. - #[inline] - fn size(self) -> usize { - mem::size_of::() - } -} - -impl From for TransactionKind { - fn from(kind: reth_rpc_types::TransactionKind) -> Self { - match kind { - reth_rpc_types::TransactionKind::Call(to) => Self::Call(to), - reth_rpc_types::TransactionKind::Create => Self::Create, - } - } -} - -impl Compact for TransactionKind { - fn to_compact(self, buf: &mut B) -> usize - where - B: bytes::BufMut + AsMut<[u8]>, - { - match self { - TransactionKind::Create => 0, - TransactionKind::Call(address) => { - address.to_compact(buf); - 1 - } - } - } - - fn from_compact(buf: &[u8], identifier: usize) -> (Self, &[u8]) { - match identifier { - 0 => (TransactionKind::Create, buf), - 1 => { - let (addr, buf) = Address::from_compact(buf, buf.len()); - (TransactionKind::Call(addr), buf) - } - _ => unreachable!("Junk data in database: unknown TransactionKind variant"), - } - } -} - -impl Encodable for TransactionKind { - /// This encodes the `to` field of a transaction request. 
- /// If the [TransactionKind] is a [TransactionKind::Call] it will encode the inner address: - /// `rlp(address)` - /// - /// If the [TransactionKind] is a [TransactionKind::Create] it will encode an empty list: - /// `rlp([])`, which is also - fn encode(&self, out: &mut dyn alloy_rlp::BufMut) { - match self { - TransactionKind::Call(to) => to.encode(out), - TransactionKind::Create => out.put_u8(EMPTY_STRING_CODE), - } - } - - fn length(&self) -> usize { - match self { - TransactionKind::Call(to) => to.length(), - TransactionKind::Create => 1, // EMPTY_STRING_CODE is a single byte - } - } -} - -impl Decodable for TransactionKind { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - if let Some(&first) = buf.first() { - if first == EMPTY_STRING_CODE { - buf.advance(1); - Ok(TransactionKind::Create) - } else { - let addr =
::decode(buf)?; - Ok(TransactionKind::Call(addr)) - } - } else { - Err(RlpError::InputTooShort) - } - } -} - /// Signed transaction without its Hash. Used type for inserting into the DB. /// /// This can by converted to [`TransactionSigned`] by calling [`TransactionSignedNoHash::hash`]. @@ -1856,10 +1744,10 @@ mod tests { use crate::{ hex, sign_message, transaction::{ - from_compact_zstd_unaware, signature::Signature, to_compact_ztd_unaware, - TransactionKind, TxEip1559, TxLegacy, MIN_LENGTH_EIP1559_TX_ENCODED, - MIN_LENGTH_EIP2930_TX_ENCODED, MIN_LENGTH_EIP4844_TX_ENCODED, - MIN_LENGTH_LEGACY_TX_ENCODED, PARALLEL_SENDER_RECOVERY_THRESHOLD, + from_compact_zstd_unaware, signature::Signature, to_compact_ztd_unaware, TxEip1559, + TxKind, TxLegacy, MIN_LENGTH_EIP1559_TX_ENCODED, MIN_LENGTH_EIP2930_TX_ENCODED, + MIN_LENGTH_EIP4844_TX_ENCODED, MIN_LENGTH_LEGACY_TX_ENCODED, + PARALLEL_SENDER_RECOVERY_THRESHOLD, }, Address, Bytes, Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxEip2930, TxEip4844, B256, U256, @@ -1881,13 +1769,13 @@ mod tests { fn raw_kind_encoding_sanity() { // check the 0x80 encoding for Create let mut buf = Vec::new(); - TransactionKind::Create.encode(&mut buf); + TxKind::Create.encode(&mut buf); assert_eq!(buf, vec![0x80]); // check decoding let buf = [0x80]; - let decoded = TransactionKind::decode(&mut &buf[..]).unwrap(); - assert_eq!(decoded, TransactionKind::Create); + let decoded = TxKind::decode(&mut &buf[..]).unwrap(); + assert_eq!(decoded, TxKind::Create); } #[test] @@ -1963,7 +1851,7 @@ mod tests { nonce: 2, gas_price: 1000000000, gas_limit: 100000, - to: TransactionKind::Call( + to: TxKind::Call( Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap(), ), value: U256::from(1000000000000000u64), @@ -1985,7 +1873,7 @@ mod tests { nonce: 1u64, gas_price: 1000000000, gas_limit: 100000u64, - to: TransactionKind::Call(Address::from_slice( + to: TxKind::Call(Address::from_slice( 
&hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046")[..], )), value: U256::from(693361000000000u64), @@ -2006,7 +1894,7 @@ mod tests { nonce: 3, gas_price: 2000000000, gas_limit: 10000000, - to: TransactionKind::Call(Address::from_slice( + to: TxKind::Call(Address::from_slice( &hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046")[..], )), value: U256::from(1000000000000000u64), @@ -2028,7 +1916,7 @@ mod tests { max_priority_fee_per_gas: 1500000000, max_fee_per_gas: 1500000013, gas_limit: 21000, - to: TransactionKind::Call(Address::from_slice( + to: TxKind::Call(Address::from_slice( &hex!("61815774383099e24810ab832a5b2a5425c154d5")[..], )), value: U256::from(3000000000000000000u64), @@ -2050,7 +1938,7 @@ mod tests { nonce: 15, gas_price: 2200000000, gas_limit: 34811, - to: TransactionKind::Call(Address::from_slice( + to: TxKind::Call(Address::from_slice( &hex!("cf7f9e66af820a19257a2108375b180b0ec49167")[..], )), value: U256::from(1234), @@ -2339,7 +2227,7 @@ mod tests { nonce: 2, gas_price: 1000000000, gas_limit: 100000, - to: TransactionKind::Call( + to: TxKind::Call( Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap(), ), value: U256::from(1000000000000000u64), @@ -2388,7 +2276,7 @@ mod tests { nonce: 2, gas_price: 1000000000, gas_limit: 100000, - to: TransactionKind::Call( + to: TxKind::Call( Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap(), ), value: U256::from(1000000000000000u64), diff --git a/crates/primitives/src/transaction/optimism.rs b/crates/primitives/src/transaction/optimism.rs index 0001347b50517..f553f2aa69c6e 100644 --- a/crates/primitives/src/transaction/optimism.rs +++ b/crates/primitives/src/transaction/optimism.rs @@ -1,4 +1,4 @@ -use crate::{Address, Bytes, TransactionKind, TxType, B256, U256}; +use crate::{Address, Bytes, TxKind, TxType, B256, U256}; use alloy_rlp::{ length_of_length, Decodable, Encodable, Error as DecodeError, Header, EMPTY_STRING_CODE, }; @@ -16,7 +16,7 @@ pub struct TxDeposit { pub from: 
Address, /// The address of the recipient account, or the null (zero-length) address if the deposited /// transaction is a contract creation. - pub to: TransactionKind, + pub to: TxKind, /// The ETH value to mint on L2. pub mint: Option, /// The ETH value to send to the recipient account. @@ -169,7 +169,7 @@ mod tests { let original = TxDeposit { source_hash: B256::default(), from: Address::default(), - to: TransactionKind::default(), + to: TxKind::default(), mint: Some(100), value: U256::default(), gas_limit: 50000, @@ -189,7 +189,7 @@ mod tests { let tx_deposit = TxDeposit { source_hash: B256::default(), from: Address::default(), - to: TransactionKind::default(), + to: TxKind::default(), mint: Some(100), value: U256::default(), gas_limit: 50000, @@ -211,7 +211,7 @@ mod tests { let tx_deposit = TxDeposit { source_hash: B256::default(), from: Address::default(), - to: TransactionKind::default(), + to: TxKind::default(), mint: Some(100), value: U256::default(), gas_limit: 50000, diff --git a/crates/revm/src/optimism/processor.rs b/crates/revm/src/optimism/processor.rs index 78940c8b50667..bd68023bee5d1 100644 --- a/crates/revm/src/optimism/processor.rs +++ b/crates/revm/src/optimism/processor.rs @@ -206,7 +206,7 @@ mod tests { }; use reth_primitives::{ b256, Account, Address, Block, ChainSpecBuilder, Header, Signature, StorageKey, - StorageValue, Transaction, TransactionKind, TransactionSigned, TxEip1559, BASE_MAINNET, + StorageValue, Transaction, TransactionSigned, TxEip1559, TxKind, BASE_MAINNET, }; use revm::L1_BLOCK_CONTRACT; use std::{collections::HashMap, str::FromStr, sync::Arc}; @@ -278,7 +278,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce: 0, gas_limit: 21_000, - to: TransactionKind::Call(addr), + to: TxKind::Call(addr), ..Default::default() }), Signature::default(), @@ -287,7 +287,7 @@ mod tests { let tx_deposit = TransactionSigned::from_transaction_and_signature( Transaction::Deposit(reth_primitives::TxDeposit { from: addr, - to: 
TransactionKind::Call(addr), + to: TxKind::Call(addr), gas_limit: 21_000, ..Default::default() }), @@ -352,7 +352,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce: 0, gas_limit: 21_000, - to: TransactionKind::Call(addr), + to: TxKind::Call(addr), ..Default::default() }), Signature::default(), @@ -361,7 +361,7 @@ mod tests { let tx_deposit = TransactionSigned::from_transaction_and_signature( Transaction::Deposit(reth_primitives::TxDeposit { from: addr, - to: TransactionKind::Call(addr), + to: TxKind::Call(addr), gas_limit: 21_000, ..Default::default() }), diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs index f467b22a05c13..e6a85b77d397c 100644 --- a/crates/revm/src/processor.rs +++ b/crates/revm/src/processor.rs @@ -466,7 +466,7 @@ mod tests { bytes, constants::{BEACON_ROOTS_ADDRESS, EIP1559_INITIAL_BASE_FEE, SYSTEM_ADDRESS}, keccak256, Account, Bytes, ChainSpecBuilder, ForkCondition, Signature, Transaction, - TransactionKind, TxEip1559, MAINNET, + TxEip1559, TxKind, MAINNET, }; use revm::{Database, TransitionState}; use std::collections::HashMap; @@ -855,7 +855,7 @@ mod tests { chain_id, nonce: 1, gas_limit: 21_000, - to: TransactionKind::Call(Address::ZERO), + to: TxKind::Call(Address::ZERO), max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, ..Default::default() }), diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index 745d32e349500..1004e93e25f80 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -2,8 +2,8 @@ use alloy_rpc_types::request::{TransactionInput, TransactionRequest}; use reth_primitives::{ - BlockNumber, Transaction as PrimitiveTransaction, TransactionKind as PrimitiveTransactionKind, - TransactionSignedEcRecovered, TxType, B256, + BlockNumber, Transaction as PrimitiveTransaction, TransactionSignedEcRecovered, + TxKind as PrimitiveTransactionKind, TxType, B256, }; #[cfg(feature = 
"optimism")] use reth_rpc_types::optimism::OptimismTransactionFields; diff --git a/crates/rpc/rpc-types-compat/src/transaction/typed.rs b/crates/rpc/rpc-types-compat/src/transaction/typed.rs index 6b0ed52947bb6..03f502a208a29 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/typed.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/typed.rs @@ -16,7 +16,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, gas_price: tx.gas_price.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: tx.kind.into(), + to: to_primitive_transaction_kind(tx.kind), value: tx.value, input: tx.input, }), @@ -25,7 +25,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, gas_price: tx.gas_price.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: tx.kind.into(), + to: to_primitive_transaction_kind(tx.kind), value: tx.value, input: tx.input, access_list: tx.access_list, @@ -35,7 +35,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, max_fee_per_gas: tx.max_fee_per_gas.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: tx.kind.into(), + to: to_primitive_transaction_kind(tx.kind), value: tx.value, input: tx.input, access_list: tx.access_list, @@ -47,7 +47,7 @@ pub fn to_primitive_transaction( gas_limit: tx.gas_limit.to(), max_fee_per_gas: tx.max_fee_per_gas.to(), max_priority_fee_per_gas: tx.max_priority_fee_per_gas.to(), - to: tx.kind.into(), + to: to_primitive_transaction_kind(tx.kind), value: tx.value, access_list: tx.access_list, blob_versioned_hashes: tx.blob_versioned_hashes, @@ -56,3 +56,13 @@ pub fn to_primitive_transaction( }), }) } + +/// Transforms a [reth_rpc_types::TransactionKind] into a [reth_primitives::TxKind] +pub fn to_primitive_transaction_kind( + kind: reth_rpc_types::TransactionKind, +) -> reth_primitives::TxKind { + match kind { + reth_rpc_types::TransactionKind::Call(to) => reth_primitives::TxKind::Call(to), + reth_rpc_types::TransactionKind::Create => reth_primitives::TxKind::Create, + } +} diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs 
b/crates/rpc/rpc/src/eth/api/transactions.rs index 43a75b68ba1d8..2188b8d255bbc 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -15,9 +15,10 @@ use reth_primitives::{ eip4844::calc_blob_gasprice, revm::env::{fill_block_env_with_coinbase, tx_env_with_recovered}, Address, BlockId, BlockNumberOrTag, Bytes, FromRecoveredPooledTransaction, Header, - IntoRecoveredTransaction, Receipt, SealedBlock, SealedBlockWithSenders, - TransactionKind::{Call, Create}, - TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, B256, U256, + IntoRecoveredTransaction, Receipt, SealedBlock, SealedBlockWithSenders, TransactionMeta, + TransactionSigned, TransactionSignedEcRecovered, + TxKind::{Call, Create}, + B256, U256, }; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderBox, StateProviderFactory, diff --git a/crates/storage/codecs/derive/src/compact/generator.rs b/crates/storage/codecs/derive/src/compact/generator.rs index 8cd9070bb4b21..03dab1a144c4c 100644 --- a/crates/storage/codecs/derive/src/compact/generator.rs +++ b/crates/storage/codecs/derive/src/compact/generator.rs @@ -58,12 +58,7 @@ fn generate_from_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> To // it's hard to figure out with derive_macro which types have Bytes fields. // // This removes the requirement of the field to be placed last in the struct. 
- known_types.extend_from_slice(&[ - "TransactionKind", - "AccessList", - "Signature", - "CheckpointBlockRange", - ]); + known_types.extend_from_slice(&["TxKind", "AccessList", "Signature", "CheckpointBlockRange"]); // let mut handle = FieldListHandler::new(fields); let is_enum = fields.iter().any(|field| matches!(field, FieldTypes::EnumVariant(_))); diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index abc785edd3ed6..7614fa8328411 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -161,7 +161,7 @@ fn should_use_alt_impl(ftype: &String, segment: &syn::PathSegment) -> bool { /// length. pub fn get_bit_size(ftype: &str) -> u8 { match ftype { - "TransactionKind" | "bool" | "Option" | "Signature" => 1, + "TransactionKind" | "TxKind" | "bool" | "Option" | "Signature" => 1, "TxType" => 2, "u64" | "BlockNumber" | "TxNumber" | "ChainId" | "NumTransactions" => 4, "u128" => 5, diff --git a/crates/transaction-pool/src/test_utils/gen.rs b/crates/transaction-pool/src/test_utils/gen.rs index 52a3127c79b41..2e3c71828dcd5 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -2,8 +2,8 @@ use crate::EthPooledTransaction; use rand::Rng; use reth_primitives::{ constants::MIN_PROTOCOL_BASE_FEE, sign_message, AccessList, Address, Bytes, Transaction, - TransactionKind, TransactionSigned, TryFromRecoveredTransaction, TxEip1559, TxEip4844, - TxLegacy, B256, MAINNET, U256, + TransactionSigned, TryFromRecoveredTransaction, TxEip1559, TxEip4844, TxKind, TxLegacy, B256, + MAINNET, U256, }; /// A generator for transactions for testing purposes. @@ -129,7 +129,7 @@ pub struct TransactionBuilder { /// processing. pub max_priority_fee_per_gas: u128, /// The recipient or contract address of the transaction. - pub to: TransactionKind, + pub to: TxKind, /// The value to be transferred in the transaction. 
pub value: U256, /// The list of addresses and storage keys that the transaction can access. @@ -246,7 +246,7 @@ impl TransactionBuilder { /// Sets the recipient or contract address for the transaction builder. pub const fn to(mut self, to: Address) -> Self { - self.to = TransactionKind::Call(to); + self.to = TxKind::Call(to); self } @@ -306,7 +306,7 @@ impl TransactionBuilder { /// Sets the recipient or contract address for the transaction, mutable reference version. pub fn set_to(&mut self, to: Address) -> &mut Self { - self.to = TransactionKind::Call(to); + self.to = TxKind::Call(to); self } diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index d250b6c10bd31..7eda40e58e706 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -16,8 +16,8 @@ use reth_primitives::{ transaction::TryFromRecoveredTransactionError, AccessList, Address, BlobTransactionSidecar, Bytes, FromRecoveredPooledTransaction, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, Signature, Transaction, - TransactionKind, TransactionSigned, TransactionSignedEcRecovered, TryFromRecoveredTransaction, - TxEip1559, TxEip2930, TxEip4844, TxHash, TxLegacy, TxType, B256, EIP1559_TX_TYPE_ID, + TransactionSigned, TransactionSignedEcRecovered, TryFromRecoveredTransaction, TxEip1559, + TxEip2930, TxEip4844, TxHash, TxKind, TxLegacy, TxType, B256, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, U256, }; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; @@ -105,7 +105,7 @@ pub enum MockTransaction { /// The gas limit for the transaction. gas_limit: u64, /// The transaction's destination. - to: TransactionKind, + to: TxKind, /// The value of the transaction. value: U256, /// The transaction input data. @@ -128,7 +128,7 @@ pub enum MockTransaction { /// The gas limit for the transaction. gas_limit: u64, /// The transaction's destination. 
- to: TransactionKind, + to: TxKind, /// The value of the transaction. value: U256, /// The access list associated with the transaction. @@ -155,7 +155,7 @@ pub enum MockTransaction { /// The gas limit for the transaction. gas_limit: u64, /// The transaction's destination. - to: TransactionKind, + to: TxKind, /// The value of the transaction. value: U256, /// The access list associated with the transaction. @@ -176,7 +176,7 @@ pub enum MockTransaction { /// The transaction nonce. nonce: u64, /// The transaction's destination. - to: TransactionKind, + to: TxKind, /// The gas limit for the transaction. gas_limit: u64, /// The transaction input data. @@ -213,7 +213,7 @@ impl MockTransaction { nonce: 0, gas_price: 0, gas_limit: 0, - to: TransactionKind::Call(Address::random()), + to: TxKind::Call(Address::random()), value: Default::default(), input: Default::default(), size: Default::default(), @@ -229,7 +229,7 @@ impl MockTransaction { max_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, max_priority_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, gas_limit: 0, - to: TransactionKind::Call(Address::random()), + to: TxKind::Call(Address::random()), value: Default::default(), input: Bytes::new(), accesslist: Default::default(), @@ -247,7 +247,7 @@ impl MockTransaction { max_priority_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, max_fee_per_blob_gas: DATA_GAS_PER_BLOB as u128, gas_limit: 0, - to: TransactionKind::Call(Address::random()), + to: TxKind::Call(Address::random()), value: Default::default(), input: Bytes::new(), accesslist: Default::default(), @@ -272,7 +272,7 @@ impl MockTransaction { hash: B256::random(), sender: Address::random(), nonce: 0, - to: TransactionKind::Call(Address::random()), + to: TxKind::Call(Address::random()), gas_limit: 0, input: Bytes::new(), value: Default::default(), @@ -671,7 +671,7 @@ impl PoolTransaction for MockTransaction { } /// Returns the transaction kind associated with the transaction. 
- fn kind(&self) -> &TransactionKind { + fn kind(&self) -> &TxKind { match self { MockTransaction::Legacy { to, .. } | MockTransaction::Eip1559 { to, .. } | diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 459c0bf10015d..c5603ec7bd01c 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -13,9 +13,9 @@ use reth_primitives::{ kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, AccessList, Address, BlobTransactionSidecar, BlobTransactionValidationError, FromRecoveredPooledTransaction, IntoRecoveredTransaction, PeerId, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionKind, - TransactionSignedEcRecovered, TryFromRecoveredTransaction, TxEip4844, TxHash, B256, - EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, U256, + PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSignedEcRecovered, + TryFromRecoveredTransaction, TxEip4844, TxHash, TxKind, B256, EIP1559_TX_TYPE_ID, + EIP4844_TX_TYPE_ID, U256, }; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -811,14 +811,14 @@ pub trait PoolTransaction: /// otherwise returns the gas price. fn priority_fee_or_price(&self) -> u128; - /// Returns the transaction's [`TransactionKind`], which is the address of the recipient or - /// [`TransactionKind::Create`] if the transaction is a contract creation. - fn kind(&self) -> &TransactionKind; + /// Returns the transaction's [`TxKind`], which is the address of the recipient or + /// [`TxKind::Create`] if the transaction is a contract creation. + fn kind(&self) -> &TxKind; - /// Returns the recipient of the transaction if it is not a [TransactionKind::Create] + /// Returns the recipient of the transaction if it is not a [TxKind::Create] /// transaction. fn to(&self) -> Option
{ - (*self.kind()).to() + (*self.kind()).to().copied() } /// Returns the input data of this transaction. @@ -1056,9 +1056,9 @@ impl PoolTransaction for EthPooledTransaction { self.transaction.priority_fee_or_price() } - /// Returns the transaction's [`TransactionKind`], which is the address of the recipient or - /// [`TransactionKind::Create`] if the transaction is a contract creation. - fn kind(&self) -> &TransactionKind { + /// Returns the transaction's [`TxKind`], which is the address of the recipient or + /// [`TxKind::Create`] if the transaction is a contract creation. + fn kind(&self) -> &TxKind { self.transaction.kind() } From 79235a74fa5e4260e83a2ca63b74148cc1a29592 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Thu, 25 Apr 2024 15:14:20 +0200 Subject: [PATCH 327/700] chore: add `node-*` crates to `CODEOWNERS` (#7869) --- CODEOWNERS | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CODEOWNERS b/CODEOWNERS index 3ea162bf6ae75..be8243ea2c2c9 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -7,6 +7,9 @@ crates/exex @onbjerg @shekhirin crates/metrics @onbjerg crates/net/ @emhane @mattsse @Rjected crates/net/downloaders/ @onbjerg @rkrasiuk +crates/node-builder/ @mattsse @Rjected @onbjerg +crates/node-core/ @mattsse @Rjected @onbjerg +crates/node-ethereum/ @mattsse @Rjected crates/payload/ @mattsse @Rjected crates/prune @shekhirin @joshieDo crates/revm/ @mattsse @rakita From ed45c3c10d832eb0d6ad0ccc7c2c33d380c5d16f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 25 Apr 2024 15:37:55 +0200 Subject: [PATCH 328/700] chore: some touchups (#7873) --- crates/payload/optimism/src/builder.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/crates/payload/optimism/src/builder.rs b/crates/payload/optimism/src/builder.rs index 1d1a2dadecdcb..7d8efa6899e2b 100644 --- a/crates/payload/optimism/src/builder.rs +++ b/crates/payload/optimism/src/builder.rs @@ -250,13 +250,15 @@ where } = config; debug!(target: "payload_builder", 
id=%attributes.payload_attributes.payload_id(), parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building new payload"); + let mut cumulative_gas_used = 0; let block_gas_limit: u64 = attributes .gas_limit .unwrap_or_else(|| initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX)); let base_fee = initialized_block_env.basefee.to::(); - let mut executed_txs = Vec::new(); + let mut executed_txs = Vec::with_capacity(attributes.transactions.len()); + let mut best_txs = pool.best_transactions_with_attributes(BestTransactionsAttributes::new( base_fee, initialized_block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), @@ -288,11 +290,12 @@ where attributes.payload_attributes.timestamp, &mut db, ) - .map_err(|_| { + .map_err(|err| { + warn!(target: "payload_builder", %err, "missing create2 deployer, skipping block."); PayloadBuilderError::other(OptimismPayloadBuilderError::ForceCreate2DeployerFail) })?; - let mut receipts = Vec::new(); + let mut receipts = Vec::with_capacity(attributes.transactions.len()); for sequencer_tx in &attributes.transactions { // Check if the job was cancelled, if so we can exit early. if cancel.is_cancelled() { @@ -300,7 +303,7 @@ where } // A sequencer's block should never contain blob transactions. - if matches!(sequencer_tx.tx_type(), TxType::Eip4844) { + if sequencer_tx.is_eip4844() { return Err(PayloadBuilderError::other( OptimismPayloadBuilderError::BlobTransactionRejected, )) @@ -398,11 +401,9 @@ where continue } - // A sequencer's block should never contain blob transactions. - if pool_tx.tx_type() == TxType::Eip4844 as u8 { - return Err(PayloadBuilderError::other( - OptimismPayloadBuilderError::BlobTransactionRejected, - )) + // A sequencer's block should never contain blob or deposit transactions from the pool. 
+ if pool_tx.is_eip4844() || pool_tx.tx_type() == TxType::Deposit as u8 { + best_txs.mark_invalid(&pool_tx) } // check if the job was cancelled, if so we can exit early From 421888d22f038b0d304ac83efd600070f6c84ba4 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Thu, 25 Apr 2024 15:55:37 +0200 Subject: [PATCH 329/700] fix: log actual notification id in exex manager (#7874) --- crates/exex/src/manager.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 95b950f328496..1c9eaf9ef3096 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -280,12 +280,16 @@ impl Future for ExExManager { // it is a logic error for this to ever underflow since the manager manages the // notification IDs - let notification_id = exex + let notification_index = exex .next_notification_id .checked_sub(self.min_id) .expect("exex expected notification ID outside the manager's range"); - if let Some(notification) = self.buffer.get(notification_id) { - debug!(exex.id, notification_id, "sent notification to exex"); + if let Some(notification) = self.buffer.get(notification_index) { + debug!( + exex.id, + notification_id = exex.next_notification_id, + "sent notification to exex" + ); if let Poll::Ready(Err(err)) = exex.send(cx, notification) { // the channel was closed, which is irrecoverable for the manager return Poll::Ready(Err(err.into())) From 6f22621f4375cc58176f7ddef9182c9439e54153 Mon Sep 17 00:00:00 2001 From: Delweng Date: Thu, 25 Apr 2024 21:56:04 +0800 Subject: [PATCH 330/700] chore: rename BlockChain to Blockchain (#7840) Signed-off-by: jsvisa --- crates/blockchain-tree/src/block_indices.rs | 20 ++++++------- crates/blockchain-tree/src/blockchain_tree.rs | 30 +++++++++---------- crates/blockchain-tree/src/state.rs | 18 +++++------ crates/consensus/beacon/src/engine/mod.rs | 4 +-- .../interfaces/src/blockchain_tree/error.rs | 2 +- 
.../storage/provider/src/test_utils/blocks.rs | 6 ++-- 6 files changed, 40 insertions(+), 40 deletions(-) diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index a262148b9fed1..373b419b37536 100644 --- a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -1,6 +1,6 @@ //! Implementation of [`BlockIndices`] related to [`super::BlockchainTree`] -use super::state::BlockChainId; +use super::state::BlockchainId; use crate::canonical_chain::CanonicalChain; use linked_hash_set::LinkedHashSet; use reth_primitives::{BlockHash, BlockNumHash, BlockNumber, SealedBlockWithSenders}; @@ -39,7 +39,7 @@ pub struct BlockIndices { /// hashes. block_number_to_block_hashes: BTreeMap>, /// Block hashes and side chain they belong - blocks_to_chain: HashMap, + blocks_to_chain: HashMap, } impl BlockIndices { @@ -71,7 +71,7 @@ impl BlockIndices { } /// Return block to chain id - pub fn blocks_to_chain(&self) -> &HashMap { + pub fn blocks_to_chain(&self) -> &HashMap { &self.blocks_to_chain } @@ -119,14 +119,14 @@ impl BlockIndices { &mut self, block_number: BlockNumber, block_hash: BlockHash, - chain_id: BlockChainId, + chain_id: BlockchainId, ) { self.block_number_to_block_hashes.entry(block_number).or_default().insert(block_hash); self.blocks_to_chain.insert(block_hash, chain_id); } /// Insert block to chain and fork child indices of the new chain - pub(crate) fn insert_chain(&mut self, chain_id: BlockChainId, chain: &Chain) { + pub(crate) fn insert_chain(&mut self, chain_id: BlockchainId, chain: &Chain) { for (number, block) in chain.blocks().iter() { // add block -> chain_id index self.blocks_to_chain.insert(block.hash(), chain_id); @@ -139,7 +139,7 @@ impl BlockIndices { } /// Get the chain ID the block belongs to - pub(crate) fn get_blocks_chain_id(&self, block: &BlockHash) -> Option { + pub(crate) fn get_blocks_chain_id(&self, block: &BlockHash) -> Option { 
self.blocks_to_chain.get(block).cloned() } @@ -149,7 +149,7 @@ impl BlockIndices { pub(crate) fn update_block_hashes( &mut self, hashes: BTreeMap, - ) -> (BTreeSet, Vec) { + ) -> (BTreeSet, Vec) { // set new canonical hashes. self.canonical_chain.replace(hashes.clone()); @@ -218,7 +218,7 @@ impl BlockIndices { /// Remove chain from indices and return dependent chains that need to be removed. /// Does the cleaning of the tree and removing blocks from the chain. - pub fn remove_chain(&mut self, chain: &Chain) -> BTreeSet { + pub fn remove_chain(&mut self, chain: &Chain) -> BTreeSet { chain .blocks() .iter() @@ -234,7 +234,7 @@ impl BlockIndices { &mut self, block_number: BlockNumber, block_hash: BlockHash, - ) -> BTreeSet { + ) -> BTreeSet { // rm number -> block if let btree_map::Entry::Occupied(mut entry) = self.block_number_to_block_hashes.entry(block_number) @@ -327,7 +327,7 @@ impl BlockIndices { &mut self, finalized_block: BlockNumber, num_of_additional_canonical_hashes_to_retain: u64, - ) -> BTreeSet { + ) -> BTreeSet { // get finalized chains. blocks between [self.last_finalized,finalized_block). // Dont remove finalized_block, as sidechain can point to it. 
let finalized_blocks: Vec = self diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index b1688fda91fd1..b98cc664ad70d 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -2,7 +2,7 @@ use crate::{ metrics::{MakeCanonicalAction, MakeCanonicalDurationsRecorder, TreeMetrics}, - state::{BlockChainId, TreeState}, + state::{BlockchainId, TreeState}, AppendableChain, BlockIndices, BlockchainTreeConfig, BundleStateData, TreeExternals, }; use reth_consensus::{Consensus, ConsensusError}; @@ -441,7 +441,7 @@ where fn try_insert_block_into_side_chain( &mut self, block: SealedBlockWithSenders, - chain_id: BlockChainId, + chain_id: BlockchainId, block_validation_kind: BlockValidationKind, ) -> Result { let block_num_hash = block.num_hash(); @@ -514,7 +514,7 @@ where /// # Note /// /// This is not cached in order to save memory. - fn all_chain_hashes(&self, chain_id: BlockChainId) -> BTreeMap { + fn all_chain_hashes(&self, chain_id: BlockchainId) -> BTreeMap { let mut chain_id = chain_id; let mut hashes = BTreeMap::new(); loop { @@ -553,7 +553,7 @@ where /// the block on /// /// Returns `None` if the chain is unknown. - fn canonical_fork(&self, chain_id: BlockChainId) -> Option { + fn canonical_fork(&self, chain_id: BlockchainId) -> Option { let mut chain_id = chain_id; let mut fork; loop { @@ -572,13 +572,13 @@ where /// Insert a chain into the tree. /// /// Inserts a chain into the tree and builds the block indices. - fn insert_chain(&mut self, chain: AppendableChain) -> Option { + fn insert_chain(&mut self, chain: AppendableChain) -> Option { self.state.insert_chain(chain) } /// Iterate over all child chains that depend on this block and return /// their ids. - fn find_all_dependent_chains(&self, block: &BlockHash) -> HashSet { + fn find_all_dependent_chains(&self, block: &BlockHash) -> HashSet { // Find all forks of given block. 
let mut dependent_block = self.block_indices().fork_to_child().get(block).cloned().unwrap_or_default(); @@ -609,7 +609,7 @@ where /// This method searches for any chain that depended on this block being part of the canonical /// chain. Each dependent chain's state is then updated with state entries removed from the /// plain state during the unwind. - fn insert_unwound_chain(&mut self, chain: AppendableChain) -> Option { + fn insert_unwound_chain(&mut self, chain: AppendableChain) -> Option { // iterate over all blocks in chain and find any fork blocks that are in tree. for (number, block) in chain.blocks().iter() { let hash = block.hash(); @@ -893,7 +893,7 @@ where /// The pending part of the chain is reinserted back into the tree with the same `chain_id`. fn remove_and_split_chain( &mut self, - chain_id: BlockChainId, + chain_id: BlockchainId, split_at: ChainSplitTarget, ) -> Option { let chain = self.state.chains.remove(&chain_id)?; @@ -1278,7 +1278,7 @@ mod tests { }; use reth_provider::{ test_utils::{ - blocks::BlockChainTestData, create_test_provider_factory_with_chain_spec, + blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec, TestExecutorFactory, }, ProviderFactory, @@ -1339,7 +1339,7 @@ mod tests { /// Number of chains chain_num: Option, /// Check block to chain index - block_to_chain: Option>, + block_to_chain: Option>, /// Check fork to child index fork_to_child: Option>>, /// Pending blocks @@ -1354,7 +1354,7 @@ mod tests { self } - fn with_block_to_chain(mut self, block_to_chain: HashMap) -> Self { + fn with_block_to_chain(mut self, block_to_chain: HashMap) -> Self { self.block_to_chain = Some(block_to_chain); self } @@ -1602,7 +1602,7 @@ mod tests { #[test] fn sidechain_block_hashes() { - let data = BlockChainTestData::default_from_number(11); + let data = BlockchainTestData::default_from_number(11); let (block1, exec1) = data.blocks[0].clone(); let (block2, exec2) = data.blocks[1].clone(); let (block3, exec3) = 
data.blocks[2].clone(); @@ -1678,7 +1678,7 @@ mod tests { #[test] fn cached_trie_updates() { - let data = BlockChainTestData::default_from_number(11); + let data = BlockchainTestData::default_from_number(11); let (block1, exec1) = data.blocks[0].clone(); let (block2, exec2) = data.blocks[1].clone(); let (block3, exec3) = data.blocks[2].clone(); @@ -1766,7 +1766,7 @@ mod tests { #[test] fn test_side_chain_fork() { - let data = BlockChainTestData::default_from_number(11); + let data = BlockchainTestData::default_from_number(11); let (block1, exec1) = data.blocks[0].clone(); let (block2, exec2) = data.blocks[1].clone(); let genesis = data.genesis; @@ -1864,7 +1864,7 @@ mod tests { #[test] fn sanity_path() { - let data = BlockChainTestData::default_from_number(11); + let data = BlockchainTestData::default_from_number(11); let (block1, exec1) = data.blocks[0].clone(); let (block2, exec2) = data.blocks[1].clone(); let genesis = data.genesis; diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs index f741df8ec20a4..5013be8c1ad56 100644 --- a/crates/blockchain-tree/src/state.rs +++ b/crates/blockchain-tree/src/state.rs @@ -10,7 +10,7 @@ pub(crate) struct TreeState { /// Keeps track of new unique identifiers for chains block_chain_id_generator: u64, /// The tracked chains and their current data. - pub(crate) chains: HashMap, + pub(crate) chains: HashMap, /// Indices to block and their connection to the canonical chain. /// /// This gets modified by the tree itself and is read from engine API/RPC to access the pending @@ -41,10 +41,10 @@ impl TreeState { /// Issues a new unique identifier for a new chain. #[inline] - fn next_id(&mut self) -> BlockChainId { + fn next_id(&mut self) -> BlockchainId { let id = self.block_chain_id_generator; self.block_chain_id_generator += 1; - BlockChainId(id) + BlockchainId(id) } /// Expose internal indices of the BlockchainTree. @@ -85,7 +85,7 @@ impl TreeState { /// Insert a chain into the tree. 
/// /// Inserts a chain into the tree and builds the block indices. - pub(crate) fn insert_chain(&mut self, chain: AppendableChain) -> Option { + pub(crate) fn insert_chain(&mut self, chain: AppendableChain) -> Option { if chain.is_empty() { return None } @@ -113,17 +113,17 @@ impl TreeState { /// The ID of a sidechain internally in a [`BlockchainTree`][super::BlockchainTree]. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd)] -pub struct BlockChainId(u64); +pub struct BlockchainId(u64); -impl From for u64 { - fn from(value: BlockChainId) -> Self { +impl From for u64 { + fn from(value: BlockchainId) -> Self { value.0 } } #[cfg(test)] -impl From for BlockChainId { +impl From for BlockchainId { fn from(value: u64) -> Self { - BlockChainId(value) + BlockchainId(value) } } diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 5e22a48605648..72fc972971cf7 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -2354,7 +2354,7 @@ mod tests { genesis::{Genesis, GenesisAllocator}, Hardfork, U256, }; - use reth_provider::test_utils::blocks::BlockChainTestData; + use reth_provider::test_utils::blocks::BlockchainTestData; #[tokio::test] async fn new_payload_before_forkchoice() { @@ -2569,7 +2569,7 @@ mod tests { #[tokio::test] async fn payload_pre_merge() { - let data = BlockChainTestData::default(); + let data = BlockchainTestData::default(); let mut block1 = data.blocks[0].0.block.clone(); block1 .header diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index 44f1f50bcda7c..b636985766a10 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -18,7 +18,7 @@ pub enum BlockchainTreeError { last_finalized: BlockNumber, }, /// Thrown if no side chain could be found for the block. 
- #[error("blockChainId can't be found in BlockchainTree with internal index {chain_id}")] + #[error("chainId can't be found in BlockchainTree with internal index {chain_id}")] BlockSideChainIdConsistency { /// The internal identifier for the side chain. chain_id: u64, diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 39b6d3535b482..32ecb489758a2 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -62,14 +62,14 @@ const BLOCK_RLP: [u8; 610] = hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd1 /// Test chain with genesis, blocks, execution results /// that have valid changesets. #[derive(Debug)] -pub struct BlockChainTestData { +pub struct BlockchainTestData { /// Genesis pub genesis: SealedBlock, /// Blocks with its execution result pub blocks: Vec<(SealedBlockWithSenders, BundleStateWithReceipts)>, } -impl BlockChainTestData { +impl BlockchainTestData { /// Create test data with two blocks that are connected, specifying their block numbers. 
pub fn default_from_number(first: BlockNumber) -> Self { let one = block1(first); @@ -85,7 +85,7 @@ impl BlockChainTestData { } } -impl Default for BlockChainTestData { +impl Default for BlockchainTestData { fn default() -> Self { let one = block1(1); let mut extended_state = one.1.clone(); From 29e5df81a46a476110116f65d0d4757391201968 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 25 Apr 2024 15:56:42 +0200 Subject: [PATCH 331/700] chore: bidirectional eq for TxType (#7876) --- crates/primitives/src/transaction/tx_type.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 84a099cb76605..11df417d4bd4e 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -85,17 +85,17 @@ impl TryFrom for TxType { fn try_from(value: u8) -> Result { #[cfg(feature = "optimism")] - if value == TxType::Deposit as u8 { + if value == TxType::Deposit { return Ok(TxType::Deposit) } - if value == TxType::Legacy as u8 { + if value == TxType::Legacy { return Ok(TxType::Legacy) - } else if value == TxType::Eip2930 as u8 { + } else if value == TxType::Eip2930 { return Ok(TxType::Eip2930) - } else if value == TxType::Eip1559 as u8 { + } else if value == TxType::Eip1559 { return Ok(TxType::Eip1559) - } else if value == TxType::Eip4844 as u8 { + } else if value == TxType::Eip4844 { return Ok(TxType::Eip4844) } @@ -175,6 +175,12 @@ impl PartialEq for TxType { } } +impl PartialEq for u8 { + fn eq(&self, other: &TxType) -> bool { + *self == *other as u8 + } +} + #[cfg(test)] mod tests { use super::*; From 1c81fae4d1acd73c21577ec593f6a1f15a3b07b9 Mon Sep 17 00:00:00 2001 From: Andrzej Sulkowski <111314156+andrzejSulkowski@users.noreply.github.com> Date: Thu, 25 Apr 2024 17:12:30 +0200 Subject: [PATCH 332/700] refactor: rename some examples (#7881) --- Cargo.lock | 96 +++++++++---------- Cargo.toml | 8 +- 
examples/README.md | 10 +- .../Cargo.toml | 2 +- .../src/main.rs | 0 .../Cargo.toml | 2 +- .../src/main.rs | 2 +- .../Cargo.toml | 2 +- .../src/main.rs | 2 +- .../Cargo.toml | 2 +- .../src/main.rs | 2 +- 11 files changed, 64 insertions(+), 64 deletions(-) rename examples/{custom-node => custom-engine-types}/Cargo.toml (95%) rename examples/{custom-node => custom-engine-types}/src/main.rs (100%) rename examples/{additional-rpc-namespace-in-cli => node-custom-rpc}/Cargo.toml (90%) rename examples/{additional-rpc-namespace-in-cli => node-custom-rpc}/src/main.rs (97%) rename examples/{cli-extension-event-hooks => node-event-hooks}/Cargo.toml (82%) rename examples/{cli-extension-event-hooks => node-event-hooks}/src/main.rs (95%) rename examples/{trace-transaction-cli => txpool-tracing}/Cargo.toml (88%) rename examples/{trace-transaction-cli => txpool-tracing}/src/main.rs (96%) diff --git a/Cargo.lock b/Cargo.lock index 6b2e0d01727b5..343697508a18d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,19 +2,6 @@ # It is not intended for manual editing. 
version = 3 -[[package]] -name = "additional-rpc-namespace-in-cli" -version = "0.0.0" -dependencies = [ - "clap", - "eyre", - "jsonrpsee", - "reth", - "reth-node-ethereum", - "reth-transaction-pool", - "tokio", -] - [[package]] name = "addr2line" version = "0.21.0" @@ -1564,14 +1551,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" -[[package]] -name = "cli-extension-event-hooks" -version = "0.0.0" -dependencies = [ - "reth", - "reth-node-ethereum", -] - [[package]] name = "coins-bip32" version = "0.8.7" @@ -2010,47 +1989,47 @@ dependencies = [ ] [[package]] -name = "custom-evm" +name = "custom-engine-types" version = "0.0.0" dependencies = [ "eyre", "reth", + "reth-basic-payload-builder", + "reth-ethereum-payload-builder", "reth-node-api", "reth-node-core", "reth-node-ethereum", + "reth-payload-builder", "reth-primitives", + "reth-rpc-types", "reth-tracing", + "serde", + "thiserror", "tokio", ] [[package]] -name = "custom-inspector" +name = "custom-evm" version = "0.0.0" dependencies = [ - "clap", - "futures-util", + "eyre", "reth", + "reth-node-api", + "reth-node-core", "reth-node-ethereum", + "reth-primitives", + "reth-tracing", + "tokio", ] [[package]] -name = "custom-node" +name = "custom-inspector" version = "0.0.0" dependencies = [ - "eyre", + "clap", + "futures-util", "reth", - "reth-basic-payload-builder", - "reth-ethereum-payload-builder", - "reth-node-api", - "reth-node-core", "reth-node-ethereum", - "reth-payload-builder", - "reth-primitives", - "reth-rpc-types", - "reth-tracing", - "serde", - "thiserror", - "tokio", ] [[package]] @@ -4904,6 +4883,27 @@ dependencies = [ "libc", ] +[[package]] +name = "node-custom-rpc" +version = "0.0.0" +dependencies = [ + "clap", + "eyre", + "jsonrpsee", + "reth", + "reth-node-ethereum", + "reth-transaction-pool", + "tokio", +] + +[[package]] +name = "node-event-hooks" +version = "0.0.0" +dependencies = [ 
+ "reth", + "reth-node-ethereum", +] + [[package]] name = "nom" version = "7.1.3" @@ -9277,16 +9277,6 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" -[[package]] -name = "trace-transaction-cli" -version = "0.0.0" -dependencies = [ - "clap", - "futures-util", - "reth", - "reth-node-ethereum", -] - [[package]] name = "tracing" version = "0.1.40" @@ -9531,6 +9521,16 @@ dependencies = [ "toml", ] +[[package]] +name = "txpool-tracing" +version = "0.0.0" +dependencies = [ + "clap", + "futures-util", + "reth", + "reth-node-ethereum", +] + [[package]] name = "typenum" version = "1.17.0" diff --git a/Cargo.toml b/Cargo.toml index 73597b311a46d..7b9b63e956ab4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -71,17 +71,17 @@ members = [ "crates/trie/", "crates/trie-parallel/", "examples/", - "examples/additional-rpc-namespace-in-cli/", + "examples/node-custom-rpc/", "examples/beacon-api-sse/", - "examples/cli-extension-event-hooks/", + "examples/node-event-hooks/", "examples/custom-evm/", - "examples/custom-node/", + "examples/custom-engine-types/", "examples/custom-node-components/", "examples/custom-dev-node/", "examples/custom-payload-builder/", "examples/manual-p2p/", "examples/rpc-db/", - "examples/trace-transaction-cli/", + "examples/txpool-tracing/", "examples/polygon-p2p/", "examples/custom-inspector/", "examples/exex/minimal/", diff --git a/examples/README.md b/examples/README.md index 791851a46e547..db0bdb999bd5a 100644 --- a/examples/README.md +++ b/examples/README.md @@ -11,13 +11,13 @@ to make a PR! 
## Node Builder | Example | Description | -| ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------ | -| [Additional RPC namespace](./additional-rpc-namespace-in-cli) | Illustrates how to add custom CLI parameters and set up a custom RPC namespace | -| [Custom event hooks](./cli-extension-event-hooks) | Illustrates how to hook to various node lifecycle events | +|---------------------------------------------------------------| ------------------------------------------------------------------------------------------------ | +| [Additional RPC namespace](./node-custom-rpc) | Illustrates how to add custom CLI parameters and set up a custom RPC namespace | +| [Custom event hooks](./node-event-hooks) | Illustrates how to hook to various node lifecycle events | | [Custom dev node](./custom-dev-node) | Illustrates how to run a custom dev node programmatically and submit a transaction to it via RPC | | [Custom EVM](./custom-evm) | Illustrates how to implement a node with a custom EVM | | [Custom inspector](./custom-inspector) | Illustrates how to use a custom EVM inspector to trace new transactions | -| [Custom node](./custom-node) | Illustrates how to create a node with custom engine types | +| [Custom engine types](./custom-engine-types) | Illustrates how to create a node with custom engine types | | [Custom node components](./custom-node-components) | Illustrates how to configure custom node components | | [Custom payload builder](./custom-payload-builder) | Illustrates how to use a custom payload builder | @@ -50,7 +50,7 @@ to make a PR! 
| Example | Description | | ----------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- | -| [Trace pending transactions](./trace-transaction-cli) | Illustrates how to trace pending transactions as they arrive in the mempool | +| [Trace pending transactions](./txpool-tracing) | Illustrates how to trace pending transactions as they arrive in the mempool | | [Standalone txpool](./network-txpool.rs) | Illustrates how to use the network as a standalone component together with a transaction pool with a custom pool validator | ## P2P diff --git a/examples/custom-node/Cargo.toml b/examples/custom-engine-types/Cargo.toml similarity index 95% rename from examples/custom-node/Cargo.toml rename to examples/custom-engine-types/Cargo.toml index 9d41edafd47d7..7386313068a08 100644 --- a/examples/custom-node/Cargo.toml +++ b/examples/custom-engine-types/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "custom-node" +name = "custom-engine-types" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/custom-node/src/main.rs b/examples/custom-engine-types/src/main.rs similarity index 100% rename from examples/custom-node/src/main.rs rename to examples/custom-engine-types/src/main.rs diff --git a/examples/additional-rpc-namespace-in-cli/Cargo.toml b/examples/node-custom-rpc/Cargo.toml similarity index 90% rename from examples/additional-rpc-namespace-in-cli/Cargo.toml rename to examples/node-custom-rpc/Cargo.toml index 960dd86d02bb5..f1c5d95d9f1d2 100644 --- a/examples/additional-rpc-namespace-in-cli/Cargo.toml +++ b/examples/node-custom-rpc/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "additional-rpc-namespace-in-cli" +name = "node-custom-rpc" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/additional-rpc-namespace-in-cli/src/main.rs b/examples/node-custom-rpc/src/main.rs similarity index 97% rename from 
examples/additional-rpc-namespace-in-cli/src/main.rs rename to examples/node-custom-rpc/src/main.rs index a4713f931c940..08b27d3ac4a67 100644 --- a/examples/additional-rpc-namespace-in-cli/src/main.rs +++ b/examples/node-custom-rpc/src/main.rs @@ -3,7 +3,7 @@ //! Run with //! //! ```not_rust -//! cargo run -p additional-rpc-namespace-in-cli -- node --http --ws --enable-ext +//! cargo run -p node-custom-rpc -- node --http --ws --enable-ext //! ``` //! //! This installs an additional RPC method `txpoolExt_transactionCount` that can be queried via [cast](https://github.com/foundry-rs/foundry) diff --git a/examples/cli-extension-event-hooks/Cargo.toml b/examples/node-event-hooks/Cargo.toml similarity index 82% rename from examples/cli-extension-event-hooks/Cargo.toml rename to examples/node-event-hooks/Cargo.toml index 8664057e7d85f..eb36722aadee1 100644 --- a/examples/cli-extension-event-hooks/Cargo.toml +++ b/examples/node-event-hooks/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "cli-extension-event-hooks" +name = "node-event-hooks" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/cli-extension-event-hooks/src/main.rs b/examples/node-event-hooks/src/main.rs similarity index 95% rename from examples/cli-extension-event-hooks/src/main.rs rename to examples/node-event-hooks/src/main.rs index 9f09d7a3cb1fd..b9cd53298b4b8 100644 --- a/examples/cli-extension-event-hooks/src/main.rs +++ b/examples/node-event-hooks/src/main.rs @@ -4,7 +4,7 @@ //! Run with //! //! ```not_rust -//! cargo run -p cli-extension-event-hooks -- node +//! cargo run -p node-event-hooks -- node //! ``` //! //! 
This launch the regular reth node and also print: diff --git a/examples/trace-transaction-cli/Cargo.toml b/examples/txpool-tracing/Cargo.toml similarity index 88% rename from examples/trace-transaction-cli/Cargo.toml rename to examples/txpool-tracing/Cargo.toml index 3f681c2defff8..220e5d8d523e0 100644 --- a/examples/trace-transaction-cli/Cargo.toml +++ b/examples/txpool-tracing/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "trace-transaction-cli" +name = "txpool-tracing" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/trace-transaction-cli/src/main.rs b/examples/txpool-tracing/src/main.rs similarity index 96% rename from examples/trace-transaction-cli/src/main.rs rename to examples/txpool-tracing/src/main.rs index ab72c272006ab..85a5b795aad73 100644 --- a/examples/trace-transaction-cli/src/main.rs +++ b/examples/txpool-tracing/src/main.rs @@ -3,7 +3,7 @@ //! Run with //! //! ```not_rust -//! cargo run --release -p trace-transaction-cli -- node --http --ws --recipients 0x....,0x.... +//! cargo run --release -p txpool-tracing -- node --http --ws --recipients 0x....,0x.... //! ``` //! //! If no recipients are specified, all transactions will be traced. 
From 35ac20b8e4a17658d2110b30ef0049d5841e1c55 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 25 Apr 2024 17:49:43 +0200 Subject: [PATCH 333/700] feat: LaunchContext helpers (#7884) --- crates/node-builder/src/builder/mod.rs | 2 +- crates/node-builder/src/launch/common.rs | 351 ++++++++++++++++++ .../src/{launch.rs => launch/mod.rs} | 260 ++++++------- 3 files changed, 460 insertions(+), 153 deletions(-) create mode 100644 crates/node-builder/src/launch/common.rs rename crates/node-builder/src/{launch.rs => launch/mod.rs} (67%) diff --git a/crates/node-builder/src/builder/mod.rs b/crates/node-builder/src/builder/mod.rs index 44bb60588872b..9649360ebe919 100644 --- a/crates/node-builder/src/builder/mod.rs +++ b/crates/node-builder/src/builder/mod.rs @@ -449,7 +449,7 @@ where ) -> eyre::Result, CB::Components>>> { let Self { builder, task_executor, data_dir } = self; - let launcher = DefaultNodeLauncher { task_executor, data_dir }; + let launcher = DefaultNodeLauncher::new(task_executor, data_dir); builder.launch_with(launcher).await } diff --git a/crates/node-builder/src/launch/common.rs b/crates/node-builder/src/launch/common.rs new file mode 100644 index 0000000000000..765673bf07e79 --- /dev/null +++ b/crates/node-builder/src/launch/common.rs @@ -0,0 +1,351 @@ +//! Helper types that can be used by launchers. + +use eyre::Context; +use rayon::ThreadPoolBuilder; +use reth_config::PruneConfig; +use reth_db::{database::Database, database_metrics::DatabaseMetrics}; +use reth_node_core::{ + cli::config::RethRpcConfig, + dirs::{ChainPath, DataDirPath}, + node_config::NodeConfig, +}; +use reth_primitives::{Chain, ChainSpec, Head, B256}; +use reth_provider::{providers::StaticFileProvider, ProviderFactory}; +use reth_rpc::JwtSecret; +use reth_tasks::TaskExecutor; +use reth_tracing::tracing::{error, info}; +use std::{cmp::max, sync::Arc, thread::available_parallelism}; + +/// Reusable setup for launching a node. 
+/// +/// This provides commonly used boilerplate for launching a node. +#[derive(Debug, Clone)] +pub struct LaunchContext { + /// The task executor for the node. + pub task_executor: TaskExecutor, + /// The data directory for the node. + pub data_dir: ChainPath, +} + +impl LaunchContext { + /// Create a new instance of the default node launcher. + pub const fn new(task_executor: TaskExecutor, data_dir: ChainPath) -> Self { + Self { task_executor, data_dir } + } + + /// Attaches a database to the launch context. + pub fn with(self, database: DB) -> LaunchContextWith { + LaunchContextWith { inner: self, attachment: database } + } + + /// Loads the reth config with the configured `data_dir` and overrides settings according to the + /// `config`. + /// + /// Attaches both the `NodeConfig` and the loaded `reth.toml` config to the launch context. + pub fn with_loaded_toml_config( + self, + config: NodeConfig, + ) -> eyre::Result> { + let toml_config = self.load_toml_config(&config)?; + Ok(self.with(WithConfigs { config, toml_config })) + } + + /// Loads the reth config with the configured `data_dir` and overrides settings according to the + /// `config`. 
+ pub fn load_toml_config(&self, config: &NodeConfig) -> eyre::Result { + let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config_path()); + + let mut toml_config = confy::load_path::(&config_path) + .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?; + + info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); + + // Update the config with the command line arguments + toml_config.peers.trusted_nodes_only = config.network.trusted_only; + + if !config.network.trusted_peers.is_empty() { + info!(target: "reth::cli", "Adding trusted nodes"); + config.network.trusted_peers.iter().for_each(|peer| { + toml_config.peers.trusted_nodes.insert(*peer); + }); + } + + Ok(toml_config) + } + + /// Configure global settings this includes: + /// + /// - Raising the file descriptor limit + /// - Configuring the global rayon thread pool + pub fn configure_globals(&self) { + // Raise the fd limit of the process. + // Does not do anything on windows. + let _ = fdlimit::raise_fd_limit(); + + // Limit the global rayon thread pool, reserving 2 cores for the rest of the system + let _ = ThreadPoolBuilder::new() + .num_threads( + available_parallelism().map_or(25, |cpus| max(cpus.get().saturating_sub(2), 2)), + ) + .build_global() + .map_err(|e| error!("Failed to build global thread pool: {:?}", e)); + } +} + +/// A [LaunchContext] along with an additional value. +/// +/// This can be used to sequentially attach additional values to the type during the launch process. +/// +/// The type provides common boilerplate for launching a node depending on the additional value. +#[derive(Debug, Clone)] +pub struct LaunchContextWith { + /// The wrapped launch context. + pub inner: LaunchContext, + /// The additional attached value. 
+ pub attachment: T, +} + +impl LaunchContextWith { + /// Configure global settings this includes: + /// + /// - Raising the file descriptor limit + /// - Configuring the global rayon thread pool + pub fn configure_globals(&self) { + self.inner.configure_globals(); + } + + /// Returns the data directory. + pub fn data_dir(&self) -> &ChainPath { + &self.inner.data_dir + } + + /// Returns the task executor. + pub fn task_executor(&self) -> &TaskExecutor { + &self.inner.task_executor + } + + /// Attaches another value to the launch context. + pub fn attach(self, attachment: A) -> LaunchContextWith> { + LaunchContextWith { + inner: self.inner, + attachment: Attached::new(self.attachment, attachment), + } + } +} + +impl LaunchContextWith> { + /// Get a reference to the left value. + pub const fn left(&self) -> &L { + &self.attachment.left + } + + /// Get a reference to the right value. + pub const fn right(&self) -> &R { + &self.attachment.right + } + + /// Get a mutable reference to the right value. + pub fn left_mut(&mut self) -> &mut L { + &mut self.attachment.left + } + + /// Get a mutable reference to the right value. + pub fn right_mut(&mut self) -> &mut R { + &mut self.attachment.right + } +} +impl LaunchContextWith> { + /// Returns the attached [NodeConfig]. + pub const fn node_config(&self) -> &NodeConfig { + &self.left().config + } + + /// Returns the attached [NodeConfig]. + pub fn node_config_mut(&mut self) -> &mut NodeConfig { + &mut self.left_mut().config + } + + /// Returns the attached toml config [reth_config::Config]. + pub const fn toml_config(&self) -> &reth_config::Config { + &self.left().toml_config + } + + /// Returns the attached toml config [reth_config::Config]. + pub fn toml_config_mut(&mut self) -> &mut reth_config::Config { + &mut self.left_mut().toml_config + } + + /// Returns the configured chain spec. + pub fn chain_spec(&self) -> Arc { + self.node_config().chain.clone() + } + + /// Get the hash of the genesis block. 
+ pub fn genesis_hash(&self) -> B256 { + self.node_config().chain.genesis_hash() + } + + /// Returns the chain identifier of the node. + pub fn chain_id(&self) -> Chain { + self.node_config().chain.chain + } + + /// Returns true if the node is configured as --dev + pub fn is_dev(&self) -> bool { + self.node_config().dev.dev + } + + /// Returns the configured [PruneConfig] + pub fn prune_config(&self) -> eyre::Result> { + Ok(self.node_config().prune_config()?.or_else(|| self.toml_config().prune.clone())) + } + + /// Returns the initial pipeline target, based on whether or not the node is running in + /// `debug.tip` mode, `debug.continuous` mode, or neither. + /// + /// If running in `debug.tip` mode, the configured tip is returned. + /// Otherwise, if running in `debug.continuous` mode, the genesis hash is returned. + /// Otherwise, `None` is returned. This is what the node will do by default. + pub fn initial_pipeline_target(&self) -> Option { + self.node_config().initial_pipeline_target(self.genesis_hash()) + } + + /// Loads the JWT secret for the engine API + pub fn auth_jwt_secret(&self) -> eyre::Result { + let default_jwt_path = self.data_dir().jwt_path(); + let secret = self.node_config().rpc.auth_jwt_secret(default_jwt_path)?; + Ok(secret) + } +} + +impl LaunchContextWith> +where + DB: Clone, +{ + /// Returns the [ProviderFactory] for the attached database. + pub fn create_provider_factory(&self) -> eyre::Result> { + let factory = ProviderFactory::new( + self.right().clone(), + self.chain_spec(), + self.data_dir().static_files_path(), + )? + .with_static_files_metrics(); + + Ok(factory) + } + + /// Creates a new [ProviderFactory] and attaches it to the launch context. 
+ pub fn with_provider_factory( + self, + ) -> eyre::Result>>> { + let factory = self.create_provider_factory()?; + let ctx = LaunchContextWith { + inner: self.inner, + attachment: self.attachment.map_right(|_| factory), + }; + + Ok(ctx) + } +} + +impl LaunchContextWith>> +where + DB: Database + DatabaseMetrics + Send + Sync + Clone + 'static, +{ + /// Returns access to the underlying database. + pub fn database(&self) -> &DB { + self.right().db_ref() + } + + /// Returns the configured ProviderFactory. + pub fn provider_factory(&self) -> &ProviderFactory { + self.right() + } + + /// Returns the static file provider to interact with the static files. + pub fn static_file_provider(&self) -> StaticFileProvider { + self.right().static_file_provider() + } + + /// Starts the prometheus endpoint. + pub async fn start_prometheus_endpoint(&self) -> eyre::Result<()> { + let prometheus_handle = self.node_config().install_prometheus_recorder()?; + self.node_config() + .start_metrics_endpoint( + prometheus_handle, + self.database().clone(), + self.static_file_provider(), + self.task_executor().clone(), + ) + .await + } + + /// Fetches the head block from the database. + /// + /// If the database is empty, returns the genesis block. + pub fn lookup_head(&self) -> eyre::Result { + self.node_config() + .lookup_head(self.provider_factory().clone()) + .wrap_err("the head block is missing") + } +} + +/// Joins two attachments together. +#[derive(Clone, Copy, Debug)] +pub struct Attached { + left: L, + right: R, +} + +impl Attached { + /// Creates a new `Attached` with the given values. + pub const fn new(left: L, right: R) -> Self { + Self { left, right } + } + + /// Maps the left value to a new value. + pub fn map_left(self, f: F) -> Attached + where + F: FnOnce(L) -> T, + { + Attached::new(f(self.left), self.right) + } + + /// Maps the right value to a new value. 
+ pub fn map_right(self, f: F) -> Attached + where + F: FnOnce(R) -> T, + { + Attached::new(self.left, f(self.right)) + } + + /// Get a reference to the left value. + pub const fn left(&self) -> &L { + &self.left + } + + /// Get a reference to the right value. + pub const fn right(&self) -> &R { + &self.right + } + + /// Get a mutable reference to the right value. + pub fn left_mut(&mut self) -> &mut R { + &mut self.right + } + + /// Get a mutable reference to the right value. + pub fn right_mut(&mut self) -> &mut R { + &mut self.right + } +} + +/// Helper container type to bundle the initial [NodeConfig] and the loaded settings from the +/// reth.toml config +#[derive(Debug, Clone)] +pub struct WithConfigs { + /// The configured, usually derived from the CLI. + pub config: NodeConfig, + /// The loaded reth.toml config. + pub toml_config: reth_config::Config, +} diff --git a/crates/node-builder/src/launch.rs b/crates/node-builder/src/launch/mod.rs similarity index 67% rename from crates/node-builder/src/launch.rs rename to crates/node-builder/src/launch/mod.rs index 645598adafd37..6181e0c98120e 100644 --- a/crates/node-builder/src/launch.rs +++ b/crates/node-builder/src/launch/mod.rs @@ -7,9 +7,7 @@ use crate::{ node::FullNode, BuilderContext, NodeBuilderWithComponents, NodeHandle, RethFullAdapter, }; -use eyre::Context; use futures::{future, future::Either, stream, stream_select, StreamExt}; -use rayon::ThreadPoolBuilder; use reth_auto_seal_consensus::{AutoSealConsensus, MiningMode}; use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, @@ -29,32 +27,35 @@ use reth_interfaces::p2p::either::EitherDownloader; use reth_network::NetworkEvents; use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_node_core::{ - cli::config::RethRpcConfig, dirs::{ChainPath, DataDirPath}, engine_api_store::EngineApiStore, engine_skip_fcu::EngineApiSkipFcu, exit::NodeExitFuture, init::init_genesis, - node_config::NodeConfig, }; use 
reth_node_events::{cl::ConsensusLayerHealthEvents, node}; use reth_primitives::format_ether; -use reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions, ProviderFactory}; +use reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions}; use reth_prune::PrunerBuilder; use reth_revm::EvmProcessorFactory; use reth_rpc_engine_api::EngineApi; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; -use reth_tracing::tracing::{debug, error, info}; +use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::TransactionPool; -use std::{cmp::max, future::Future, sync::Arc, thread::available_parallelism}; +use std::{future::Future, sync::Arc}; use tokio::sync::{mpsc::unbounded_channel, oneshot}; -/// Launches a new node. +pub mod common; +pub use common::LaunchContext; + +/// A general purpose trait that launches a new node of any kind. /// /// Acts as a node factory. /// /// This is essentially the launch logic for a node. +/// +/// See also [DefaultNodeLauncher] and [NodeBuilderWithComponents::launch_with] pub trait LaunchNode { /// The node type that is created. type Node; @@ -67,37 +68,13 @@ pub trait LaunchNode { #[derive(Debug)] pub struct DefaultNodeLauncher { /// The task executor for the node. - pub task_executor: TaskExecutor, - /// The data directory for the node. - pub data_dir: ChainPath, + pub ctx: LaunchContext, } impl DefaultNodeLauncher { /// Create a new instance of the default node launcher. 
pub fn new(task_executor: TaskExecutor, data_dir: ChainPath) -> Self { - Self { task_executor, data_dir } - } - - /// Loads the reth config with the given datadir root - fn load_toml_config(&self, config: &NodeConfig) -> eyre::Result { - let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config_path()); - - let mut toml_config = confy::load_path::(&config_path) - .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?; - - info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); - - // Update the config with the command line arguments - toml_config.peers.trusted_nodes_only = config.network.trusted_only; - - if !config.network.trusted_peers.is_empty() { - info!(target: "reth::cli", "Adding trusted nodes"); - config.network.trusted_peers.iter().for_each(|peer| { - toml_config.peers.trusted_nodes.insert(*peer); - }); - } - - Ok(toml_config) + Self { ctx: LaunchContext::new(task_executor, data_dir) } } } @@ -114,6 +91,7 @@ where self, target: NodeBuilderWithComponents, CB>, ) -> eyre::Result { + let Self { ctx } = self; let NodeBuilderWithComponents { adapter: NodeTypesAdapter { types, database }, components_builder, @@ -121,74 +99,53 @@ where config, } = target; - // get config from file - let reth_config = self.load_toml_config(&config)?; - - let Self { task_executor, data_dir } = self; - - // Raise the fd limit of the process. - // Does not do anything on windows. - fdlimit::raise_fd_limit()?; + // configure globals + ctx.configure_globals(); - // Limit the global rayon thread pool, reserving 2 cores for the rest of the system - let _ = ThreadPoolBuilder::new() - .num_threads( - available_parallelism().map_or(25, |cpus| max(cpus.get().saturating_sub(2), 2)), - ) - .build_global() - .map_err(|e| error!("Failed to build global thread pool: {:?}", e)); + let mut ctx = ctx + // load the toml config + .with_loaded_toml_config(config)? 
+ // attach the database + .attach(database.clone()) + // Create the provider factory + .with_provider_factory()?; - let provider_factory = ProviderFactory::new( - database.clone(), - Arc::clone(&config.chain), - data_dir.static_files_path(), - )? - .with_static_files_metrics(); info!(target: "reth::cli", "Database opened"); - let prometheus_handle = config.install_prometheus_recorder()?; - config - .start_metrics_endpoint( - prometheus_handle, - database.clone(), - provider_factory.static_file_provider(), - task_executor.clone(), - ) - .await?; + ctx.start_prometheus_endpoint().await?; - debug!(target: "reth::cli", chain=%config.chain.chain, -genesis=?config.chain.genesis_hash(), "Initializing genesis"); + debug!(target: "reth::cli", chain=%ctx.chain_id(), genesis=?ctx.genesis_hash(), "Initializing genesis"); - let genesis_hash = init_genesis(provider_factory.clone())?; + init_genesis(ctx.provider_factory().clone())?; - info!(target: "reth::cli", "\n{}", config.chain.display_hardforks()); + info!(target: "reth::cli", "\n{}", ctx.chain_spec().display_hardforks()); // setup the consensus instance - let consensus: Arc = if config.dev.dev { - Arc::new(AutoSealConsensus::new(Arc::clone(&config.chain))) + let consensus: Arc = if ctx.is_dev() { + Arc::new(AutoSealConsensus::new(ctx.chain_spec())) } else { - Arc::new(BeaconConsensus::new(Arc::clone(&config.chain))) + Arc::new(BeaconConsensus::new(ctx.chain_spec())) }; debug!(target: "reth::cli", "Spawning stages metrics listener task"); let (sync_metrics_tx, sync_metrics_rx) = unbounded_channel(); let sync_metrics_listener = reth_stages::MetricsListener::new(sync_metrics_rx); - task_executor.spawn_critical("stages metrics listener task", sync_metrics_listener); + ctx.task_executor().spawn_critical("stages metrics listener task", sync_metrics_listener); - let prune_config = config.prune_config()?.or_else(|| reth_config.prune.clone()); + let prune_config = ctx.prune_config()?; // Configure the blockchain tree for the node 
let evm_config = types.evm_config(); let tree_config = BlockchainTreeConfig::default(); let tree_externals = TreeExternals::new( - provider_factory.clone(), + ctx.provider_factory().clone(), consensus.clone(), - EvmProcessorFactory::new(config.chain.clone(), evm_config.clone()), + EvmProcessorFactory::new(ctx.chain_spec(), evm_config.clone()), ); let tree = BlockchainTree::new( tree_externals, tree_config, - prune_config.as_ref().map(|config| config.segments.clone()), + prune_config.as_ref().map(|prune| prune.segments.clone()), )? .with_sync_metrics_tx(sync_metrics_tx.clone()); @@ -197,40 +154,30 @@ genesis=?config.chain.genesis_hash(), "Initializing genesis"); debug!(target: "reth::cli", "configured blockchain tree"); // fetch the head block from the database - let head = - config.lookup_head(provider_factory.clone()).wrap_err("the head block is missing")?; + let head = ctx.lookup_head()?; // setup the blockchain provider let blockchain_db = - BlockchainProvider::new(provider_factory.clone(), blockchain_tree.clone())?; + BlockchainProvider::new(ctx.provider_factory().clone(), blockchain_tree.clone())?; - let ctx = BuilderContext::new( + let builder_ctx = BuilderContext::new( head, - blockchain_db, - task_executor, - data_dir, - config, - reth_config, + blockchain_db.clone(), + ctx.task_executor().clone(), + ctx.data_dir().clone(), + ctx.node_config().clone(), + ctx.toml_config().clone(), evm_config.clone(), ); debug!(target: "reth::cli", "creating components"); - let components = components_builder.build_components(&ctx).await?; - - let BuilderContext { - provider: blockchain_db, - executor, - data_dir, - mut config, - mut reth_config, - .. - } = ctx; + let components = components_builder.build_components(&builder_ctx).await?; let NodeHooks { on_component_initialized, on_node_started, .. 
} = hooks; let node_adapter = NodeAdapter { components, - task_executor: executor.clone(), + task_executor: ctx.task_executor().clone(), provider: blockchain_db.clone(), evm: evm_config.clone(), }; @@ -250,16 +197,16 @@ genesis=?config.chain.genesis_hash(), "Initializing genesis"); let context = ExExContext { head, provider: blockchain_db.clone(), - task_executor: executor.clone(), - data_dir: data_dir.clone(), - config: config.clone(), - reth_config: reth_config.clone(), + task_executor: ctx.task_executor().clone(), + data_dir: ctx.data_dir().clone(), + config: ctx.node_config().clone(), + reth_config: ctx.toml_config().clone(), pool: node_adapter.components.pool().clone(), events, notifications, }; - let executor = executor.clone(); + let executor = ctx.task_executor().clone(); exexs.push(async move { debug!(target: "reth::cli", id, "spawning exex"); let span = reth_tracing::tracing::info_span!("exex", id); @@ -287,21 +234,24 @@ genesis=?config.chain.genesis_hash(), "Initializing genesis"); // todo(onbjerg): rm magic number let exex_manager = ExExManager::new(exex_handles, 1024); let exex_manager_handle = exex_manager.handle(); - executor.spawn_critical("exex manager", async move { + ctx.task_executor().spawn_critical("exex manager", async move { exex_manager.await.expect("exex manager crashed"); }); // send notifications from the blockchain tree to exex manager let mut canon_state_notifications = blockchain_tree.subscribe_to_canonical_state(); let mut handle = exex_manager_handle.clone(); - executor.spawn_critical("exex manager blockchain tree notifications", async move { - while let Ok(notification) = canon_state_notifications.recv().await { - handle.send_async(notification.into()).await.expect( - "blockchain tree notification could not be sent to exex + ctx.task_executor().spawn_critical( + "exex manager blockchain tree notifications", + async move { + while let Ok(notification) = canon_state_notifications.recv().await { + 
handle.send_async(notification.into()).await.expect( + "blockchain tree notification could not be sent to exex manager", - ); - } - }); + ); + } + }, + ); info!(target: "reth::cli", "ExEx Manager started"); @@ -314,52 +264,59 @@ manager", let network_client = node_adapter.network().fetch_client().await?; let (consensus_engine_tx, mut consensus_engine_rx) = unbounded_channel(); - if let Some(skip_fcu_threshold) = config.debug.skip_fcu { + if let Some(skip_fcu_threshold) = ctx.node_config().debug.skip_fcu { debug!(target: "reth::cli", "spawning skip FCU task"); let (skip_fcu_tx, skip_fcu_rx) = unbounded_channel(); let engine_skip_fcu = EngineApiSkipFcu::new(skip_fcu_threshold); - executor.spawn_critical( + ctx.task_executor().spawn_critical( "skip FCU interceptor", engine_skip_fcu.intercept(consensus_engine_rx, skip_fcu_tx), ); consensus_engine_rx = skip_fcu_rx; } - if let Some(store_path) = config.debug.engine_api_store.clone() { + if let Some(store_path) = ctx.node_config().debug.engine_api_store.clone() { debug!(target: "reth::cli", "spawning engine API store"); let (engine_intercept_tx, engine_intercept_rx) = unbounded_channel(); let engine_api_store = EngineApiStore::new(store_path); - executor.spawn_critical( + ctx.task_executor().spawn_critical( "engine api interceptor", engine_api_store.intercept(consensus_engine_rx, engine_intercept_tx), ); consensus_engine_rx = engine_intercept_rx; }; - let max_block = config.max_block(network_client.clone(), provider_factory.clone()).await?; + let max_block = ctx + .node_config() + .max_block(network_client.clone(), ctx.provider_factory().clone()) + .await?; let mut hooks = EngineHooks::new(); let static_file_producer = StaticFileProducer::new( - provider_factory.clone(), - provider_factory.static_file_provider(), + ctx.provider_factory().clone(), + ctx.static_file_provider(), prune_config.clone().unwrap_or_default().segments, ); let static_file_producer_events = static_file_producer.lock().events(); - 
hooks.add(StaticFileHook::new(static_file_producer.clone(), Box::new(executor.clone()))); + hooks.add(StaticFileHook::new( + static_file_producer.clone(), + Box::new(ctx.task_executor().clone()), + )); info!(target: "reth::cli", "StaticFileProducer initialized"); // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to - if reth_config.stages.etl.dir.is_none() { - reth_config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); + if ctx.toml_config_mut().stages.etl.dir.is_none() { + ctx.toml_config_mut().stages.etl.dir = + Some(EtlConfig::from_datadir(&ctx.data_dir().data_dir_path())); } // Configure the pipeline let pipeline_exex_handle = exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); - let (mut pipeline, client) = if config.dev.dev { + let (mut pipeline, client) = if ctx.is_dev() { info!(target: "reth::cli", "Starting Reth in dev mode"); - for (idx, (address, alloc)) in config.chain.genesis.alloc.iter().enumerate() { + for (idx, (address, alloc)) in ctx.chain_spec().genesis.alloc.iter().enumerate() { info!(target: "reth::cli", "Allocated Genesis Account: {:02}. 
{} ({} ETH)", idx, address.to_string(), format_ether(alloc.balance)); } @@ -368,9 +325,9 @@ address.to_string(), format_ether(alloc.balance)); let pending_transactions_listener = node_adapter.components.pool().pending_transactions_listener(); - let mining_mode = if let Some(interval) = config.dev.block_time { + let mining_mode = if let Some(interval) = ctx.node_config().dev.block_time { MiningMode::interval(interval) - } else if let Some(max_transactions) = config.dev.block_max_transactions { + } else if let Some(max_transactions) = ctx.node_config().dev.block_max_transactions { MiningMode::instant(max_transactions, pending_transactions_listener) } else { info!(target: "reth::cli", "No mining mode specified, defaulting to @@ -379,7 +336,7 @@ ReadyTransaction"); }; let (_, client, mut task) = reth_auto_seal_consensus::AutoSealBuilder::new( - Arc::clone(&config.chain), + ctx.chain_spec(), blockchain_db.clone(), node_adapter.components.pool().clone(), consensus_engine_tx.clone(), @@ -390,12 +347,12 @@ ReadyTransaction"); .build(); let mut pipeline = crate::setup::build_networked_pipeline( - &config, - &reth_config.stages, + ctx.node_config(), + &ctx.toml_config().stages, client.clone(), Arc::clone(&consensus), - provider_factory.clone(), - &executor, + ctx.provider_factory().clone(), + ctx.task_executor(), sync_metrics_tx, prune_config.clone(), max_block, @@ -408,17 +365,17 @@ ReadyTransaction"); let pipeline_events = pipeline.events(); task.set_pipeline_events(pipeline_events); debug!(target: "reth::cli", "Spawning auto mine task"); - executor.spawn(Box::pin(task)); + ctx.task_executor().spawn(Box::pin(task)); (pipeline, EitherDownloader::Left(client)) } else { let pipeline = crate::setup::build_networked_pipeline( - &config, - &reth_config.stages, + ctx.node_config(), + &ctx.toml_config().stages, network_client.clone(), Arc::clone(&consensus), - provider_factory.clone(), - &executor, + ctx.provider_factory().clone(), + ctx.task_executor(), sync_metrics_tx, 
prune_config.clone(), max_block, @@ -433,22 +390,22 @@ ReadyTransaction"); let pipeline_events = pipeline.events(); - let initial_target = config.initial_pipeline_target(genesis_hash); + let initial_target = ctx.initial_pipeline_target(); let prune_config = prune_config.unwrap_or_default(); let mut pruner_builder = PrunerBuilder::new(prune_config.clone()) .max_reorg_depth(tree_config.max_reorg_depth() as usize) - .prune_delete_limit(config.chain.prune_delete_limit) + .prune_delete_limit(ctx.chain_spec().prune_delete_limit) .timeout(PrunerBuilder::DEFAULT_TIMEOUT); if let Some(exex_manager_handle) = &exex_manager_handle { pruner_builder = pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); } - let mut pruner = pruner_builder.build(provider_factory.clone()); + let mut pruner = pruner_builder.build(ctx.provider_factory().clone()); let pruner_events = pruner.events(); - hooks.add(PruneHook::new(pruner, Box::new(executor.clone()))); + hooks.add(PruneHook::new(pruner, Box::new(ctx.task_executor().clone()))); info!(target: "reth::cli", ?prune_config, "Pruner initialized"); // Configure the consensus engine @@ -456,10 +413,10 @@ ReadyTransaction"); client, pipeline, blockchain_db.clone(), - Box::new(executor.clone()), + Box::new(ctx.task_executor().clone()), Box::new(node_adapter.components.network().clone()), max_block, - config.debug.continuous, + ctx.node_config().debug.continuous, node_adapter.components.payload_builder().clone(), initial_target, reth_beacon_consensus::MIN_BLOCKS_FOR_PIPELINE_RUN, @@ -473,7 +430,7 @@ ReadyTransaction"); node_adapter.components.network().event_listener().map(Into::into), beacon_engine_handle.event_listener().map(Into::into), pipeline_events.map(Into::into), - if config.debug.tip.is_none() && !config.dev.dev { + if ctx.node_config().debug.tip.is_none() && !ctx.is_dev() { Either::Left( ConsensusLayerHealthEvents::new(Box::new(blockchain_db.clone())) .map(Into::into), @@ -484,7 +441,7 @@ ReadyTransaction"); 
pruner_events.map(Into::into), static_file_producer_events.map(Into::into) ); - executor.spawn_critical( + ctx.task_executor().spawn_critical( "events task", node::handle_events( Some(node_adapter.components.network().clone()), @@ -496,39 +453,38 @@ ReadyTransaction"); let engine_api = EngineApi::new( blockchain_db.clone(), - config.chain.clone(), + ctx.chain_spec(), beacon_engine_handle, node_adapter.components.payload_builder().clone().into(), - Box::new(executor.clone()), + Box::new(ctx.task_executor().clone()), ); info!(target: "reth::cli", "Engine API handler initialized"); // extract the jwt secret from the args if possible - let default_jwt_path = data_dir.jwt_path(); - let jwt_secret = config.rpc.auth_jwt_secret(default_jwt_path)?; + let jwt_secret = ctx.auth_jwt_secret()?; // adjust rpc port numbers based on instance number - config.adjust_instance_ports(); + ctx.node_config_mut().adjust_instance_ports(); // Start RPC servers let (rpc_server_handles, mut rpc_registry) = crate::rpc::launch_rpc_servers( node_adapter.clone(), engine_api, - &config, + ctx.node_config(), jwt_secret, rpc, ) .await?; // in dev mode we generate 20 random dev-signer accounts - if config.dev.dev { + if ctx.is_dev() { rpc_registry.eth_api().with_dev_accounts(); } // Run consensus engine to completion let (tx, rx) = oneshot::channel(); info!(target: "reth::cli", "Starting consensus engine"); - executor.spawn_critical_blocking("consensus engine", async move { + ctx.task_executor().spawn_critical_blocking("consensus engine", async move { let res = beacon_consensus_engine.await; let _ = tx.send(res); }); @@ -539,11 +495,11 @@ ReadyTransaction"); network: node_adapter.components.network().clone(), provider: node_adapter.provider.clone(), payload_builder: node_adapter.components.payload_builder().clone(), - task_executor: executor, + task_executor: ctx.task_executor().clone(), rpc_server_handles, rpc_registry, - config, - data_dir, + config: ctx.node_config().clone(), + data_dir: 
ctx.data_dir().clone(), }; // Notify on node started on_node_started.on_event(full_node.clone())?; From 9567b256c8cb4a8cd439bd7e6e7b1be3b032efde Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Thu, 25 Apr 2024 23:51:31 +0800 Subject: [PATCH 334/700] feat: support max_request_body_size (#7880) --- crates/rpc/ipc/src/server/ipc.rs | 22 ++++++--- crates/rpc/ipc/src/server/mod.rs | 80 +++++++++++++++++++++++++++++++- 2 files changed, 93 insertions(+), 9 deletions(-) diff --git a/crates/rpc/ipc/src/server/ipc.rs b/crates/rpc/ipc/src/server/ipc.rs index 1fd600c033772..daf7d1dc0e626 100644 --- a/crates/rpc/ipc/src/server/ipc.rs +++ b/crates/rpc/ipc/src/server/ipc.rs @@ -11,7 +11,10 @@ use jsonrpsee::{ JsonRawValue, }, server::middleware::rpc::RpcServiceT, - types::{error::ErrorCode, ErrorObject, Id, InvalidRequest, Notification, Request}, + types::{ + error::{reject_too_big_request, ErrorCode}, + ErrorObject, Id, InvalidRequest, Notification, Request, + }, BatchResponseBuilder, MethodResponse, ResponsePayload, }; use tokio::sync::OwnedSemaphorePermit; @@ -124,6 +127,7 @@ pub(crate) async fn call_with_service( request: String, rpc_service: S, max_response_body_size: usize, + max_request_body_size: usize, conn: Arc, ) -> Option where @@ -143,9 +147,17 @@ where }) .unwrap_or(Kind::Single); + let data = request.into_bytes(); + if data.len() > max_request_body_size { + return Some(batch_response_error( + Id::Null, + reject_too_big_request(max_request_body_size as u32), + )); + } + // Single request or notification let res = if matches!(request_kind, Kind::Single) { - let response = process_single_request(request.into_bytes(), &rpc_service).await; + let response = process_single_request(data, &rpc_service).await; match response { Some(response) if response.is_method_call() => Some(response.to_result()), _ => { @@ -155,11 +167,7 @@ where } } } else { - process_batch_request( - Batch { data: request.into_bytes(), rpc_service }, - max_response_body_size, - ) - .await + 
process_batch_request(Batch { data, rpc_service }, max_response_body_size).await }; drop(conn); diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index 7afb6bb7d158e..5301c7d2198b2 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -378,6 +378,7 @@ where }; let max_response_body_size = self.inner.max_response_body_size as usize; + let max_request_body_size = self.inner.max_request_body_size as usize; let rpc_service = self.rpc_middleware.service(RpcService::new( self.inner.methods.clone(), max_response_body_size, @@ -392,7 +393,14 @@ where // work to a separate task takes the pressure off the connection so all concurrent responses // are also serialized concurrently and the connection can focus on read+write let f = tokio::task::spawn(async move { - ipc::call_with_service(request, rpc_service, max_response_body_size, conn).await + ipc::call_with_service( + request, + rpc_service, + max_response_body_size, + max_request_body_size, + conn, + ) + .await }); Box::pin(async move { f.await.map_err(|err| err.into()) }) @@ -780,7 +788,11 @@ mod tests { use crate::client::IpcClientBuilder; use futures::future::{select, Either}; use jsonrpsee::{ - core::client::{ClientT, Subscription, SubscriptionClientT}, + core::{ + client, + client::{ClientT, Error, Subscription, SubscriptionClientT}, + params::BatchRequestBuilder, + }, rpc_params, types::Request, PendingSubscriptionSink, RpcModule, SubscriptionMessage, @@ -834,6 +846,46 @@ mod tests { } } + #[tokio::test] + async fn can_set_the_max_response_body_size() { + let endpoint = dummy_endpoint(); + let server = Builder::default().max_response_body_size(100).build(&endpoint); + let mut module = RpcModule::new(()); + module.register_method("anything", |_, _| "a".repeat(101)).unwrap(); + let handle = server.start(module).await.unwrap(); + tokio::spawn(handle.stopped()); + + let client = IpcClientBuilder::default().build(endpoint).await.unwrap(); + let response: 
Result = client.request("anything", rpc_params![]).await; + assert!(response.unwrap_err().to_string().contains("Exceeded max limit of")); + } + + #[tokio::test] + async fn can_set_the_max_request_body_size() { + let endpoint = dummy_endpoint(); + let server = Builder::default().max_request_body_size(100).build(&endpoint); + let mut module = RpcModule::new(()); + module.register_method("anything", |_, _| "succeed").unwrap(); + let handle = server.start(module).await.unwrap(); + tokio::spawn(handle.stopped()); + + let client = IpcClientBuilder::default().build(endpoint).await.unwrap(); + let response: Result = + client.request("anything", rpc_params!["a".repeat(101)]).await; + assert!(response.is_err()); + let mut batch_request_builder = BatchRequestBuilder::new(); + let _ = batch_request_builder.insert("anything", rpc_params![]); + let _ = batch_request_builder.insert("anything", rpc_params![]); + let _ = batch_request_builder.insert("anything", rpc_params![]); + // the raw request string is: + // [{"jsonrpc":"2.0","id":0,"method":"anything"},{"jsonrpc":"2.0","id":1, \ + // "method":"anything"},{"jsonrpc":"2.0","id":2,"method":"anything"}]" + // which is 136 bytes, more than 100 bytes. 
+ let response: Result, Error> = + client.batch_request(batch_request_builder).await; + assert!(response.is_err()); + } + #[tokio::test] async fn test_rpc_request() { let endpoint = dummy_endpoint(); @@ -849,6 +901,30 @@ mod tests { assert_eq!(response, msg); } + #[tokio::test] + async fn test_batch_request() { + let endpoint = dummy_endpoint(); + let server = Builder::default().build(&endpoint); + let mut module = RpcModule::new(()); + module.register_method("anything", |_, _| "ok").unwrap(); + let handle = server.start(module).await.unwrap(); + tokio::spawn(handle.stopped()); + + let client = IpcClientBuilder::default().build(endpoint).await.unwrap(); + let mut batch_request_builder = BatchRequestBuilder::new(); + let _ = batch_request_builder.insert("anything", rpc_params![]); + let _ = batch_request_builder.insert("anything", rpc_params![]); + let _ = batch_request_builder.insert("anything", rpc_params![]); + let result = client + .batch_request(batch_request_builder) + .await + .unwrap() + .into_ok() + .unwrap() + .collect::>(); + assert_eq!(result, vec!["ok", "ok", "ok"]); + } + #[tokio::test] async fn test_ipc_modules() { reth_tracing::init_test_tracing(); From 6d1aab53806903c3707d9843ba1a433c1cf61f5d Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 25 Apr 2024 18:44:02 +0200 Subject: [PATCH 335/700] refactor: minor `Signature` refactors (#7888) --- crates/primitives/src/transaction/signature.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index 84ae2915f621e..8cd57dc7f8bff 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -33,7 +33,7 @@ impl Signature { /// signature. 
#[cfg(feature = "optimism")] pub const fn optimism_deposit_tx_signature() -> Self { - Signature { r: U256::ZERO, s: U256::ZERO, odd_y_parity: false } + Self { r: U256::ZERO, s: U256::ZERO, odd_y_parity: false } } } @@ -52,7 +52,7 @@ impl Compact for Signature { let r = U256::from_le_slice(&buf[0..32]); let s = U256::from_le_slice(&buf[32..64]); buf.advance(64); - (Signature { r, s, odd_y_parity: identifier != 0 }, buf) + (Self { r, s, odd_y_parity: identifier != 0 }, buf) } } @@ -112,17 +112,17 @@ impl Signature { // // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock if v == 0 && r.is_zero() && s.is_zero() { - return Ok((Signature { r, s, odd_y_parity: false }, None)) + return Ok((Self { r, s, odd_y_parity: false }, None)) } return Err(RlpError::Custom("invalid Ethereum signature (V is not 27 or 28)")) } let odd_y_parity = v == 28; - Ok((Signature { r, s, odd_y_parity }, None)) + Ok((Self { r, s, odd_y_parity }, None)) } else { // EIP-155: v = {0, 1} + CHAIN_ID * 2 + 35 let odd_y_parity = ((v - 35) % 2) != 0; let chain_id = (v - 35) >> 1; - Ok((Signature { r, s, odd_y_parity }, Some(chain_id))) + Ok((Self { r, s, odd_y_parity }, Some(chain_id))) } } @@ -140,7 +140,7 @@ impl Signature { /// Decodes the `odd_y_parity`, `r`, `s` values without a RLP header. 
pub fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - Ok(Signature { + Ok(Self { odd_y_parity: Decodable::decode(buf)?, r: Decodable::decode(buf)?, s: Decodable::decode(buf)?, From 57e3f40dda4411074264bbcd1d84d4a4f7799ca3 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Thu, 25 Apr 2024 18:45:11 +0200 Subject: [PATCH 336/700] chore: unpin cc (#7891) --- Cargo.lock | 5 +++-- crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml | 3 +-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 343697508a18d..256d5e4b6b95a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1416,12 +1416,13 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.83" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" dependencies = [ "jobserver", "libc", + "once_cell", ] [[package]] diff --git a/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml b/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml index cebae37b33fb0..fbdad4c510729 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml +++ b/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml @@ -15,6 +15,5 @@ name = "reth_mdbx_sys" libc = "0.2" [build-dependencies] -## temp pin -cc = "=1.0.83" +cc = "1.0" bindgen = { version = "0.69", default-features = false, features = ["runtime"] } From d312dbbea4ed10127aecc3507db69b381acc416e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 25 Apr 2024 19:32:12 +0200 Subject: [PATCH 337/700] fix: chain spec for op mainnet (#7883) --- crates/ethereum-forks/src/hardfork.rs | 3 +++ crates/primitives/src/chain/spec.rs | 16 ++++++++-------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/crates/ethereum-forks/src/hardfork.rs b/crates/ethereum-forks/src/hardfork.rs index 
6ccb306973cb5..41d1f13021a09 100644 --- a/crates/ethereum-forks/src/hardfork.rs +++ b/crates/ethereum-forks/src/hardfork.rs @@ -73,6 +73,9 @@ pub enum Hardfork { // Upcoming /// Prague: Prague, + /// Fjord: + #[cfg(feature = "optimism")] + Fjord, } impl Hardfork { diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index fb657b5f93a36..a1ae18ad01ea3 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -270,10 +270,10 @@ pub static OP_MAINNET: Lazy> = Lazy::new(|| { "7ca38a1916c42007829c55e69d3e9a73265554b586a499015373241b8a3fa48b" )), fork_timestamps: ForkTimestamps::default() - .shanghai(1699981200) - .canyon(1699981200) - .cancun(1707238800) - .ecotone(1707238800), + .shanghai(1704992401) + .canyon(1704992401) + .cancun(1710374401) + .ecotone(1710374401), paris_block_and_final_difficulty: Some((0, U256::from(0))), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), @@ -286,12 +286,12 @@ pub static OP_MAINNET: Lazy> = Lazy::new(|| { (Hardfork::Istanbul, ForkCondition::Block(0)), (Hardfork::MuirGlacier, ForkCondition::Block(0)), (Hardfork::Berlin, ForkCondition::Block(3950000)), - (Hardfork::London, ForkCondition::Block(3950000)), - (Hardfork::ArrowGlacier, ForkCondition::Block(3950000)), - (Hardfork::GrayGlacier, ForkCondition::Block(3950000)), + (Hardfork::London, ForkCondition::Block(105235063)), + (Hardfork::ArrowGlacier, ForkCondition::Block(105235063)), + (Hardfork::GrayGlacier, ForkCondition::Block(105235063)), ( Hardfork::Paris, - ForkCondition::TTD { fork_block: Some(3950000), total_difficulty: U256::from(0) }, + ForkCondition::TTD { fork_block: Some(105235063), total_difficulty: U256::from(0) }, ), (Hardfork::Bedrock, ForkCondition::Block(105235063)), (Hardfork::Regolith, ForkCondition::Timestamp(0)), From 663a7185e6c391109466f62a6fc68205121676a6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 25 Apr 2024 20:18:46 +0200 Subject: [PATCH 338/700] 
chore: more LaunchContext helpers (#7894) --- crates/consensus/auto-seal/src/mode.rs | 11 +++ crates/node-builder/src/launch/common.rs | 95 +++++++++++++++++++++-- crates/node-builder/src/launch/mod.rs | 75 +++++------------- crates/node-core/src/args/pruning_args.rs | 46 ++++++----- crates/node-core/src/node_config.rs | 4 +- 5 files changed, 143 insertions(+), 88 deletions(-) diff --git a/crates/consensus/auto-seal/src/mode.rs b/crates/consensus/auto-seal/src/mode.rs index 809455311e13f..b124010e62814 100644 --- a/crates/consensus/auto-seal/src/mode.rs +++ b/crates/consensus/auto-seal/src/mode.rs @@ -62,6 +62,17 @@ impl MiningMode { } } +impl fmt::Display for MiningMode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let kind = match self { + MiningMode::None => "None", + MiningMode::Auto(_) => "Auto", + MiningMode::FixedBlockTime(_) => "FixedBlockTime", + }; + write!(f, "{kind}") + } +} + /// A miner that's supposed to create a new block every `interval`, mining all transactions that are /// ready at that time. /// diff --git a/crates/node-builder/src/launch/common.rs b/crates/node-builder/src/launch/common.rs index 765673bf07e79..f4d2a931c4223 100644 --- a/crates/node-builder/src/launch/common.rs +++ b/crates/node-builder/src/launch/common.rs @@ -1,20 +1,28 @@ //! Helper types that can be used by launchers. 
+use std::{cmp::max, sync::Arc, thread::available_parallelism}; + use eyre::Context; use rayon::ThreadPoolBuilder; -use reth_config::PruneConfig; +use tokio::sync::mpsc::Receiver; + +use reth_auto_seal_consensus::MiningMode; +use reth_config::{config::EtlConfig, PruneConfig}; use reth_db::{database::Database, database_metrics::DatabaseMetrics}; +use reth_interfaces::p2p::headers::client::HeadersClient; use reth_node_core::{ cli::config::RethRpcConfig, dirs::{ChainPath, DataDirPath}, + init::{init_genesis, InitDatabaseError}, node_config::NodeConfig, }; -use reth_primitives::{Chain, ChainSpec, Head, B256}; +use reth_primitives::{BlockNumber, Chain, ChainSpec, Head, PruneModes, B256}; use reth_provider::{providers::StaticFileProvider, ProviderFactory}; +use reth_prune::PrunerBuilder; use reth_rpc::JwtSecret; +use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{error, info}; -use std::{cmp::max, sync::Arc, thread::available_parallelism}; /// Reusable setup for launching a node. 
/// @@ -73,6 +81,12 @@ impl LaunchContext { Ok(toml_config) } + /// Convenience function to [Self::configure_globals] + pub fn with_configured_globals(self) -> Self { + self.configure_globals(); + self + } + /// Configure global settings this includes: /// /// - Raising the file descriptor limit @@ -155,6 +169,31 @@ impl LaunchContextWith> { } } impl LaunchContextWith> { + /// Adjust certain settings in the config to make sure they are set correctly + /// + /// This includes: + /// - Making sure the ETL dir is set to the datadir + /// - RPC settings are adjusted to the correct port + pub fn with_adjusted_configs(self) -> Self { + self.ensure_etl_datadir().with_adjusted_rpc_instance_ports() + } + + /// Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to + pub fn ensure_etl_datadir(mut self) -> Self { + if self.toml_config_mut().stages.etl.dir.is_none() { + self.toml_config_mut().stages.etl.dir = + Some(EtlConfig::from_datadir(&self.data_dir().data_dir_path())) + } + + self + } + + /// Change rpc port numbers based on the instance number. + pub fn with_adjusted_rpc_instance_ports(mut self) -> Self { + self.node_config_mut().adjust_instance_ports(); + self + } + /// Returns the attached [NodeConfig]. 
pub const fn node_config(&self) -> &NodeConfig { &self.left().config @@ -196,8 +235,20 @@ impl LaunchContextWith> { } /// Returns the configured [PruneConfig] - pub fn prune_config(&self) -> eyre::Result> { - Ok(self.node_config().prune_config()?.or_else(|| self.toml_config().prune.clone())) + pub fn prune_config(&self) -> Option { + self.node_config().prune_config().or_else(|| self.toml_config().prune.clone()) + } + + /// Returns the configured [PruneModes] + pub fn prune_modes(&self) -> Option { + self.prune_config().map(|config| config.segments) + } + + /// Returns an initialized [PrunerBuilder] based on the configured [PruneConfig] + pub fn pruner_builder(&self) -> PrunerBuilder { + PrunerBuilder::new(self.prune_config().unwrap_or_default()) + .prune_delete_limit(self.chain_spec().prune_delete_limit) + .timeout(PrunerBuilder::DEFAULT_TIMEOUT) } /// Returns the initial pipeline target, based on whether or not the node is running in @@ -216,6 +267,17 @@ impl LaunchContextWith> { let secret = self.node_config().rpc.auth_jwt_secret(default_jwt_path)?; Ok(secret) } + + /// Returns the [MiningMode] intended for --dev mode. + pub fn dev_mining_mode(&self, pending_transactions_listener: Receiver) -> MiningMode { + if let Some(interval) = self.node_config().dev.block_time { + MiningMode::interval(interval) + } else if let Some(max_transactions) = self.node_config().dev.block_max_transactions { + MiningMode::instant(max_transactions, pending_transactions_listener) + } else { + MiningMode::instant(1, pending_transactions_listener) + } + } } impl LaunchContextWith> @@ -267,6 +329,29 @@ where self.right().static_file_provider() } + /// Creates a new [StaticFileProducer] with the attached database. 
+ pub fn static_file_producer(&self) -> StaticFileProducer { + StaticFileProducer::new( + self.provider_factory().clone(), + self.static_file_provider(), + self.prune_modes().unwrap_or_default(), + ) + } + + /// Write the genesis block and state if it has not already been written + pub fn init_genesis(&self) -> Result { + init_genesis(self.provider_factory().clone()) + } + + /// Returns the max block that the node should run to, looking it up from the network if + /// necessary + pub async fn max_block(&self, client: C) -> eyre::Result> + where + C: HeadersClient, + { + self.node_config().max_block(client, self.provider_factory().clone()).await + } + /// Starts the prometheus endpoint. pub async fn start_prometheus_endpoint(&self) -> eyre::Result<()> { let prometheus_handle = self.node_config().install_prometheus_recorder()?; diff --git a/crates/node-builder/src/launch/mod.rs b/crates/node-builder/src/launch/mod.rs index 6181e0c98120e..00304816cf18d 100644 --- a/crates/node-builder/src/launch/mod.rs +++ b/crates/node-builder/src/launch/mod.rs @@ -8,7 +8,7 @@ use crate::{ BuilderContext, NodeBuilderWithComponents, NodeHandle, RethFullAdapter, }; use futures::{future, future::Either, stream, stream_select, StreamExt}; -use reth_auto_seal_consensus::{AutoSealConsensus, MiningMode}; +use reth_auto_seal_consensus::AutoSealConsensus; use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, BeaconConsensus, BeaconConsensusEngine, @@ -16,7 +16,6 @@ use reth_beacon_consensus::{ use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; -use reth_config::config::EtlConfig; use reth_consensus::Consensus; use reth_db::{ database::Database, @@ -31,15 +30,12 @@ use reth_node_core::{ engine_api_store::EngineApiStore, engine_skip_fcu::EngineApiSkipFcu, exit::NodeExitFuture, - init::init_genesis, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; use reth_primitives::format_ether; use 
reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions}; -use reth_prune::PrunerBuilder; use reth_revm::EvmProcessorFactory; use reth_rpc_engine_api::EngineApi; -use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::TransactionPool; @@ -99,14 +95,14 @@ where config, } = target; - // configure globals - ctx.configure_globals(); - - let mut ctx = ctx + let ctx = ctx + .with_configured_globals() // load the toml config .with_loaded_toml_config(config)? // attach the database .attach(database.clone()) + // ensure certain settings take effect + .with_adjusted_configs() // Create the provider factory .with_provider_factory()?; @@ -115,8 +111,7 @@ where ctx.start_prometheus_endpoint().await?; debug!(target: "reth::cli", chain=%ctx.chain_id(), genesis=?ctx.genesis_hash(), "Initializing genesis"); - - init_genesis(ctx.provider_factory().clone())?; + ctx.init_genesis()?; info!(target: "reth::cli", "\n{}", ctx.chain_spec().display_hardforks()); @@ -132,8 +127,6 @@ where let sync_metrics_listener = reth_stages::MetricsListener::new(sync_metrics_rx); ctx.task_executor().spawn_critical("stages metrics listener task", sync_metrics_listener); - let prune_config = ctx.prune_config()?; - // Configure the blockchain tree for the node let evm_config = types.evm_config(); let tree_config = BlockchainTreeConfig::default(); @@ -142,12 +135,8 @@ where consensus.clone(), EvmProcessorFactory::new(ctx.chain_spec(), evm_config.clone()), ); - let tree = BlockchainTree::new( - tree_externals, - tree_config, - prune_config.as_ref().map(|prune| prune.segments.clone()), - )? - .with_sync_metrics_tx(sync_metrics_tx.clone()); + let tree = BlockchainTree::new(tree_externals, tree_config, ctx.prune_modes())? 
+ .with_sync_metrics_tx(sync_metrics_tx.clone()); let canon_state_notification_sender = tree.canon_state_notification_sender(); let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); @@ -286,17 +275,10 @@ manager", consensus_engine_rx = engine_intercept_rx; }; - let max_block = ctx - .node_config() - .max_block(network_client.clone(), ctx.provider_factory().clone()) - .await?; + let max_block = ctx.max_block(network_client.clone()).await?; let mut hooks = EngineHooks::new(); - let static_file_producer = StaticFileProducer::new( - ctx.provider_factory().clone(), - ctx.static_file_provider(), - prune_config.clone().unwrap_or_default().segments, - ); + let static_file_producer = ctx.static_file_producer(); let static_file_producer_events = static_file_producer.lock().events(); hooks.add(StaticFileHook::new( static_file_producer.clone(), @@ -304,12 +286,6 @@ manager", )); info!(target: "reth::cli", "StaticFileProducer initialized"); - // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to - if ctx.toml_config_mut().stages.etl.dir.is_none() { - ctx.toml_config_mut().stages.etl.dir = - Some(EtlConfig::from_datadir(&ctx.data_dir().data_dir_path())); - } - // Configure the pipeline let pipeline_exex_handle = exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); @@ -322,18 +298,9 @@ address.to_string(), format_ether(alloc.balance)); } // install auto-seal - let pending_transactions_listener = - node_adapter.components.pool().pending_transactions_listener(); - - let mining_mode = if let Some(interval) = ctx.node_config().dev.block_time { - MiningMode::interval(interval) - } else if let Some(max_transactions) = ctx.node_config().dev.block_max_transactions { - MiningMode::instant(max_transactions, pending_transactions_listener) - } else { - info!(target: "reth::cli", "No mining mode specified, defaulting to -ReadyTransaction"); - MiningMode::instant(1, pending_transactions_listener) - }; + let mining_mode = + 
ctx.dev_mining_mode(node_adapter.components.pool().pending_transactions_listener()); + info!(target: "reth::cli", mode=%mining_mode, "configuring dev mining mode"); let (_, client, mut task) = reth_auto_seal_consensus::AutoSealBuilder::new( ctx.chain_spec(), @@ -354,7 +321,7 @@ ReadyTransaction"); ctx.provider_factory().clone(), ctx.task_executor(), sync_metrics_tx, - prune_config.clone(), + ctx.prune_config(), max_block, static_file_producer, evm_config, @@ -377,7 +344,7 @@ ReadyTransaction"); ctx.provider_factory().clone(), ctx.task_executor(), sync_metrics_tx, - prune_config.clone(), + ctx.prune_config(), max_block, static_file_producer, evm_config, @@ -392,11 +359,8 @@ ReadyTransaction"); let initial_target = ctx.initial_pipeline_target(); - let prune_config = prune_config.unwrap_or_default(); - let mut pruner_builder = PrunerBuilder::new(prune_config.clone()) - .max_reorg_depth(tree_config.max_reorg_depth() as usize) - .prune_delete_limit(ctx.chain_spec().prune_delete_limit) - .timeout(PrunerBuilder::DEFAULT_TIMEOUT); + let mut pruner_builder = + ctx.pruner_builder().max_reorg_depth(tree_config.max_reorg_depth() as usize); if let Some(exex_manager_handle) = &exex_manager_handle { pruner_builder = pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); @@ -405,8 +369,8 @@ ReadyTransaction"); let mut pruner = pruner_builder.build(ctx.provider_factory().clone()); let pruner_events = pruner.events(); + info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); hooks.add(PruneHook::new(pruner, Box::new(ctx.task_executor().clone()))); - info!(target: "reth::cli", ?prune_config, "Pruner initialized"); // Configure the consensus engine let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel( @@ -463,9 +427,6 @@ ReadyTransaction"); // extract the jwt secret from the args if possible let jwt_secret = ctx.auth_jwt_secret()?; - // adjust rpc port numbers based on instance 
number - ctx.node_config_mut().adjust_instance_ports(); - // Start RPC servers let (rpc_server_handles, mut rpc_registry) = crate::rpc::launch_rpc_servers( node_adapter.clone(), diff --git a/crates/node-core/src/args/pruning_args.rs b/crates/node-core/src/args/pruning_args.rs index 52605338ee95d..4adc721586ba5 100644 --- a/crates/node-core/src/args/pruning_args.rs +++ b/crates/node-core/src/args/pruning_args.rs @@ -5,7 +5,6 @@ use reth_config::config::PruneConfig; use reth_primitives::{ ChainSpec, PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE, }; -use std::sync::Arc; /// Parameters for pruning and full node #[derive(Debug, Clone, Args, PartialEq, Eq, Default)] @@ -19,31 +18,30 @@ pub struct PruningArgs { impl PruningArgs { /// Returns pruning configuration. - pub fn prune_config(&self, chain_spec: Arc) -> eyre::Result> { - Ok(if self.full { - Some(PruneConfig { - block_interval: 5, - segments: PruneModes { - sender_recovery: Some(PruneMode::Full), - transaction_lookup: None, - receipts: chain_spec + pub fn prune_config(&self, chain_spec: &ChainSpec) -> Option { + if !self.full { + return None; + } + Some(PruneConfig { + block_interval: 5, + segments: PruneModes { + sender_recovery: Some(PruneMode::Full), + transaction_lookup: None, + receipts: chain_spec + .deposit_contract + .as_ref() + .map(|contract| PruneMode::Before(contract.block)), + account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + receipts_log_filter: ReceiptsLogPruneConfig( + chain_spec .deposit_contract .as_ref() - .map(|contract| PruneMode::Before(contract.block)), - account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - receipts_log_filter: ReceiptsLogPruneConfig( - chain_spec - .deposit_contract - .as_ref() - .map(|contract| (contract.address, PruneMode::Before(contract.block))) - .into_iter() - 
.collect(), - ), - }, - }) - } else { - None + .map(|contract| (contract.address, PruneMode::Before(contract.block))) + .into_iter() + .collect(), + ), + }, }) } } diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 608f12cad66f3..c25395e07246e 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -262,8 +262,8 @@ impl NodeConfig { } /// Returns pruning configuration. - pub fn prune_config(&self) -> eyre::Result> { - self.pruning.prune_config(Arc::clone(&self.chain)) + pub fn prune_config(&self) -> Option { + self.pruning.prune_config(&self.chain) } /// Returns the max block that the node should run to, looking it up from the network if From 16ae640615d603514e22565bc217345871569996 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 25 Apr 2024 20:29:06 +0200 Subject: [PATCH 339/700] chore: decrease local pending block expiration time (#7896) --- crates/rpc/rpc/src/eth/api/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index c23dfe1acd0c5..6c936808e9964 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -355,7 +355,7 @@ where let now = Instant::now(); *lock = Some(PendingBlock { block: pending_block.clone(), - expires_at: now + Duration::from_secs(3), + expires_at: now + Duration::from_secs(1), }); Ok(Some(pending_block)) From 062b3d76b94ce54ef56c2c421a3e11340365e20f Mon Sep 17 00:00:00 2001 From: Andrzej Sulkowski <111314156+andrzejSulkowski@users.noreply.github.com> Date: Thu, 25 Apr 2024 20:36:09 +0200 Subject: [PATCH 340/700] refactor: move network-txpool.rs example to its own folder (#7892) Co-authored-by: Oliver Nordbjerg --- Cargo.lock | 11 +++++++++++ Cargo.toml | 1 + examples/Cargo.toml | 7 +------ examples/README.md | 6 +++--- examples/network-txpool/Cargo.toml | 13 +++++++++++++ .../src/main.rs} | 2 +- 6 files changed, 30 insertions(+), 
10 deletions(-) create mode 100644 examples/network-txpool/Cargo.toml rename examples/{network-txpool.rs => network-txpool/src/main.rs} (98%) diff --git a/Cargo.lock b/Cargo.lock index 256d5e4b6b95a..0df1d9fd35310 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4864,6 +4864,17 @@ dependencies = [ "unsigned-varint 0.7.2", ] +[[package]] +name = "network-txpool" +version = "0.0.0" +dependencies = [ + "eyre", + "reth-network", + "reth-provider", + "reth-transaction-pool", + "tokio", +] + [[package]] name = "nibble_vec" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 7b9b63e956ab4..04e26fc4418a0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -80,6 +80,7 @@ members = [ "examples/custom-dev-node/", "examples/custom-payload-builder/", "examples/manual-p2p/", + "examples/network-txpool/", "examples/rpc-db/", "examples/txpool-tracing/", "examples/polygon-p2p/", diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 82b6be45ad2e1..2379e9a0fe5b0 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -31,9 +31,4 @@ path = "db-access.rs" [[example]] name = "network" -path = "network.rs" - -[[example]] -name = "network-txpool" -path = "network-txpool.rs" - +path = "network.rs" \ No newline at end of file diff --git a/examples/README.md b/examples/README.md index db0bdb999bd5a..dcec15d358dcc 100644 --- a/examples/README.md +++ b/examples/README.md @@ -48,10 +48,10 @@ to make a PR! 
## Mempool -| Example | Description | -| ----------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- | +| Example | Description | +|------------------------------------------------------| -------------------------------------------------------------------------------------------------------------------------- | | [Trace pending transactions](./txpool-tracing) | Illustrates how to trace pending transactions as they arrive in the mempool | -| [Standalone txpool](./network-txpool.rs) | Illustrates how to use the network as a standalone component together with a transaction pool with a custom pool validator | +| [Standalone txpool](./network-txpool) | Illustrates how to use the network as a standalone component together with a transaction pool with a custom pool validator | ## P2P diff --git a/examples/network-txpool/Cargo.toml b/examples/network-txpool/Cargo.toml new file mode 100644 index 0000000000000..12544a8f30df4 --- /dev/null +++ b/examples/network-txpool/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "network-txpool" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth-provider = { workspace = true, features = ["test-utils"] } +eyre.workspace = true +tokio.workspace = true +reth-network.workspace = true +reth-transaction-pool.workspace = true diff --git a/examples/network-txpool.rs b/examples/network-txpool/src/main.rs similarity index 98% rename from examples/network-txpool.rs rename to examples/network-txpool/src/main.rs index 0af120a89d963..6f8d69eab0217 100644 --- a/examples/network-txpool.rs +++ b/examples/network-txpool/src/main.rs @@ -4,7 +4,7 @@ //! Run with //! //! ```not_rust -//! cargo run --example network-txpool +//! cargo run --release -p network-txpool -- node //! 
``` use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; From 844bcb86b708fc6bfa369aa2702766232c9e219b Mon Sep 17 00:00:00 2001 From: Andrzej Sulkowski <111314156+andrzejSulkowski@users.noreply.github.com> Date: Thu, 25 Apr 2024 20:48:23 +0200 Subject: [PATCH 341/700] refactor: replace rpc TransactionKind with alloy_primitives::TxKind (#7885) Co-authored-by: Oliver Nordbjerg --- .../rpc-types-compat/src/transaction/typed.rs | 18 +--- .../rpc-types/src/eth/transaction/typed.rs | 90 ++----------------- crates/rpc/rpc-types/src/lib.rs | 2 +- crates/rpc/rpc/src/eth/api/transactions.rs | 5 +- 4 files changed, 13 insertions(+), 102 deletions(-) diff --git a/crates/rpc/rpc-types-compat/src/transaction/typed.rs b/crates/rpc/rpc-types-compat/src/transaction/typed.rs index 03f502a208a29..b119a0956c4ba 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/typed.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/typed.rs @@ -16,7 +16,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, gas_price: tx.gas_price.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind, value: tx.value, input: tx.input, }), @@ -25,7 +25,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, gas_price: tx.gas_price.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind, value: tx.value, input: tx.input, access_list: tx.access_list, @@ -35,7 +35,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, max_fee_per_gas: tx.max_fee_per_gas.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind, value: tx.value, input: tx.input, access_list: tx.access_list, @@ -47,7 +47,7 @@ pub fn to_primitive_transaction( gas_limit: tx.gas_limit.to(), max_fee_per_gas: tx.max_fee_per_gas.to(), max_priority_fee_per_gas: tx.max_priority_fee_per_gas.to(), - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind, value: tx.value, 
access_list: tx.access_list, blob_versioned_hashes: tx.blob_versioned_hashes, @@ -56,13 +56,3 @@ pub fn to_primitive_transaction( }), }) } - -/// Transforms a [reth_rpc_types::TransactionKind] into a [reth_primitives::TxKind] -pub fn to_primitive_transaction_kind( - kind: reth_rpc_types::TransactionKind, -) -> reth_primitives::TxKind { - match kind { - reth_rpc_types::TransactionKind::Call(to) => reth_primitives::TxKind::Call(to), - reth_rpc_types::TransactionKind::Create => reth_primitives::TxKind::Create, - } -} diff --git a/crates/rpc/rpc-types/src/eth/transaction/typed.rs b/crates/rpc/rpc-types/src/eth/transaction/typed.rs index bf995c3532ed9..6526bc2b6cc10 100644 --- a/crates/rpc/rpc-types/src/eth/transaction/typed.rs +++ b/crates/rpc/rpc-types/src/eth/transaction/typed.rs @@ -2,10 +2,8 @@ //! transaction deserialized from the json input of an RPC call. Depending on what fields are set, //! it can be converted into the container type [`TypedTransactionRequest`]. -use alloy_primitives::{Address, Bytes, B256, U256}; -use alloy_rlp::{Buf, BufMut, Decodable, Encodable, Error as RlpError, EMPTY_STRING_CODE}; +use alloy_primitives::{Bytes, TxKind, B256, U256}; use alloy_rpc_types::{AccessList, BlobTransactionSidecar}; -use serde::{Deserialize, Serialize}; /// Container type for various Ethereum transaction requests /// @@ -36,7 +34,7 @@ pub struct LegacyTransactionRequest { /// The gas limit for the transaction pub gas_limit: U256, /// The kind of transaction (e.g., Call, Create) - pub kind: TransactionKind, + pub kind: TxKind, /// The value of the transaction pub value: U256, /// The input data for the transaction @@ -57,7 +55,7 @@ pub struct EIP2930TransactionRequest { /// The gas limit for the transaction pub gas_limit: U256, /// The kind of transaction (e.g., Call, Create) - pub kind: TransactionKind, + pub kind: TxKind, /// The value of the transaction pub value: U256, /// The input data for the transaction @@ -80,7 +78,7 @@ pub struct EIP1559TransactionRequest 
{ /// The gas limit for the transaction pub gas_limit: U256, /// The kind of transaction (e.g., Call, Create) - pub kind: TransactionKind, + pub kind: TxKind, /// The value of the transaction pub value: U256, /// The input data for the transaction @@ -103,7 +101,7 @@ pub struct EIP4844TransactionRequest { /// The gas limit for the transaction pub gas_limit: U256, /// The kind of transaction (e.g., Call, Create) - pub kind: TransactionKind, + pub kind: TxKind, /// The value of the transaction pub value: U256, /// The input data for the transaction @@ -117,81 +115,3 @@ pub struct EIP4844TransactionRequest { /// Sidecar information for the transaction pub sidecar: BlobTransactionSidecar, } - -/// Represents the `to` field of a transaction request -/// -/// This determines what kind of transaction this is -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub enum TransactionKind { - /// Transaction will call this address or transfer funds to this address - Call(Address), - /// No `to` field set, this transaction will create a contract - Create, -} - -// == impl TransactionKind == - -impl TransactionKind { - /// If this transaction is a call this returns the address of the callee - pub fn as_call(&self) -> Option<&Address> { - match self { - TransactionKind::Call(to) => Some(to), - TransactionKind::Create => None, - } - } -} - -impl Encodable for TransactionKind { - /// This encodes the `to` field of a transaction request. 
- /// If the [TransactionKind] is a [TransactionKind::Call] it will encode the inner address: - /// `rlp(address)` - /// - /// If the [TransactionKind] is a [TransactionKind::Create] it will encode an empty list: - /// `rlp([])`, which is also - fn encode(&self, out: &mut dyn BufMut) { - match self { - TransactionKind::Call(to) => to.encode(out), - TransactionKind::Create => [].encode(out), - } - } - fn length(&self) -> usize { - match self { - TransactionKind::Call(to) => to.length(), - TransactionKind::Create => [].length(), - } - } -} - -impl Decodable for TransactionKind { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - if let Some(&first) = buf.first() { - if first == EMPTY_STRING_CODE { - buf.advance(1); - Ok(TransactionKind::Create) - } else { - let addr =
::decode(buf)?; - Ok(TransactionKind::Call(addr)) - } - } else { - Err(RlpError::InputTooShort) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn raw_kind_encoding_sanity() { - // check the 0x80 encoding for Create - let mut buf = Vec::new(); - TransactionKind::Create.encode(&mut buf); - assert_eq!(buf, vec![0x80]); - - // check decoding - let buf = [0x80]; - let decoded = TransactionKind::decode(&mut &buf[..]).unwrap(); - assert_eq!(decoded, TransactionKind::Create); - } -} diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs index 68ad11c6ebdfe..0adcab0f3301c 100644 --- a/crates/rpc/rpc-types/src/lib.rs +++ b/crates/rpc/rpc-types/src/lib.rs @@ -38,7 +38,7 @@ pub use eth::{ ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, PayloadError, }, error::ToRpcError, - transaction::{self, TransactionKind, TransactionRequest, TypedTransactionRequest}, + transaction::{self, TransactionRequest, TypedTransactionRequest}, }; pub use mev::*; diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 2188b8d255bbc..1ca8ed1195cf9 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -8,6 +8,7 @@ use crate::{ }, EthApi, EthApiSpec, }; +use alloy_primitives::TxKind as RpcTransactionKind; use async_trait::async_trait; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; @@ -33,8 +34,8 @@ use reth_rpc_types::{ LegacyTransactionRequest, }, AnyReceiptEnvelope, AnyTransactionReceipt, Index, Log, ReceiptWithBloom, Transaction, - TransactionInfo, TransactionKind as RpcTransactionKind, TransactionReceipt, TransactionRequest, - TypedTransactionRequest, WithOtherFields, + TransactionInfo, TransactionReceipt, TransactionRequest, TypedTransactionRequest, + WithOtherFields, }; use reth_rpc_types_compat::transaction::from_recovered_with_block_context; use reth_transaction_pool::{TransactionOrigin, 
TransactionPool}; From 3ad3bbc593782280c4d34ed41388373b77f7b3bd Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 25 Apr 2024 21:23:21 +0200 Subject: [PATCH 342/700] chore: more launch builder style function (#7897) --- crates/node-builder/src/launch/common.rs | 22 ++++++++++++++++++++++ crates/node-builder/src/launch/mod.rs | 23 +++++++++++++---------- 2 files changed, 35 insertions(+), 10 deletions(-) diff --git a/crates/node-builder/src/launch/common.rs b/crates/node-builder/src/launch/common.rs index f4d2a931c4223..c57e12cf668b0 100644 --- a/crates/node-builder/src/launch/common.rs +++ b/crates/node-builder/src/launch/common.rs @@ -145,6 +145,16 @@ impl LaunchContextWith { attachment: Attached::new(self.attachment, attachment), } } + + /// Consumes the type and calls a function with a reference to the context. + // Returns the context again + pub fn inspect(self, f: F) -> Self + where + F: FnOnce(&Self), + { + f(&self); + self + } } impl LaunchContextWith> { @@ -338,6 +348,12 @@ where ) } + /// Convenience function to [Self::init_genesis] + pub fn with_genesis(self) -> Result { + init_genesis(self.provider_factory().clone())?; + Ok(self) + } + /// Write the genesis block and state if it has not already been written pub fn init_genesis(&self) -> Result { init_genesis(self.provider_factory().clone()) @@ -352,6 +368,12 @@ where self.node_config().max_block(client, self.provider_factory().clone()).await } + /// Convenience function to [Self::start_prometheus_endpoint] + pub async fn with_prometheus(self) -> eyre::Result { + self.start_prometheus_endpoint().await?; + Ok(self) + } + /// Starts the prometheus endpoint. 
pub async fn start_prometheus_endpoint(&self) -> eyre::Result<()> { let prometheus_handle = self.node_config().install_prometheus_recorder()?; diff --git a/crates/node-builder/src/launch/mod.rs b/crates/node-builder/src/launch/mod.rs index 00304816cf18d..408e47cd7a29a 100644 --- a/crates/node-builder/src/launch/mod.rs +++ b/crates/node-builder/src/launch/mod.rs @@ -95,6 +95,7 @@ where config, } = target; + // setup the launch context let ctx = ctx .with_configured_globals() // load the toml config @@ -104,16 +105,18 @@ where // ensure certain settings take effect .with_adjusted_configs() // Create the provider factory - .with_provider_factory()?; - - info!(target: "reth::cli", "Database opened"); - - ctx.start_prometheus_endpoint().await?; - - debug!(target: "reth::cli", chain=%ctx.chain_id(), genesis=?ctx.genesis_hash(), "Initializing genesis"); - ctx.init_genesis()?; - - info!(target: "reth::cli", "\n{}", ctx.chain_spec().display_hardforks()); + .with_provider_factory()? + .inspect(|_| { + info!(target: "reth::cli", "Database opened"); + }) + .with_prometheus().await? + .inspect(|this| { + debug!(target: "reth::cli", chain=%this.chain_id(), genesis=?this.genesis_hash(), "Initializing genesis"); + }) + .with_genesis()? 
+ .inspect(|this| { + info!(target: "reth::cli", "\n{}", this.chain_spec().display_hardforks()); + }); // setup the consensus instance let consensus: Arc = if ctx.is_dev() { From e2e5201d8a0599116a0e73a818062bf6958efda2 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Thu, 25 Apr 2024 21:50:38 +0200 Subject: [PATCH 343/700] chore(deps): bump enr, discv5, secp256k1 (#7000) Co-authored-by: Emilia Hane --- Cargo.lock | 75 +++---- Cargo.toml | 16 +- .../interfaces/src/test_utils/generators.rs | 14 +- crates/net/discv4/Cargo.toml | 3 +- crates/net/discv4/src/lib.rs | 4 +- crates/net/discv4/src/proto.rs | 183 ++++-------------- crates/net/discv5/Cargo.toml | 3 +- crates/net/discv5/src/enr.rs | 15 +- crates/net/discv5/src/lib.rs | 3 +- crates/net/dns/Cargo.toml | 5 +- crates/net/dns/src/lib.rs | 64 +++++- crates/net/dns/src/tree.rs | 2 +- crates/net/ecies/src/algorithm.rs | 6 +- crates/net/network/Cargo.toml | 6 +- crates/primitives/Cargo.toml | 8 +- crates/primitives/src/genesis.rs | 24 +-- crates/primitives/src/transaction/mod.rs | 4 +- crates/primitives/src/transaction/util.rs | 4 +- 18 files changed, 169 insertions(+), 270 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0df1d9fd35310..901d836cd4c1a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2383,11 +2383,13 @@ dependencies = [ [[package]] name = "discv5" -version = "0.4.1" -source = "git+https://github.com/sigp/discv5?rev=04ac004#04ac0042a345a9edf93b090007e5d31c008261ed" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cafb8ed8d460b7d1c8d4c970270d45ecb5e283179a3945143196624c55cda6ac" dependencies = [ "aes 0.7.5", "aes-gcm", + "alloy-rlp", "arrayvec", "delay_map", "enr", @@ -2402,7 +2404,6 @@ dependencies = [ "more-asserts", "parking_lot 0.11.2", "rand 0.8.5", - "rlp", "smallvec", "socket2 0.4.10", "tokio", @@ -2577,10 +2578,11 @@ checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" 
[[package]] name = "enr" -version = "0.10.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a3d8dc56e02f954cac8eb489772c552c473346fc34f67412bb6244fd647f7e4" +checksum = "4ab656b89cdd15051d92d0931888103508de14ef9e51177c86d478dfa551ce0f" dependencies = [ + "alloy-rlp", "base64 0.21.7", "bytes", "ed25519-dalek", @@ -2588,8 +2590,7 @@ dependencies = [ "k256", "log", "rand 0.8.5", - "rlp", - "secp256k1 0.27.0", + "secp256k1", "serde", "sha3", "zeroize", @@ -4561,7 +4562,7 @@ dependencies = [ "reth-eth-wire", "reth-network", "reth-primitives", - "secp256k1 0.27.0", + "secp256k1", "tokio", ] @@ -5445,7 +5446,7 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-tracing", - "secp256k1 0.27.0", + "secp256k1", "serde_json", "tokio", "tokio-stream", @@ -6288,7 +6289,7 @@ dependencies = [ "reth-net-nat", "reth-network", "reth-primitives", - "secp256k1 0.27.0", + "secp256k1", "serde", "tempfile", "toml", @@ -6365,8 +6366,7 @@ dependencies = [ "reth-net-nat", "reth-primitives", "reth-tracing", - "rlp", - "secp256k1 0.27.0", + "secp256k1", "serde", "thiserror", "tokio", @@ -6391,8 +6391,7 @@ dependencies = [ "reth-metrics", "reth-primitives", "reth-tracing", - "rlp", - "secp256k1 0.27.0", + "secp256k1", "thiserror", "tokio", "tracing", @@ -6407,11 +6406,12 @@ dependencies = [ "enr", "linked_hash_set", "parking_lot 0.12.1", + "rand 0.8.5", "reth-net-common", "reth-primitives", "reth-tracing", "schnellru", - "secp256k1 0.27.0", + "secp256k1", "serde", "serde_with", "thiserror", @@ -6474,7 +6474,7 @@ dependencies = [ "reth-provider", "reth-rpc", "reth-tracing", - "secp256k1 0.27.0", + "secp256k1", "serde_json", "tokio", "tokio-stream", @@ -6501,7 +6501,7 @@ dependencies = [ "rand 0.8.5", "reth-net-common", "reth-primitives", - "secp256k1 0.27.0", + "secp256k1", "sha2 0.10.8", "sha3", "thiserror", @@ -6545,7 +6545,7 @@ dependencies = [ "reth-net-common", "reth-primitives", "reth-tracing", - "secp256k1 0.27.0", + 
"secp256k1", "serde", "snap", "test-fuzz", @@ -6572,7 +6572,7 @@ dependencies = [ "reth-net-common", "reth-primitives", "reth-tracing", - "secp256k1 0.27.0", + "secp256k1", "serde", "test-fuzz", "thiserror", @@ -6700,7 +6700,7 @@ dependencies = [ "reth-eth-wire-types", "reth-network-api", "reth-primitives", - "secp256k1 0.27.0", + "secp256k1", "thiserror", "tokio", "tracing", @@ -6849,7 +6849,7 @@ dependencies = [ "reth-tracing", "reth-transaction-pool", "schnellru", - "secp256k1 0.27.0", + "secp256k1", "serde", "serde_json", "serial_test", @@ -6995,7 +6995,7 @@ dependencies = [ "reth-tasks", "reth-tracing", "reth-transaction-pool", - "secp256k1 0.27.0", + "secp256k1", "serde", "serde_json", "shellexpand", @@ -7189,7 +7189,7 @@ dependencies = [ "revm", "revm-primitives", "roaring", - "secp256k1 0.27.0", + "secp256k1", "serde", "serde_json", "serde_with", @@ -7315,7 +7315,7 @@ dependencies = [ "revm-inspectors", "revm-primitives", "schnellru", - "secp256k1 0.27.0", + "secp256k1", "serde", "serde_json", "tempfile", @@ -7438,7 +7438,7 @@ dependencies = [ "proptest", "proptest-derive", "rand 0.8.5", - "secp256k1 0.27.0", + "secp256k1", "serde", "serde_json", "serde_with", @@ -7718,7 +7718,7 @@ dependencies = [ "once_cell", "revm-primitives", "ripemd", - "secp256k1 0.28.2", + "secp256k1", "sha2 0.10.8", "substrate-bn", ] @@ -8151,17 +8151,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "secp256k1" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25996b82292a7a57ed3508f052cfff8640d38d32018784acd714758b43da9c8f" -dependencies = [ - "rand 0.8.5", - "secp256k1-sys 0.8.1", - "serde", -] - [[package]] name = "secp256k1" version = "0.28.2" @@ -8169,16 +8158,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" dependencies = [ "rand 0.8.5", - "secp256k1-sys 0.9.2", -] - -[[package]] -name = "secp256k1-sys" -version = "0.8.1" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a129b9e9efbfb223753b9163c4ab3b13cff7fd9c7f010fbac25ab4099fa07e" -dependencies = [ - "cc", + "secp256k1-sys", + "serde", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 04e26fc4418a0..954fd85d88a31 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -272,13 +272,8 @@ reth-trie-parallel = { path = "crates/trie-parallel" } reth-node-events = { path = "crates/node/events" } # revm -revm = { version = "8.0.0", features = [ - "std", - "secp256k1", -], default-features = false } -revm-primitives = { version = "3.1.0", features = [ - "std", -], default-features = false } +revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } +revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "dc614ee" } # eth @@ -359,7 +354,7 @@ http = "0.2.8" http-body = "0.4.5" # p2p -discv5 = { git = "https://github.com/sigp/discv5", rev = "04ac004" } +discv5 = "0.6.0" igd-next = "0.14.3" # rpc @@ -368,11 +363,12 @@ jsonrpsee-core = "0.22" jsonrpsee-types = "0.22" # crypto -secp256k1 = { version = "0.27.0", default-features = false, features = [ +secp256k1 = { version = "0.28", default-features = false, features = [ "global-context", "recovery", ] } -enr = { version = "=0.10.0", default-features = false, features = ["k256"] } +# TODO: Remove `k256` feature: https://github.com/sigp/enr/pull/74 +enr = { version = "0.12.0", default-features = false, features = ["k256", "rust-secp256k1"] } # for eip-4844 c-kzg = "1.0.0" diff --git a/crates/interfaces/src/test_utils/generators.rs b/crates/interfaces/src/test_utils/generators.rs index 0f1930b6005fe..506358276c740 100644 --- a/crates/interfaces/src/test_utils/generators.rs +++ b/crates/interfaces/src/test_utils/generators.rs @@ -6,7 +6,7 @@ use reth_primitives::{ proofs, sign_message, Account, Address, BlockNumber, Bytes, Header, 
Log, Receipt, SealedBlock, SealedHeader, StorageEntry, Transaction, TransactionSigned, TxKind, TxLegacy, B256, U256, }; -use secp256k1::{KeyPair, Secp256k1}; +use secp256k1::{Keypair, Secp256k1}; use std::{ cmp::{max, min}, collections::{hash_map::DefaultHasher, BTreeMap}, @@ -91,22 +91,22 @@ pub fn random_tx(rng: &mut R) -> Transaction { /// - There is no guarantee that the nonce is not used twice for the same account pub fn random_signed_tx(rng: &mut R) -> TransactionSigned { let secp = Secp256k1::new(); - let key_pair = KeyPair::new(&secp, rng); + let key_pair = Keypair::new(&secp, rng); let tx = random_tx(rng); sign_tx_with_key_pair(key_pair, tx) } /// Signs the [Transaction] with the given key pair. -pub fn sign_tx_with_key_pair(key_pair: KeyPair, tx: Transaction) -> TransactionSigned { +pub fn sign_tx_with_key_pair(key_pair: Keypair, tx: Transaction) -> TransactionSigned { let signature = sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap(); TransactionSigned::from_transaction_and_signature(tx, signature) } -/// Generates a set of [KeyPair]s based on the desired count. -pub fn generate_keys(rng: &mut R, count: usize) -> Vec { +/// Generates a set of [Keypair]s based on the desired count. 
+pub fn generate_keys(rng: &mut R, count: usize) -> Vec { let secp = Secp256k1::new(); - (0..count).map(|_| KeyPair::new(&secp, rng)).collect() + (0..count).map(|_| Keypair::new(&secp, rng)).collect() } /// Generate a random block filled with signed transactions (generated using @@ -404,7 +404,7 @@ mod tests { let signature_hash = tx.signature_hash(); for _ in 0..100 { - let key_pair = KeyPair::new(&secp, &mut rand::thread_rng()); + let key_pair = Keypair::new(&secp, &mut rand::thread_rng()); let signature = sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), signature_hash) diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index fa0e284ff2c16..9a7cb943d1f01 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -21,8 +21,7 @@ reth-net-nat.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } discv5.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] } -enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } -rlp = "0.5" # needed for enr +enr.workspace = true # async/futures tokio = { workspace = true, features = ["io-util", "net", "time"] } tokio-stream.workspace = true diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 48e25c163826a..3ac6bfa8cf871 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -39,7 +39,7 @@ use discv5::{ }; use enr::Enr; use parking_lot::Mutex; -use proto::{EnrRequest, EnrResponse, EnrWrapper}; +use proto::{EnrRequest, EnrResponse}; use reth_primitives::{bytes::Bytes, hex, ForkId, PeerId, B256}; use secp256k1::SecretKey; use std::{ @@ -1279,7 +1279,7 @@ impl Discv4Service { self.send_packet( Message::EnrResponse(EnrResponse { request_hash, - enr: EnrWrapper::new(self.local_eip_868_enr.clone()), + enr: self.local_eip_868_enr.clone(), }), remote_addr, ); diff --git a/crates/net/discv4/src/proto.rs 
b/crates/net/discv4/src/proto.rs index 8bbb84b62964d..bdca3bfb4de82 100644 --- a/crates/net/discv4/src/proto.rs +++ b/crates/net/discv4/src/proto.rs @@ -1,10 +1,8 @@ //! Discovery v4 protocol implementation. use crate::{error::DecodePacketError, EnrForkIdEntry, PeerId, MAX_PACKET_SIZE, MIN_PACKET_SIZE}; -use alloy_rlp::{ - length_of_length, Decodable, Encodable, Error as RlpError, Header, RlpDecodable, RlpEncodable, -}; -use enr::{Enr, EnrKey}; +use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header, RlpDecodable, RlpEncodable}; +use enr::Enr; use reth_primitives::{ bytes::{Buf, BufMut, Bytes, BytesMut}, keccak256, pk2id, ForkId, NodeRecord, B256, @@ -112,8 +110,7 @@ impl Message { // Sign the payload with the secret key using recoverable ECDSA let signature: RecoverableSignature = SECP256K1.sign_ecdsa_recoverable( - &secp256k1::Message::from_slice(keccak256(&payload).as_ref()) - .expect("B256.len() == MESSAGE_SIZE"), + &secp256k1::Message::from_digest(keccak256(&payload).0), secret_key, ); @@ -158,7 +155,7 @@ impl Message { let recoverable_sig = RecoverableSignature::from_compact(signature, recovery_id)?; // recover the public key - let msg = secp256k1::Message::from_slice(keccak256(&packet[97..]).as_slice())?; + let msg = secp256k1::Message::from_digest(keccak256(&packet[97..]).0); let pk = SECP256K1.recover_ecdsa(&msg, &recoverable_sig)?; let node_id = pk2id(&pk); @@ -234,85 +231,6 @@ pub struct Neighbours { pub expire: u64, } -/// Passthrough newtype to [`Enr`]. -/// -/// We need to wrap the ENR type because of Rust's orphan rules not allowing -/// implementing a foreign trait on a foreign type. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct EnrWrapper(Enr); - -impl EnrWrapper { - /// Creates a new instance of [`EnrWrapper`]. 
- pub fn new(enr: Enr) -> Self { - EnrWrapper(enr) - } -} - -impl Encodable for EnrWrapper -where - K: EnrKey, -{ - fn encode(&self, out: &mut dyn BufMut) { - let payload_length = self.0.signature().length() + - self.0.seq().length() + - self.0.iter().fold(0, |acc, (k, v)| acc + k.as_slice().length() + v.len()); - - let header = Header { list: true, payload_length }; - header.encode(out); - - self.0.signature().encode(out); - self.0.seq().encode(out); - - for (k, v) in self.0.iter() { - // Keys are byte data - k.as_slice().encode(out); - // Values are raw RLP encoded data - out.put_slice(v); - } - } - - fn length(&self) -> usize { - let payload_length = self.0.signature().length() + - self.0.seq().length() + - self.0.iter().fold(0, |acc, (k, v)| acc + k.as_slice().length() + v.len()); - payload_length + length_of_length(payload_length) - } -} - -fn to_alloy_rlp_error(e: rlp::DecoderError) -> RlpError { - match e { - rlp::DecoderError::RlpIsTooShort => RlpError::InputTooShort, - rlp::DecoderError::RlpInvalidLength => RlpError::Overflow, - rlp::DecoderError::RlpExpectedToBeList => RlpError::UnexpectedString, - rlp::DecoderError::RlpExpectedToBeData => RlpError::UnexpectedList, - rlp::DecoderError::RlpDataLenWithZeroPrefix | - rlp::DecoderError::RlpListLenWithZeroPrefix => RlpError::LeadingZero, - rlp::DecoderError::RlpInvalidIndirection => RlpError::NonCanonicalSize, - rlp::DecoderError::RlpIncorrectListLen => { - RlpError::Custom("incorrect list length when decoding rlp") - } - rlp::DecoderError::RlpIsTooBig => RlpError::Custom("rlp is too big"), - rlp::DecoderError::RlpInconsistentLengthAndData => { - RlpError::Custom("inconsistent length and data when decoding rlp") - } - rlp::DecoderError::Custom(s) => RlpError::Custom(s), - } -} - -impl Decodable for EnrWrapper { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - let enr = as rlp::Decodable>::decode(&rlp::Rlp::new(buf)) - .map_err(to_alloy_rlp_error) - .map(EnrWrapper::new); - if enr.is_ok() { - // Decode was 
successful, advance buffer - let header = Header::decode(buf)?; - buf.advance(header.payload_length); - } - enr - } -} - /// A [ENRRequest packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#enrrequest-packet-0x05). /// /// This packet is used to request the current version of a node's Ethereum Node Record (ENR). @@ -327,12 +245,12 @@ pub struct EnrRequest { /// /// This packet is used to respond to an ENRRequest packet and includes the requested ENR along with /// the hash of the original request. -#[derive(Clone, Debug, Eq, PartialEq, RlpEncodable)] +#[derive(Clone, Debug, Eq, PartialEq, RlpEncodable, RlpDecodable)] pub struct EnrResponse { /// The hash of the ENRRequest packet being replied to. pub request_hash: B256, /// The ENR (Ethereum Node Record) for the responding node. - pub enr: EnrWrapper, + pub enr: Enr, } // === impl EnrResponse === @@ -342,37 +260,11 @@ impl EnrResponse { /// /// See also pub fn eth_fork_id(&self) -> Option { - let mut maybe_fork_id = self.enr.0.get_raw_rlp(b"eth")?; + let mut maybe_fork_id = self.enr.get_raw_rlp(b"eth")?; EnrForkIdEntry::decode(&mut maybe_fork_id).ok().map(|entry| entry.fork_id) } } -impl Decodable for EnrResponse { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - let b = &mut &**buf; - let rlp_head = Header::decode(b)?; - if !rlp_head.list { - return Err(RlpError::UnexpectedString) - } - // let started_len = b.len(); - let this = Self { - request_hash: alloy_rlp::Decodable::decode(b)?, - enr: EnrWrapper::::decode(b)?, - }; - // TODO: `Decodable` can be derived once we have native alloy_rlp decoding for ENR: - // Skipping the size check here is fine since the `buf` is the UDP datagram - // let consumed = started_len - b.len(); - // if consumed != rlp_head.payload_length { - // return Err(alloy_rlp::Error::ListLengthMismatch { - // expected: rlp_head.payload_length, - // got: consumed, - // }) - // } - *buf = *b; - Ok(this) - } -} - /// Represents a Ping packet. 
/// /// A [Ping packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#ping-packet-0x01). @@ -750,7 +642,6 @@ mod tests { #[test] fn encode_decode_enr_msg() { - use self::EnrWrapper; use alloy_rlp::Decodable; use enr::secp256k1::SecretKey; use std::net::Ipv4Addr; @@ -770,7 +661,7 @@ mod tests { let forkentry = EnrForkIdEntry { fork_id }; forkentry.encode(&mut buf); builder.add_value_rlp("eth", buf.into()); - EnrWrapper::new(builder.build(&key).unwrap()) + builder.build(&key).unwrap() }; let enr_response = EnrResponse { request_hash: rng.gen(), enr }; @@ -789,30 +680,25 @@ mod tests { #[test] fn encode_known_rlp_enr() { - use self::EnrWrapper; use alloy_rlp::Decodable; use enr::{secp256k1::SecretKey, EnrPublicKey}; use std::net::Ipv4Addr; - let valid_record = - hex!("f884b8407098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c01826964827634826970847f00000189736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31388375647082765f" - ); - let signature = - hex!("7098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c" - ); + let valid_record = hex!("f884b8407098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c01826964827634826970847f00000189736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31388375647082765f"); + let signature = hex!("7098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c"); let expected_pubkey = hex!("03ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138"); - let enr = EnrWrapper::::decode(&mut &valid_record[..]).unwrap(); - let pubkey = enr.0.public_key().encode(); + let enr = Enr::::decode(&mut &valid_record[..]).unwrap(); + let pubkey = enr.public_key().encode(); - 
assert_eq!(enr.0.ip4(), Some(Ipv4Addr::new(127, 0, 0, 1))); - assert_eq!(enr.0.id(), Some(String::from("v4"))); - assert_eq!(enr.0.udp4(), Some(DEFAULT_DISCOVERY_PORT)); - assert_eq!(enr.0.tcp4(), None); - assert_eq!(enr.0.signature(), &signature[..]); + assert_eq!(enr.ip4(), Some(Ipv4Addr::new(127, 0, 0, 1))); + assert_eq!(enr.id(), Some(String::from("v4"))); + assert_eq!(enr.udp4(), Some(DEFAULT_DISCOVERY_PORT)); + assert_eq!(enr.tcp4(), None); + assert_eq!(enr.signature(), &signature[..]); assert_eq!(pubkey.to_vec(), expected_pubkey); - assert!(enr.0.verify()); + assert!(enr.verify()); assert_eq!(&alloy_rlp::encode(&enr)[..], &valid_record[..]); @@ -833,19 +719,19 @@ mod tests { hex!("03ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138"); let mut valid_record_buf = valid_record.as_slice(); - let enr = EnrWrapper::::decode(&mut valid_record_buf).unwrap(); - let pubkey = enr.0.public_key().encode(); + let enr = Enr::::decode(&mut valid_record_buf).unwrap(); + let pubkey = enr.public_key().encode(); // Byte array must be consumed after enr has finished decoding assert!(valid_record_buf.is_empty()); - assert_eq!(enr.0.ip4(), Some(Ipv4Addr::new(127, 0, 0, 1))); - assert_eq!(enr.0.id(), Some(String::from("v4"))); - assert_eq!(enr.0.udp4(), Some(DEFAULT_DISCOVERY_PORT)); - assert_eq!(enr.0.tcp4(), None); - assert_eq!(enr.0.signature(), &signature[..]); + assert_eq!(enr.ip4(), Some(Ipv4Addr::new(127, 0, 0, 1))); + assert_eq!(enr.id(), Some(String::from("v4"))); + assert_eq!(enr.udp4(), Some(DEFAULT_DISCOVERY_PORT)); + assert_eq!(enr.tcp4(), None); + assert_eq!(enr.signature(), &signature[..]); assert_eq!(pubkey.to_vec(), expected_pubkey); - assert!(enr.0.verify()); + assert!(enr.verify()); } // test vector from the enr library rlp encoding tests @@ -863,20 +749,23 @@ mod tests { let mut builder = Enr::builder(); builder.ip(ip.into()); builder.tcp4(tcp); - EnrWrapper::new(builder.build(&key).unwrap()) + builder.build(&key).unwrap() }; let mut encoded_bytes 
= &alloy_rlp::encode(&enr)[..]; - let decoded_enr = EnrWrapper::::decode(&mut encoded_bytes).unwrap(); + let decoded_enr = Enr::::decode(&mut encoded_bytes).unwrap(); // Byte array must be consumed after enr has finished decoding assert!(encoded_bytes.is_empty()); assert_eq!(decoded_enr, enr); - assert_eq!(decoded_enr.0.id(), Some("v4".into())); - assert_eq!(decoded_enr.0.ip4(), Some(ip)); - assert_eq!(decoded_enr.0.tcp4(), Some(tcp)); - assert_eq!(decoded_enr.0.public_key().encode(), key.public().encode()); - assert!(decoded_enr.0.verify()); + assert_eq!(decoded_enr.id(), Some("v4".into())); + assert_eq!(decoded_enr.ip4(), Some(ip)); + assert_eq!(decoded_enr.tcp4(), Some(tcp)); + assert_eq!( + decoded_enr.public_key().encode(), + key.public_key(secp256k1::SECP256K1).encode() + ); + assert!(decoded_enr.verify()); } } diff --git a/crates/net/discv5/Cargo.toml b/crates/net/discv5/Cargo.toml index 03b856be9a00f..705ea17a8fbb0 100644 --- a/crates/net/discv5/Cargo.toml +++ b/crates/net/discv5/Cargo.toml @@ -18,9 +18,8 @@ reth-metrics.workspace = true # ethereum alloy-rlp.workspace = true -rlp = "0.5.2" discv5 = { workspace = true, features = ["libp2p"] } -enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } +enr.workspace = true multiaddr = { version = "0.18", default-features = false } libp2p-identity = "0.2" secp256k1.workspace = true diff --git a/crates/net/discv5/src/enr.rs b/crates/net/discv5/src/enr.rs index b810c1dc63e69..088baf18e8ab5 100644 --- a/crates/net/discv5/src/enr.rs +++ b/crates/net/discv5/src/enr.rs @@ -41,30 +41,25 @@ pub struct EnrCombinedKeyWrapper(pub discv5::Enr); impl From> for EnrCombinedKeyWrapper { fn from(value: Enr) -> Self { - let encoded_enr = rlp::encode(&value); - let enr = rlp::decode::(&encoded_enr).unwrap(); - - Self(enr) + let encoded_enr = alloy_rlp::encode(&value); + Self(alloy_rlp::Decodable::decode(&mut &encoded_enr[..]).unwrap()) } } impl From for Enr { fn from(val: EnrCombinedKeyWrapper) -> Self 
{ - let EnrCombinedKeyWrapper(enr) = val; - let encoded_enr = rlp::encode(&enr); - - rlp::decode::>(&encoded_enr).unwrap() + let encoded_enr = alloy_rlp::encode(&val.0); + alloy_rlp::Decodable::decode(&mut &encoded_enr[..]).unwrap() } } #[cfg(test)] mod tests { + use super::*; use alloy_rlp::Encodable; use discv5::enr::{CombinedKey, EnrKey}; use reth_primitives::{Hardfork, NodeRecord, MAINNET}; - use super::*; - #[test] fn discv5_discv4_id_conversion() { let discv5_pk = CombinedKey::generate_secp256k1().public(); diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 7e9fd81b1ba51..de74f3fee9e0a 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -377,8 +377,6 @@ impl Discv5 { pub fn on_discv5_update(&mut self, update: discv5::Event) -> Option { match update { discv5::Event::SocketUpdated(_) | discv5::Event::TalkRequest(_) | - // `EnrAdded` not used in discv5 codebase - discv5::Event::EnrAdded { .. } | // `Discovered` not unique discovered peers discv5::Event::Discovered(_) => None, discv5::Event::NodeInserted { replaced: _, .. 
} => { @@ -404,6 +402,7 @@ impl Discv5 { self.on_discovered_peer(&enr, remote_socket) } + _ => None, } } diff --git a/crates/net/dns/Cargo.toml b/crates/net/dns/Cargo.toml index 003a6cad74443..18d7bf81519b4 100644 --- a/crates/net/dns/Cargo.toml +++ b/crates/net/dns/Cargo.toml @@ -17,9 +17,8 @@ reth-primitives.workspace = true reth-net-common.workspace = true # ethereum -alloy-rlp.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] } -enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } +enr.workspace = true # async/futures tokio = { workspace = true, features = ["io-util", "net", "time"] } @@ -39,8 +38,10 @@ serde = { workspace = true, optional = true } serde_with = { version = "3.3.0", optional = true } [dev-dependencies] +alloy-rlp.workspace = true tokio = { workspace = true, features = ["sync", "rt", "rt-multi-thread"] } reth-tracing.workspace = true +rand.workspace = true [features] default = ["serde"] diff --git a/crates/net/dns/src/lib.rs b/crates/net/dns/src/lib.rs index 6db9c9ee2b2d6..03c72e33016f3 100644 --- a/crates/net/dns/src/lib.rs +++ b/crates/net/dns/src/lib.rs @@ -392,8 +392,6 @@ pub enum DnsDiscoveryEvent { /// Converts an [Enr] into a [NodeRecord] fn convert_enr_node_record(enr: &Enr) -> Option { - use alloy_rlp::Decodable; - let node_record = NodeRecord { address: enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from))?, tcp_port: enr.tcp4().or_else(|| enr.tcp6())?, @@ -402,8 +400,7 @@ fn convert_enr_node_record(enr: &Enr) -> Option } .into_ipv4_mapped(); - let mut maybe_fork_id = enr.get(b"eth")?; - let fork_id = ForkId::decode(&mut maybe_fork_id).ok(); + let fork_id = enr.get_decodable::(b"eth").transpose().ok()?; Some(DnsNodeRecordUpdate { node_record, fork_id, enr: enr.clone() }) } @@ -412,12 +409,63 @@ fn convert_enr_node_record(enr: &Enr) -> Option mod tests { use super::*; use crate::tree::TreeRootEntry; - use alloy_rlp::Encodable; + use 
alloy_rlp::{Decodable, Encodable}; use enr::EnrKey; - use reth_primitives::{Chain, Hardfork, MAINNET}; + use reth_primitives::{Chain, ForkHash, Hardfork, MAINNET}; use secp256k1::rand::thread_rng; use std::{future::poll_fn, net::Ipv4Addr}; + #[test] + fn test_convert_enr_node_record() { + // rig + let secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng()); + let enr = Enr::builder() + .ip("127.0.0.1".parse().unwrap()) + .udp4(9000) + .tcp4(30303) + .add_value(b"eth", &MAINNET.latest_fork_id()) + .build(&secret_key) + .unwrap(); + + // test + let node_record_update = convert_enr_node_record(&enr).unwrap(); + + assert_eq!(node_record_update.node_record.address, "127.0.0.1".parse::().unwrap()); + assert_eq!(node_record_update.node_record.tcp_port, 30303); + assert_eq!(node_record_update.node_record.udp_port, 9000); + assert_eq!(node_record_update.fork_id, Some(MAINNET.latest_fork_id())); + assert_eq!(node_record_update.enr, enr); + } + + #[test] + fn test_decode_and_convert_enr_node_record() { + // rig + + let secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng()); + let enr = Enr::builder() + .ip("127.0.0.1".parse().unwrap()) + .udp4(9000) + .tcp4(30303) + .add_value(b"eth", &MAINNET.latest_fork_id()) + .add_value(b"opstack", &ForkId { hash: ForkHash(rand::random()), next: rand::random() }) + .build(&secret_key) + .unwrap(); + + let mut encoded_enr = vec![]; + enr.encode(&mut encoded_enr); + + // test + let decoded_enr = Enr::decode(&mut &encoded_enr[..]).unwrap(); + + let node_record_update = convert_enr_node_record(&decoded_enr).unwrap(); + + assert_eq!(node_record_update.node_record.address, "127.0.0.1".parse::().unwrap()); + assert_eq!(node_record_update.node_record.tcp_port, 30303); + assert_eq!(node_record_update.node_record.udp_port, 9000); + assert_eq!(node_record_update.fork_id, Some(MAINNET.latest_fork_id())); + assert_eq!(node_record_update.enr, enr); + } + #[tokio::test] async fn test_start_root_sync() { 
reth_tracing::init_test_tracing(); @@ -461,10 +509,8 @@ mod tests { resolver.insert(link.domain.clone(), root.to_string()); let mut builder = Enr::builder(); - let mut buf = Vec::new(); let fork_id = MAINNET.hardfork_fork_id(Hardfork::Frontier).unwrap(); - fork_id.encode(&mut buf); - builder.ip4(Ipv4Addr::LOCALHOST).udp4(30303).tcp4(30303).add_value(b"eth", &buf); + builder.ip4(Ipv4Addr::LOCALHOST).udp4(30303).tcp4(30303).add_value(b"eth", &fork_id); let enr = builder.build(&secret_key).unwrap(); resolver.insert(format!("{}.{}", root.enr_root.clone(), link.domain), enr.to_base64()); diff --git a/crates/net/dns/src/tree.rs b/crates/net/dns/src/tree.rs index 53220f694e52a..614d5f1d23bab 100644 --- a/crates/net/dns/src/tree.rs +++ b/crates/net/dns/src/tree.rs @@ -22,7 +22,7 @@ use crate::error::{ ParseEntryResult, }; use data_encoding::{BASE32_NOPAD, BASE64URL_NOPAD}; -use enr::{Enr, EnrError, EnrKey, EnrKeyUnambiguous, EnrPublicKey}; +use enr::{Enr, EnrKey, EnrKeyUnambiguous, EnrPublicKey, Error as EnrError}; use reth_primitives::{hex, Bytes}; use secp256k1::SecretKey; #[cfg(feature = "serde")] diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index 5dce7fee69024..bd1eb1d328f82 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -399,7 +399,7 @@ impl ECIES { let msg = x ^ self.nonce; let (rec_id, sig) = SECP256K1 .sign_ecdsa_recoverable( - &secp256k1::Message::from_slice(msg.as_slice()).unwrap(), + &secp256k1::Message::from_digest(msg.0), &self.ephemeral_secret_key, ) .serialize_compact(); @@ -473,7 +473,7 @@ impl ECIES { let x = ecdh_x(&self.remote_public_key.unwrap(), &self.secret_key); self.remote_ephemeral_public_key = Some(SECP256K1.recover_ecdsa( - &secp256k1::Message::from_slice((x ^ self.remote_nonce.unwrap()).as_ref()).unwrap(), + &secp256k1::Message::from_digest((x ^ self.remote_nonce.unwrap()).0), &signature, )?); self.ephemeral_shared_secret = @@ -631,7 +631,7 @@ impl ECIES { let tag = 
self.egress_mac.as_mut().unwrap().digest(); out.reserve(ECIES::header_len()); - out.extend_from_slice(&header); + out.extend_from_slice(&header[..]); out.extend_from_slice(tag.as_slice()); } diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index dbf7f5fa2b335..c06ff15182447 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -96,11 +96,7 @@ criterion = { workspace = true, features = ["async_tokio", "html_reports"] } [features] default = ["serde"] serde = ["dep:serde", "dep:humantime-serde", "secp256k1/serde", "enr/serde", "dep:serde_json"] -test-utils = [ - "reth-provider/test-utils", - "dep:tempfile", - "reth-transaction-pool/test-utils", -] +test-utils = ["reth-provider/test-utils", "dep:tempfile", "reth-transaction-pool/test-utils"] geth-tests = [] [[bench]] diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index e3828c913b6be..ab6b44303bc40 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -27,7 +27,7 @@ alloy-trie = { workspace = true, features = ["serde"] } nybbles = { workspace = true, features = ["serde", "rlp"] } alloy-genesis.workspace = true alloy-eips.workspace = true -enr = { workspace = true, features = ["rust-secp256k1"] } +enr.workspace = true # crypto secp256k1 = { workspace = true, features = ["global-context", "recovery", "rand"] } @@ -105,12 +105,10 @@ arbitrary = [ "dep:arbitrary", "dep:proptest", "dep:proptest-derive", - "zstd-codec" + "zstd-codec", ] c-kzg = ["dep:c-kzg", "revm/c-kzg", "revm-primitives/c-kzg", "dep:sha2", "dep:tempfile"] -zstd-codec = [ - "dep:zstd" -] +zstd-codec = ["dep:zstd"] clap = ["dep:clap"] optimism = [ "reth-codecs/optimism", diff --git a/crates/primitives/src/genesis.rs b/crates/primitives/src/genesis.rs index 52b24facbf79f..991b01bd7e653 100644 --- a/crates/primitives/src/genesis.rs +++ b/crates/primitives/src/genesis.rs @@ -13,7 +13,7 @@ mod allocator { use alloy_genesis::GenesisAccount; use secp256k1::{ 
rand::{thread_rng, RngCore}, - KeyPair, Secp256k1, + Keypair, Secp256k1, }; use std::collections::{hash_map::Entry, BTreeMap, HashMap}; @@ -73,9 +73,9 @@ mod allocator { /// Add a funded account to the genesis alloc. /// /// Returns the key pair for the account and the account's address. - pub fn new_funded_account(&mut self, balance: U256) -> (KeyPair, Address) { + pub fn new_funded_account(&mut self, balance: U256) -> (Keypair, Address) { let secp = Secp256k1::new(); - let pair = KeyPair::new(&secp, &mut self.rng); + let pair = Keypair::new(&secp, &mut self.rng); let address = public_key_to_address(pair.public_key()); self.alloc.insert(address, GenesisAccount::default().with_balance(balance)); @@ -90,9 +90,9 @@ mod allocator { &mut self, balance: U256, code: Bytes, - ) -> (KeyPair, Address) { + ) -> (Keypair, Address) { let secp = Secp256k1::new(); - let pair = KeyPair::new(&secp, &mut self.rng); + let pair = Keypair::new(&secp, &mut self.rng); let address = public_key_to_address(pair.public_key()); self.alloc.insert( @@ -110,9 +110,9 @@ mod allocator { &mut self, balance: U256, storage: BTreeMap, - ) -> (KeyPair, Address) { + ) -> (Keypair, Address) { let secp = Secp256k1::new(); - let pair = KeyPair::new(&secp, &mut self.rng); + let pair = Keypair::new(&secp, &mut self.rng); let address = public_key_to_address(pair.public_key()); self.alloc.insert( @@ -130,9 +130,9 @@ mod allocator { &mut self, code: Bytes, storage: BTreeMap, - ) -> (KeyPair, Address) { + ) -> (Keypair, Address) { let secp = Secp256k1::new(); - let pair = KeyPair::new(&secp, &mut self.rng); + let pair = Keypair::new(&secp, &mut self.rng); let address = public_key_to_address(pair.public_key()); self.alloc.insert( @@ -146,9 +146,9 @@ mod allocator { /// Adds an account with code to the genesis alloc. /// /// Returns the key pair for the account and the account's address. 
- pub fn new_account_with_code(&mut self, code: Bytes) -> (KeyPair, Address) { + pub fn new_account_with_code(&mut self, code: Bytes) -> (Keypair, Address) { let secp = Secp256k1::new(); - let pair = KeyPair::new(&secp, &mut self.rng); + let pair = Keypair::new(&secp, &mut self.rng); let address = public_key_to_address(pair.public_key()); self.alloc.insert(address, GenesisAccount::default().with_code(Some(code))); @@ -169,7 +169,7 @@ mod allocator { /// Returns the key pair for the account and the account's address. pub fn add_account(&mut self, account: GenesisAccount) -> Address { let secp = Secp256k1::new(); - let pair = KeyPair::new(&secp, &mut self.rng); + let pair = Keypair::new(&secp, &mut self.rng); let address = public_key_to_address(pair.public_key()); self.alloc.insert(address, account); diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index dae6ab0768405..a4ec978a3fda3 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1755,7 +1755,7 @@ mod tests { use alloy_primitives::{address, b256, bytes}; use alloy_rlp::{Decodable, Encodable, Error as RlpError}; use reth_codecs::Compact; - use secp256k1::{KeyPair, Secp256k1}; + use secp256k1::{Keypair, Secp256k1}; use std::str::FromStr; #[test] @@ -2048,7 +2048,7 @@ mod tests { tx.set_chain_id(chain_id % (u64::MAX / 2 - 36)); } - let key_pair = KeyPair::new(&secp, &mut rng); + let key_pair = Keypair::new(&secp, &mut rng); let signature = sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap(); diff --git a/crates/primitives/src/transaction/util.rs b/crates/primitives/src/transaction/util.rs index 638064c12f103..b4a2db7f6b52e 100644 --- a/crates/primitives/src/transaction/util.rs +++ b/crates/primitives/src/transaction/util.rs @@ -18,7 +18,7 @@ pub(crate) mod secp256k1 { let sig = RecoverableSignature::from_compact(&sig[0..64], RecoveryId::from_i32(sig[64] as i32)?)?; - let public 
= SECP256K1.recover_ecdsa(&Message::from_slice(&msg[..32])?, &sig)?; + let public = SECP256K1.recover_ecdsa(&Message::from_digest(*msg), &sig)?; Ok(public_key_to_address(public)) } @@ -26,7 +26,7 @@ pub(crate) mod secp256k1 { /// Returns the corresponding signature. pub fn sign_message(secret: B256, message: B256) -> Result { let sec = SecretKey::from_slice(secret.as_ref())?; - let s = SECP256K1.sign_ecdsa_recoverable(&Message::from_slice(&message[..])?, &sec); + let s = SECP256K1.sign_ecdsa_recoverable(&Message::from_digest(message.0), &sec); let (rec_id, data) = s.serialize_compact(); let signature = Signature { From adf1d25a891c4b9ad41830e43fd3be55749408a7 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 25 Apr 2024 22:31:01 +0200 Subject: [PATCH 344/700] feat(op): cmd init at block (#7784) Co-authored-by: Oliver Nordbjerg Co-authored-by: Roman Krasiuk --- Cargo.lock | 2 + bin/reth/Cargo.toml | 1 + bin/reth/src/cli/mod.rs | 8 +- bin/reth/src/commands/init_state.rs | 107 ++++++++ bin/reth/src/commands/mod.rs | 1 + crates/node-core/Cargo.toml | 7 +- crates/node-core/src/init.rs | 252 +++++++++++++++++- .../bundle_state_with_receipts.rs | 4 +- .../src/bundle_state/state_changes.rs | 1 + .../src/bundle_state/state_reverts.rs | 26 +- 10 files changed, 391 insertions(+), 18 deletions(-) create mode 100644 bin/reth/src/commands/init_state.rs diff --git a/Cargo.lock b/Cargo.lock index 901d836cd4c1a..edc487bad18cd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6118,6 +6118,7 @@ dependencies = [ "serde_json", "similar-asserts", "tempfile", + "thiserror", "tikv-jemallocator", "tokio", "toml", @@ -6995,6 +6996,7 @@ dependencies = [ "reth-tasks", "reth-tracing", "reth-transaction-pool", + "reth-trie", "secp256k1", "serde", "serde_json", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index ff251546456d0..5e47506db9bfc 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -104,6 +104,7 @@ itertools.workspace = true rayon.workspace = true boyer-moore-magiclen 
= "0.2.16" ahash = "0.8" +thiserror.workspace = true # p2p discv5.workspace = true diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 34fd09456beb0..9c81b0aec17c9 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -6,8 +6,8 @@ use crate::{ LogArgs, }, commands::{ - config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, node, node::NoArgs, p2p, - recover, stage, test_vectors, + config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, init_state, node, node::NoArgs, + p2p, recover, stage, test_vectors, }, version::{LONG_VERSION, SHORT_VERSION}, }; @@ -145,6 +145,7 @@ impl Cli { runner.run_command_until_exit(|ctx| command.execute(ctx, launcher)) } Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::InitState(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Import(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()), @@ -176,6 +177,9 @@ pub enum Commands { /// Initialize the database from a genesis file. #[command(name = "init")] Init(init_cmd::InitCommand), + /// Initialize the database from a state dump file. + #[command(name = "init-state")] + InitState(init_state::InitStateCommand), /// This syncs RLP encoded blocks from a file. #[command(name = "import")] Import(import::ImportCommand), diff --git a/bin/reth/src/commands/init_state.rs b/bin/reth/src/commands/init_state.rs new file mode 100644 index 0000000000000..c05f064b31c5e --- /dev/null +++ b/bin/reth/src/commands/init_state.rs @@ -0,0 +1,107 @@ +//! Command that initializes the node from a genesis file. 
+ +use crate::{ + args::{ + utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, + DatabaseArgs, + }, + dirs::{DataDirPath, MaybePlatformPath}, +}; +use clap::Parser; +use reth_db::{database::Database, init_db}; +use reth_node_core::init::{init_from_state_dump, init_genesis}; +use reth_primitives::{ChainSpec, B256}; +use reth_provider::ProviderFactory; + +use std::{fs::File, io::BufReader, path::PathBuf, sync::Arc}; +use tracing::info; + +/// Initializes the database with the genesis block. +#[derive(Debug, Parser)] +pub struct InitStateCommand { + /// The path to the data dir for all reth files and subdirectories. + /// + /// Defaults to the OS-specific data directory: + /// + /// - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + /// - Windows: `{FOLDERID_RoamingAppData}/reth/` + /// - macOS: `$HOME/Library/Application Support/reth/` + #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)] + datadir: MaybePlatformPath, + + /// The chain this node is running. + /// + /// Possible values are either a built-in chain or the path to a chain specification file. + #[arg( + long, + value_name = "CHAIN_OR_PATH", + long_help = chain_help(), + default_value = SUPPORTED_CHAINS[0], + value_parser = genesis_value_parser + )] + chain: Arc, + + /// JSONL file with state dump. + /// + /// Must contain accounts in following format, additional account fields are ignored. Can + /// also contain { "root": \ } as first line. + /// { + /// "balance": "\", + /// "nonce": \, + /// "code": "\", + /// "storage": { + /// "\": "\", + /// .. + /// }, + /// "address": "\", + /// } + /// + /// Allows init at a non-genesis block. Caution! Blocks must be manually imported up until + /// and including the non-genesis block to init chain at. See 'import' command. 
+ #[arg(long, value_name = "STATE_DUMP_FILE", verbatim_doc_comment, default_value = None)] + state: Option, + + #[command(flatten)] + db: DatabaseArgs, +} + +impl InitStateCommand { + /// Execute the `init` command + pub async fn execute(self) -> eyre::Result<()> { + info!(target: "reth::cli", "reth init starting"); + + // add network name to data dir + let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); + let db_path = data_dir.db_path(); + info!(target: "reth::cli", path = ?db_path, "Opening database"); + let db = Arc::new(init_db(&db_path, self.db.database_args())?); + info!(target: "reth::cli", "Database opened"); + + let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files_path())?; + + info!(target: "reth::cli", "Writing genesis block"); + + let hash = match self.state { + Some(path) => init_at_state(path, provider_factory)?, + None => init_genesis(provider_factory)?, + }; + + info!(target: "reth::cli", hash = ?hash, "Genesis block written"); + Ok(()) + } +} + +/// Initialize chain with state at specific block, from a file with state dump. 
+pub fn init_at_state( + state_dump_path: PathBuf, + factory: ProviderFactory, +) -> eyre::Result { + info!(target: "reth::cli", + path=?state_dump_path, + "Opening state dump"); + + let file = File::open(state_dump_path)?; + let reader = BufReader::new(file); + + init_from_state_dump(reader, factory) +} diff --git a/bin/reth/src/commands/mod.rs b/bin/reth/src/commands/mod.rs index 278531f716176..03d5a8287ed00 100644 --- a/bin/reth/src/commands/mod.rs +++ b/bin/reth/src/commands/mod.rs @@ -7,6 +7,7 @@ pub mod dump_genesis; pub mod import; pub mod init_cmd; +pub mod init_state; pub mod node; pub mod p2p; diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index d6df37f09c331..4bce2908da751 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -32,6 +32,7 @@ reth-network-api.workspace = true reth-evm.workspace = true reth-engine-primitives.workspace = true reth-tasks.workspace = true +reth-trie.workspace = true reth-consensus-common.workspace = true reth-beacon-consensus.workspace = true @@ -71,7 +72,11 @@ hyper.workspace = true tracing.workspace = true # crypto -secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } +secp256k1 = { workspace = true, features = [ + "global-context", + "rand-std", + "recovery", +] } # async futures.workspace = true diff --git a/crates/node-core/src/init.rs b/crates/node-core/src/init.rs index 7f529c2b0b40a..eb513cc4004be 100644 --- a/crates/node-core/src/init.rs +++ b/crates/node-core/src/init.rs @@ -13,14 +13,36 @@ use reth_primitives::{ use reth_provider::{ bundle_state::{BundleStateInit, RevertsInit}, providers::{StaticFileProvider, StaticFileWriter}, - BlockHashReader, BundleStateWithReceipts, ChainSpecProvider, DatabaseProviderRW, HashingWriter, - HistoryWriter, OriginalValuesKnown, ProviderError, ProviderFactory, + BlockHashReader, BlockNumReader, BundleStateWithReceipts, ChainSpecProvider, + DatabaseProviderRW, HashingWriter, HistoryWriter, 
OriginalValuesKnown, ProviderError, + ProviderFactory, }; +use reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress}; +use serde::{Deserialize, Serialize}; use std::{ collections::{BTreeMap, HashMap}, + io::BufRead, + ops::DerefMut, sync::Arc, }; -use tracing::debug; +use tracing::{debug, error, info, trace}; + +/// Default soft limit for number of bytes to read from state dump file, before inserting into +/// database. +/// +/// Default is 1 GB. +pub const DEFAULT_SOFT_LIMIT_BYTE_LEN_ACCOUNTS_CHUNK: usize = 1_000_000_000; + +/// Approximate number of accounts per 1 GB of state dump file. One account is approximately 3.5 KB +/// +/// Approximate is 285 228 accounts. +// +// (14.05 GB OP mainnet state dump at Bedrock block / 4 007 565 accounts in file > 3.5 KB per +// account) +pub const AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP: usize = 285_228; + +/// Soft limit for the number of flushed updates after which to log progress summary. +const SOFT_LIMIT_COUNT_FLUSHED_UPDATES: usize = 1_000_000; /// Database initialization error type. #[derive(Debug, thiserror::Error, PartialEq, Eq, Clone)] @@ -34,10 +56,19 @@ pub enum InitDatabaseError { /// Actual genesis hash. database_hash: B256, }, - /// Provider error. #[error(transparent)] Provider(#[from] ProviderError), + /// Computed state root doesn't match state root in state dump file. + #[error( + "state root mismatch, state dump: {expected_state_root}, computed: {computed_state_root}" + )] + SateRootMismatch { + /// Expected state root. + expected_state_root: B256, + /// Actual state root. + computed_state_root: B256, + }, } impl From for InitDatabaseError { @@ -102,6 +133,16 @@ pub fn insert_genesis_state<'a, 'b, DB: Database>( tx: &::TXMut, capacity: usize, alloc: impl Iterator, +) -> ProviderResult<()> { + insert_state::(tx, capacity, alloc, 0) +} + +/// Inserts state at given block into database. 
+pub fn insert_state<'a, 'b, DB: Database>( + tx: &::TXMut, + capacity: usize, + alloc: impl Iterator, + block: u64, ) -> ProviderResult<()> { let mut state_init: BundleStateInit = HashMap::with_capacity(capacity); let mut reverts_init = HashMap::with_capacity(capacity); @@ -149,18 +190,20 @@ pub fn insert_genesis_state<'a, 'b, DB: Database>( ), ); } - let all_reverts_init: RevertsInit = HashMap::from([(0, reverts_init)]); + let all_reverts_init: RevertsInit = HashMap::from([(block, reverts_init)]); let bundle = BundleStateWithReceipts::new_init( state_init, all_reverts_init, contracts.into_iter().collect(), Receipts::new(), - 0, + block, ); bundle.write_to_storage(tx, None, OriginalValuesKnown::Yes)?; + trace!(target: "reth::cli", "Inserted state"); + Ok(()) } @@ -174,6 +217,8 @@ pub fn insert_genesis_hashes<'a, 'b, DB: Database>( alloc.clone().map(|(addr, account)| (*addr, Some(Account::from_genesis_account(account)))); provider.insert_account_for_hashing(alloc_accounts)?; + trace!(target: "reth::cli", "Inserted account hashes"); + let alloc_storage = alloc.filter_map(|(addr, account)| { // only return Some if there is storage account.storage.as_ref().map(|storage| { @@ -188,6 +233,8 @@ pub fn insert_genesis_hashes<'a, 'b, DB: Database>( }); provider.insert_storage_for_hashing(alloc_storage)?; + trace!(target: "reth::cli", "Inserted storage hashes"); + Ok(()) } @@ -195,17 +242,30 @@ pub fn insert_genesis_hashes<'a, 'b, DB: Database>( pub fn insert_genesis_history<'a, 'b, DB: Database>( provider: &DatabaseProviderRW, alloc: impl Iterator + Clone, +) -> ProviderResult<()> { + insert_history::(provider, alloc, 0) +} + +/// Inserts history indices for genesis accounts and storage. 
+pub fn insert_history<'a, 'b, DB: Database>( + provider: &DatabaseProviderRW, + alloc: impl Iterator + Clone, + block: u64, ) -> ProviderResult<()> { let account_transitions = - alloc.clone().map(|(addr, _)| (*addr, vec![0])).collect::>(); + alloc.clone().map(|(addr, _)| (*addr, vec![block])).collect::>(); provider.insert_account_history_index(account_transitions)?; + trace!(target: "reth::cli", "Inserted account history"); + let storage_transitions = alloc .filter_map(|(addr, account)| account.storage.as_ref().map(|storage| (addr, storage))) - .flat_map(|(addr, storage)| storage.iter().map(|(key, _)| ((*addr, *key), vec![0]))) + .flat_map(|(addr, storage)| storage.iter().map(|(key, _)| ((*addr, *key), vec![block]))) .collect::>(); provider.insert_storage_history_index(storage_transitions)?; + trace!(target: "reth::cli", "Inserted storage history"); + Ok(()) } @@ -233,6 +293,182 @@ pub fn insert_genesis_header( Ok(()) } +/// Initialize chain with state at specific block, from reader of state dump. 
+pub fn init_from_state_dump( + mut reader: impl BufRead, + factory: ProviderFactory, +) -> eyre::Result { + let block = factory.last_block_number()?; + let hash = factory.block_hash(block)?.unwrap(); + + debug!(target: "reth::cli", + block, + chain=%factory.chain_spec().chain, + "Initializing state at block" + ); + + let mut total_inserted_accounts = 0; + let mut accounts = Vec::with_capacity(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP); + let mut chunk_total_byte_len = 0; + let mut line = String::new(); + + // first line can be state root, then it can be used for verifying against computed state root + reader.read_line(&mut line)?; + let expected_state_root = serde_json::from_str::(&line)?.root; + + trace!(target: "reth::cli", + root=%expected_state_root, + "Read state root from file" + ); + + line.clear(); + + // remaining lines are accounts + let mut provider_rw = factory.provider_rw()?; + while let Ok(n) = reader.read_line(&mut line) { + chunk_total_byte_len += n; + if DEFAULT_SOFT_LIMIT_BYTE_LEN_ACCOUNTS_CHUNK <= chunk_total_byte_len || n == 0 { + // acc + total_inserted_accounts += accounts.len(); + + info!(target: "reth::cli", + chunk_total_byte_len, + parsed_new_accounts=accounts.len(), + total_inserted_accounts, + "Writing accounts to db" + ); + + // reset + chunk_total_byte_len = 0; + + // use transaction to insert genesis header + insert_genesis_hashes( + &provider_rw, + accounts.iter().map(|(address, account)| (address, account)), + )?; + insert_history( + &provider_rw, + accounts.iter().map(|(address, account)| (address, account)), + block, + )?; + + // block is already written to static files + let tx = provider_rw.deref_mut().tx_mut(); + insert_state::( + tx, + accounts.len(), + accounts.iter().map(|(address, account)| (address, account)), + block, + )?; + + accounts.clear(); + } + + if n == 0 { + break; + } + + let GenesisAccountWithAddress { genesis_account, address } = serde_json::from_str(&line)?; + accounts.push((address, genesis_account)); + + 
line.clear(); + } + + // compute and compare state root. this advances the stage checkpoints. + let computed_state_root = compute_state_root(&provider_rw)?; + if computed_state_root != expected_state_root { + error!(target: "reth::cli", + ?computed_state_root, + ?expected_state_root, + "Computed state root does not match state root in state dump" + ); + + Err(InitDatabaseError::SateRootMismatch { expected_state_root, computed_state_root })? + } else { + info!(target: "reth::cli", + ?computed_state_root, + "Computed state root matches state root in state dump" + ); + } + + provider_rw.commit()?; + + Ok(hash) +} + +/// Computes the state root (from scratch) based on the accounts and storages present in the +/// database. +fn compute_state_root(provider: &DatabaseProviderRW) -> eyre::Result { + trace!(target: "reth::cli", "Computing state root"); + + let tx = provider.tx_ref(); + let mut intermediate_state: Option = None; + let mut total_flushed_updates = 0; + + loop { + match StateRootComputer::from_tx(tx) + .with_intermediate_state(intermediate_state) + .root_with_progress()? + { + StateRootProgress::Progress(state, _, updates) => { + let updates_len = updates.len(); + + trace!(target: "reth::cli", + last_account_key = %state.last_account_key, + updates_len, + total_flushed_updates, + "Flushing trie updates" + ); + + intermediate_state = Some(*state); + updates.flush(tx)?; + + total_flushed_updates += updates_len; + + if total_flushed_updates % SOFT_LIMIT_COUNT_FLUSHED_UPDATES == 0 { + info!(target: "reth::cli", + total_flushed_updates, + "Flushing trie updates" + ); + } + } + StateRootProgress::Complete(root, _, updates) => { + let updates_len = updates.len(); + + updates.flush(tx)?; + + total_flushed_updates += updates_len; + + trace!(target: "reth::cli", + %root, + updates_len = updates_len, + total_flushed_updates, + "State root has been computed" + ); + + return Ok(root) + } + } + } +} + +/// Type to deserialize state root from state dump file. 
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +struct StateRoot { + root: B256, +} + +/// An account as in the state dump file. This contains a [`GenesisAccount`] and the account's +/// address. +#[derive(Debug, Serialize, Deserialize)] +struct GenesisAccountWithAddress { + /// The account's balance, nonce, code, and storage. + #[serde(flatten)] + genesis_account: GenesisAccount, + /// The account's address. + address: Address, +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index 1153464f76fab..baf5fa5977318 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -297,8 +297,8 @@ impl BundleStateWithReceipts { /// files if `static_file_producer` is `Some`. It should be none if there is any kind of /// pruning/filtering over the receipts. /// - /// `omit_changed_check` should be set to true of bundle has some of it data - /// detached, This would make some original values not known. + /// `omit_changed_check` should be set to true if bundle has some of its data detached. This + /// would make some original values not known. 
pub fn write_to_storage( self, tx: &TX, diff --git a/crates/storage/provider/src/bundle_state/state_changes.rs b/crates/storage/provider/src/bundle_state/state_changes.rs index a62606dedebc3..7f7bde79e3f8b 100644 --- a/crates/storage/provider/src/bundle_state/state_changes.rs +++ b/crates/storage/provider/src/bundle_state/state_changes.rs @@ -77,6 +77,7 @@ impl StateChanges { } } } + Ok(()) } } diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index 63c5595c58d8d..e61572cf5b614 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -1,6 +1,6 @@ use rayon::slice::ParallelSliceMut; use reth_db::{ - cursor::{DbCursorRO, DbDupCursorRO, DbDupCursorRW}, + cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, models::{AccountBeforeTx, BlockNumberAddress}, tables, transaction::{DbTx, DbTxMut}, @@ -74,15 +74,31 @@ impl StateReverts { // Write account changes tracing::trace!(target: "provider::reverts", "Writing account changes"); let mut account_changeset_cursor = tx.cursor_dup_write::()?; + + // append entries if key is new + let should_append_accounts = + account_changeset_cursor.last()?.map_or(true, |(block_number, _)| { + block_number < first_block || block_number == first_block && block_number == 0 + }); for (block_index, mut account_block_reverts) in self.0.accounts.into_iter().enumerate() { let block_number = first_block + block_index as BlockNumber; // Sort accounts by address. 
account_block_reverts.par_sort_by_key(|a| a.0); + for (address, info) in account_block_reverts { - account_changeset_cursor.append_dup( - block_number, - AccountBeforeTx { address, info: info.map(into_reth_acc) }, - )?; + if should_append_accounts { + account_changeset_cursor.append_dup( + block_number, + AccountBeforeTx { address, info: info.map(into_reth_acc) }, + )?; + } else { + // upsert on dupsort tables will append to subkey. see implementation of + // DbCursorRW::upsert for reth_db::implementation::mdbx::cursor::Cursor + account_changeset_cursor.upsert( + block_number, + AccountBeforeTx { address, info: info.map(into_reth_acc) }, + )?; + } } } From 7efdbf4924319ff3dc3148f6dd44e5269e338c9f Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 25 Apr 2024 23:12:43 +0200 Subject: [PATCH 345/700] fix: account for legacy tx in `try_from` tx to `TransactionSignedEcRecovered` (#7882) --- crates/primitives/src/transaction/mod.rs | 28 ++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index a4ec978a3fda3..76d9b01978faa 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1726,12 +1726,36 @@ impl TryFrom for TransactionSignedEcRecovered { fn try_from(tx: reth_rpc_types::Transaction) -> Result { let signature = tx.signature.ok_or(ConversionError::MissingSignature)?; + let transaction: Transaction = tx.try_into()?; + TransactionSigned::from_transaction_and_signature( - tx.try_into()?, + transaction.clone(), Signature { r: signature.r, s: signature.s, - odd_y_parity: signature.y_parity.ok_or(ConversionError::MissingYParity)?.0, + odd_y_parity: if let Some(y_parity) = signature.y_parity { + y_parity.0 + } else { + match transaction.tx_type() { + // If the transaction type is Legacy, adjust the v component of the + // signature according to the Ethereum 
specification + TxType::Legacy => { + // Calculate the new v value based on the EIP-155 formula: + // v = {0,1} + CHAIN_ID * 2 + 35 + !(signature.v - + U256::from(if let Some(chain_id) = transaction.chain_id() { + // If CHAIN_ID is available, calculate the new v value + // accordingly + chain_id.saturating_mul(2).saturating_add(35) + } else { + // If CHAIN_ID is not available, set v = {0,1} + 27 + 27 + })) + .is_zero() + } + _ => !signature.v.is_zero(), + } + }, }, ) .try_into_ecrecovered() From 57d09e84beaa4dafa505461db733d672163e4d99 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Fri, 26 Apr 2024 01:10:21 +0200 Subject: [PATCH 346/700] deps: remove `webpki-roots` from `reqwest` deps (#7887) --- Cargo.lock | 10 ++-------- crates/optimism/node/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index edc487bad18cd..027cd7e853cba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4052,7 +4052,7 @@ dependencies = [ "tokio-util", "tracing", "url", - "webpki-roots 0.26.1", + "webpki-roots", ] [[package]] @@ -5982,6 +5982,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "rustls 0.21.11", + "rustls-native-certs 0.6.3", "rustls-pemfile 1.0.4", "serde", "serde_json", @@ -5997,7 +5998,6 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 0.25.4", "winreg 0.50.0", ] @@ -9845,12 +9845,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki-roots" -version = "0.25.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" - [[package]] name = "webpki-roots" version = "0.26.1" diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 36bfe96b55134..29a99a961b2c0 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -39,7 +39,7 @@ hyper.workspace = true http.workspace = true http-body.workspace = true reqwest = { version 
= "0.11", default-features = false, features = [ - "rustls-tls", + "rustls-tls-native-roots", ] } tracing.workspace = true From 6425064d07cb440b4e8b47e351b0d10614bb05ff Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 26 Apr 2024 11:24:26 +0200 Subject: [PATCH 347/700] fix: use enrforkid in dns (#7900) Co-authored-by: Emilia Hane --- crates/ethereum-forks/src/forkid.rs | 26 ++++++++++++++++++++++++++ crates/ethereum-forks/src/lib.rs | 4 +++- crates/net/discv4/src/lib.rs | 23 +---------------------- crates/net/discv4/src/proto.rs | 6 +++--- crates/net/dns/src/lib.rs | 15 ++++++++++----- crates/net/network/src/discovery.rs | 4 ++-- 6 files changed, 45 insertions(+), 33 deletions(-) diff --git a/crates/ethereum-forks/src/forkid.rs b/crates/ethereum-forks/src/forkid.rs index bb163c86e71ff..3be3e3ab84d99 100644 --- a/crates/ethereum-forks/src/forkid.rs +++ b/crates/ethereum-forks/src/forkid.rs @@ -115,6 +115,32 @@ pub struct ForkId { pub next: u64, } +/// Represents a forward-compatible ENR entry for including the forkid in a node record via +/// EIP-868. Forward compatibility is achieved by allowing trailing fields. +/// +/// See: +/// +/// +/// for how geth implements ForkId values and forward compatibility. +#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] +#[rlp(trailing)] +pub struct EnrForkIdEntry { + /// The inner forkid + pub fork_id: ForkId, +} + +impl From for EnrForkIdEntry { + fn from(fork_id: ForkId) -> Self { + Self { fork_id } + } +} + +impl From for ForkId { + fn from(entry: EnrForkIdEntry) -> Self { + entry.fork_id + } +} + /// Reason for rejecting provided `ForkId`. 
#[derive(Clone, Copy, Debug, Error, PartialEq, Eq, Hash)] pub enum ValidationError { diff --git a/crates/ethereum-forks/src/lib.rs b/crates/ethereum-forks/src/lib.rs index e781fe3a5694b..6dbec7c38d7c4 100644 --- a/crates/ethereum-forks/src/lib.rs +++ b/crates/ethereum-forks/src/lib.rs @@ -20,7 +20,9 @@ mod forkid; mod hardfork; mod head; -pub use forkid::{ForkFilter, ForkFilterKey, ForkHash, ForkId, ForkTransition, ValidationError}; +pub use forkid::{ + EnrForkIdEntry, ForkFilter, ForkFilterKey, ForkHash, ForkId, ForkTransition, ValidationError, +}; pub use hardfork::Hardfork; pub use head::Head; diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 3ac6bfa8cf871..071b81df94b70 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -28,7 +28,6 @@ use crate::{ error::{DecodePacketError, Discv4Error}, proto::{FindNode, Message, Neighbours, Packet, Ping, Pong}, }; -use alloy_rlp::{RlpDecodable, RlpEncodable}; use discv5::{ kbucket, kbucket::{ @@ -2174,33 +2173,13 @@ pub enum DiscoveryUpdate { Batch(Vec), } -/// Represents a forward-compatible ENR entry for including the forkid in a node record via -/// EIP-868. Forward compatibility is achieved by allowing trailing fields. -/// -/// See: -/// -/// -/// for how geth implements ForkId values and forward compatibility. 
-#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] -#[rlp(trailing)] -pub struct EnrForkIdEntry { - /// The inner forkid - pub fork_id: ForkId, -} - -impl From for EnrForkIdEntry { - fn from(fork_id: ForkId) -> Self { - Self { fork_id } - } -} - #[cfg(test)] mod tests { use super::*; use crate::test_utils::{create_discv4, create_discv4_with_config, rng_endpoint, rng_record}; use alloy_rlp::{Decodable, Encodable}; use rand::{thread_rng, Rng}; - use reth_primitives::{hex, mainnet_nodes, ForkHash}; + use reth_primitives::{hex, mainnet_nodes, EnrForkIdEntry, ForkHash}; use std::future::poll_fn; #[tokio::test] diff --git a/crates/net/discv4/src/proto.rs b/crates/net/discv4/src/proto.rs index bdca3bfb4de82..059ecc5bb73e0 100644 --- a/crates/net/discv4/src/proto.rs +++ b/crates/net/discv4/src/proto.rs @@ -1,11 +1,11 @@ //! Discovery v4 protocol implementation. -use crate::{error::DecodePacketError, EnrForkIdEntry, PeerId, MAX_PACKET_SIZE, MIN_PACKET_SIZE}; +use crate::{error::DecodePacketError, PeerId, MAX_PACKET_SIZE, MIN_PACKET_SIZE}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header, RlpDecodable, RlpEncodable}; use enr::Enr; use reth_primitives::{ bytes::{Buf, BufMut, Bytes, BytesMut}, - keccak256, pk2id, ForkId, NodeRecord, B256, + keccak256, pk2id, EnrForkIdEntry, ForkId, NodeRecord, B256, }; use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, @@ -261,7 +261,7 @@ impl EnrResponse { /// See also pub fn eth_fork_id(&self) -> Option { let mut maybe_fork_id = self.enr.get_raw_rlp(b"eth")?; - EnrForkIdEntry::decode(&mut maybe_fork_id).ok().map(|entry| entry.fork_id) + EnrForkIdEntry::decode(&mut maybe_fork_id).ok().map(Into::into) } } diff --git a/crates/net/dns/src/lib.rs b/crates/net/dns/src/lib.rs index 03c72e33016f3..b72a45b31fdca 100644 --- a/crates/net/dns/src/lib.rs +++ b/crates/net/dns/src/lib.rs @@ -22,7 +22,7 @@ use crate::{ pub use config::DnsDiscoveryConfig; use enr::Enr; use error::ParseDnsEntryError; -use 
reth_primitives::{pk2id, ForkId, NodeRecord}; +use reth_primitives::{pk2id, EnrForkIdEntry, ForkId, NodeRecord}; use schnellru::{ByLength, LruMap}; use secp256k1::SecretKey; use std::{ @@ -400,7 +400,8 @@ fn convert_enr_node_record(enr: &Enr) -> Option } .into_ipv4_mapped(); - let fork_id = enr.get_decodable::(b"eth").transpose().ok()?; + let fork_id = + enr.get_decodable::(b"eth").transpose().ok().flatten().map(Into::into); Some(DnsNodeRecordUpdate { node_record, fork_id, enr: enr.clone() }) } @@ -423,7 +424,7 @@ mod tests { .ip("127.0.0.1".parse().unwrap()) .udp4(9000) .tcp4(30303) - .add_value(b"eth", &MAINNET.latest_fork_id()) + .add_value(b"eth", &EnrForkIdEntry::from(MAINNET.latest_fork_id())) .build(&secret_key) .unwrap(); @@ -446,7 +447,7 @@ mod tests { .ip("127.0.0.1".parse().unwrap()) .udp4(9000) .tcp4(30303) - .add_value(b"eth", &MAINNET.latest_fork_id()) + .add_value(b"eth", &EnrForkIdEntry::from(MAINNET.latest_fork_id())) .add_value(b"opstack", &ForkId { hash: ForkHash(rand::random()), next: rand::random() }) .build(&secret_key) .unwrap(); @@ -510,7 +511,11 @@ mod tests { let mut builder = Enr::builder(); let fork_id = MAINNET.hardfork_fork_id(Hardfork::Frontier).unwrap(); - builder.ip4(Ipv4Addr::LOCALHOST).udp4(30303).tcp4(30303).add_value(b"eth", &fork_id); + builder + .ip4(Ipv4Addr::LOCALHOST) + .udp4(30303) + .tcp4(30303) + .add_value(b"eth", &EnrForkIdEntry::from(fork_id)); let enr = builder.build(&secret_key).unwrap(); resolver.insert(format!("{}.{}", root.enr_root.clone(), link.domain), enr.to_base64()); diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index b7a1131b4776f..d95f2f9575b73 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -7,12 +7,12 @@ use crate::{ }; use enr::Enr; use futures::StreamExt; -use reth_discv4::{DiscoveryUpdate, Discv4, Discv4Config, EnrForkIdEntry}; +use reth_discv4::{DiscoveryUpdate, Discv4, Discv4Config}; use 
reth_discv5::{DiscoveredPeer, Discv5}; use reth_dns_discovery::{ DnsDiscoveryConfig, DnsDiscoveryHandle, DnsDiscoveryService, DnsNodeRecordUpdate, DnsResolver, }; -use reth_primitives::{ForkId, NodeRecord, PeerId}; +use reth_primitives::{EnrForkIdEntry, ForkId, NodeRecord, PeerId}; use secp256k1::SecretKey; use std::{ collections::VecDeque, From fd46df069ee5c9478a9cc689320e0aeafa9a529f Mon Sep 17 00:00:00 2001 From: Panagiotis Ganelis <50522617+PanGan21@users.noreply.github.com> Date: Fri, 26 Apr 2024 12:29:43 +0300 Subject: [PATCH 348/700] feat: move db-access example into a separate module (#7902) --- Cargo.lock | 11 +++++++++++ Cargo.toml | 1 + examples/Cargo.toml | 4 ---- examples/README.md | 2 +- examples/db-access/Cargo.toml | 16 ++++++++++++++++ examples/{db-access.rs => db-access/src/main.rs} | 0 6 files changed, 29 insertions(+), 5 deletions(-) create mode 100644 examples/db-access/Cargo.toml rename examples/{db-access.rs => db-access/src/main.rs} (100%) diff --git a/Cargo.lock b/Cargo.lock index 027cd7e853cba..b67a7935ce464 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2204,6 +2204,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "db-access" +version = "0.0.0" +dependencies = [ + "eyre", + "reth-db", + "reth-primitives", + "reth-provider", + "reth-rpc-types", +] + [[package]] name = "debug-helper" version = "0.3.13" diff --git a/Cargo.toml b/Cargo.toml index 954fd85d88a31..cee449b22eb1c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,6 +87,7 @@ members = [ "examples/custom-inspector/", "examples/exex/minimal/", "examples/exex/op-bridge/", + "examples/db-access", "testing/ef-tests/", ] default-members = ["bin/reth"] diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 2379e9a0fe5b0..02c5717864f14 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -25,10 +25,6 @@ futures.workspace = true async-trait.workspace = true tokio.workspace = true -[[example]] -name = "db-access" -path = "db-access.rs" - [[example]] name = "network" 
path = "network.rs" \ No newline at end of file diff --git a/examples/README.md b/examples/README.md index dcec15d358dcc..574efe9618556 100644 --- a/examples/README.md +++ b/examples/README.md @@ -38,7 +38,7 @@ to make a PR! | Example | Description | | --------------------------- | --------------------------------------------------------------- | -| [DB access](./db-access.rs) | Illustrates how to access Reth's database in a separate process | +| [DB access](./db-access) | Illustrates how to access Reth's database in a separate process | ## Network diff --git a/examples/db-access/Cargo.toml b/examples/db-access/Cargo.toml new file mode 100644 index 0000000000000..e447493c2783a --- /dev/null +++ b/examples/db-access/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "db-access" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + + +[dependencies] +reth-db.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true +reth-rpc-types.workspace = true + + +eyre.workspace = true diff --git a/examples/db-access.rs b/examples/db-access/src/main.rs similarity index 100% rename from examples/db-access.rs rename to examples/db-access/src/main.rs From 4278bc24ca1e1f0d26ab78a1bd0ecf93d1a4e5a2 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 26 Apr 2024 11:49:35 +0200 Subject: [PATCH 349/700] Bump `alloy-dyn-abi` and `alloy-sol-types` (#7903) --- Cargo.lock | 191 +++++++++++++++++++++++++++++------------------------ Cargo.toml | 4 +- 2 files changed, 105 insertions(+), 90 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b67a7935ce464..863fada9c68df 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -118,9 +118,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-chains" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"40646aa7f01e396139cf0d6c3a7475eeb8094a0f41d8199f10860c8aef09d2f1" +checksum = "fe6c2674230e94ea98767550b02853bf7024b46f784827be95acfc5f5f1a445f" dependencies = [ "alloy-rlp", "arbitrary", @@ -147,9 +147,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "872f239c15befa27cc4f0d3d82a70b3365c2d0202562bf906eb93b299fa31882" +checksum = "22ab339ca7b4ea9115f0578c941abc80a171edf8e5eadd01e6c4237b68db8083" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -194,9 +194,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83a35ddfd27576474322a5869e4c123e5f3e7b2177297c18e4e82ea501cb125b" +checksum = "44294729c145cf7ae65feab544b5b81fb2bb7e2fd060214842eb3989a1e9d882" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -448,9 +448,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "452d929748ac948a10481fff4123affead32c553cf362841c5103dd508bdfc16" +checksum = "bef9a94a27345fb31e3fcb5f5e9f592bb4847493b07fa1e47dd9fde2222f2e28" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", @@ -467,9 +467,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df64e094f6d2099339f9e82b5b38440b159757b6920878f28316243f8166c8d1" +checksum = "c31fe73cd259527e24dc2dbfe64bc95e5ddfcd2b2731f670a11ff72b2be2c25b" dependencies = [ "alloy-json-abi", "const-hex", @@ -484,18 +484,18 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"715f4d09a330cc181fc7c361b5c5c2766408fa59a0bac60349dcb7baabd404cc" +checksum = "8c8d6e74e4feeaa2bcfdecfd3da247ab53c67bd654ba1907270c32e02b142331" dependencies = [ "winnow 0.6.6", ] [[package]] name = "alloy-sol-types" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43bc2d6dfc2a19fd56644494479510f98b1ee929e04cf0d4aa45e98baa3e545b" +checksum = "afaffed78bfb17526375754931e045f96018aa810844b29c7aef823266dd4b4b" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -814,9 +814,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07dbbf24db18d609b1462965249abdf49129ccad073ec257da372adc83259c60" +checksum = "4e9eabd7a98fe442131a17c316bd9349c43695e49e730c3c8e12cfb5f4da2693" dependencies = [ "brotli", "flate2", @@ -927,9 +927,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", "cc", @@ -1269,9 +1269,9 @@ dependencies = [ [[package]] name = "brotli" -version = "4.0.0" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "125740193d7fee5cc63ab9e16c2fdc4e07c74ba755cc53b327d6ea029e9fc569" +checksum = "19483b140a7ac7174d34b5a581b406c64f84da5409d3e09cf4fff604f9270e67" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1280,9 +1280,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "3.0.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65622a320492e09b5e0ac436b14c54ff68199bac392d0e89a6832c4518eea525" +checksum = "e6221fe77a248b9117d431ad93761222e1cf8ff282d9d1d5d9f53d6299a1cf76" 
dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1853,7 +1853,7 @@ dependencies = [ "crossterm_winapi", "libc", "mio", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "signal-hook", "signal-hook-mio", "winapi", @@ -2175,7 +2175,7 @@ dependencies = [ "hashbrown 0.14.3", "lock_api", "once_cell", - "parking_lot_core 0.9.9", + "parking_lot_core 0.9.10", ] [[package]] @@ -2862,9 +2862,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f" +checksum = "38793c55593b33412e3ae40c2c9781ffaa6f438f6f8c10f24e71846fbd7ae01e" [[package]] name = "findshlibs" @@ -4008,9 +4008,9 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "685a7d121ee3f65ae4fddd72b25a04bb36b6af81bc0828f7d5434c0fe60fa3a2" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" dependencies = [ "libc", ] @@ -4079,7 +4079,7 @@ dependencies = [ "futures-util", "hyper 0.14.28", "jsonrpsee-types", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "rand 0.8.5", "rustc-hash", @@ -4352,7 +4352,7 @@ dependencies = [ "multihash", "multistream-select", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "quick-protobuf", "rand 0.8.5", @@ -4407,9 +4407,9 @@ dependencies = [ [[package]] name = "libproc" -version = "0.14.6" +version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eb6497078a4c9c2aca63df56d8dce6eb4381d53a960f781a3a748f7ea97436d" +checksum = "ae9ea4b75e1a81675429dafe43441df1caea70081e82246a8cccf514884a88bb" dependencies = [ "bindgen", "errno", @@ -4514,9 +4514,9 @@ checksum = "f9d642685b028806386b2b6e75685faadd3eb65a85fff7df711ce18446a422da" 
[[package]] name = "lock_api" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -5219,12 +5219,12 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" dependencies = [ "lock_api", - "parking_lot_core 0.9.9", + "parking_lot_core 0.9.10", ] [[package]] @@ -5243,15 +5243,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.4.1", + "redox_syscall 0.5.1", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.5", ] [[package]] @@ -5502,7 +5502,7 @@ dependencies = [ "log", "nix", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "smallvec", "symbolic-demangle", "tempfile", @@ -5897,11 +5897,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", ] [[package]] @@ -6237,7 +6237,7 @@ dependencies = [ "linked_hash_set", "lru", "metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "reth-consensus", "reth-db", 
"reth-interfaces", @@ -6372,7 +6372,7 @@ dependencies = [ "discv5", "enr", "generic-array", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand 0.8.5", "reth-net-common", "reth-net-nat", @@ -6417,7 +6417,7 @@ dependencies = [ "data-encoding", "enr", "linked_hash_set", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand 0.8.5", "reth-net-common", "reth-primitives", @@ -6706,7 +6706,7 @@ dependencies = [ "auto_impl", "clap", "futures", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand 0.8.5", "reth-consensus", "reth-eth-wire-types", @@ -6751,7 +6751,7 @@ dependencies = [ "indexmap 2.2.6", "libc", "libffi", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pprof", "rand 0.8.5", "rand_xorshift", @@ -6838,7 +6838,7 @@ dependencies = [ "itertools 0.12.1", "linked_hash_set", "metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "pprof", "rand 0.8.5", @@ -7081,7 +7081,7 @@ dependencies = [ "http-body 0.4.6", "hyper 0.14.28", "jsonrpsee", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "reqwest 0.11.27", "reth", "reth-basic-payload-builder", @@ -7227,7 +7227,7 @@ dependencies = [ "dashmap", "itertools 0.12.1", "metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "rand 0.8.5", "rayon", @@ -7306,7 +7306,7 @@ dependencies = [ "jsonrpsee", "jsonwebtoken", "metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "rand 0.8.5", "reth-consensus-common", @@ -7536,7 +7536,7 @@ version = "0.2.0-beta.6" dependencies = [ "assert_matches", "clap", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rayon", "reth-db", "reth-interfaces", @@ -7602,7 +7602,7 @@ dependencies = [ "futures-util", "itertools 0.12.1", "metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "paste", "pprof", "proptest", @@ -7956,9 +7956,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.32" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ "bitflags 2.5.0", "errno", @@ -7988,7 +7988,7 @@ dependencies = [ "log", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.2", + "rustls-webpki 0.102.3", "subtle", "zeroize", ] @@ -8039,9 +8039,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" +checksum = "beb461507cee2c2ff151784c52762cf4d9ff6a61f3e80968600ed24fa837fa54" [[package]] name = "rustls-webpki" @@ -8055,9 +8055,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.2" +version = "0.102.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +checksum = "f3bce581c0dd41bce533ce695a1437fa16a7ab5ac3ccfa99fe1a620a7885eabf" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -8114,6 +8114,15 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "scc" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec96560eea317a9cc4e0bb1f6a2c93c09a19b8c4fc5cb3fcc0ec1c094cd783e2" +dependencies = [ + "sdd", +] + [[package]] name = "schannel" version = "0.1.23" @@ -8150,6 +8159,12 @@ dependencies = [ "untrusted 0.9.0", ] +[[package]] +name = "sdd" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b84345e4c9bd703274a082fb80caaa99b7612be48dfaa1dd9266577ec412309d" + [[package]] name = "sec1" version = "0.7.3" @@ -8315,11 +8330,11 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.7.0" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee80b0e361bbf88fd2f6e242ccd19cfda072cb0faa6ae694ecee08199938569a" 
+checksum = "2c85f8e96d1d6857f13768fcbd895fcb06225510022a2774ed8b5150581847b0" dependencies = [ - "base64 0.21.7", + "base64 0.22.0", "chrono", "hex", "indexmap 1.9.3", @@ -8333,9 +8348,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.7.0" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6561dc161a9224638a31d876ccdfefbc1df91d3f3a8342eddb35f055d48c7655" +checksum = "c8b3a576c4eb2924262d5951a3b737ccaf16c931e39a2810c36f9a7e25575557" dependencies = [ "darling 0.20.8", "proc-macro2", @@ -8345,23 +8360,23 @@ dependencies = [ [[package]] name = "serial_test" -version = "3.0.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "953ad9342b3aaca7cb43c45c097dd008d4907070394bd0751a0aa8817e5a018d" +checksum = "adb86f9315df5df6a70eae0cc22395a44e544a0d8897586820770a35ede74449" dependencies = [ - "dashmap", "futures", - "lazy_static", "log", - "parking_lot 0.12.1", + "once_cell", + "parking_lot 0.12.2", + "scc", "serial_test_derive", ] [[package]] name = "serial_test_derive" -version = "3.0.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b93fb4adc70021ac1b47f7d45e8cc4169baaa7ea58483bc5b721d19a26202212" +checksum = "a9bb72430492e9549b0c4596725c0f82729bff861c45aa8099c0a8e67fc3b721" dependencies = [ "proc-macro2", "quote", @@ -8483,9 +8498,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -8799,9 +8814,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4497156948bd342b52038035a6fa514a89626e37af9d2c52a5e8d8ebcc7ee479" +checksum = "70aba06097b6eda3c15f6eebab8a6339e121475bcf08bbe6758807e716c372a1" dependencies = [ "paste", "proc-macro2", @@ -8939,18 +8954,18 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" +checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" +checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" dependencies = [ "proc-macro2", "quote", @@ -9096,7 +9111,7 @@ dependencies = [ "libc", "mio", "num_cpus", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project-lite", "signal-hook-registry", "socket2 0.5.6", @@ -9496,7 +9511,7 @@ dependencies = [ "ipconfig", "lru-cache", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand 0.8.5", "resolv-conf", "smallvec", @@ -9901,11 +9916,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" dependencies = [ - "winapi", + "windows-sys 0.52.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index cee449b22eb1c..f3ac31674e389 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -280,8 +280,8 @@ revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = # eth alloy-chains = 
"0.1.15" alloy-primitives = "0.7.1" -alloy-dyn-abi = "0.7.0" -alloy-sol-types = "0.7.0" +alloy-dyn-abi = "0.7.1" +alloy-sol-types = "0.7.1" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } From d833f1aed9f54cfee27ab8d6f8d342357ee0260f Mon Sep 17 00:00:00 2001 From: Rupam Dey <117000803+rupam-04@users.noreply.github.com> Date: Fri, 26 Apr 2024 15:45:07 +0530 Subject: [PATCH 350/700] feat: add new crate op-beacon-core (#7848) Co-authored-by: Matthias Seitz --- Cargo.lock | 9 ++ Cargo.toml | 4 +- crates/consensus/common/src/validation.rs | 17 +++- crates/optimism/consensus/Cargo.toml | 23 +++++ crates/optimism/consensus/src/lib.rs | 103 ++++++++++++++++++++++ 5 files changed, 154 insertions(+), 2 deletions(-) create mode 100644 crates/optimism/consensus/Cargo.toml create mode 100644 crates/optimism/consensus/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 863fada9c68df..5262d2c41cbe2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7113,6 +7113,15 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-optimism-consensus" +version = "0.2.0-beta.6" +dependencies = [ + "reth-consensus", + "reth-consensus-common", + "reth-primitives", +] + [[package]] name = "reth-optimism-payload-builder" version = "0.2.0-beta.6" diff --git a/Cargo.toml b/Cargo.toml index f3ac31674e389..817f829932e8c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,6 +50,7 @@ members = [ "crates/ethereum/engine-primitives/", "crates/node-ethereum/", "crates/node-builder/", + "crates/optimism/consensus", "crates/optimism/node/", "crates/optimism/evm/", "crates/node-core/", @@ -222,6 +223,7 @@ reth-engine-primitives = { path = "crates/engine-primitives" } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } reth-node-builder = { path = "crates/node-builder" } reth-node-ethereum = { path = "crates/node-ethereum" } +reth-node-events = { path = "crates/node/events" } reth-node-optimism = { path = 
"crates/optimism/node" } reth-evm-optimism = { path = "crates/optimism/evm" } reth-node-core = { path = "crates/node-core" } @@ -270,7 +272,7 @@ reth-tracing = { path = "crates/tracing" } reth-transaction-pool = { path = "crates/transaction-pool" } reth-trie = { path = "crates/trie" } reth-trie-parallel = { path = "crates/trie-parallel" } -reth-node-events = { path = "crates/node/events" } +reth-optimism-consensus = { path = "crates/optimism/consensus" } # revm revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 3ed01f63753e1..06b2303a86a04 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -3,7 +3,10 @@ use reth_consensus::ConsensusError; use reth_interfaces::RethResult; use reth_primitives::{ - constants::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, + constants::{ + eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, + MAXIMUM_EXTRA_DATA_SIZE, + }, BlockNumber, ChainSpec, GotExpected, Hardfork, Header, InvalidTransactionError, SealedBlock, SealedHeader, Transaction, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, TxLegacy, @@ -321,6 +324,18 @@ pub fn validate_4844_header_standalone(header: &SealedHeader) -> Result<(), Cons Ok(()) } +/// Validates the header's extradata according to the beacon consensus rules. +/// +/// From yellow paper: extraData: An arbitrary byte array containing data relevant to this block. +/// This must be 32 bytes or fewer; formally Hx. 
+pub fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError> { + if header.extra_data.len() > MAXIMUM_EXTRA_DATA_SIZE { + Err(ConsensusError::ExtraDataExceedsMax { len: header.extra_data.len() }) + } else { + Ok(()) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml new file mode 100644 index 0000000000000..4ebbaa8d8af02 --- /dev/null +++ b/crates/optimism/consensus/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "reth-optimism-consensus" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-consensus-common.workspace = true +reth-primitives.workspace = true +reth-consensus.workspace = true + +[features] +optimism = [ + "reth-primitives/optimism", +] \ No newline at end of file diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs new file mode 100644 index 0000000000000..9a905adfaf836 --- /dev/null +++ b/crates/optimism/consensus/src/lib.rs @@ -0,0 +1,103 @@ +//! Optimism Consensus implementation. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `optimism` feature must be enabled to use this crate. 
+#![cfg(feature = "optimism")] + +use reth_consensus::{Consensus, ConsensusError}; +use reth_consensus_common::{validation, validation::validate_header_extradata}; +use reth_primitives::{ChainSpec, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256}; +use std::{sync::Arc, time::SystemTime}; + +/// Optimism consensus implementation. +/// +/// Provides basic checks as outlined in the execution specs. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct OptimismBeaconConsensus { + /// Configuration + chain_spec: Arc, +} + +impl OptimismBeaconConsensus { + /// Create a new instance of [OptimismBeaconConsensus] + /// + /// # Panics + /// + /// If given chain spec is not optimism [ChainSpec::is_optimism] + pub fn new(chain_spec: Arc) -> Self { + assert!(chain_spec.is_optimism(), "optimism consensus only valid for optimism chains"); + Self { chain_spec } + } +} + +impl Consensus for OptimismBeaconConsensus { + fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { + validation::validate_header_standalone(header, &self.chain_spec)?; + Ok(()) + } + + fn validate_header_against_parent( + &self, + header: &SealedHeader, + parent: &SealedHeader, + ) -> Result<(), ConsensusError> { + header.validate_against_parent(parent, &self.chain_spec).map_err(ConsensusError::from)?; + Ok(()) + } + + fn validate_header_with_total_difficulty( + &self, + header: &Header, + _total_difficulty: U256, + ) -> Result<(), ConsensusError> { + // with OP-stack Bedrock activation number determines when TTD (eth Merge) has been reached. + let is_post_merge = self.chain_spec.is_bedrock_active_at_block(header.number); + + if is_post_merge { + if header.nonce != 0 { + return Err(ConsensusError::TheMergeNonceIsNotZero) + } + + if header.ommers_hash != EMPTY_OMMER_ROOT_HASH { + return Err(ConsensusError::TheMergeOmmerRootIsNotEmpty) + } + + // Post-merge, the consensus layer is expected to perform checks such that the block + // timestamp is a function of the slot. 
This is different from pre-merge, where blocks + // are only allowed to be in the future (compared to the system's clock) by a certain + // threshold. + // + // Block validation with respect to the parent should ensure that the block timestamp + // is greater than its parent timestamp. + + // validate header extradata for all networks post merge + validate_header_extradata(header)?; + + // mixHash is used instead of difficulty inside EVM + // https://eips.ethereum.org/EIPS/eip-4399#using-mixhash-field-instead-of-difficulty + } else { + // Check if timestamp is in the future. Clock can drift but this can be consensus issue. + let present_timestamp = + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(); + + if header.exceeds_allowed_future_timestamp(present_timestamp) { + return Err(ConsensusError::TimestampIsInFuture { + timestamp: header.timestamp, + present_timestamp, + }) + } + } + + Ok(()) + } + + fn validate_block(&self, block: &SealedBlock) -> Result<(), ConsensusError> { + validation::validate_block_standalone(block, &self.chain_spec) + } +} From 688ee06e98774596d7f09283fd68f73303f2f0b6 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 26 Apr 2024 12:57:42 +0200 Subject: [PATCH 351/700] fix(discv5): fork id list in ENR (#7909) --- crates/net/discv5/src/config.rs | 13 +++++++------ crates/net/discv5/src/lib.rs | 12 +++++++----- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index bf15be861b2df..266b530ef07a9 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -9,7 +9,7 @@ use std::{ use derive_more::Display; use discv5::ListenConfig; use multiaddr::{Multiaddr, Protocol}; -use reth_primitives::{Bytes, ForkId, NodeRecord, MAINNET}; +use reth_primitives::{Bytes, EnrForkIdEntry, ForkId, NodeRecord, MAINNET}; use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys, network_key}; @@ -50,7 +50,7 @@ impl 
ConfigBuilder { let Config { discv5_config, bootstrap_nodes, - fork, + fork: (network_key, fork_id), tcp_port, other_enr_kv_pairs, lookup_interval, @@ -60,7 +60,7 @@ impl ConfigBuilder { Self { discv5_config: Some(discv5_config), bootstrap_nodes, - fork: Some(fork), + fork: Some((network_key, fork_id.fork_id)), tcp_port, other_enr_kv_pairs, lookup_interval: Some(lookup_interval), @@ -160,7 +160,8 @@ impl ConfigBuilder { let discv5_config = discv5_config .unwrap_or_else(|| discv5::ConfigBuilder::new(ListenConfig::default()).build()); - let fork = fork.unwrap_or((network_key::ETH, MAINNET.latest_fork_id())); + let (network_key, fork_id) = fork.unwrap_or((network_key::ETH, MAINNET.latest_fork_id())); + let fork = (network_key, fork_id.into()); let lookup_interval = lookup_interval.unwrap_or(DEFAULT_SECONDS_LOOKUP_INTERVAL); @@ -188,8 +189,8 @@ pub struct Config { /// Nodes to boot from. pub(super) bootstrap_nodes: HashSet, /// Fork kv-pair to set in local node record. Identifies which network/chain/fork the node - /// belongs, e.g. `(b"opstack", ChainId)` or `(b"eth", ForkId)`. - pub(super) fork: (&'static [u8], ForkId), + /// belongs, e.g. `(b"opstack", ChainId)` or `(b"eth", [ForkId])`. + pub(super) fork: (&'static [u8], EnrForkIdEntry), /// RLPx TCP port to advertise. 
pub(super) tcp_port: u16, /// Additional kv-pairs (besides tcp port, udp port and fork) that should be advertised to diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index de74f3fee9e0a..14414abf7416a 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -17,13 +17,12 @@ use std::{ }; use ::enr::Enr; -use alloy_rlp::Decodable; use discv5::ListenConfig; use enr::{discv4_id_to_discv5_id, EnrCombinedKeyWrapper}; use futures::future::join_all; use itertools::Itertools; use rand::{Rng, RngCore}; -use reth_primitives::{bytes::Bytes, ForkId, NodeRecord, PeerId}; +use reth_primitives::{bytes::Bytes, EnrForkIdEntry, ForkId, NodeRecord, PeerId}; use secp256k1::SecretKey; use tokio::{sync::mpsc, task}; use tracing::{debug, error, trace}; @@ -489,9 +488,12 @@ impl Discv5 { enr: &discv5::enr::Enr, ) -> Result { let key = self.fork_key; - let mut fork_id_bytes = enr.get_raw_rlp(key).ok_or(Error::ForkMissing(key))?; + let fork_id = enr + .get_decodable::(key) + .ok_or(Error::ForkMissing(key))? + .map(Into::into)?; - Ok(ForkId::decode(&mut fork_id_bytes)?) 
+ Ok(fork_id) } //////////////////////////////////////////////////////////////////////////////////////////////// @@ -834,7 +836,7 @@ mod tests { let (enr, _, _, _) = Discv5::build_local_enr(&sk, &config); let decoded_fork_id = - ForkId::decode(&mut enr.get_raw_rlp(network_key::ETH).unwrap()).unwrap(); + enr.get_decodable::(network_key::ETH).unwrap().map(Into::into).unwrap(); assert_eq!(fork_id, decoded_fork_id); assert_eq!(TCP_PORT, enr.tcp4().unwrap()); // listen config is defaulting to ip mode ipv4 From bb0809ce2e9f3d24a757397051ccd080ec79bd3d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 26 Apr 2024 13:14:27 +0200 Subject: [PATCH 352/700] chore: reuse validation fn (#7911) --- crates/consensus/beacon-core/src/lib.rs | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/crates/consensus/beacon-core/src/lib.rs b/crates/consensus/beacon-core/src/lib.rs index c2a3df6e6814a..6ced95dbc41e0 100644 --- a/crates/consensus/beacon-core/src/lib.rs +++ b/crates/consensus/beacon-core/src/lib.rs @@ -11,10 +11,10 @@ use reth_consensus::{Consensus, ConsensusError}; use reth_consensus_common::validation; use reth_primitives::{ - constants::MAXIMUM_EXTRA_DATA_SIZE, Chain, ChainSpec, Hardfork, Header, SealedBlock, - SealedHeader, EMPTY_OMMER_ROOT_HASH, U256, + Chain, ChainSpec, Hardfork, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256, }; use std::{sync::Arc, time::SystemTime}; + /// Ethereum beacon consensus /// /// This consensus engine does basic checks as outlined in the execution specs. @@ -87,7 +87,7 @@ impl Consensus for BeaconConsensus { // is greater than its parent timestamp. 
// validate header extradata for all networks post merge - validate_header_extradata(header)?; + validation::validate_header_extradata(header)?; // mixHash is used instead of difficulty inside EVM // https://eips.ethereum.org/EIPS/eip-4399#using-mixhash-field-instead-of-difficulty @@ -111,7 +111,7 @@ impl Consensus for BeaconConsensus { // * If the network is goerli pre-merge, ignore the extradata check, since we do not // support clique. Same goes for OP blocks below Bedrock. if self.chain_spec.chain != Chain::goerli() && !self.chain_spec.is_optimism() { - validate_header_extradata(header)?; + validation::validate_header_extradata(header)?; } } @@ -122,15 +122,3 @@ impl Consensus for BeaconConsensus { validation::validate_block_standalone(block, &self.chain_spec) } } - -/// Validates the header's extradata according to the beacon consensus rules. -/// -/// From yellow paper: extraData: An arbitrary byte array containing data relevant to this block. -/// This must be 32 bytes or fewer; formally Hx. 
-fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError> { - if header.extra_data.len() > MAXIMUM_EXTRA_DATA_SIZE { - Err(ConsensusError::ExtraDataExceedsMax { len: header.extra_data.len() }) - } else { - Ok(()) - } -} From 73ea68692c38690fc93b049cb98cd4839976dba8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 26 Apr 2024 13:31:19 +0200 Subject: [PATCH 353/700] chore: move reqwest to workspace dep (#7910) --- Cargo.lock | 34 ++++++++++++++++++++++++--------- Cargo.toml | 1 + crates/optimism/node/Cargo.toml | 4 +--- crates/optimism/node/src/rpc.rs | 2 +- 4 files changed, 28 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5262d2c41cbe2..74431b4c02334 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3534,6 +3534,23 @@ dependencies = [ "tokio-rustls 0.24.1", ] +[[package]] +name = "hyper-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" +dependencies = [ + "futures-util", + "http 1.1.0", + "hyper 1.3.1", + "hyper-util", + "rustls 0.22.4", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.25.0", + "tower-service", +] + [[package]] name = "hyper-system-resolver" version = "0.5.0" @@ -4100,7 +4117,7 @@ checksum = "ac13bc1e44cd00448a5ff485824a128629c945f02077804cb659c07a0ba41395" dependencies = [ "async-trait", "hyper 0.14.28", - "hyper-rustls", + "hyper-rustls 0.24.2", "jsonrpsee-core", "jsonrpsee-types", "serde", @@ -5984,7 +6001,6 @@ dependencies = [ "http 0.2.12", "http-body 0.4.6", "hyper 0.14.28", - "hyper-rustls", "ipnet", "js-sys", "log", @@ -5992,16 +6008,12 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.21.11", - "rustls-native-certs 0.6.3", - "rustls-pemfile 1.0.4", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", "system-configuration", "tokio", - "tokio-rustls 0.24.1", "tokio-util", "tower-service", "url", @@ -6026,6 +6038,7 @@ dependencies 
= [ "http-body 1.0.0", "http-body-util", "hyper 1.3.1", + "hyper-rustls 0.26.0", "hyper-util", "ipnet", "js-sys", @@ -6034,11 +6047,16 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", + "rustls 0.22.4", + "rustls-native-certs 0.7.0", + "rustls-pemfile 2.1.2", + "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", "tokio", + "tokio-rustls 0.25.0", "tower-service", "url", "wasm-bindgen", @@ -7077,12 +7095,10 @@ dependencies = [ "async-trait", "clap", "eyre", - "http 0.2.12", - "http-body 0.4.6", "hyper 0.14.28", "jsonrpsee", "parking_lot 0.12.2", - "reqwest 0.11.27", + "reqwest 0.12.4", "reth", "reth-basic-payload-builder", "reth-beacon-consensus", diff --git a/Cargo.toml b/Cargo.toml index 817f829932e8c..bd4fd5fd9aed0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -351,6 +351,7 @@ futures = "0.3.26" pin-project = "1.0.12" futures-util = "0.3.25" hyper = "0.14.25" +reqwest = { version = "0.12", default-features = false } tower = "0.4" tower-http = "0.4" http = "0.2.8" diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 29a99a961b2c0..be8791c7886bd 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -36,9 +36,7 @@ revm-primitives.workspace = true # async async-trait.workspace = true hyper.workspace = true -http.workspace = true -http-body.workspace = true -reqwest = { version = "0.11", default-features = false, features = [ +reqwest = { workspace = true, default-features = false, features = [ "rustls-tls-native-roots", ] } tracing.workspace = true diff --git a/crates/optimism/node/src/rpc.rs b/crates/optimism/node/src/rpc.rs index 25a399e1859e2..515e1d8eb5732 100644 --- a/crates/optimism/node/src/rpc.rs +++ b/crates/optimism/node/src/rpc.rs @@ -95,7 +95,7 @@ impl SequencerClient { self.http_client() .post(self.endpoint()) - .header(http::header::CONTENT_TYPE, "application/json") + .header(reqwest::header::CONTENT_TYPE, "application/json") .body(body) 
.send() .await From b6b2cf816e3f39f0e9963912b48384352a27112b Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Fri, 26 Apr 2024 21:34:34 +0800 Subject: [PATCH 354/700] Add windows Ipc Client implementation (#7187) --- Cargo.lock | 185 ++++++++++++-- crates/node-builder/src/rpc.rs | 1 - crates/node-core/src/args/rpc_server_args.rs | 2 +- crates/rpc/ipc/Cargo.toml | 6 +- crates/rpc/ipc/src/client.rs | 151 ------------ crates/rpc/ipc/src/client/mod.rs | 97 ++++++++ crates/rpc/ipc/src/client/unix.rs | 82 +++++++ crates/rpc/ipc/src/client/win.rs | 82 +++++++ crates/rpc/ipc/src/lib.rs | 1 - crates/rpc/ipc/src/server/connection.rs | 55 +---- crates/rpc/ipc/src/server/future.rs | 129 +--------- crates/rpc/ipc/src/server/mod.rs | 246 ++++++++++--------- crates/rpc/rpc-builder/src/auth.rs | 14 +- crates/rpc/rpc-builder/src/lib.rs | 43 +--- deny.toml | 1 + 15 files changed, 580 insertions(+), 515 deletions(-) delete mode 100644 crates/rpc/ipc/src/client.rs create mode 100644 crates/rpc/ipc/src/client/mod.rs create mode 100644 crates/rpc/ipc/src/client/unix.rs create mode 100644 crates/rpc/ipc/src/client/win.rs diff --git a/Cargo.lock b/Cargo.lock index 74431b4c02334..5a68ee77310f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -808,10 +808,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ "concurrent-queue", - "event-listener", + "event-listener 2.5.3", "futures-core", ] +[[package]] +name = "async-channel" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136d4d23bcc79e27423727b36823d86233aad06dfea531837b038394d11e9928" +dependencies = [ + "concurrent-queue", + "event-listener 5.3.0", + "event-listener-strategy 0.5.1", + "futures-core", + "pin-project-lite", +] + [[package]] name = "async-compression" version = "0.4.9" @@ -828,14 +841,25 @@ dependencies = [ "zstd-safe", ] +[[package]] +name = "async-lock" +version = 
"3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" +dependencies = [ + "event-listener 4.0.3", + "event-listener-strategy 0.4.0", + "pin-project-lite", +] + [[package]] name = "async-sse" version = "5.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e6fa871e4334a622afd6bb2f611635e8083a6f5e2936c0f90f37c7ef9856298" dependencies = [ - "async-channel", - "futures-lite", + "async-channel 1.9.0", + "futures-lite 1.13.0", "http-types", "log", "memchr", @@ -864,6 +888,12 @@ dependencies = [ "syn 2.0.60", ] +[[package]] +name = "async-task" +version = "4.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" + [[package]] name = "async-trait" version = "0.1.80" @@ -875,6 +905,12 @@ dependencies = [ "syn 2.0.60", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "attohttpc" version = "0.24.1" @@ -1122,6 +1158,22 @@ dependencies = [ "generic-array", ] +[[package]] +name = "blocking" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +dependencies = [ + "async-channel 2.2.1", + "async-lock", + "async-task", + "fastrand 2.0.2", + "futures-io", + "futures-lite 2.3.0", + "piper", + "tracing", +] + [[package]] name = "blst" version = "0.3.11" @@ -2727,6 +2779,48 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" +[[package]] +name = "event-listener" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d9944b8ca13534cdfb2800775f8dd4902ff3fc75a50101466decadfdf322a24" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener 4.0.3", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "332f51cb23d20b0de8458b86580878211da09bcd4503cb579c225b3d124cabb3" +dependencies = [ + "event-listener 5.3.0", + "pin-project-lite", +] + [[package]] name = "examples" version = "0.0.0" @@ -2992,6 +3086,16 @@ dependencies = [ "waker-fn", ] +[[package]] +name = "futures-lite" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +dependencies = [ + "futures-core", + "pin-project-lite", +] + [[package]] name = "futures-macro" version = "0.3.30" @@ -3428,9 +3532,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e9b187a72d63adbfba487f48095306ac823049cb504ee195541e91c7775f5ad" dependencies = [ "anyhow", - "async-channel", + "async-channel 1.9.0", "base64 0.13.1", - "futures-lite", + "futures-lite 1.13.0", "infer", "pin-project-lite", "rand 0.7.3", @@ -3951,6 +4055,33 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "interprocess" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"81f2533f3be42fffe3b5e63b71aeca416c1c3bc33e4e27be018521e76b1f38fb" +dependencies = [ + "blocking", + "cfg-if", + "futures-core", + "futures-io", + "intmap", + "libc", + "once_cell", + "rustc_version 0.4.0", + "spinning", + "thiserror", + "to_method", + "tokio", + "winapi", +] + +[[package]] +name = "intmap" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae52f28f45ac2bc96edb7714de995cffc174a395fb0abf5bff453587c980d7b9" + [[package]] name = "intrusive-collections" version = "0.9.6" @@ -5203,20 +5334,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "parity-tokio-ipc" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" -dependencies = [ - "futures", - "libc", - "log", - "rand 0.7.3", - "tokio", - "winapi", -] - [[package]] name = "parking" version = "2.2.0" @@ -5400,6 +5517,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +dependencies = [ + "atomic-waker", + "fastrand 2.0.2", + "futures-io", +] + [[package]] name = "pkcs8" version = "0.10.2" @@ -6744,9 +6872,10 @@ dependencies = [ "bytes", "futures", "futures-util", + "interprocess", "jsonrpsee", - "parity-tokio-ipc", "pin-project", + "rand 0.8.5", "reth-tracing", "serde_json", "thiserror", @@ -6755,6 +6884,7 @@ dependencies = [ "tokio-util", "tower", "tracing", + "windows-sys 0.52.0", ] [[package]] @@ -8657,6 +8787,15 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +[[package]] +name = "spinning" +version = "0.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d4f0e86297cad2658d92a707320d87bf4e6ae1050287f51d19b67ef3f153a7b" +dependencies = [ + "lock_api", +] + [[package]] name = "spki" version = "0.7.3" @@ -9125,6 +9264,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" +[[package]] +name = "to_method" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c4ceeeca15c8384bbc3e011dbd8fccb7f068a440b752b7d9b32ceb0ca0e2e8" + [[package]] name = "tokio" version = "1.37.0" diff --git a/crates/node-builder/src/rpc.rs b/crates/node-builder/src/rpc.rs index d6e2eb0f239d6..3efeba7f5fa37 100644 --- a/crates/node-builder/src/rpc.rs +++ b/crates/node-builder/src/rpc.rs @@ -301,7 +301,6 @@ where let launch_auth = auth_module.clone().start_server(auth_config).map_ok(|handle| { let addr = handle.local_addr(); if let Some(ipc_endpoint) = handle.ipc_endpoint() { - let ipc_endpoint = ipc_endpoint.path(); info!(target: "reth::cli", url=%addr, ipc_endpoint=%ipc_endpoint,"RPC auth server started"); } else { info!(target: "reth::cli", url=%addr, "RPC auth server started"); diff --git a/crates/node-core/src/args/rpc_server_args.rs b/crates/node-core/src/args/rpc_server_args.rs index 2ac48e2ba3ab8..1a60aa31af01d 100644 --- a/crates/node-core/src/args/rpc_server_args.rs +++ b/crates/node-core/src/args/rpc_server_args.rs @@ -711,7 +711,7 @@ mod tests { config.ws_address().unwrap(), SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8888)) ); - assert_eq!(config.ipc_endpoint().unwrap().path(), constants::DEFAULT_IPC_ENDPOINT); + assert_eq!(config.ipc_endpoint().unwrap(), constants::DEFAULT_IPC_ENDPOINT); } #[test] diff --git a/crates/rpc/ipc/Cargo.toml b/crates/rpc/ipc/Cargo.toml index 21b6454094d2f..094fa5759e17b 100644 --- a/crates/rpc/ipc/Cargo.toml +++ b/crates/rpc/ipc/Cargo.toml @@ -15,7 +15,6 @@ workspace 
= true # async/net futures.workspace = true -parity-tokio-ipc = "0.9.0" tokio = { workspace = true, features = ["net", "time", "rt-multi-thread"] } tokio-util = { workspace = true, features = ["codec"] } tokio-stream.workspace = true @@ -30,7 +29,12 @@ tracing.workspace = true bytes.workspace = true thiserror.workspace = true futures-util = "0.3.30" +interprocess = { version = "1.2.1", features = ["tokio_support"] } + +[target.'cfg(windows)'.dependencies] +windows-sys = { version = "0.52.0", features = ["Win32_Foundation"] } [dev-dependencies] tokio-stream = { workspace = true, features = ["sync"] } reth-tracing.workspace = true +rand.workspace = true diff --git a/crates/rpc/ipc/src/client.rs b/crates/rpc/ipc/src/client.rs deleted file mode 100644 index f4454958f2379..0000000000000 --- a/crates/rpc/ipc/src/client.rs +++ /dev/null @@ -1,151 +0,0 @@ -//! [`jsonrpsee`] transport adapter implementation for IPC. - -use crate::stream_codec::StreamCodec; -use futures::StreamExt; -use jsonrpsee::{ - async_client::{Client, ClientBuilder}, - core::client::{ReceivedMessage, TransportReceiverT, TransportSenderT}, -}; -use std::{ - io, - path::{Path, PathBuf}, -}; -use tokio::{io::AsyncWriteExt, net::UnixStream}; -use tokio_util::codec::FramedRead; - -/// Builder type for [`Client`] -#[derive(Clone, Default, Debug)] -#[non_exhaustive] -pub struct IpcClientBuilder; - -impl IpcClientBuilder { - /// Connects to a IPC socket - pub async fn build(self, path: impl AsRef) -> Result { - let (tx, rx) = IpcTransportClientBuilder::default().build(path).await?; - Ok(self.build_with_tokio(tx, rx)) - } - - /// Uses the sender and receiver channels to connect to the socket. - pub fn build_with_tokio(self, sender: S, receiver: R) -> Client - where - S: TransportSenderT + Send, - R: TransportReceiverT + Send, - { - ClientBuilder::default().build_with_tokio(sender, receiver) - } -} - -/// Sending end of IPC transport. 
-#[derive(Debug)] -pub struct Sender { - inner: tokio::net::unix::OwnedWriteHalf, -} - -#[async_trait::async_trait] -impl TransportSenderT for Sender { - type Error = IpcError; - - /// Sends out a request. Returns a Future that finishes when the request has been successfully - /// sent. - async fn send(&mut self, msg: String) -> Result<(), Self::Error> { - Ok(self.inner.write_all(msg.as_bytes()).await?) - } - - async fn send_ping(&mut self) -> Result<(), Self::Error> { - tracing::trace!("send ping - not implemented"); - Err(IpcError::NotSupported) - } - - /// Close the connection. - async fn close(&mut self) -> Result<(), Self::Error> { - Ok(()) - } -} - -/// Receiving end of IPC transport. -#[derive(Debug)] -pub struct Receiver { - inner: FramedRead, -} - -#[async_trait::async_trait] -impl TransportReceiverT for Receiver { - type Error = IpcError; - - /// Returns a Future resolving when the server sent us something back. - async fn receive(&mut self) -> Result { - self.inner.next().await.map_or(Err(IpcError::Closed), |val| Ok(ReceivedMessage::Text(val?))) - } -} - -/// Builder for IPC transport [`Sender`] and [`Receiver`] pair. -#[derive(Debug, Clone, Default)] -#[non_exhaustive] -pub struct IpcTransportClientBuilder; - -impl IpcTransportClientBuilder { - /// Try to establish the connection. 
- /// - /// ``` - /// use jsonrpsee::{core::client::ClientT, rpc_params}; - /// use reth_ipc::client::IpcClientBuilder; - /// # async fn run_client() -> Result<(), Box> { - /// let client = IpcClientBuilder::default().build("/tmp/my-uds").await?; - /// let response: String = client.request("say_hello", rpc_params![]).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn build(self, path: impl AsRef) -> Result<(Sender, Receiver), IpcError> { - let path = path.as_ref(); - - let stream = UnixStream::connect(path) - .await - .map_err(|err| IpcError::FailedToConnect { path: path.to_path_buf(), err })?; - - let (rhlf, whlf) = stream.into_split(); - - Ok(( - Sender { inner: whlf }, - Receiver { inner: FramedRead::new(rhlf, StreamCodec::stream_incoming()) }, - )) - } -} - -/// Error variants that can happen in IPC transport. -#[derive(Debug, thiserror::Error)] -pub enum IpcError { - /// Operation not supported - #[error("operation not supported")] - NotSupported, - /// Stream was closed - #[error("stream closed")] - Closed, - /// Thrown when failed to establish a socket connection. - #[error("failed to connect to socket {path}: {err}")] - FailedToConnect { - /// The path of the socket. - #[doc(hidden)] - path: PathBuf, - /// The error occurred while connecting. 
- #[doc(hidden)] - err: io::Error, - }, - /// Wrapped IO Error - #[error(transparent)] - Io(#[from] io::Error), -} - -#[cfg(test)] -mod tests { - use super::*; - use parity_tokio_ipc::{dummy_endpoint, Endpoint}; - - #[tokio::test] - async fn test_connect() { - let endpoint = dummy_endpoint(); - let _incoming = Endpoint::new(endpoint.clone()).incoming().unwrap(); - - let (tx, rx) = IpcTransportClientBuilder::default().build(endpoint).await.unwrap(); - let _ = IpcClientBuilder::default().build_with_tokio(tx, rx); - } -} diff --git a/crates/rpc/ipc/src/client/mod.rs b/crates/rpc/ipc/src/client/mod.rs new file mode 100644 index 0000000000000..8ca4b54066523 --- /dev/null +++ b/crates/rpc/ipc/src/client/mod.rs @@ -0,0 +1,97 @@ +//! [`jsonrpsee`] transport adapter implementation for IPC. + +use std::{ + io, + path::{Path, PathBuf}, +}; + +use jsonrpsee::{ + async_client::{Client, ClientBuilder}, + core::client::{TransportReceiverT, TransportSenderT}, +}; + +#[cfg(unix)] +use crate::client::unix::IpcTransportClientBuilder; +#[cfg(windows)] +use crate::client::win::IpcTransportClientBuilder; + +#[cfg(unix)] +mod unix; +#[cfg(windows)] +mod win; + +/// Builder type for [`Client`] +#[derive(Clone, Default, Debug)] +#[non_exhaustive] +pub struct IpcClientBuilder; + +impl IpcClientBuilder { + /// Connects to a IPC socket + /// + /// ``` + /// use jsonrpsee::{core::client::ClientT, rpc_params}; + /// use reth_ipc::client::IpcClientBuilder; + /// # async fn run_client() -> Result<(), Box> { + /// let client = IpcClientBuilder::default().build("/tmp/my-uds").await?; + /// let response: String = client.request("say_hello", rpc_params![]).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn build(self, path: impl AsRef) -> Result { + let (tx, rx) = IpcTransportClientBuilder::default().build(path).await?; + Ok(self.build_with_tokio(tx, rx)) + } + + /// Uses the sender and receiver channels to connect to the socket. 
+ pub fn build_with_tokio(self, sender: S, receiver: R) -> Client + where + S: TransportSenderT + Send, + R: TransportReceiverT + Send, + { + ClientBuilder::default().build_with_tokio(sender, receiver) + } +} + +/// Error variants that can happen in IPC transport. +#[derive(Debug, thiserror::Error)] +pub enum IpcError { + /// Operation not supported + #[error("operation not supported")] + NotSupported, + /// Stream was closed + #[error("stream closed")] + Closed, + /// Thrown when failed to establish a socket connection. + #[error("failed to connect to socket {path}: {err}")] + FailedToConnect { + /// The path of the socket. + #[doc(hidden)] + path: PathBuf, + /// The error occurred while connecting. + #[doc(hidden)] + err: io::Error, + }, + /// Wrapped IO Error + #[error(transparent)] + Io(#[from] io::Error), +} + +#[cfg(test)] +mod tests { + use crate::server::dummy_endpoint; + use interprocess::local_socket::tokio::LocalSocketListener; + + use super::*; + + #[tokio::test] + async fn test_connect() { + let endpoint = dummy_endpoint(); + let binding = LocalSocketListener::bind(endpoint.clone()).unwrap(); + tokio::spawn(async move { + let _x = binding.accept().await; + }); + + let (tx, rx) = IpcTransportClientBuilder::default().build(endpoint).await.unwrap(); + let _ = IpcClientBuilder::default().build_with_tokio(tx, rx); + } +} diff --git a/crates/rpc/ipc/src/client/unix.rs b/crates/rpc/ipc/src/client/unix.rs new file mode 100644 index 0000000000000..c7ed7bc7a6269 --- /dev/null +++ b/crates/rpc/ipc/src/client/unix.rs @@ -0,0 +1,82 @@ +//! [`jsonrpsee`] transport adapter implementation for Unix IPC by using Unix Sockets. 
+ +use crate::{client::IpcError, stream_codec::StreamCodec}; +use futures::StreamExt; +use jsonrpsee::core::client::{ReceivedMessage, TransportReceiverT, TransportSenderT}; +use std::path::Path; +use tokio::{ + io::AsyncWriteExt, + net::{ + unix::{OwnedReadHalf, OwnedWriteHalf}, + UnixStream, + }, +}; +use tokio_util::codec::FramedRead; + +/// Sending end of IPC transport. +#[derive(Debug)] +pub(crate) struct Sender { + inner: OwnedWriteHalf, +} + +#[async_trait::async_trait] +impl TransportSenderT for Sender { + type Error = IpcError; + + /// Sends out a request. Returns a Future that finishes when the request has been successfully + /// sent. + async fn send(&mut self, msg: String) -> Result<(), Self::Error> { + Ok(self.inner.write_all(msg.as_bytes()).await?) + } + + async fn send_ping(&mut self) -> Result<(), Self::Error> { + tracing::trace!("send ping - not implemented"); + Err(IpcError::NotSupported) + } + + /// Close the connection. + async fn close(&mut self) -> Result<(), Self::Error> { + Ok(()) + } +} + +/// Receiving end of IPC transport. +#[derive(Debug)] +pub(crate) struct Receiver { + pub(crate) inner: FramedRead, +} + +#[async_trait::async_trait] +impl TransportReceiverT for Receiver { + type Error = IpcError; + + /// Returns a Future resolving when the server sent us something back. + async fn receive(&mut self) -> Result { + self.inner.next().await.map_or(Err(IpcError::Closed), |val| Ok(ReceivedMessage::Text(val?))) + } +} + +/// Builder for IPC transport [`Sender`] and [`Receiver`] pair. 
+#[derive(Debug, Clone, Default)] +#[non_exhaustive] +pub(crate) struct IpcTransportClientBuilder; + +impl IpcTransportClientBuilder { + pub(crate) async fn build( + self, + path: impl AsRef, + ) -> Result<(Sender, Receiver), IpcError> { + let path = path.as_ref(); + + let stream = UnixStream::connect(path) + .await + .map_err(|err| IpcError::FailedToConnect { path: path.to_path_buf(), err })?; + + let (rhlf, whlf) = stream.into_split(); + + Ok(( + Sender { inner: whlf }, + Receiver { inner: FramedRead::new(rhlf, StreamCodec::stream_incoming()) }, + )) + } +} diff --git a/crates/rpc/ipc/src/client/win.rs b/crates/rpc/ipc/src/client/win.rs new file mode 100644 index 0000000000000..69b3140fef1de --- /dev/null +++ b/crates/rpc/ipc/src/client/win.rs @@ -0,0 +1,82 @@ +//! [`jsonrpsee`] transport adapter implementation for Windows IPC by using NamedPipes. + +use crate::{client::IpcError, stream_codec::StreamCodec}; +use jsonrpsee::core::client::{ReceivedMessage, TransportReceiverT, TransportSenderT}; +use std::{path::Path, sync::Arc}; +use tokio::{ + io::AsyncWriteExt, + net::windows::named_pipe::{ClientOptions, NamedPipeClient}, + time, + time::Duration, +}; +use tokio_stream::StreamExt; +use tokio_util::codec::FramedRead; +use windows_sys::Win32::Foundation::ERROR_PIPE_BUSY; + +/// Sending end of IPC transport. +#[derive(Debug)] +pub struct Sender { + inner: Arc, +} + +#[async_trait::async_trait] +impl TransportSenderT for Sender { + type Error = IpcError; + + /// Sends out a request. Returns a Future that finishes when the request has been successfully + /// sent. + async fn send(&mut self, msg: String) -> Result<(), Self::Error> { + Ok(self.inner.write_all(msg.as_bytes()).await?) + } + + async fn send_ping(&mut self) -> Result<(), Self::Error> { + tracing::trace!("send ping - not implemented"); + Err(IpcError::NotSupported) + } + + /// Close the connection. + async fn close(&mut self) -> Result<(), Self::Error> { + Ok(()) + } +} + +/// Receiving end of IPC transport. 
+#[derive(Debug)] +pub struct Receiver { + inner: FramedRead, StreamCodec>, +} + +#[async_trait::async_trait] +impl TransportReceiverT for Receiver { + type Error = IpcError; + + /// Returns a Future resolving when the server sent us something back. + async fn receive(&mut self) -> Result { + self.inner.next().await.map_or(Err(IpcError::Closed), |val| Ok(ReceivedMessage::Text(val?))) + } +} + +/// Builder for IPC transport [`crate::client::win::Sender`] and [`crate::client::win::Receiver`] +/// pair. +#[derive(Debug, Clone, Default)] +#[non_exhaustive] +pub struct IpcTransportClientBuilder; + +impl IpcTransportClientBuilder { + pub async fn build(self, path: impl AsRef) -> Result<(Sender, Receiver), IpcError> { + let addr = path.as_ref().as_os_str(); + let client = loop { + match ClientOptions::new().open(addr) { + Ok(client) => break client, + Err(e) if e.raw_os_error() == Some(ERROR_PIPE_BUSY as i32) => (), + Err(e) => return IpcError::FailedToConnect { path: path.to_path_buf(), err: e }, + } + time::sleep(Duration::from_mills(50)).await; + }; + let client = Arc::new(client); + Ok(( + Sender { inner: client.clone() }, + Receiver { inner: FramedRead::new(client, StreamCodec::stream_incoming()) }, + )) + } +} diff --git a/crates/rpc/ipc/src/lib.rs b/crates/rpc/ipc/src/lib.rs index 2d0193ed65f79..ae7a8b221f279 100644 --- a/crates/rpc/ipc/src/lib.rs +++ b/crates/rpc/ipc/src/lib.rs @@ -12,7 +12,6 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -#[cfg(unix)] pub mod client; pub mod server; diff --git a/crates/rpc/ipc/src/server/connection.rs b/crates/rpc/ipc/src/server/connection.rs index abeba7bbf0a64..05f7a53a9d787 100644 --- a/crates/rpc/ipc/src/server/connection.rs +++ b/crates/rpc/ipc/src/server/connection.rs @@ -1,12 +1,11 @@ //! A IPC connection. 
use crate::stream_codec::StreamCodec; -use futures::{ready, stream::FuturesUnordered, FutureExt, Sink, Stream, StreamExt}; +use futures::{stream::FuturesUnordered, FutureExt, Sink, Stream}; use std::{ collections::VecDeque, future::Future, io, - marker::PhantomData, pin::Pin, task::{Context, Poll}, }; @@ -16,58 +15,8 @@ use tower::Service; pub(crate) type JsonRpcStream = Framed; -/// Wraps a stream of incoming connections. #[pin_project::pin_project] -pub(crate) struct Incoming { - #[pin] - inner: T, - _marker: PhantomData, -} -impl Incoming -where - T: Stream> + Unpin + 'static, - Item: AsyncRead + AsyncWrite, -{ - /// Create a new instance. - pub(crate) fn new(inner: T) -> Self { - Self { inner, _marker: Default::default() } - } - - /// Polls to accept a new incoming connection to the endpoint. - pub(crate) fn poll_accept(&mut self, cx: &mut Context<'_>) -> Poll<::Item> { - Poll::Ready(ready!(self.poll_next_unpin(cx)).map_or( - Err(io::Error::new(io::ErrorKind::ConnectionAborted, "ipc connection closed")), - |conn| conn, - )) - } -} - -impl Stream for Incoming -where - T: Stream> + 'static, - Item: AsyncRead + AsyncWrite, -{ - type Item = io::Result>>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - let res = match ready!(this.inner.poll_next(cx)) { - Some(Ok(item)) => { - let framed = IpcConn(tokio_util::codec::Decoder::framed( - StreamCodec::stream_incoming(), - item, - )); - Ok(framed) - } - Some(Err(err)) => Err(err), - None => return Poll::Ready(None), - }; - Poll::Ready(Some(res)) - } -} - -#[pin_project::pin_project] -pub(crate) struct IpcConn(#[pin] T); +pub(crate) struct IpcConn(#[pin] pub(crate) T); impl IpcConn> where diff --git a/crates/rpc/ipc/src/server/future.rs b/crates/rpc/ipc/src/server/future.rs index 65aaccc88df73..f807af4499316 100644 --- a/crates/rpc/ipc/src/server/future.rs +++ b/crates/rpc/ipc/src/server/future.rs @@ -26,127 +26,9 @@ //! Utilities for handling async code. 
-use futures::FutureExt; -use std::{ - future::Future, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; -use tokio::{ - sync::{watch, OwnedSemaphorePermit, Semaphore, TryAcquireError}, - time::{self, Duration, Interval}, -}; +use std::sync::Arc; -/// Polling for server stop monitor interval in milliseconds. -const STOP_MONITOR_POLLING_INTERVAL: Duration = Duration::from_millis(1000); - -/// This is a flexible collection of futures that need to be driven to completion -/// alongside some other future, such as connection handlers that need to be -/// handled along with a listener for new connections. -/// -/// In order to `.await` on these futures and drive them to completion, call -/// `select_with` providing some other future, the result of which you need. -pub(crate) struct FutureDriver { - futures: Vec, - stop_monitor_heartbeat: Interval, -} - -impl Default for FutureDriver { - fn default() -> Self { - let mut heartbeat = time::interval(STOP_MONITOR_POLLING_INTERVAL); - - heartbeat.set_missed_tick_behavior(time::MissedTickBehavior::Skip); - - FutureDriver { futures: Vec::new(), stop_monitor_heartbeat: heartbeat } - } -} - -impl FutureDriver { - /// Add a new future to this driver - pub(crate) fn add(&mut self, future: F) { - self.futures.push(future); - } -} - -impl FutureDriver -where - F: Future + Unpin, -{ - pub(crate) async fn select_with(&mut self, selector: S) -> S::Output { - tokio::pin!(selector); - - DriverSelect { selector, driver: self }.await - } - - fn drive(&mut self, cx: &mut Context<'_>) { - let mut i = 0; - - while i < self.futures.len() { - if self.futures[i].poll_unpin(cx).is_ready() { - // Using `swap_remove` since we don't care about ordering, - // but we do care about removing being `O(1)`. 
- // - // We don't increment `i` in this branch, since we now - // have a shorter length, and potentially a new value at - // current index - self.futures.swap_remove(i); - } else { - i += 1; - } - } - } - - fn poll_stop_monitor_heartbeat(&mut self, cx: &mut Context<'_>) { - // We don't care about the ticks of the heartbeat, it's here only - // to periodically wake the `Waker` on `cx`. - let _ = self.stop_monitor_heartbeat.poll_tick(cx); - } -} - -impl Future for FutureDriver -where - F: Future + Unpin, -{ - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = Pin::into_inner(self); - - this.drive(cx); - - if this.futures.is_empty() { - Poll::Ready(()) - } else { - Poll::Pending - } - } -} - -/// This is a glorified select `Future` that will attempt to drive all -/// connection futures `F` to completion on each `poll`, while also -/// handling incoming connections. -struct DriverSelect<'a, S, F> { - selector: S, - driver: &'a mut FutureDriver, -} - -impl<'a, R, F> Future for DriverSelect<'a, R, F> -where - R: Future + Unpin, - F: Future + Unpin, -{ - type Output = R::Output; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = Pin::into_inner(self); - - this.driver.drive(cx); - this.driver.poll_stop_monitor_heartbeat(cx); - - this.selector.poll_unpin(cx) - } -} +use tokio::sync::{watch, OwnedSemaphorePermit, Semaphore, TryAcquireError}; #[derive(Debug, Clone)] pub(crate) struct StopHandle(watch::Receiver<()>); @@ -156,12 +38,7 @@ impl StopHandle { Self(rx) } - pub(crate) fn shutdown_requested(&self) -> bool { - // if a message has been seen, it means that `stop` has been called. - self.0.has_changed().unwrap_or(true) - } - - pub(crate) async fn shutdown(&mut self) { + pub(crate) async fn shutdown(mut self) { // Err(_) implies that the `sender` has been dropped. // Ok(_) implies that `stop` has been called. 
let _ = self.0.changed().await; diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index 5301c7d2198b2..c876457e1f184 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -1,14 +1,16 @@ //! JSON-RPC IPC server implementation use crate::server::{ - connection::{Incoming, IpcConn, JsonRpcStream}, - future::{ConnectionGuard, FutureDriver, StopHandle}, + connection::{IpcConn, JsonRpcStream}, + future::{ConnectionGuard, StopHandle}, }; -use futures::{FutureExt, Stream, StreamExt}; +use futures::StreamExt; +use futures_util::{future::Either, stream::FuturesUnordered}; +use interprocess::local_socket::tokio::{LocalSocketListener, LocalSocketStream}; use jsonrpsee::{ core::TEN_MB_SIZE_BYTES, server::{ - middleware::rpc::{either::Either, RpcLoggerLayer, RpcServiceT}, + middleware::rpc::{RpcLoggerLayer, RpcServiceT}, AlreadyStoppedError, IdProvider, RandomIntegerIdProvider, }, BoundedSubscriptions, MethodSink, Methods, @@ -25,16 +27,18 @@ use tokio::{ sync::{oneshot, watch, OwnedSemaphorePermit}, }; use tower::{layer::util::Identity, Layer, Service}; -use tracing::{debug, trace, warn}; - +use tracing::{debug, trace, warn, Instrument}; // re-export so can be used during builder setup -use crate::server::{ - connection::IpcConnDriver, - rpc_service::{RpcService, RpcServiceCfg}, +use crate::{ + server::{ + connection::IpcConnDriver, + rpc_service::{RpcService, RpcServiceCfg}, + }, + stream_codec::StreamCodec, }; -pub use parity_tokio_ipc::Endpoint; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; +use tokio_util::compat::FuturesAsyncReadCompatExt; use tower::layer::{util::Stack, LayerFn}; mod connection; @@ -47,7 +51,7 @@ mod rpc_service; // This is an adapted `jsonrpsee` Server, but for `Ipc` connections. 
pub struct IpcServer { /// The endpoint we listen for incoming transactions - endpoint: Endpoint, + endpoint: String, id_provider: Arc, cfg: Settings, rpc_middleware: RpcServiceBuilder, @@ -55,9 +59,9 @@ pub struct IpcServer { } impl IpcServer { - /// Returns the configured [Endpoint] - pub fn endpoint(&self) -> &Endpoint { - &self.endpoint + /// Returns the configured endpoint + pub fn endpoint(&self) -> String { + self.endpoint.clone() } } @@ -123,15 +127,29 @@ where stop_handle: StopHandle, on_ready: oneshot::Sender>, ) { - trace!(endpoint = ?self.endpoint.path(), "starting ipc server"); + trace!(endpoint = ?self.endpoint, "starting ipc server"); if cfg!(unix) { // ensure the file does not exist - if std::fs::remove_file(self.endpoint.path()).is_ok() { - debug!(endpoint = ?self.endpoint.path(), "removed existing IPC endpoint file"); + if std::fs::remove_file(&self.endpoint).is_ok() { + debug!(endpoint = ?self.endpoint, "removed existing IPC endpoint file"); } } + let listener = match LocalSocketListener::bind(self.endpoint.clone()) { + Err(err) => { + on_ready + .send(Err(IpcServerStartError { endpoint: self.endpoint.clone(), source: err })) + .ok(); + return; + } + + Ok(listener) => listener, + }; + + // signal that we're ready to accept connections + on_ready.send(Ok(())).ok(); + let message_buffer_capacity = self.cfg.message_buffer_capacity; let max_request_body_size = self.cfg.max_request_body_size; let max_response_body_size = self.cfg.max_response_body_size; @@ -142,37 +160,27 @@ where let mut id: u32 = 0; let connection_guard = ConnectionGuard::new(self.cfg.max_connections as usize); - let mut connections = FutureDriver::default(); - let endpoint_path = self.endpoint.path().to_string(); - let incoming = match self.endpoint.incoming() { - Ok(connections) => { - #[cfg(windows)] - let connections = Box::pin(connections); - Incoming::new(connections) - } - Err(err) => { - on_ready - .send(Err(IpcServerStartError { endpoint: endpoint_path, source: err })) - 
.ok(); - return - } - }; - // signal that we're ready to accept connections - on_ready.send(Ok(())).ok(); - - let mut incoming = Monitored::new(incoming, &stop_handle); + let mut connections = FuturesUnordered::new(); + let stopped = stop_handle.clone().shutdown(); + tokio::pin!(stopped); trace!("accepting ipc connections"); loop { - match connections.select_with(&mut incoming).await { - Ok(ipc) => { + match try_accept_conn(&listener, stopped).await { + AcceptConnection::Established { local_socket_stream, stop } => { trace!("established new connection"); + let ipc = IpcConn(tokio_util::codec::Decoder::framed( + StreamCodec::stream_incoming(), + local_socket_stream.compat(), + )); + let conn = match connection_guard.try_acquire() { Some(conn) => conn, None => { warn!("Too many IPC connections. Please try again later."); - connections.add(ipc.reject_connection().boxed()); - continue + connections.push(tokio::spawn(ipc.reject_connection().in_current_span())); + stopped = stop; + continue; } }; @@ -198,30 +206,58 @@ where }; let service = self.http_middleware.service(tower_service); - connections.add(Box::pin(spawn_connection( + connections.push(tokio::spawn(process_connection( ipc, service, stop_handle.clone(), rx, - ))); + ).in_current_span())); id = id.wrapping_add(1); + stopped = stop; } - Err(MonitoredError::Selector(err)) => { - tracing::error!("Error while awaiting a new IPC connection: {:?}", err); + AcceptConnection::Shutdown => { break; } + AcceptConnection::Err((e, stop)) => { + tracing::error!("Error while awaiting a new IPC connection: {:?}", e); + stopped = stop; } - Err(MonitoredError::Shutdown) => break, } } - connections.await; + // FuturesUnordered won't poll anything until this line but because the + // tasks are spawned (so that they can progress independently) + // then this just makes sure that all tasks are completed before + // returning from this function. 
+ while connections.next().await.is_some() {} + } +} + +enum AcceptConnection { + Shutdown, + Established { local_socket_stream: LocalSocketStream, stop: S }, + Err((io::Error, S)), +} + +async fn try_accept_conn(listener: &LocalSocketListener, stopped: S) -> AcceptConnection +where + S: Future + Unpin, +{ + let accept = listener.accept(); + tokio::pin!(accept); + + match futures_util::future::select(accept, stopped).await { + Either::Left((res, stop)) => match res { + Ok(local_socket_stream) => AcceptConnection::Established { local_socket_stream, stop }, + Err(e) => AcceptConnection::Err((e, stop)), + }, + Either::Right(_) => AcceptConnection::Shutdown, } } impl std::fmt::Debug for IpcServer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("IpcServer") - .field("endpoint", &self.endpoint.path()) + .field("endpoint", &self.endpoint) .field("cfg", &self.cfg) .field("id_provider", &self.id_provider) .finish() @@ -408,10 +444,10 @@ where } /// Spawns the IPC connection onto a new task -async fn spawn_connection( +async fn process_connection( conn: IpcConn>, service: S, - mut stop_handle: StopHandle, + stop_handle: StopHandle, rx: mpsc::Receiver, ) where S: Service> + Send + 'static, @@ -419,70 +455,34 @@ async fn spawn_connection( S::Future: Send + Unpin, T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { - let task = tokio::task::spawn(async move { - let rx_item = ReceiverStream::new(rx); - let conn = IpcConnDriver { - conn, - service, - pending_calls: Default::default(), - items: Default::default(), - }; - tokio::pin!(conn, rx_item); - - loop { - tokio::select! { - _ = &mut conn => { - break - } - item = rx_item.next() => { - if let Some(item) = item { - conn.push_back(item); - } - } - _ = stop_handle.shutdown() => { - // shutdown - break - } - } - } - }); - - task.await.ok(); -} - -/// This is a glorified select listening for new messages, while also checking the `stop_receiver` -/// signal. 
-struct Monitored<'a, F> { - future: F, - stop_monitor: &'a StopHandle, -} - -impl<'a, F> Monitored<'a, F> { - fn new(future: F, stop_monitor: &'a StopHandle) -> Self { - Monitored { future, stop_monitor } - } -} - -enum MonitoredError { - Shutdown, - Selector(E), -} + let rx_item = ReceiverStream::new(rx); + let conn = IpcConnDriver { + conn, + service, + pending_calls: Default::default(), + items: Default::default(), + }; + tokio::pin!(conn, rx_item); -impl<'a, T, Item> Future for Monitored<'a, Incoming> -where - T: Stream> + Unpin + 'static, - Item: AsyncRead + AsyncWrite, -{ - type Output = Result>, MonitoredError>; + let stopped = stop_handle.shutdown(); - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.get_mut(); + tokio::pin!(stopped); - if this.stop_monitor.shutdown_requested() { - return Poll::Ready(Err(MonitoredError::Shutdown)) + loop { + tokio::select! { + _ = &mut conn => { + break + } + item = rx_item.next() => { + if let Some(item) = item { + conn.push_back(item); + } + } + _ = &mut stopped=> { + // shutdown + break + } } - - this.future.poll_accept(cx).map_err(MonitoredError::Selector) } } @@ -734,17 +734,8 @@ impl Builder { /// Finalize the configuration of the server. Consumes the [`Builder`]. pub fn build(self, endpoint: impl AsRef) -> IpcServer { - let endpoint = Endpoint::new(endpoint.as_ref().to_string()); - self.build_with_endpoint(endpoint) - } - - /// Finalize the configuration of the server. Consumes the [`Builder`]. 
- pub fn build_with_endpoint( - self, - endpoint: Endpoint, - ) -> IpcServer { IpcServer { - endpoint, + endpoint: endpoint.as_ref().to_string(), cfg: self.settings, id_provider: self.id_provider, http_middleware: self.http_middleware, @@ -782,7 +773,18 @@ impl ServerHandle { } } -#[cfg(all(test, unix))] +/// For testing/examples +#[cfg(test)] +pub fn dummy_endpoint() -> String { + let num: u64 = rand::Rng::gen(&mut rand::thread_rng()); + if cfg!(windows) { + format!(r"\\.\pipe\my-pipe-{}", num) + } else { + format!(r"/tmp/my-uds-{}", num) + } +} + +#[cfg(test)] mod tests { use super::*; use crate::client::IpcClientBuilder; @@ -797,7 +799,6 @@ mod tests { types::Request, PendingSubscriptionSink, RpcModule, SubscriptionMessage, }; - use parity_tokio_ipc::dummy_endpoint; use tokio::sync::broadcast; use tokio_stream::wrappers::BroadcastStream; @@ -823,7 +824,7 @@ mod tests { // and you might want to do something smarter if it's // critical that "the most recent item" must be sent when it is produced. 
if sink.send(notif).await.is_err() { - break Ok(()) + break Ok(()); } closed = c; @@ -848,6 +849,7 @@ mod tests { #[tokio::test] async fn can_set_the_max_response_body_size() { + // init_test_tracing(); let endpoint = dummy_endpoint(); let server = Builder::default().max_response_body_size(100).build(&endpoint); let mut module = RpcModule::new(()); diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index cd21be27194eb..3726172576f42 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -13,7 +13,7 @@ use jsonrpsee::{ server::{AlreadyStoppedError, RpcModule}, Methods, }; -pub use reth_ipc::server::{Builder as IpcServerBuilder, Endpoint}; +pub use reth_ipc::server::Builder as IpcServerBuilder; use reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; @@ -205,8 +205,7 @@ impl AuthServerConfig { let ipc_endpoint_str = ipc_endpoint .clone() .unwrap_or_else(|| constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string()); - let ipc_path = Endpoint::new(ipc_endpoint_str); - let ipc_server = ipc_server_config.build(ipc_path.path()); + let ipc_server = ipc_server_config.build(ipc_endpoint_str); let res = ipc_server .start(module.inner) .await @@ -449,7 +448,7 @@ impl AuthServerHandle { if let Some(ipc_endpoint) = self.ipc_endpoint.clone() { return Some( IpcClientBuilder::default() - .build(Endpoint::new(ipc_endpoint).path()) + .build(ipc_endpoint) .await .expect("Failed to create ipc client"), ) @@ -463,10 +462,7 @@ impl AuthServerHandle { } /// Return an ipc endpoint - pub fn ipc_endpoint(&self) -> Option { - if let Some(ipc_endpoint) = self.ipc_endpoint.clone() { - return Some(Endpoint::new(ipc_endpoint)) - } - None + pub fn ipc_endpoint(&self) -> Option { + self.ipc_endpoint.clone() } } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 4b9159e2d0035..9c28353c9eca1 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ 
-172,7 +172,7 @@ use reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; use reth_ipc::server::IpcServer; pub use reth_ipc::server::{ - Builder as IpcServerBuilder, Endpoint, RpcServiceBuilder as IpcRpcServiceBuilder, + Builder as IpcServerBuilder, RpcServiceBuilder as IpcRpcServiceBuilder, }; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; use reth_provider::{ @@ -1459,7 +1459,7 @@ where /// /// Once the [RpcModule] is built via [RpcModuleBuilder] the servers can be started, See also /// [ServerBuilder::build] and [Server::start](jsonrpsee::server::Server::start). -#[derive(Default)] +#[derive(Default, Debug)] pub struct RpcServerConfig { /// Configs for JSON-RPC Http. http_server_config: Option>, @@ -1476,26 +1476,11 @@ pub struct RpcServerConfig { /// Configs for JSON-RPC IPC server ipc_server_config: Option>, /// The Endpoint where to launch the ipc server - ipc_endpoint: Option, + ipc_endpoint: Option, /// JWT secret for authentication jwt_secret: Option, } -impl fmt::Debug for RpcServerConfig { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RpcServerConfig") - .field("http_server_config", &self.http_server_config) - .field("http_cors_domains", &self.http_cors_domains) - .field("http_addr", &self.http_addr) - .field("ws_server_config", &self.ws_server_config) - .field("ws_addr", &self.ws_addr) - .field("ipc_server_config", &self.ipc_server_config) - .field("ipc_endpoint", &self.ipc_endpoint.as_ref().map(|endpoint| endpoint.path())) - .field("jwt_secret", &self.jwt_secret) - .finish() - } -} - /// === impl RpcServerConfig === impl RpcServerConfig { @@ -1599,7 +1584,7 @@ impl RpcServerConfig { /// /// Default is [DEFAULT_IPC_ENDPOINT] pub fn with_ipc_endpoint(mut self, path: impl Into) -> Self { - self.ipc_endpoint = Some(Endpoint::new(path.into())); + self.ipc_endpoint = Some(path.into()); self } @@ -1628,9 +1613,9 @@ impl RpcServerConfig { self.ws_addr } - /// Returns the [Endpoint] of the ipc server 
- pub fn ipc_endpoint(&self) -> Option<&Endpoint> { - self.ipc_endpoint.as_ref() + /// Returns the endpoint of the ipc server + pub fn ipc_endpoint(&self) -> Option { + self.ipc_endpoint.clone() } /// Convenience function to do [RpcServerConfig::build] and [RpcServer::start] in one step @@ -1759,12 +1744,10 @@ impl RpcServerConfig { if let Some(builder) = self.ipc_server_config { let metrics = modules.ipc.as_ref().map(RpcRequestMetrics::ipc).unwrap_or_default(); - let ipc_path = self - .ipc_endpoint - .unwrap_or_else(|| Endpoint::new(DEFAULT_IPC_ENDPOINT.to_string())); + let ipc_path = self.ipc_endpoint.unwrap_or_else(|| DEFAULT_IPC_ENDPOINT.into()); let ipc = builder .set_rpc_middleware(IpcRpcServiceBuilder::new().layer(metrics)) - .build(ipc_path.path()); + .build(ipc_path); server.ipc = Some(ipc); } @@ -2152,8 +2135,8 @@ impl RpcServer { self.ws_http.ws_local_addr } - /// Returns the [`Endpoint`] of the ipc server if started. - pub fn ipc_endpoint(&self) -> Option<&Endpoint> { + /// Returns the endpoint of the ipc server if started. + pub fn ipc_endpoint(&self) -> Option { self.ipc.as_ref().map(|ipc| ipc.endpoint()) } @@ -2161,7 +2144,7 @@ impl RpcServer { /// /// This returns an [RpcServerHandle] that's connected to the server task(s) until the server is /// stopped or the [RpcServerHandle] is dropped. 
- #[instrument(name = "start", skip_all, fields(http = ?self.http_local_addr(), ws = ?self.ws_local_addr(), ipc = ?self.ipc_endpoint().map(|ipc|ipc.path())), target = "rpc", level = "TRACE")] + #[instrument(name = "start", skip_all, fields(http = ?self.http_local_addr(), ws = ?self.ws_local_addr(), ipc = ?self.ipc_endpoint()), target = "rpc", level = "TRACE")] pub async fn start(self, modules: TransportRpcModules) -> Result { trace!(target: "rpc", "staring RPC server"); let Self { ws_http, ipc: ipc_server } = self; @@ -2183,7 +2166,7 @@ impl RpcServer { if let Some((server, module)) = ipc_server.and_then(|server| ipc.map(|module| (server, module))) { - handle.ipc_endpoint = Some(server.endpoint().path().to_string()); + handle.ipc_endpoint = Some(server.endpoint()); handle.ipc = Some(server.start(module).await?); } diff --git a/deny.toml b/deny.toml index 347b609651fca..61cced4fbed3a 100644 --- a/deny.toml +++ b/deny.toml @@ -58,6 +58,7 @@ exceptions = [ { allow = ["CC0-1.0"], name = "secp256k1-sys" }, { allow = ["CC0-1.0"], name = "tiny-keccak" }, { allow = ["CC0-1.0"], name = "more-asserts" }, + { allow = ["CC0-1.0"], name = "to_method" }, { allow = ["CC0-1.0"], name = "aurora-engine-modexp" }, # TODO: decide on MPL-2.0 handling # These dependencies are grandfathered in in https://github.com/paradigmxyz/reth/pull/6980 From 7f0e81e476b910b42997f9cf418ca995e0c3d841 Mon Sep 17 00:00:00 2001 From: Sean Matt Date: Fri, 26 Apr 2024 09:47:12 -0400 Subject: [PATCH 355/700] refactor: remove WsHttpServerKind enum and simplify server launch (#7531) Co-authored-by: Matthias Seitz --- crates/rpc/rpc-builder/src/cors.rs | 2 +- crates/rpc/rpc-builder/src/error.rs | 5 +- crates/rpc/rpc-builder/src/lib.rs | 237 ++++++++++------------------ 3 files changed, 91 insertions(+), 153 deletions(-) diff --git a/crates/rpc/rpc-builder/src/cors.rs b/crates/rpc/rpc-builder/src/cors.rs index 73e755f9fae67..46ff722ac25b5 100644 --- a/crates/rpc/rpc-builder/src/cors.rs +++ 
b/crates/rpc/rpc-builder/src/cors.rs @@ -3,7 +3,7 @@ use tower_http::cors::{AllowOrigin, Any, CorsLayer}; /// Error thrown when parsing cors domains went wrong #[derive(Debug, thiserror::Error)] -pub(crate) enum CorsDomainError { +pub enum CorsDomainError { #[error("{domain} is an invalid header value")] InvalidHeader { domain: String }, #[error("wildcard origin (`*`) cannot be passed as part of a list: {input}")] diff --git a/crates/rpc/rpc-builder/src/error.rs b/crates/rpc/rpc-builder/src/error.rs index fd59536f7abfc..68a2183fe41ab 100644 --- a/crates/rpc/rpc-builder/src/error.rs +++ b/crates/rpc/rpc-builder/src/error.rs @@ -1,4 +1,4 @@ -use crate::RethRpcModule; +use crate::{cors::CorsDomainError, RethRpcModule}; use reth_ipc::server::IpcServerStartError; use std::{io, io::ErrorKind, net::SocketAddr}; @@ -57,6 +57,9 @@ pub enum RpcError { /// IO error. error: io::Error, }, + /// Cors parsing error. + #[error(transparent)] + Cors(#[from] CorsDomainError), /// Http and WS server configured on the same port but with conflicting settings. 
#[error(transparent)] WsHttpSamePortError(#[from] WsHttpSamePortError), diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 9c28353c9eca1..4bd367060dbe4 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -156,8 +156,8 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use crate::{ - auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics, - RpcModuleSelection::Selection, + auth::AuthRpcModule, cors::CorsDomainError, error::WsHttpSamePortError, + metrics::RpcRequestMetrics, RpcModuleSelection::Selection, }; use constants::*; use error::{RpcError, ServerKind}; @@ -1623,6 +1623,16 @@ impl RpcServerConfig { self.build(&modules).await?.start(modules).await } + /// Creates the [CorsLayer] if any + fn maybe_cors_layer(cors: Option) -> Result, CorsDomainError> { + cors.as_deref().map(cors::create_cors_layer).transpose() + } + + /// Creates the [AuthLayer] if any + fn maybe_jwt_layer(&self) -> Option> { + self.jwt_secret.clone().map(|secret| AuthLayer::new(JwtAuthValidator::new(secret))) + } + /// Builds the ws and http server(s). /// /// If both are on the same port, they are combined into one server. 
@@ -1634,7 +1644,6 @@ impl RpcServerConfig { Ipv4Addr::LOCALHOST, DEFAULT_HTTP_RPC_PORT, ))); - let jwt_secret = self.jwt_secret.clone(); let ws_socket_addr = self .ws_addr @@ -1660,33 +1669,39 @@ impl RpcServerConfig { } .cloned(); - let secret = self.jwt_secret.clone(); - // we merge this into one server using the http setup self.ws_server_config.take(); modules.config.ensure_ws_http_identical()?; let builder = self.http_server_config.take().expect("http_server_config is Some"); - let (server, addr) = WsHttpServerKind::build( - builder, - http_socket_addr, - cors, - secret, - ServerKind::WsHttp(http_socket_addr), - modules - .http - .as_ref() - .or(modules.ws.as_ref()) - .map(RpcRequestMetrics::same_port) - .unwrap_or_default(), - ) - .await?; + let server = builder + .set_http_middleware( + tower::ServiceBuilder::new() + .option_layer(Self::maybe_cors_layer(cors)?) + .option_layer(self.maybe_jwt_layer()), + ) + .set_rpc_middleware( + RpcServiceBuilder::new().layer( + modules + .http + .as_ref() + .or(modules.ws.as_ref()) + .map(RpcRequestMetrics::same_port) + .unwrap_or_default(), + ), + ) + .build(http_socket_addr) + .await + .map_err(|err| RpcError::server_error(err, ServerKind::WsHttp(http_socket_addr)))?; + let addr = server + .local_addr() + .map_err(|err| RpcError::server_error(err, ServerKind::WsHttp(http_socket_addr)))?; return Ok(WsHttpServer { http_local_addr: Some(addr), ws_local_addr: Some(addr), server: WsHttpServers::SamePort(server), - jwt_secret, + jwt_secret: self.jwt_secret.clone(), }) } @@ -1696,32 +1711,48 @@ impl RpcServerConfig { let mut ws_local_addr = None; let mut ws_server = None; if let Some(builder) = self.ws_server_config.take() { - let builder = builder.ws_only(); - let (server, addr) = WsHttpServerKind::build( - builder, - ws_socket_addr, - self.ws_cors_domains.take(), - self.jwt_secret.clone(), - ServerKind::WS(ws_socket_addr), - modules.ws.as_ref().map(RpcRequestMetrics::ws).unwrap_or_default(), - ) - .await?; + let server = 
builder + .ws_only() + .set_http_middleware( + tower::ServiceBuilder::new() + .option_layer(Self::maybe_cors_layer(self.ws_cors_domains.clone())?) + .option_layer(self.maybe_jwt_layer()), + ) + .set_rpc_middleware( + RpcServiceBuilder::new() + .layer(modules.ws.as_ref().map(RpcRequestMetrics::ws).unwrap_or_default()), + ) + .build(ws_socket_addr) + .await + .map_err(|err| RpcError::server_error(err, ServerKind::WS(ws_socket_addr)))?; + let addr = server + .local_addr() + .map_err(|err| RpcError::server_error(err, ServerKind::WS(ws_socket_addr)))?; + ws_local_addr = Some(addr); ws_server = Some(server); } if let Some(builder) = self.http_server_config.take() { - let builder = builder.http_only(); - let (server, addr) = WsHttpServerKind::build( - builder, - http_socket_addr, - self.http_cors_domains.take(), - self.jwt_secret.clone(), - ServerKind::Http(http_socket_addr), - modules.http.as_ref().map(RpcRequestMetrics::http).unwrap_or_default(), - ) - .await?; - http_local_addr = Some(addr); + let server = builder + .http_only() + .set_http_middleware( + tower::ServiceBuilder::new() + .option_layer(Self::maybe_cors_layer(self.http_cors_domains.clone())?) 
+ .option_layer(self.maybe_jwt_layer()), + ) + .set_rpc_middleware( + RpcServiceBuilder::new().layer( + modules.http.as_ref().map(RpcRequestMetrics::http).unwrap_or_default(), + ), + ) + .build(http_socket_addr) + .await + .map_err(|err| RpcError::server_error(err, ServerKind::Http(http_socket_addr)))?; + let local_addr = server + .local_addr() + .map_err(|err| RpcError::server_error(err, ServerKind::Http(http_socket_addr)))?; + http_local_addr = Some(local_addr); http_server = Some(server); } @@ -1729,7 +1760,7 @@ impl RpcServerConfig { http_local_addr, ws_local_addr, server: WsHttpServers::DifferentPort { http: http_server, ws: ws_server }, - jwt_secret, + jwt_secret: self.jwt_secret.clone(), }) } @@ -1945,6 +1976,15 @@ struct WsHttpServer { jwt_secret: Option, } +// Define the type alias with detailed type complexity +type WsHttpServerKind = Server< + Stack< + tower::util::Either, Identity>, + Stack, Identity>, + >, + Stack, +>; + /// Enum for holding the http and ws servers in all possible combinations. 
enum WsHttpServers { /// Both servers are on the same port @@ -1966,13 +2006,13 @@ impl WsHttpServers { let mut http_handle = None; let mut ws_handle = None; match self { - WsHttpServers::SamePort(both) => { + WsHttpServers::SamePort(server) => { // Make sure http and ws modules are identical, since we currently can't run // different modules on same server config.ensure_ws_http_identical()?; if let Some(module) = http_module.or(ws_module) { - let handle = both.start(module).await; + let handle = server.start(module); http_handle = Some(handle.clone()); ws_handle = Some(handle); } @@ -1981,12 +2021,12 @@ impl WsHttpServers { if let Some((server, module)) = http.and_then(|server| http_module.map(|module| (server, module))) { - http_handle = Some(server.start(module).await); + http_handle = Some(server.start(module)); } if let Some((server, module)) = ws.and_then(|server| ws_module.map(|module| (server, module))) { - ws_handle = Some(server.start(module).await); + ws_handle = Some(server.start(module)); } } } @@ -2001,111 +2041,6 @@ impl Default for WsHttpServers { } } -/// Http Servers Enum -#[allow(clippy::type_complexity)] -enum WsHttpServerKind { - /// Http server - Plain(Server>), - /// Http server with cors - WithCors(Server, Stack>), - /// Http server with auth - WithAuth( - Server, Identity>, Stack>, - ), - /// Http server with cors and auth - WithCorsAuth( - Server< - Stack, Stack>, - Stack, - >, - ), -} - -// === impl WsHttpServerKind === - -impl WsHttpServerKind { - /// Starts the server and returns the handle - async fn start(self, module: RpcModule<()>) -> ServerHandle { - match self { - WsHttpServerKind::Plain(server) => server.start(module), - WsHttpServerKind::WithCors(server) => server.start(module), - WsHttpServerKind::WithAuth(server) => server.start(module), - WsHttpServerKind::WithCorsAuth(server) => server.start(module), - } - } - - /// Builds the server according to the given config parameters. 
- /// - /// Returns the address of the started server. - async fn build( - builder: ServerBuilder, - socket_addr: SocketAddr, - cors_domains: Option, - jwt_secret: Option, - server_kind: ServerKind, - metrics: RpcRequestMetrics, - ) -> Result<(Self, SocketAddr), RpcError> { - if let Some(cors) = cors_domains.as_deref().map(cors::create_cors_layer) { - let cors = cors.map_err(|err| RpcError::Custom(err.to_string()))?; - - if let Some(secret) = jwt_secret { - // stack cors and auth layers - let middleware = tower::ServiceBuilder::new() - .layer(cors) - .layer(AuthLayer::new(JwtAuthValidator::new(secret.clone()))); - - let server = builder - .set_http_middleware(middleware) - .set_rpc_middleware(RpcServiceBuilder::new().layer(metrics)) - .build(socket_addr) - .await - .map_err(|err| RpcError::server_error(err, server_kind))?; - let local_addr = - server.local_addr().map_err(|err| RpcError::server_error(err, server_kind))?; - let server = WsHttpServerKind::WithCorsAuth(server); - Ok((server, local_addr)) - } else { - let middleware = tower::ServiceBuilder::new().layer(cors); - let server = builder - .set_http_middleware(middleware) - .set_rpc_middleware(RpcServiceBuilder::new().layer(metrics)) - .build(socket_addr) - .await - .map_err(|err| RpcError::server_error(err, server_kind))?; - let local_addr = - server.local_addr().map_err(|err| RpcError::server_error(err, server_kind))?; - let server = WsHttpServerKind::WithCors(server); - Ok((server, local_addr)) - } - } else if let Some(secret) = jwt_secret { - // jwt auth layered service - let middleware = tower::ServiceBuilder::new() - .layer(AuthLayer::new(JwtAuthValidator::new(secret.clone()))); - let server = builder - .set_http_middleware(middleware) - .set_rpc_middleware(RpcServiceBuilder::new().layer(metrics)) - .build(socket_addr) - .await - .map_err(|err| RpcError::server_error(err, ServerKind::Auth(socket_addr)))?; - let local_addr = - server.local_addr().map_err(|err| RpcError::server_error(err, server_kind))?; 
- let server = WsHttpServerKind::WithAuth(server); - Ok((server, local_addr)) - } else { - // plain server without any middleware - let server = builder - .set_rpc_middleware(RpcServiceBuilder::new().layer(metrics)) - .build(socket_addr) - .await - .map_err(|err| RpcError::server_error(err, server_kind))?; - let local_addr = - server.local_addr().map_err(|err| RpcError::server_error(err, server_kind))?; - let server = WsHttpServerKind::Plain(server); - Ok((server, local_addr)) - } - } -} - /// Container type for each transport ie. http, ws, and ipc server pub struct RpcServer { /// Configured ws,http servers From 953ba043adca08c35b21c6383a690d9850944ca4 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Fri, 26 Apr 2024 17:04:06 +0200 Subject: [PATCH 356/700] chore: bump alloy 4e22b9e (#7895) --- Cargo.lock | 71 +++++++++++++------ Cargo.toml | 26 +++---- crates/e2e-test-utils/src/transaction.rs | 2 +- crates/primitives/src/withdrawal.rs | 8 +-- crates/rpc/rpc-api/src/eth.rs | 5 +- crates/rpc/rpc-builder/tests/it/http.rs | 2 +- .../rpc-types-compat/src/transaction/mod.rs | 2 +- crates/rpc/rpc/src/eth/api/call.rs | 6 +- crates/rpc/rpc/src/eth/api/server.rs | 17 ++--- crates/rpc/rpc/src/eth/api/transactions.rs | 20 ++---- crates/rpc/rpc/src/eth/revm_utils.rs | 8 ++- 11 files changed, 94 insertions(+), 73 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5a68ee77310f5..c046501c6b7b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -133,7 +133,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-eips", "alloy-primitives", @@ -166,7 +166,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source 
= "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -185,11 +185,12 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-primitives", "alloy-serde", "serde", + "serde_json", ] [[package]] @@ -207,7 +208,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-primitives", "serde", @@ -219,7 +220,7 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-consensus", "alloy-eips", @@ -227,6 +228,7 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types", "alloy-signer", + "alloy-sol-types", "async-trait", "futures-utils-wasm", "thiserror", @@ -235,7 +237,7 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -277,7 +279,7 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = 
"git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -327,7 +329,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -347,7 +349,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-consensus", "alloy-eips", @@ -369,7 +371,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-primitives", "alloy-serde", @@ -379,7 +381,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-consensus", "alloy-eips", @@ -390,6 +392,8 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", + "jsonwebtoken 9.3.0", + "rand 0.8.5", "serde", "thiserror", ] @@ -397,7 +401,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = 
"git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -409,7 +413,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-primitives", "serde", @@ -419,7 +423,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-primitives", "async-trait", @@ -432,7 +436,7 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-consensus", "alloy-network", @@ -507,7 +511,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-json-rpc", "base64 0.22.0", @@ -525,7 +529,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -3199,8 +3203,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] @@ -4341,13 +4347,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ "base64 0.21.7", - "pem", + "pem 1.1.1", "ring 0.16.20", "serde", "serde_json", "simple_asn1", ] +[[package]] +name = "jsonwebtoken" +version = "9.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" +dependencies = [ + "base64 0.21.7", + "js-sys", + "pem 3.0.4", + "ring 0.17.8", + "serde", + "serde_json", + "simple_asn1", +] + [[package]] name = "k256" version = "0.13.3" @@ -5413,6 +5434,16 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "pem" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" +dependencies = [ + "base64 0.22.0", + "serde", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -7459,7 +7490,7 @@ dependencies = [ "http-body 0.4.6", "hyper 0.14.28", "jsonrpsee", - "jsonwebtoken", + "jsonwebtoken 8.3.0", "metrics", "parking_lot 0.12.2", "pin-project", @@ -7849,7 +7880,7 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=dc614ee#dc614eec85ee4d4af938865b121fad58ec7dad5f" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=848d568#848d5688d0c499c538b9a78b423a7061525aa580" dependencies = [ "alloy-primitives", "alloy-rpc-types", diff --git a/Cargo.toml b/Cargo.toml index bd4fd5fd9aed0..b6246740552b4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -277,7 +277,7 @@ reth-optimism-consensus = { path = "crates/optimism/consensus" } # revm revm = { version = "8.0.0", features 
= ["std", "secp256k1"], default-features = false } revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "dc614ee" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "848d568" } # eth alloy-chains = "0.1.15" @@ -286,20 +286,20 @@ alloy-dyn-abi = "0.7.1" alloy-sol-types = "0.7.1" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "39b8695" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", 
rev = "39b8695" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "4e22b9e" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } # misc auto_impl = "1" diff --git a/crates/e2e-test-utils/src/transaction.rs b/crates/e2e-test-utils/src/transaction.rs index a2c40052c47d2..ea066304b35c6 100644 --- a/crates/e2e-test-utils/src/transaction.rs +++ b/crates/e2e-test-utils/src/transaction.rs @@ -69,7 +69,7 @@ fn tx(chain_id: u64, data: Option, nonce: u64) -> TransactionRequest { TransactionRequest { nonce: Some(nonce), value: Some(U256::from(100)), - to: Some(Address::random()), + to: Some(reth_primitives::TxKind::Call(Address::random())), gas: Some(210000), max_fee_per_gas: Some(20e9 as u128), max_priority_fee_per_gas: Some(20e9 as u128), diff --git a/crates/primitives/src/withdrawal.rs b/crates/primitives/src/withdrawal.rs index 730fb291c0430..a348b6a051c3e 100644 --- a/crates/primitives/src/withdrawal.rs +++ b/crates/primitives/src/withdrawal.rs @@ -1,4 +1,4 @@ -use crate::{constants::GWEI_TO_WEI, serde_helper::u64_hex, Address}; +use crate::{constants::GWEI_TO_WEI, serde_helper::u64_via_ruint, Address}; use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; use reth_codecs::{main_codec, Compact}; use std::{ @@ -11,15 +11,15 @@ use std::{ #[derive(Debug, Clone, PartialEq, Eq, Default, Hash, RlpEncodable, RlpDecodable)] pub struct Withdrawal { /// Monotonically increasing identifier issued by consensus layer. 
- #[serde(with = "u64_hex")] + #[serde(with = "u64_via_ruint")] pub index: u64, /// Index of validator associated with withdrawal. - #[serde(with = "u64_hex", rename = "validatorIndex")] + #[serde(with = "u64_via_ruint", rename = "validatorIndex")] pub validator_index: u64, /// Target address for withdrawn ether. pub address: Address, /// Value of the withdrawal in gwei. - #[serde(with = "u64_hex")] + #[serde(with = "u64_via_ruint")] pub amount: u64, } diff --git a/crates/rpc/rpc-api/src/eth.rs b/crates/rpc/rpc-api/src/eth.rs index c878a7e1c1505..8811ef87dd1d0 100644 --- a/crates/rpc/rpc-api/src/eth.rs +++ b/crates/rpc/rpc-api/src/eth.rs @@ -1,7 +1,6 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_primitives::{ - serde_helper::{num::U64HexOrNumber, JsonStorageKey}, - Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64, + serde_helper::JsonStorageKey, Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64, }; use reth_rpc_types::{ state::StateOverride, AccessListWithGasUsed, AnyTransactionReceipt, BlockOverrides, Bundle, @@ -247,7 +246,7 @@ pub trait EthApi { #[method(name = "feeHistory")] async fn fee_history( &self, - block_count: U64HexOrNumber, + block_count: u64, newest_block: BlockNumberOrTag, reward_percentiles: Option>, ) -> RpcResult; diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 7fc714a2da635..42fecb87d0755 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -167,7 +167,7 @@ where EthApiClient::block_number(client).await.unwrap(); EthApiClient::get_code(client, address, None).await.unwrap(); EthApiClient::send_raw_transaction(client, tx).await.unwrap(); - EthApiClient::fee_history(client, 0.into(), block_number, None).await.unwrap(); + EthApiClient::fee_history(client, 0, block_number, None).await.unwrap(); EthApiClient::balance(client, address, None).await.unwrap(); EthApiClient::transaction_count(client, address, 
None).await.unwrap(); EthApiClient::storage_at(client, address, U256::default().into(), None).await.unwrap(); diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index 1004e93e25f80..d0f4672a29f73 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -181,7 +181,7 @@ pub fn from_primitive_access_list( /// Convert [TransactionSignedEcRecovered] to [TransactionRequest] pub fn transaction_to_call_request(tx: TransactionSignedEcRecovered) -> TransactionRequest { let from = tx.signer(); - let to = tx.transaction.to(); + let to = Some(tx.transaction.to().into()); let gas = tx.transaction.gas_limit(); let value = tx.transaction.value(); let input = tx.transaction.input().clone(); diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index 62be2612c4092..d556249c3ffb5 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ b/crates/rpc/rpc/src/eth/api/call.rs @@ -14,7 +14,9 @@ use crate::{ }; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_primitives::{revm::env::tx_env_with_recovered, BlockId, BlockNumberOrTag, Bytes, U256}; +use reth_primitives::{ + revm::env::tx_env_with_recovered, BlockId, BlockNumberOrTag, Bytes, TxKind, U256, +}; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProvider, StateProviderFactory, }; @@ -411,7 +413,7 @@ where } let from = request.from.unwrap_or_default(); - let to = if let Some(to) = request.to { + let to = if let Some(TxKind::Call(to)) = request.to { to } else { let nonce = db.basic_ref(from)?.unwrap_or_default().nonce; diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index 6be1a88af81e6..2648df08f7aad 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -8,8 +8,7 @@ use tracing::trace; use reth_evm::ConfigureEvm; use 
reth_network_api::NetworkInfo; use reth_primitives::{ - serde_helper::{num::U64HexOrNumber, JsonStorageKey}, - Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64, + serde_helper::JsonStorageKey, Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64, }; use reth_provider::{ BlockIdReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, @@ -353,14 +352,12 @@ where /// Handler for: `eth_feeHistory` async fn fee_history( &self, - block_count: U64HexOrNumber, + block_count: u64, newest_block: BlockNumberOrTag, reward_percentiles: Option>, ) -> Result { trace!(target: "rpc::eth", ?block_count, ?newest_block, ?reward_percentiles, "Serving eth_feeHistory"); - return Ok( - EthApi::fee_history(self, block_count.to(), newest_block, reward_percentiles).await? - ) + return Ok(EthApi::fee_history(self, block_count, newest_block, reward_percentiles).await?) } /// Handler for: `eth_mining` @@ -585,7 +582,7 @@ mod tests { async fn test_fee_history_empty() { let response = as EthApiServer>::fee_history( &build_test_eth_api(NoopProvider::default()), - 1.into(), + 1, BlockNumberOrTag::Latest, None, ) @@ -607,7 +604,7 @@ mod tests { let response = as EthApiServer>::fee_history( ð_api, - (newest_block + 1).into(), + newest_block + 1, newest_block.into(), Some(vec![10.0]), ) @@ -630,7 +627,7 @@ mod tests { let response = as EthApiServer>::fee_history( ð_api, - 1.into(), + 1, (newest_block + 1000).into(), Some(vec![10.0]), ) @@ -653,7 +650,7 @@ mod tests { let response = as EthApiServer>::fee_history( ð_api, - 0.into(), + 0, newest_block.into(), None, ) diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 1ca8ed1195cf9..3e582821b5b55 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -919,10 +919,7 @@ where gas_limit: U256::from(gas.unwrap_or_default()), value: value.unwrap_or_default(), input: data.into_input().unwrap_or_default(), - 
kind: match to { - Some(to) => RpcTransactionKind::Call(to), - None => RpcTransactionKind::Create, - }, + kind: to.unwrap_or(RpcTransactionKind::Create), chain_id: None, })) } @@ -935,10 +932,7 @@ where gas_limit: U256::from(gas.unwrap_or_default()), value: value.unwrap_or_default(), input: data.into_input().unwrap_or_default(), - kind: match to { - Some(to) => RpcTransactionKind::Call(to), - None => RpcTransactionKind::Create, - }, + kind: to.unwrap_or(RpcTransactionKind::Create), chain_id: 0, access_list, })) @@ -958,10 +952,7 @@ where gas_limit: U256::from(gas.unwrap_or_default()), value: value.unwrap_or_default(), input: data.into_input().unwrap_or_default(), - kind: match to { - Some(to) => RpcTransactionKind::Call(to), - None => RpcTransactionKind::Create, - }, + kind: to.unwrap_or(RpcTransactionKind::Create), chain_id: 0, access_list: access_list.unwrap_or_default(), })) @@ -987,10 +978,7 @@ where gas_limit: U256::from(gas.unwrap_or_default()), value: value.unwrap_or_default(), input: data.into_input().unwrap_or_default(), - kind: match to { - Some(to) => RpcTransactionKind::Call(to), - None => RpcTransactionKind::Create, - }, + kind: to.unwrap_or(RpcTransactionKind::Create), access_list: access_list.unwrap_or_default(), // eip-4844 specific. 
diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index 4b00d4662cb9f..c80aee99d5c4f 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -7,7 +7,7 @@ use reth_primitives::revm::env::fill_op_tx_env; use reth_primitives::revm::env::fill_tx_env; use reth_primitives::{ revm::env::fill_tx_env_with_recovered, Address, TransactionSigned, - TransactionSignedEcRecovered, TxHash, B256, U256, + TransactionSignedEcRecovered, TxHash, TxKind, B256, U256, }; use reth_rpc_types::{ state::{AccountOverride, StateOverride}, @@ -250,13 +250,17 @@ pub(crate) fn create_txn_env( )?; let gas_limit = gas.unwrap_or_else(|| block_env.gas_limit.min(U256::from(u64::MAX)).to()); + let transact_to = match to { + Some(TxKind::Call(to)) => TransactTo::call(to), + _ => TransactTo::create(), + }; let env = TxEnv { gas_limit: gas_limit.try_into().map_err(|_| RpcInvalidTransactionError::GasUintOverflow)?, nonce, caller: from.unwrap_or_default(), gas_price, gas_priority_fee: max_priority_fee_per_gas, - transact_to: to.map(TransactTo::Call).unwrap_or_else(TransactTo::create), + transact_to, value: value.unwrap_or_default(), data: input.try_into_unique_input()?.unwrap_or_default(), chain_id, From 51bdc6afe8aea36726ba173e1741255475d1b1be Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 26 Apr 2024 16:14:35 +0100 Subject: [PATCH 357/700] fix(exex): skipping logic of the notifications (#7919) --- crates/exex/src/manager.rs | 59 ++++++++++++++++++++++++++------------ 1 file changed, 40 insertions(+), 19 deletions(-) diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 1c9eaf9ef3096..81e523718cf8b 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -84,31 +84,53 @@ impl ExExHandle { fn send( &mut self, cx: &mut Context<'_>, - (event_id, notification): &(usize, ExExNotification), + (notification_id, notification): &(usize, ExExNotification), ) -> Poll>> { - // check that 
this notification is above the finished height of the exex if the exex has set - // one if let Some(finished_height) = self.finished_height { match notification { - ExExNotification::ChainCommitted { new } | - ExExNotification::ChainReorged { old: _, new } - if finished_height >= new.tip().number => - { - self.next_notification_id = event_id + 1; - return Poll::Ready(Ok(())) + ExExNotification::ChainCommitted { new } => { + // Skip the chain commit notification if the finished height of the ExEx is + // higher than or equal to the tip of the new notification. + // I.e., the ExEx has already processed the notification. + if finished_height >= new.tip().number { + debug!( + exex_id = %self.id, + %notification_id, + %finished_height, + new_tip = %new.tip().number, + "Skipping notification" + ); + + self.next_notification_id = notification_id + 1; + return Poll::Ready(Ok(())) + } } - _ => (), + // Do not handle [ExExNotification::ChainReorged] and + // [ExExNotification::ChainReverted] cases and always send the + // notification, because the ExEx should be aware of the reorgs and reverts lower + // than its finished height + ExExNotification::ChainReorged { .. } | ExExNotification::ChainReverted { .. 
} => {} } } + debug!( + exex_id = %self.id, + %notification_id, + "Reserving slot for notification" + ); match self.sender.poll_reserve(cx) { Poll::Ready(Ok(())) => (), other => return other, } + debug!( + exex_id = %self.id, + %notification_id, + "Sending notification" + ); match self.sender.send_item(notification.clone()) { Ok(()) => { - self.next_notification_id = event_id + 1; + self.next_notification_id = notification_id + 1; self.metrics.notifications_sent_total.increment(1); Poll::Ready(Ok(())) } @@ -263,7 +285,11 @@ impl Future for ExExManager { // drain handle notifications while self.buffer.len() < self.max_capacity { if let Poll::Ready(Some(notification)) = self.handle_rx.poll_recv(cx) { - debug!("received new notification"); + debug!( + committed_tip = ?notification.committed_chain().map(|chain| chain.tip().number), + reverted_tip = ?notification.reverted_chain().map(|chain| chain.tip().number), + "Received new notification" + ); self.push_notification(notification); continue } @@ -285,11 +311,6 @@ impl Future for ExExManager { .checked_sub(self.min_id) .expect("exex expected notification ID outside the manager's range"); if let Some(notification) = self.buffer.get(notification_index) { - debug!( - exex.id, - notification_id = exex.next_notification_id, - "sent notification to exex" - ); if let Poll::Ready(Err(err)) = exex.send(cx, notification) { // the channel was closed, which is irrecoverable for the manager return Poll::Ready(Err(err.into())) @@ -300,9 +321,9 @@ impl Future for ExExManager { } // remove processed buffered notifications + debug!(%min_id, "Updating lowest notification id in buffer"); self.buffer.retain(|&(id, _)| id >= min_id); self.min_id = min_id; - debug!(min_id, "lowest notification id in buffer updated"); // update capacity self.update_capacity(); @@ -310,7 +331,7 @@ impl Future for ExExManager { // handle incoming exex events for exex in self.exex_handles.iter_mut() { while let Poll::Ready(Some(event)) = 
exex.receiver.poll_recv(cx) { - debug!(?event, id = exex.id, "received event from exex"); + debug!(exex_id = exex.id, ?event, "Received event from exex"); exex.metrics.events_sent_total.increment(1); match event { ExExEvent::FinishedHeight(height) => exex.finished_height = Some(height), From 704b3e3ac496d7ae72a964e269bc1ee1bb7a809e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 26 Apr 2024 18:39:35 +0200 Subject: [PATCH 358/700] chore(sync): add block number to body validation error (#7918) Co-authored-by: Oliver Nordbjerg Co-authored-by: Matthias Seitz --- crates/interfaces/src/p2p/error.rs | 6 ++++-- crates/net/downloaders/src/bodies/request.rs | 7 ++++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/crates/interfaces/src/p2p/error.rs b/crates/interfaces/src/p2p/error.rs index 6d822f44c8c08..f63f8879ad44e 100644 --- a/crates/interfaces/src/p2p/error.rs +++ b/crates/interfaces/src/p2p/error.rs @@ -158,10 +158,12 @@ pub enum DownloadError { /* ==================== BODIES ERRORS ==================== */ /// Block validation failed - #[error("failed to validate body for header {hash}: {error}")] + #[error("failed to validate body for header {hash}, block number {number}: {error}")] BodyValidation { - /// Hash of header failing validation + /// Hash of the block failing validation hash: B256, + /// Number of the block failing validation + number: u64, /// The details of validation failure #[source] error: Box, diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index d6da2444c49db..032fb3ebc91f0 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -184,8 +184,13 @@ where if let Err(error) = self.consensus.validate_block(&block) { // Body is invalid, put the header back and return an error let hash = block.hash(); + let number = block.number; self.pending_headers.push_front(block.header); - return Err(DownloadError::BodyValidation { 
hash, error: Box::new(error) }) + return Err(DownloadError::BodyValidation { + hash, + number, + error: Box::new(error), + }) } self.buffer.push(BlockResponse::Full(block)); From ffa36b7348b2e5f4bdb09804961eda597131b91d Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 26 Apr 2024 20:46:44 +0200 Subject: [PATCH 359/700] use default implementation for `BlockId` (#7917) Co-authored-by: Matthias Seitz --- crates/rpc/rpc/src/debug.rs | 4 ++-- crates/rpc/rpc/src/eth/api/call.rs | 17 +++++------------ crates/rpc/rpc/src/eth/api/server.rs | 8 +------- crates/rpc/rpc/src/eth/api/state.rs | 2 +- crates/rpc/rpc/src/trace.rs | 8 ++------ 5 files changed, 11 insertions(+), 28 deletions(-) diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index b212d1636df6d..e47ccc46612b0 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -267,7 +267,7 @@ where block_id: Option, opts: GethDebugTracingCallOptions, ) -> EthResult { - let at = block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let at = block_id.unwrap_or_default(); let GethDebugTracingCallOptions { tracing_options, state_overrides, block_overrides } = opts; let overrides = EvmOverrides::new(state_overrides, block_overrides.map(Box::new)); @@ -420,7 +420,7 @@ where let StateContext { transaction_index, block_number } = state_context.unwrap_or_default(); let transaction_index = transaction_index.unwrap_or_default(); - let target_block = block_number.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let target_block = block_number.unwrap_or_default(); let ((cfg, mut block_env, _), block) = futures::try_join!( self.inner.eth_api.evm_env_at(target_block), self.inner.eth_api.block_by_id_with_senders(target_block), diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index d556249c3ffb5..7066f73729823 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ 
b/crates/rpc/rpc/src/eth/api/call.rs @@ -14,9 +14,7 @@ use crate::{ }; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_primitives::{ - revm::env::tx_env_with_recovered, BlockId, BlockNumberOrTag, Bytes, TxKind, U256, -}; +use reth_primitives::{revm::env::tx_env_with_recovered, BlockId, Bytes, TxKind, U256}; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProvider, StateProviderFactory, }; @@ -73,13 +71,8 @@ where block_number: Option, overrides: EvmOverrides, ) -> EthResult { - let (res, _env) = self - .transact_call_at( - request, - block_number.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)), - overrides, - ) - .await?; + let (res, _env) = + self.transact_call_at(request, block_number.unwrap_or_default(), overrides).await?; ensure_success(res.result) } @@ -100,7 +93,7 @@ where let StateContext { transaction_index, block_number } = state_context.unwrap_or_default(); let transaction_index = transaction_index.unwrap_or_default(); - let target_block = block_number.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let target_block = block_number.unwrap_or_default(); let is_block_target_pending = target_block.is_pending(); let ((cfg, block_env, _), block) = futures::try_join!( @@ -390,7 +383,7 @@ where mut request: TransactionRequest, at: Option, ) -> EthResult { - let block_id = at.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let block_id = at.unwrap_or_default(); let (cfg, block, at) = self.evm_env_at(block_id).await?; let state = self.state_at(at)?; diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index 2648df08f7aad..c2be79a10e5be 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -314,13 +314,7 @@ where state_override: Option, ) -> Result { trace!(target: "rpc::eth", ?request, ?block_number, "Serving eth_estimateGas"); - Ok(self - .estimate_gas_at( - request, - 
block_number.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)), - state_override, - ) - .await?) + Ok(self.estimate_gas_at(request, block_number.unwrap_or_default(), state_override).await?) } /// Handler for: `eth_gasPrice` diff --git a/crates/rpc/rpc/src/eth/api/state.rs b/crates/rpc/rpc/src/eth/api/state.rs index f739c4a75fa70..7f0bdd4e2f701 100644 --- a/crates/rpc/rpc/src/eth/api/state.rs +++ b/crates/rpc/rpc/src/eth/api/state.rs @@ -84,7 +84,7 @@ where block_id: Option, ) -> EthResult { let chain_info = self.provider().chain_info()?; - let block_id = block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let block_id = block_id.unwrap_or_default(); // if we are trying to create a proof for the latest block, but have a BlockId as input // that is not BlockNumberOrTag::Latest, then we need to figure out whether or not the diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index ade8291c3bd24..0479190367b9e 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -78,7 +78,7 @@ where { /// Executes the given call and returns a number of possible traces for it. 
pub async fn trace_call(&self, trace_request: TraceCallRequest) -> EthResult { - let at = trace_request.block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let at = trace_request.block_id.unwrap_or_default(); let config = TracingInspectorConfig::from_parity_config(&trace_request.trace_types); let overrides = EvmOverrides::new(trace_request.state_overrides, trace_request.block_overrides); @@ -106,11 +106,7 @@ where ) -> EthResult { let tx = recover_raw_transaction(tx)?; - let (cfg, block, at) = self - .inner - .eth_api - .evm_env_at(block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest))) - .await?; + let (cfg, block, at) = self.inner.eth_api.evm_env_at(block_id.unwrap_or_default()).await?; let tx = tx_env_with_recovered(&tx.into_ecrecovered_transaction()); let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block, tx); From 2f052a81120507dc90de4352cb1dca38de86836a Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Sat, 27 Apr 2024 11:47:34 +0800 Subject: [PATCH 360/700] feat: replace low level IPC with interprocess (#7922) --- Cargo.lock | 1 - crates/rpc/ipc/Cargo.toml | 3 - crates/rpc/ipc/src/client/mod.rs | 93 +++++++++++++++++++++++++------ crates/rpc/ipc/src/client/unix.rs | 82 --------------------------- crates/rpc/ipc/src/client/win.rs | 82 --------------------------- 5 files changed, 77 insertions(+), 184 deletions(-) delete mode 100644 crates/rpc/ipc/src/client/unix.rs delete mode 100644 crates/rpc/ipc/src/client/win.rs diff --git a/Cargo.lock b/Cargo.lock index c046501c6b7b6..c66304e91e7e1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6915,7 +6915,6 @@ dependencies = [ "tokio-util", "tower", "tracing", - "windows-sys 0.52.0", ] [[package]] diff --git a/crates/rpc/ipc/Cargo.toml b/crates/rpc/ipc/Cargo.toml index 094fa5759e17b..af6e64db19c38 100644 --- a/crates/rpc/ipc/Cargo.toml +++ b/crates/rpc/ipc/Cargo.toml @@ -31,9 +31,6 @@ thiserror.workspace = true futures-util = "0.3.30" interprocess = { version = "1.2.1", features = ["tokio_support"] 
} -[target.'cfg(windows)'.dependencies] -windows-sys = { version = "0.52.0", features = ["Win32_Foundation"] } - [dev-dependencies] tokio-stream = { workspace = true, features = ["sync"] } reth-tracing.workspace = true diff --git a/crates/rpc/ipc/src/client/mod.rs b/crates/rpc/ipc/src/client/mod.rs index 8ca4b54066523..05ea7ed589d5b 100644 --- a/crates/rpc/ipc/src/client/mod.rs +++ b/crates/rpc/ipc/src/client/mod.rs @@ -1,24 +1,85 @@ //! [`jsonrpsee`] transport adapter implementation for IPC. -use std::{ - io, - path::{Path, PathBuf}, -}; - +use crate::stream_codec::StreamCodec; +use futures::StreamExt; +use interprocess::local_socket::tokio::{LocalSocketStream, OwnedReadHalf, OwnedWriteHalf}; use jsonrpsee::{ async_client::{Client, ClientBuilder}, - core::client::{TransportReceiverT, TransportSenderT}, + core::client::{ReceivedMessage, TransportReceiverT, TransportSenderT}, +}; +use std::io; +use tokio::io::AsyncWriteExt; +use tokio_util::{ + codec::FramedRead, + compat::{Compat, FuturesAsyncReadCompatExt, FuturesAsyncWriteCompatExt}, }; -#[cfg(unix)] -use crate::client::unix::IpcTransportClientBuilder; -#[cfg(windows)] -use crate::client::win::IpcTransportClientBuilder; +/// Sending end of IPC transport. +#[derive(Debug)] +pub(crate) struct Sender { + inner: Compat, +} + +#[async_trait::async_trait] +impl TransportSenderT for Sender { + type Error = IpcError; + + /// Sends out a request. Returns a Future that finishes when the request has been successfully + /// sent. + async fn send(&mut self, msg: String) -> Result<(), Self::Error> { + Ok(self.inner.write_all(msg.as_bytes()).await?) + } + + async fn send_ping(&mut self) -> Result<(), Self::Error> { + tracing::trace!("send ping - not implemented"); + Err(IpcError::NotSupported) + } + + /// Close the connection. + async fn close(&mut self) -> Result<(), Self::Error> { + Ok(()) + } +} + +/// Receiving end of IPC transport. 
+#[derive(Debug)] +pub(crate) struct Receiver { + pub(crate) inner: FramedRead, StreamCodec>, +} + +#[async_trait::async_trait] +impl TransportReceiverT for Receiver { + type Error = IpcError; + + /// Returns a Future resolving when the server sent us something back. + async fn receive(&mut self) -> Result { + self.inner.next().await.map_or(Err(IpcError::Closed), |val| Ok(ReceivedMessage::Text(val?))) + } +} + +/// Builder for IPC transport [`Sender`] and [`Receiver`] pair. +#[derive(Debug, Clone, Default)] +#[non_exhaustive] +pub(crate) struct IpcTransportClientBuilder; + +impl IpcTransportClientBuilder { + pub(crate) async fn build( + self, + endpoint: impl AsRef, + ) -> Result<(Sender, Receiver), IpcError> { + let endpoint = endpoint.as_ref().to_string(); + let conn = LocalSocketStream::connect(endpoint.clone()) + .await + .map_err(|err| IpcError::FailedToConnect { path: endpoint, err })?; -#[cfg(unix)] -mod unix; -#[cfg(windows)] -mod win; + let (rhlf, whlf) = conn.into_split(); + + Ok(( + Sender { inner: whlf.compat_write() }, + Receiver { inner: FramedRead::new(rhlf.compat(), StreamCodec::stream_incoming()) }, + )) + } +} /// Builder type for [`Client`] #[derive(Clone, Default, Debug)] @@ -37,7 +98,7 @@ impl IpcClientBuilder { /// # Ok(()) /// # } /// ``` - pub async fn build(self, path: impl AsRef) -> Result { + pub async fn build(self, path: impl AsRef) -> Result { let (tx, rx) = IpcTransportClientBuilder::default().build(path).await?; Ok(self.build_with_tokio(tx, rx)) } @@ -66,7 +127,7 @@ pub enum IpcError { FailedToConnect { /// The path of the socket. #[doc(hidden)] - path: PathBuf, + path: String, /// The error occurred while connecting. #[doc(hidden)] err: io::Error, diff --git a/crates/rpc/ipc/src/client/unix.rs b/crates/rpc/ipc/src/client/unix.rs deleted file mode 100644 index c7ed7bc7a6269..0000000000000 --- a/crates/rpc/ipc/src/client/unix.rs +++ /dev/null @@ -1,82 +0,0 @@ -//! 
[`jsonrpsee`] transport adapter implementation for Unix IPC by using Unix Sockets. - -use crate::{client::IpcError, stream_codec::StreamCodec}; -use futures::StreamExt; -use jsonrpsee::core::client::{ReceivedMessage, TransportReceiverT, TransportSenderT}; -use std::path::Path; -use tokio::{ - io::AsyncWriteExt, - net::{ - unix::{OwnedReadHalf, OwnedWriteHalf}, - UnixStream, - }, -}; -use tokio_util::codec::FramedRead; - -/// Sending end of IPC transport. -#[derive(Debug)] -pub(crate) struct Sender { - inner: OwnedWriteHalf, -} - -#[async_trait::async_trait] -impl TransportSenderT for Sender { - type Error = IpcError; - - /// Sends out a request. Returns a Future that finishes when the request has been successfully - /// sent. - async fn send(&mut self, msg: String) -> Result<(), Self::Error> { - Ok(self.inner.write_all(msg.as_bytes()).await?) - } - - async fn send_ping(&mut self) -> Result<(), Self::Error> { - tracing::trace!("send ping - not implemented"); - Err(IpcError::NotSupported) - } - - /// Close the connection. - async fn close(&mut self) -> Result<(), Self::Error> { - Ok(()) - } -} - -/// Receiving end of IPC transport. -#[derive(Debug)] -pub(crate) struct Receiver { - pub(crate) inner: FramedRead, -} - -#[async_trait::async_trait] -impl TransportReceiverT for Receiver { - type Error = IpcError; - - /// Returns a Future resolving when the server sent us something back. - async fn receive(&mut self) -> Result { - self.inner.next().await.map_or(Err(IpcError::Closed), |val| Ok(ReceivedMessage::Text(val?))) - } -} - -/// Builder for IPC transport [`Sender`] and [`Receiver`] pair. 
-#[derive(Debug, Clone, Default)] -#[non_exhaustive] -pub(crate) struct IpcTransportClientBuilder; - -impl IpcTransportClientBuilder { - pub(crate) async fn build( - self, - path: impl AsRef, - ) -> Result<(Sender, Receiver), IpcError> { - let path = path.as_ref(); - - let stream = UnixStream::connect(path) - .await - .map_err(|err| IpcError::FailedToConnect { path: path.to_path_buf(), err })?; - - let (rhlf, whlf) = stream.into_split(); - - Ok(( - Sender { inner: whlf }, - Receiver { inner: FramedRead::new(rhlf, StreamCodec::stream_incoming()) }, - )) - } -} diff --git a/crates/rpc/ipc/src/client/win.rs b/crates/rpc/ipc/src/client/win.rs deleted file mode 100644 index 69b3140fef1de..0000000000000 --- a/crates/rpc/ipc/src/client/win.rs +++ /dev/null @@ -1,82 +0,0 @@ -//! [`jsonrpsee`] transport adapter implementation for Windows IPC by using NamedPipes. - -use crate::{client::IpcError, stream_codec::StreamCodec}; -use jsonrpsee::core::client::{ReceivedMessage, TransportReceiverT, TransportSenderT}; -use std::{path::Path, sync::Arc}; -use tokio::{ - io::AsyncWriteExt, - net::windows::named_pipe::{ClientOptions, NamedPipeClient}, - time, - time::Duration, -}; -use tokio_stream::StreamExt; -use tokio_util::codec::FramedRead; -use windows_sys::Win32::Foundation::ERROR_PIPE_BUSY; - -/// Sending end of IPC transport. -#[derive(Debug)] -pub struct Sender { - inner: Arc, -} - -#[async_trait::async_trait] -impl TransportSenderT for Sender { - type Error = IpcError; - - /// Sends out a request. Returns a Future that finishes when the request has been successfully - /// sent. - async fn send(&mut self, msg: String) -> Result<(), Self::Error> { - Ok(self.inner.write_all(msg.as_bytes()).await?) - } - - async fn send_ping(&mut self) -> Result<(), Self::Error> { - tracing::trace!("send ping - not implemented"); - Err(IpcError::NotSupported) - } - - /// Close the connection. 
- async fn close(&mut self) -> Result<(), Self::Error> { - Ok(()) - } -} - -/// Receiving end of IPC transport. -#[derive(Debug)] -pub struct Receiver { - inner: FramedRead, StreamCodec>, -} - -#[async_trait::async_trait] -impl TransportReceiverT for Receiver { - type Error = IpcError; - - /// Returns a Future resolving when the server sent us something back. - async fn receive(&mut self) -> Result { - self.inner.next().await.map_or(Err(IpcError::Closed), |val| Ok(ReceivedMessage::Text(val?))) - } -} - -/// Builder for IPC transport [`crate::client::win::Sender`] and [`crate::client::win::Receiver`] -/// pair. -#[derive(Debug, Clone, Default)] -#[non_exhaustive] -pub struct IpcTransportClientBuilder; - -impl IpcTransportClientBuilder { - pub async fn build(self, path: impl AsRef) -> Result<(Sender, Receiver), IpcError> { - let addr = path.as_ref().as_os_str(); - let client = loop { - match ClientOptions::new().open(addr) { - Ok(client) => break client, - Err(e) if e.raw_os_error() == Some(ERROR_PIPE_BUSY as i32) => (), - Err(e) => return IpcError::FailedToConnect { path: path.to_path_buf(), err: e }, - } - time::sleep(Duration::from_mills(50)).await; - }; - let client = Arc::new(client); - Ok(( - Sender { inner: client.clone() }, - Receiver { inner: FramedRead::new(client, StreamCodec::stream_incoming()) }, - )) - } -} From 6bdba8a2a6c22c2fc8762c8075cd7ebd5ec1bb5d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 27 Apr 2024 06:25:09 +0200 Subject: [PATCH 361/700] chore: misc clippy fixes (#7926) --- crates/optimism/consensus/src/lib.rs | 1 - crates/optimism/evm/src/execute.rs | 7 +++---- crates/rpc/ipc/src/server/mod.rs | 2 +- crates/stages/src/stages/headers.rs | 2 +- 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 9a905adfaf836..4deea2879624f 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -5,7 +5,6 @@ 
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] // The `optimism` feature must be enabled to use this crate. #![cfg(feature = "optimism")] diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 5b7d797da9395..c56c7622e622b 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -541,12 +541,11 @@ mod tests { b256, Account, Address, Block, ChainSpecBuilder, Signature, StorageKey, StorageValue, Transaction, TransactionSigned, TxEip1559, TxKind, BASE_MAINNET, }; - use reth_revm::{database::StateProviderDatabase, L1_BLOCK_CONTRACT}; + use reth_revm::{ + database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT, + }; use std::{collections::HashMap, str::FromStr}; - use crate::OptimismEvmConfig; - use reth_revm::test_utils::StateProviderTest; - fn create_op_state_provider() -> StateProviderTest { let mut db = StateProviderTest::default(); diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index c876457e1f184..7239249e1c194 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -788,7 +788,7 @@ pub fn dummy_endpoint() -> String { mod tests { use super::*; use crate::client::IpcClientBuilder; - use futures::future::{select, Either}; + use futures::future::select; use jsonrpsee::{ core::{ client, diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index fd14841284ec2..548048dd713a6 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -387,7 +387,7 @@ mod tests { use reth_primitives::{ stage::StageUnitCheckpoint, BlockBody, SealedBlock, SealedBlockWithSenders, B256, }; - use reth_provider::{BlockHashReader, BlockWriter, 
BundleStateWithReceipts, ProviderFactory}; + use reth_provider::{BlockWriter, BundleStateWithReceipts, ProviderFactory}; use reth_trie::{updates::TrieUpdates, HashedPostState}; use test_runner::HeadersTestRunner; From cc4a418ddf73ae5af240b403e4b6e1d191130cee Mon Sep 17 00:00:00 2001 From: Andrzej Sulkowski <111314156+andrzejSulkowski@users.noreply.github.com> Date: Sat, 27 Apr 2024 06:18:59 +0200 Subject: [PATCH 362/700] refactor: extract peer types to net/types (#7912) Co-authored-by: Matthias Seitz --- Cargo.lock | 30 ++- Cargo.toml | 4 +- crates/consensus/auto-seal/Cargo.toml | 1 + crates/consensus/auto-seal/src/client.rs | 5 +- crates/interfaces/Cargo.toml | 1 + crates/interfaces/src/p2p/download.rs | 2 +- crates/interfaces/src/p2p/either.rs | 2 +- crates/interfaces/src/p2p/error.rs | 5 +- crates/interfaces/src/p2p/full_block.rs | 3 +- crates/interfaces/src/test_utils/bodies.rs | 2 +- .../interfaces/src/test_utils/full_block.rs | 5 +- crates/interfaces/src/test_utils/headers.rs | 3 +- crates/net/common/Cargo.toml | 2 +- crates/net/common/src/ban_list.rs | 2 +- crates/net/discv4/Cargo.toml | 1 + crates/net/discv4/src/lib.rs | 6 +- crates/net/discv4/src/node.rs | 3 +- crates/net/discv4/src/proto.rs | 5 +- crates/net/discv4/src/table.rs | 2 +- crates/net/discv4/src/test_utils.rs | 3 +- crates/net/discv5/Cargo.toml | 1 + crates/net/discv5/src/enr.rs | 2 +- crates/net/discv5/src/lib.rs | 3 +- crates/net/dns/Cargo.toml | 1 + crates/net/dns/src/lib.rs | 3 +- crates/net/downloaders/Cargo.toml | 1 + crates/net/downloaders/src/bodies/request.rs | 5 +- crates/net/downloaders/src/file_client.rs | 3 +- .../src/headers/reverse_headers.rs | 4 +- .../src/test_utils/bodies_client.rs | 3 +- crates/net/ecies/Cargo.toml | 1 + crates/net/ecies/src/algorithm.rs | 3 +- crates/net/ecies/src/stream.rs | 2 +- crates/net/eth-wire/Cargo.toml | 1 + crates/net/eth-wire/src/ethstream.rs | 3 +- crates/net/eth-wire/src/hello.rs | 9 +- crates/net/eth-wire/src/muxdemux.rs | 3 +- 
crates/net/eth-wire/src/test_utils.rs | 3 +- crates/net/network-api/Cargo.toml | 1 + crates/net/network-api/src/lib.rs | 3 +- crates/net/network-api/src/noop.rs | 3 +- crates/net/network/Cargo.toml | 1 + crates/net/network/src/config.rs | 4 +- crates/net/network/src/discovery.rs | 3 +- crates/net/network/src/eth_requests.rs | 3 +- crates/net/network/src/fetch/client.rs | 3 +- crates/net/network/src/fetch/mod.rs | 3 +- crates/net/network/src/import.rs | 2 +- crates/net/network/src/manager.rs | 3 +- crates/net/network/src/message.rs | 3 +- crates/net/network/src/network.rs | 3 +- crates/net/network/src/peers/manager.rs | 6 +- crates/net/network/src/session/active.rs | 5 +- crates/net/network/src/session/handle.rs | 2 +- crates/net/network/src/session/mod.rs | 3 +- crates/net/network/src/state.rs | 6 +- crates/net/network/src/swarm.rs | 2 +- crates/net/network/src/test_utils/init.rs | 2 +- crates/net/network/src/test_utils/testnet.rs | 3 +- .../net/network/src/transactions/fetcher.rs | 3 +- crates/net/network/src/transactions/mod.rs | 4 +- crates/net/types/Cargo.toml | 27 ++ crates/net/types/src/lib.rs | 239 ++++++++++++++++++ crates/primitives/Cargo.toml | 2 - crates/primitives/src/lib.rs | 2 - crates/rpc/rpc-api/Cargo.toml | 1 + crates/rpc/rpc-api/src/admin.rs | 3 +- crates/rpc/rpc/Cargo.toml | 1 + crates/rpc/rpc/src/admin.rs | 3 +- crates/stages/Cargo.toml | 1 + crates/stages/src/lib.rs | 3 +- crates/transaction-pool/Cargo.toml | 1 + crates/transaction-pool/src/traits.rs | 8 +- examples/manual-p2p/Cargo.toml | 11 +- examples/manual-p2p/src/main.rs | 3 +- 75 files changed, 428 insertions(+), 86 deletions(-) create mode 100644 crates/net/types/Cargo.toml create mode 100644 crates/net/types/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index c66304e91e7e1..a40fb4513bc63 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4741,6 +4741,7 @@ dependencies = [ "reth-ecies", "reth-eth-wire", "reth-network", + "reth-network-types", "reth-primitives", "secp256k1", "tokio", @@ 
-6323,6 +6324,7 @@ dependencies = [ "reth-engine-primitives", "reth-evm", "reth-interfaces", + "reth-network-types", "reth-primitives", "reth-provider", "reth-revm", @@ -6553,6 +6555,7 @@ dependencies = [ "rand 0.8.5", "reth-net-common", "reth-net-nat", + "reth-network-types", "reth-primitives", "reth-tracing", "secp256k1", @@ -6578,6 +6581,7 @@ dependencies = [ "multiaddr", "rand 0.8.5", "reth-metrics", + "reth-network-types", "reth-primitives", "reth-tracing", "secp256k1", @@ -6597,6 +6601,7 @@ dependencies = [ "parking_lot 0.12.2", "rand 0.8.5", "reth-net-common", + "reth-network-types", "reth-primitives", "reth-tracing", "schnellru", @@ -6628,6 +6633,7 @@ dependencies = [ "reth-db", "reth-interfaces", "reth-metrics", + "reth-network-types", "reth-primitives", "reth-provider", "reth-tasks", @@ -6689,6 +6695,7 @@ dependencies = [ "pin-project", "rand 0.8.5", "reth-net-common", + "reth-network-types", "reth-primitives", "secp256k1", "sha2 0.10.8", @@ -6732,6 +6739,7 @@ dependencies = [ "reth-eth-wire-types", "reth-metrics", "reth-net-common", + "reth-network-types", "reth-primitives", "reth-tracing", "secp256k1", @@ -6888,6 +6896,7 @@ dependencies = [ "reth-consensus", "reth-eth-wire-types", "reth-network-api", + "reth-network-types", "reth-primitives", "secp256k1", "thiserror", @@ -6978,7 +6987,7 @@ name = "reth-net-common" version = "0.2.0-beta.6" dependencies = [ "pin-project", - "reth-primitives", + "reth-network-types", "tokio", ] @@ -7031,6 +7040,7 @@ dependencies = [ "reth-net-common", "reth-network", "reth-network-api", + "reth-network-types", "reth-primitives", "reth-provider", "reth-rpc-types", @@ -7059,6 +7069,7 @@ dependencies = [ "enr", "reth-discv4", "reth-eth-wire", + "reth-network-types", "reth-primitives", "reth-rpc-types", "serde", @@ -7066,6 +7077,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "reth-network-types" +version = "0.2.0-beta.6" +dependencies = [ + "enr", + "reth-primitives", + "reth-rpc-types", + "secp256k1", + "serde_with", 
+] + [[package]] name = "reth-nippy-jar" version = "0.2.0-beta.6" @@ -7369,7 +7391,6 @@ dependencies = [ "clap", "criterion", "derive_more", - "enr", "hash-db", "itertools 0.12.1", "modular-bitfield", @@ -7390,7 +7411,6 @@ dependencies = [ "secp256k1", "serde", "serde_json", - "serde_with", "sha2 0.10.8", "strum 0.26.2", "sucds", @@ -7500,6 +7520,7 @@ dependencies = [ "reth-interfaces", "reth-metrics", "reth-network-api", + "reth-network-types", "reth-primitives", "reth-provider", "reth-revm", @@ -7531,6 +7552,7 @@ version = "0.2.0-beta.6" dependencies = [ "jsonrpsee", "reth-engine-primitives", + "reth-network-types", "reth-primitives", "reth-rpc-types", "serde", @@ -7679,6 +7701,7 @@ dependencies = [ "reth-evm-ethereum", "reth-exex", "reth-interfaces", + "reth-network-types", "reth-primitives", "reth-provider", "reth-revm", @@ -7794,6 +7817,7 @@ dependencies = [ "rand 0.8.5", "reth-eth-wire", "reth-metrics", + "reth-network-types", "reth-primitives", "reth-provider", "reth-revm", diff --git a/Cargo.toml b/Cargo.toml index b6246740552b4..d56392c1d3aa5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,6 +29,7 @@ members = [ "crates/net/nat/", "crates/net/network/", "crates/net/network-api/", + "crates/net/types/", "crates/payload/basic/", "crates/payload/builder/", "crates/payload/ethereum/", @@ -223,7 +224,6 @@ reth-engine-primitives = { path = "crates/engine-primitives" } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } reth-node-builder = { path = "crates/node-builder" } reth-node-ethereum = { path = "crates/node-ethereum" } -reth-node-events = { path = "crates/node/events" } reth-node-optimism = { path = "crates/optimism/node" } reth-evm-optimism = { path = "crates/optimism/evm" } reth-node-core = { path = "crates/node-core" } @@ -249,6 +249,7 @@ reth-net-common = { path = "crates/net/common" } reth-net-nat = { path = "crates/net/nat" } reth-network = { path = "crates/net/network" } reth-network-api = { path = 
"crates/net/network-api" } +reth-network-types = { path = "crates/net/types" } reth-nippy-jar = { path = "crates/storage/nippy-jar" } reth-payload-builder = { path = "crates/payload/builder" } reth-payload-validator = { path = "crates/payload/validator" } @@ -273,6 +274,7 @@ reth-transaction-pool = { path = "crates/transaction-pool" } reth-trie = { path = "crates/trie" } reth-trie-parallel = { path = "crates/trie-parallel" } reth-optimism-consensus = { path = "crates/optimism/consensus" } +reth-node-events = { path = "crates/node/events" } # revm revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml index ec96426a40217..435ade53db32c 100644 --- a/crates/consensus/auto-seal/Cargo.toml +++ b/crates/consensus/auto-seal/Cargo.toml @@ -24,6 +24,7 @@ reth-evm.workspace = true reth-engine-primitives.workspace = true reth-consensus.workspace = true reth-rpc-types.workspace = true +reth-network-types.workspace = true # async futures-util.workspace = true diff --git a/crates/consensus/auto-seal/src/client.rs b/crates/consensus/auto-seal/src/client.rs index 7ed69c2899610..67a84d5d9ebdb 100644 --- a/crates/consensus/auto-seal/src/client.rs +++ b/crates/consensus/auto-seal/src/client.rs @@ -7,9 +7,8 @@ use reth_interfaces::p2p::{ headers::client::{HeadersClient, HeadersFut, HeadersRequest}, priority::Priority, }; -use reth_primitives::{ - BlockBody, BlockHashOrNumber, Header, HeadersDirection, PeerId, WithPeerId, B256, -}; +use reth_network_types::{PeerId, WithPeerId}; +use reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection, B256}; use std::fmt::Debug; use tracing::{trace, warn}; diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index 6c066593b1004..c2e276a3359e7 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -15,6 +15,7 @@ reth-primitives.workspace = true 
reth-network-api.workspace = true reth-eth-wire-types.workspace = true reth-consensus.workspace = true +reth-network-types.workspace = true # async futures.workspace = true diff --git a/crates/interfaces/src/p2p/download.rs b/crates/interfaces/src/p2p/download.rs index b9fb6ab3e037d..823860507ae58 100644 --- a/crates/interfaces/src/p2p/download.rs +++ b/crates/interfaces/src/p2p/download.rs @@ -1,4 +1,4 @@ -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::fmt::Debug; /// Generic download client for peer penalization diff --git a/crates/interfaces/src/p2p/either.rs b/crates/interfaces/src/p2p/either.rs index 1a6bd170c2c31..af7f150189941 100644 --- a/crates/interfaces/src/p2p/either.rs +++ b/crates/interfaces/src/p2p/either.rs @@ -22,7 +22,7 @@ where A: DownloadClient, B: DownloadClient, { - fn report_bad_message(&self, peer_id: reth_primitives::PeerId) { + fn report_bad_message(&self, peer_id: reth_network_types::PeerId) { match self { EitherDownloader::Left(a) => a.report_bad_message(peer_id), EitherDownloader::Right(b) => b.report_bad_message(peer_id), diff --git a/crates/interfaces/src/p2p/error.rs b/crates/interfaces/src/p2p/error.rs index f63f8879ad44e..1a847b6494920 100644 --- a/crates/interfaces/src/p2p/error.rs +++ b/crates/interfaces/src/p2p/error.rs @@ -2,8 +2,9 @@ use super::headers::client::HeadersRequest; use crate::{db::DatabaseError, provider::ProviderError}; use reth_consensus::ConsensusError; use reth_network_api::ReputationChangeKind; +use reth_network_types::WithPeerId; use reth_primitives::{ - BlockHashOrNumber, BlockNumber, GotExpected, GotExpectedBoxed, Header, WithPeerId, B256, + BlockHashOrNumber, BlockNumber, GotExpected, GotExpectedBoxed, Header, B256, }; use std::ops::RangeInclusive; use thiserror::Error; @@ -12,7 +13,7 @@ use tokio::sync::{mpsc, oneshot}; /// Result alias for result of a request. 
pub type RequestResult = Result; -/// Result with [PeerId][reth_primitives::PeerId] +/// Result with [PeerId][reth_network_types::PeerId] pub type PeerRequestResult = RequestResult>; /// Helper trait used to validate responses. diff --git a/crates/interfaces/src/p2p/full_block.rs b/crates/interfaces/src/p2p/full_block.rs index 6cf3f2c81e064..dd8cfff4d4cf9 100644 --- a/crates/interfaces/src/p2p/full_block.rs +++ b/crates/interfaces/src/p2p/full_block.rs @@ -6,8 +6,9 @@ use crate::p2p::{ }; use futures::Stream; use reth_consensus::{Consensus, ConsensusError}; +use reth_network_types::WithPeerId; use reth_primitives::{ - BlockBody, GotExpected, Header, HeadersDirection, SealedBlock, SealedHeader, WithPeerId, B256, + BlockBody, GotExpected, Header, HeadersDirection, SealedBlock, SealedHeader, B256, }; use std::{ cmp::Reverse, diff --git a/crates/interfaces/src/test_utils/bodies.rs b/crates/interfaces/src/test_utils/bodies.rs index e1d42a2a57f4f..8f0bfcef09fec 100644 --- a/crates/interfaces/src/test_utils/bodies.rs +++ b/crates/interfaces/src/test_utils/bodies.rs @@ -22,7 +22,7 @@ impl Debug for TestBodiesClient { } impl DownloadClient for TestBodiesClient { - fn report_bad_message(&self, _peer_id: reth_primitives::PeerId) { + fn report_bad_message(&self, _peer_id: reth_network_types::PeerId) { // noop } diff --git a/crates/interfaces/src/test_utils/full_block.rs b/crates/interfaces/src/test_utils/full_block.rs index a9710491992b1..95c1c2b3a0fbc 100644 --- a/crates/interfaces/src/test_utils/full_block.rs +++ b/crates/interfaces/src/test_utils/full_block.rs @@ -6,9 +6,10 @@ use crate::p2p::{ priority::Priority, }; use parking_lot::Mutex; +use reth_network_types::{PeerId, WithPeerId}; use reth_primitives::{ - BlockBody, BlockHashOrNumber, BlockNumHash, Header, HeadersDirection, PeerId, SealedBlock, - SealedHeader, WithPeerId, B256, + BlockBody, BlockHashOrNumber, BlockNumHash, Header, HeadersDirection, SealedBlock, + SealedHeader, B256, }; use std::{collections::HashMap, 
sync::Arc}; diff --git a/crates/interfaces/src/test_utils/headers.rs b/crates/interfaces/src/test_utils/headers.rs index 304f394c896e6..0272c68d3048f 100644 --- a/crates/interfaces/src/test_utils/headers.rs +++ b/crates/interfaces/src/test_utils/headers.rs @@ -24,7 +24,8 @@ use crate::p2p::{ priority::Priority, }; use reth_consensus::{test_utils::TestConsensus, Consensus}; -use reth_primitives::{Header, HeadersDirection, PeerId, SealedHeader, WithPeerId}; +use reth_network_types::{PeerId, WithPeerId}; +use reth_primitives::{Header, HeadersDirection, SealedHeader}; /// A test downloader which just returns the values that have been pushed to it. #[derive(Debug)] diff --git a/crates/net/common/Cargo.toml b/crates/net/common/Cargo.toml index 8d85fc9067b8c..0c3b253a50ad0 100644 --- a/crates/net/common/Cargo.toml +++ b/crates/net/common/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true +reth-network-types.workspace = true # async pin-project.workspace = true diff --git a/crates/net/common/src/ban_list.rs b/crates/net/common/src/ban_list.rs index 0527c86200baa..11d4c6049b40f 100644 --- a/crates/net/common/src/ban_list.rs +++ b/crates/net/common/src/ban_list.rs @@ -1,6 +1,6 @@ //! Support for banning peers. -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::{collections::HashMap, net::IpAddr, time::Instant}; /// Determines whether or not the IP is globally routable. 
diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index 9a7cb943d1f01..bd7e99ee6d062 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -16,6 +16,7 @@ workspace = true reth-primitives.workspace = true reth-net-common.workspace = true reth-net-nat.workspace = true +reth-network-types.workspace = true # ethereum alloy-rlp = { workspace = true, features = ["derive"] } diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 071b81df94b70..061e4a33b0100 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -39,7 +39,8 @@ use discv5::{ use enr::Enr; use parking_lot::Mutex; use proto::{EnrRequest, EnrResponse}; -use reth_primitives::{bytes::Bytes, hex, ForkId, PeerId, B256}; +use reth_network_types::PeerId; +use reth_primitives::{bytes::Bytes, hex, ForkId, B256}; use secp256k1::SecretKey; use std::{ cell::RefCell, @@ -210,7 +211,8 @@ impl Discv4 { /// # use std::io; /// use rand::thread_rng; /// use reth_discv4::{Discv4, Discv4Config}; - /// use reth_primitives::{pk2id, NodeRecord, PeerId}; + /// use reth_network_types::{pk2id, PeerId}; + /// use reth_primitives::NodeRecord; /// use secp256k1::SECP256K1; /// use std::{net::SocketAddr, str::FromStr}; /// # async fn t() -> io::Result<()> { diff --git a/crates/net/discv4/src/node.rs b/crates/net/discv4/src/node.rs index 2e8dc1773a6ec..62e45db0e1d69 100644 --- a/crates/net/discv4/src/node.rs +++ b/crates/net/discv4/src/node.rs @@ -1,5 +1,6 @@ use generic_array::GenericArray; -use reth_primitives::{keccak256, NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{keccak256, NodeRecord}; /// The key type for the table. #[derive(Debug, Copy, Clone, Eq, PartialEq)] diff --git a/crates/net/discv4/src/proto.rs b/crates/net/discv4/src/proto.rs index 059ecc5bb73e0..da84dc05aa243 100644 --- a/crates/net/discv4/src/proto.rs +++ b/crates/net/discv4/src/proto.rs @@ -1,11 +1,12 @@ //! 
Discovery v4 protocol implementation. -use crate::{error::DecodePacketError, PeerId, MAX_PACKET_SIZE, MIN_PACKET_SIZE}; +use crate::{error::DecodePacketError, MAX_PACKET_SIZE, MIN_PACKET_SIZE}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header, RlpDecodable, RlpEncodable}; use enr::Enr; +use reth_network_types::{pk2id, PeerId}; use reth_primitives::{ bytes::{Buf, BufMut, Bytes, BytesMut}, - keccak256, pk2id, EnrForkIdEntry, ForkId, NodeRecord, B256, + keccak256, EnrForkIdEntry, ForkId, NodeRecord, B256, }; use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, diff --git a/crates/net/discv4/src/table.rs b/crates/net/discv4/src/table.rs index c7d75778ca75a..00e1fe50c29e9 100644 --- a/crates/net/discv4/src/table.rs +++ b/crates/net/discv4/src/table.rs @@ -1,6 +1,6 @@ //! Additional support for tracking nodes. -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::{collections::HashMap, net::IpAddr, time::Instant}; /// Keeps track of nodes from which we have received a `Pong` message. 
diff --git a/crates/net/discv4/src/test_utils.rs b/crates/net/discv4/src/test_utils.rs index ccd4f9a039608..dae3ea388d787 100644 --- a/crates/net/discv4/src/test_utils.rs +++ b/crates/net/discv4/src/test_utils.rs @@ -6,7 +6,8 @@ use crate::{ IngressReceiver, PeerId, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS, }; use rand::{thread_rng, Rng, RngCore}; -use reth_primitives::{hex, pk2id, ForkHash, ForkId, NodeRecord, B256}; +use reth_network_types::pk2id; +use reth_primitives::{hex, ForkHash, ForkId, NodeRecord, B256}; use secp256k1::{SecretKey, SECP256K1}; use std::{ collections::{HashMap, HashSet}, diff --git a/crates/net/discv5/Cargo.toml b/crates/net/discv5/Cargo.toml index 705ea17a8fbb0..a73888ae0ea2c 100644 --- a/crates/net/discv5/Cargo.toml +++ b/crates/net/discv5/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-primitives.workspace = true reth-metrics.workspace = true +reth-network-types.workspace = true # ethereum alloy-rlp.workspace = true diff --git a/crates/net/discv5/src/enr.rs b/crates/net/discv5/src/enr.rs index 088baf18e8ab5..162370bb4cf60 100644 --- a/crates/net/discv5/src/enr.rs +++ b/crates/net/discv5/src/enr.rs @@ -3,7 +3,7 @@ use discv5::enr::{CombinedPublicKey, EnrPublicKey, NodeId}; use enr::Enr; -use reth_primitives::{id2pk, pk2id, PeerId}; +use reth_network_types::{id2pk, pk2id, PeerId}; use secp256k1::{PublicKey, SecretKey}; /// Extracts a [`CombinedPublicKey::Secp256k1`] from a [`discv5::Enr`] and converts it to a diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 14414abf7416a..5275956bfa54c 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -22,7 +22,8 @@ use enr::{discv4_id_to_discv5_id, EnrCombinedKeyWrapper}; use futures::future::join_all; use itertools::Itertools; use rand::{Rng, RngCore}; -use reth_primitives::{bytes::Bytes, EnrForkIdEntry, ForkId, NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{bytes::Bytes, EnrForkIdEntry, ForkId, NodeRecord}; use 
secp256k1::SecretKey; use tokio::{sync::mpsc, task}; use tracing::{debug, error, trace}; diff --git a/crates/net/dns/Cargo.toml b/crates/net/dns/Cargo.toml index 18d7bf81519b4..8076bd4e1e2dc 100644 --- a/crates/net/dns/Cargo.toml +++ b/crates/net/dns/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-primitives.workspace = true reth-net-common.workspace = true +reth-network-types.workspace = true # ethereum secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] } diff --git a/crates/net/dns/src/lib.rs b/crates/net/dns/src/lib.rs index b72a45b31fdca..e5ddc0fd1851b 100644 --- a/crates/net/dns/src/lib.rs +++ b/crates/net/dns/src/lib.rs @@ -22,7 +22,8 @@ use crate::{ pub use config::DnsDiscoveryConfig; use enr::Enr; use error::ParseDnsEntryError; -use reth_primitives::{pk2id, EnrForkIdEntry, ForkId, NodeRecord}; +use reth_network_types::pk2id; +use reth_primitives::{EnrForkIdEntry, ForkId, NodeRecord}; use schnellru::{ByLength, LruMap}; use secp256k1::SecretKey; use std::{ diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index f1f14c85c01a4..353956d3bd9b9 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -19,6 +19,7 @@ reth-tasks.workspace = true reth-provider.workspace = true reth-config.workspace = true reth-consensus.workspace = true +reth-network-types.workspace = true # async futures.workspace = true diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index 032fb3ebc91f0..dfe877a0b917d 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -6,9 +6,8 @@ use reth_interfaces::p2p::{ error::{DownloadError, DownloadResult}, priority::Priority, }; -use reth_primitives::{ - BlockBody, GotExpected, PeerId, SealedBlock, SealedHeader, WithPeerId, B256, -}; +use reth_network_types::{PeerId, WithPeerId}; +use reth_primitives::{BlockBody, 
GotExpected, SealedBlock, SealedHeader, B256}; use std::{ collections::VecDeque, mem, diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index ce8f3898bc8fd..362ed3c402483 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -7,9 +7,10 @@ use reth_interfaces::p2p::{ headers::client::{HeadersClient, HeadersFut, HeadersRequest}, priority::Priority, }; +use reth_network_types::PeerId; use reth_primitives::{ BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, BytesMut, Header, HeadersDirection, - PeerId, SealedHeader, B256, + SealedHeader, B256, }; use std::{collections::HashMap, path::Path}; use thiserror::Error; diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 5c12a161a00a3..273f97e589b47 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -16,9 +16,9 @@ use reth_interfaces::p2p::{ }, priority::Priority, }; +use reth_network_types::PeerId; use reth_primitives::{ - BlockHashOrNumber, BlockNumber, GotExpected, Header, HeadersDirection, PeerId, SealedHeader, - B256, + BlockHashOrNumber, BlockNumber, GotExpected, Header, HeadersDirection, SealedHeader, B256, }; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ diff --git a/crates/net/downloaders/src/test_utils/bodies_client.rs b/crates/net/downloaders/src/test_utils/bodies_client.rs index 2f3cf2f293fb8..a7387fa88f22d 100644 --- a/crates/net/downloaders/src/test_utils/bodies_client.rs +++ b/crates/net/downloaders/src/test_utils/bodies_client.rs @@ -3,7 +3,8 @@ use reth_interfaces::p2p::{ download::DownloadClient, priority::Priority, }; -use reth_primitives::{BlockBody, PeerId, B256}; +use reth_network_types::PeerId; +use reth_primitives::{BlockBody, B256}; use std::{ collections::HashMap, fmt::Debug, diff --git a/crates/net/ecies/Cargo.toml 
b/crates/net/ecies/Cargo.toml index 461aad885d972..d4a4de32aceed 100644 --- a/crates/net/ecies/Cargo.toml +++ b/crates/net/ecies/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] reth-primitives.workspace = true reth-net-common.workspace = true +reth-network-types.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } futures.workspace = true diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index bd1eb1d328f82..52398de4fe42e 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -13,9 +13,10 @@ use ctr::Ctr64BE; use digest::{crypto_common::KeyIvInit, Digest}; use educe::Educe; use rand::{thread_rng, Rng}; +use reth_network_types::{id2pk, pk2id}; use reth_primitives::{ bytes::{BufMut, Bytes, BytesMut}, - id2pk, pk2id, B128, B256, B512 as PeerId, + B128, B256, B512 as PeerId, }; use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, diff --git a/crates/net/ecies/src/stream.rs b/crates/net/ecies/src/stream.rs index 47518aa2575ce..4538fc059d462 100644 --- a/crates/net/ecies/src/stream.rs +++ b/crates/net/ecies/src/stream.rs @@ -175,7 +175,7 @@ where #[cfg(test)] mod tests { use super::*; - use reth_primitives::pk2id; + use reth_network_types::pk2id; use secp256k1::SECP256K1; use tokio::net::{TcpListener, TcpStream}; diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index cddc84cf9d77c..0cfdfef244fd6 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -19,6 +19,7 @@ reth-ecies.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } reth-discv4.workspace = true reth-eth-wire-types.workspace = true +reth-network-types.workspace = true # metrics reth-metrics.workspace = true diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index cbd1e3150430c..8de5090347b0d 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -354,7 
+354,8 @@ mod tests { use futures::{SinkExt, StreamExt}; use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_ecies::stream::ECIESStream; - use reth_primitives::{pk2id, ForkFilter, Head, NamedChain, B256, U256}; + use reth_network_types::pk2id; + use reth_primitives::{ForkFilter, Head, NamedChain, B256, U256}; use secp256k1::{SecretKey, SECP256K1}; use std::time::Duration; use tokio::net::{TcpListener, TcpStream}; diff --git a/crates/net/eth-wire/src/hello.rs b/crates/net/eth-wire/src/hello.rs index 6ca8d9d99d806..f953c4aaedb08 100644 --- a/crates/net/eth-wire/src/hello.rs +++ b/crates/net/eth-wire/src/hello.rs @@ -2,7 +2,8 @@ use crate::{capability::Capability, EthVersion, ProtocolVersion}; use alloy_rlp::{RlpDecodable, RlpEncodable}; use reth_codecs::derive_arbitrary; use reth_discv4::DEFAULT_DISCOVERY_PORT; -use reth_primitives::{constants::RETH_CLIENT_VERSION, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::constants::RETH_CLIENT_VERSION; use crate::protocol::Protocol; #[cfg(feature = "serde")] @@ -38,7 +39,7 @@ impl HelloMessageWithProtocols { /// /// ``` /// use reth_eth_wire::HelloMessageWithProtocols; - /// use reth_primitives::pk2id; + /// use reth_network_types::pk2id; /// use secp256k1::{SecretKey, SECP256K1}; /// let secret_key = SecretKey::new(&mut rand::thread_rng()); /// let id = pk2id(&secret_key.public_key(SECP256K1)); @@ -120,7 +121,7 @@ impl HelloMessage { /// /// ``` /// use reth_eth_wire::HelloMessage; - /// use reth_primitives::pk2id; + /// use reth_network_types::pk2id; /// use secp256k1::{SecretKey, SECP256K1}; /// let secret_key = SecretKey::new(&mut rand::thread_rng()); /// let id = pk2id(&secret_key.public_key(SECP256K1)); @@ -209,7 +210,7 @@ impl HelloMessageBuilder { mod tests { use alloy_rlp::{Decodable, Encodable, EMPTY_STRING_CODE}; use reth_discv4::DEFAULT_DISCOVERY_PORT; - use reth_primitives::pk2id; + use reth_network_types::pk2id; use secp256k1::{SecretKey, SECP256K1}; use crate::{ diff --git 
a/crates/net/eth-wire/src/muxdemux.rs b/crates/net/eth-wire/src/muxdemux.rs index 3aa7bc1dd6f9f..a9bbe2fdb4326 100644 --- a/crates/net/eth-wire/src/muxdemux.rs +++ b/crates/net/eth-wire/src/muxdemux.rs @@ -357,9 +357,10 @@ mod tests { UnauthedEthStream, UnauthedP2PStream, }; use futures::{Future, SinkExt, StreamExt}; + use reth_network_types::pk2id; use reth_primitives::{ bytes::{BufMut, Bytes, BytesMut}, - pk2id, ForkFilter, Hardfork, MAINNET, + ForkFilter, Hardfork, MAINNET, }; use secp256k1::{SecretKey, SECP256K1}; use std::{net::SocketAddr, pin::Pin}; diff --git a/crates/net/eth-wire/src/test_utils.rs b/crates/net/eth-wire/src/test_utils.rs index 1708e1ffa4414..0783e4dad0afe 100644 --- a/crates/net/eth-wire/src/test_utils.rs +++ b/crates/net/eth-wire/src/test_utils.rs @@ -4,7 +4,8 @@ use crate::{ EthVersion, HelloMessageWithProtocols, P2PStream, ProtocolVersion, Status, UnauthedP2PStream, }; use reth_discv4::DEFAULT_DISCOVERY_PORT; -use reth_primitives::{pk2id, Chain, ForkFilter, Head, B256, U256}; +use reth_network_types::pk2id; +use reth_primitives::{Chain, ForkFilter, Head, B256, U256}; use secp256k1::{SecretKey, SECP256K1}; use std::net::SocketAddr; use tokio::net::TcpStream; diff --git a/crates/net/network-api/Cargo.toml b/crates/net/network-api/Cargo.toml index dcf4089cd4914..81536aad985ed 100644 --- a/crates/net/network-api/Cargo.toml +++ b/crates/net/network-api/Cargo.toml @@ -17,6 +17,7 @@ reth-primitives.workspace = true reth-eth-wire.workspace = true reth-rpc-types.workspace = true reth-discv4.workspace = true +reth-network-types.workspace = true # eth enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index 0c43273cdb4e0..6c3040bd9b82f 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -14,7 +14,8 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use reth_eth_wire::{DisconnectReason, 
EthVersion, Status}; -use reth_primitives::{NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::NodeRecord; use std::{future::Future, net::SocketAddr, sync::Arc, time::Instant}; pub use error::NetworkError; diff --git a/crates/net/network-api/src/noop.rs b/crates/net/network-api/src/noop.rs index b6a0fa846de4a..2ace603e348e0 100644 --- a/crates/net/network-api/src/noop.rs +++ b/crates/net/network-api/src/noop.rs @@ -10,7 +10,8 @@ use crate::{ use enr::{secp256k1::SecretKey, Enr}; use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_eth_wire::{DisconnectReason, ProtocolVersion}; -use reth_primitives::{Chain, NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{Chain, NodeRecord}; use reth_rpc_types::{admin::EthProtocolInfo, NetworkStatus}; use std::net::{IpAddr, SocketAddr}; diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index c06ff15182447..aa6da6ea29138 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -28,6 +28,7 @@ reth-provider.workspace = true reth-rpc-types.workspace = true reth-tokio-util.workspace = true reth-consensus.workspace = true +reth-network-types.workspace = true # ethereum enr = { workspace = true, features = ["serde", "rust-secp256k1"] } diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 3e89a1f3ae81e..463bde78d9ff0 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -12,9 +12,9 @@ use reth_discv4::{Discv4Config, Discv4ConfigBuilder, DEFAULT_DISCOVERY_ADDRESS}; use reth_discv5::network_key; use reth_dns_discovery::DnsDiscoveryConfig; use reth_eth_wire::{HelloMessage, HelloMessageWithProtocols, Status}; +use reth_network_types::{pk2id, PeerId}; use reth_primitives::{ - mainnet_nodes, pk2id, sepolia_nodes, ChainSpec, ForkFilter, Head, NamedChain, NodeRecord, - PeerId, MAINNET, + mainnet_nodes, sepolia_nodes, ChainSpec, ForkFilter, Head, NamedChain, NodeRecord, MAINNET, }; 
use reth_provider::{BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index d95f2f9575b73..67d6594547add 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -12,7 +12,8 @@ use reth_discv5::{DiscoveredPeer, Discv5}; use reth_dns_discovery::{ DnsDiscoveryConfig, DnsDiscoveryHandle, DnsDiscoveryService, DnsNodeRecordUpdate, DnsResolver, }; -use reth_primitives::{EnrForkIdEntry, ForkId, NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{EnrForkIdEntry, ForkId, NodeRecord}; use secp256k1::SecretKey; use std::{ collections::VecDeque, diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index baa636b935c41..57e83391dee90 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -11,7 +11,8 @@ use reth_eth_wire::{ Receipts, }; use reth_interfaces::p2p::error::RequestResult; -use reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection}; use reth_provider::{BlockReader, HeaderProvider, ReceiptProvider}; use std::{ future::Future, diff --git a/crates/net/network/src/fetch/client.rs b/crates/net/network/src/fetch/client.rs index eab4745065eaa..63e22abe00fa0 100644 --- a/crates/net/network/src/fetch/client.rs +++ b/crates/net/network/src/fetch/client.rs @@ -11,7 +11,8 @@ use reth_interfaces::p2p::{ priority::Priority, }; use reth_network_api::ReputationChangeKind; -use reth_primitives::{Header, PeerId, B256}; +use reth_network_types::PeerId; +use reth_primitives::{Header, B256}; use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index 1f85f242da188..3a529c97e22ee 100644 --- 
a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -9,7 +9,8 @@ use reth_interfaces::p2p::{ priority::Priority, }; use reth_network_api::ReputationChangeKind; -use reth_primitives::{BlockBody, Header, PeerId, B256}; +use reth_network_types::PeerId; +use reth_primitives::{BlockBody, Header, B256}; use std::{ collections::{HashMap, VecDeque}, sync::{ diff --git a/crates/net/network/src/import.rs b/crates/net/network/src/import.rs index 738851f0a6123..2d18da9d41e13 100644 --- a/crates/net/network/src/import.rs +++ b/crates/net/network/src/import.rs @@ -1,7 +1,7 @@ //! This module provides an abstraction over block import in the form of the `BlockImport` trait. use crate::message::NewBlockMessage; -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::task::{Context, Poll}; /// Abstraction over block import. diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 5783c4ebd4987..39d29ee715af8 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -44,7 +44,8 @@ use reth_eth_wire::{ use reth_metrics::common::mpsc::UnboundedMeteredSender; use reth_net_common::bandwidth_meter::BandwidthMeter; use reth_network_api::ReputationChangeKind; -use reth_primitives::{ForkId, NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{ForkId, NodeRecord}; use reth_provider::{BlockNumReader, BlockReader}; use reth_rpc_types::{admin::EthProtocolInfo, NetworkStatus}; use reth_tasks::shutdown::GracefulShutdown; diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index b6861267a1729..2086fd60ea39b 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -11,8 +11,9 @@ use reth_eth_wire::{ SharedTransactions, Transactions, }; use reth_interfaces::p2p::error::{RequestError, RequestResult}; +use reth_network_types::PeerId; use reth_primitives::{ - BlockBody, Bytes, Header, PeerId, 
PooledTransactionsElement, ReceiptWithBloom, B256, + BlockBody, Bytes, Header, PooledTransactionsElement, ReceiptWithBloom, B256, }; use std::{ fmt, diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 7104e442e68e5..86669bf19f489 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -13,7 +13,8 @@ use reth_network_api::{ NetworkError, NetworkInfo, PeerInfo, PeerKind, Peers, PeersInfo, Reputation, ReputationChangeKind, }; -use reth_primitives::{Head, NodeRecord, PeerId, TransactionSigned, B256}; +use reth_network_types::PeerId; +use reth_primitives::{Head, NodeRecord, TransactionSigned, B256}; use reth_rpc_types::NetworkStatus; use secp256k1::SecretKey; use std::{ diff --git a/crates/net/network/src/peers/manager.rs b/crates/net/network/src/peers/manager.rs index b94c22db74bd6..d6ae9c4da812b 100644 --- a/crates/net/network/src/peers/manager.rs +++ b/crates/net/network/src/peers/manager.rs @@ -14,7 +14,8 @@ use futures::StreamExt; use reth_eth_wire::{errors::EthStreamError, DisconnectReason}; use reth_net_common::ban_list::BanList; use reth_network_api::{PeerKind, ReputationChangeKind}; -use reth_primitives::{ForkId, NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{ForkId, NodeRecord}; use std::{ collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, fmt::Display, @@ -1558,7 +1559,8 @@ mod tests { }; use reth_net_common::ban_list::BanList; use reth_network_api::{Direction, ReputationChangeKind}; - use reth_primitives::{PeerId, B512}; + use reth_network_types::PeerId; + use reth_primitives::B512; use std::{ collections::HashSet, future::{poll_fn, Future}, diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 33c0a66e3f29b..32bfb72acb310 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -20,7 +20,7 @@ use reth_eth_wire::{ }; use 
reth_interfaces::p2p::error::RequestError; use reth_metrics::common::mpsc::MeteredPollSender; -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::{ collections::VecDeque, future::Future, @@ -769,7 +769,8 @@ mod tests { UnauthedEthStream, UnauthedP2PStream, }; use reth_net_common::bandwidth_meter::{BandwidthMeter, MeteredStream}; - use reth_primitives::{pk2id, ForkFilter, Hardfork, MAINNET}; + use reth_network_types::pk2id; + use reth_primitives::{ForkFilter, Hardfork, MAINNET}; use secp256k1::{SecretKey, SECP256K1}; use tokio::{ net::{TcpListener, TcpStream}, diff --git a/crates/net/network/src/session/handle.rs b/crates/net/network/src/session/handle.rs index 80298f3240c8b..c48fff6189883 100644 --- a/crates/net/network/src/session/handle.rs +++ b/crates/net/network/src/session/handle.rs @@ -12,7 +12,7 @@ use reth_eth_wire::{ DisconnectReason, EthVersion, Status, }; use reth_network_api::PeerInfo; -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::{io, net::SocketAddr, sync::Arc, time::Instant}; use tokio::sync::{ mpsc::{self, error::SendError}, diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 94d41226e27ec..95f426c5424d3 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -19,7 +19,8 @@ use reth_net_common::{ bandwidth_meter::{BandwidthMeter, MeteredStream}, stream::HasRemoteAddr, }; -use reth_primitives::{ForkFilter, ForkId, ForkTransition, Head, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{ForkFilter, ForkId, ForkTransition, Head}; use reth_tasks::TaskSpawner; use secp256k1::SecretKey; use std::{ diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index d75a1aaa5f2ec..0020b4927dc1b 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -18,7 +18,8 @@ use reth_eth_wire::{ capability::Capabilities, BlockHashNumber, DisconnectReason, NewBlockHashes, 
Status, }; use reth_network_api::PeerKind; -use reth_primitives::{ForkId, PeerId, B256}; +use reth_network_types::PeerId; +use reth_primitives::{ForkId, B256}; use reth_provider::BlockNumReader; use std::{ collections::{HashMap, VecDeque}, @@ -537,7 +538,8 @@ mod tests { BlockBodies, EthVersion, }; use reth_interfaces::p2p::{bodies::client::BodiesClient, error::RequestError}; - use reth_primitives::{BlockBody, Header, PeerId, B256}; + use reth_network_types::PeerId; + use reth_primitives::{BlockBody, Header, B256}; use reth_provider::test_utils::NoopProvider; use std::{ future::poll_fn, diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index 136ece0bd2c6b..11ac5949aaebd 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -12,7 +12,7 @@ use reth_eth_wire::{ errors::EthStreamError, EthVersion, Status, }; -use reth_primitives::PeerId; +use reth_network_types::PeerId; use reth_provider::{BlockNumReader, BlockReader}; use std::{ io, diff --git a/crates/net/network/src/test_utils/init.rs b/crates/net/network/src/test_utils/init.rs index 1419191aad151..b72046a7f594e 100644 --- a/crates/net/network/src/test_utils/init.rs +++ b/crates/net/network/src/test_utils/init.rs @@ -1,5 +1,5 @@ use enr::{k256::ecdsa::SigningKey, Enr, EnrPublicKey}; -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::{net::SocketAddr, time::Duration}; /// The timeout for tests that create a GethInstance diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index 9720b7a93781c..a92934c0cbcee 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -14,7 +14,8 @@ use futures::{FutureExt, StreamExt}; use pin_project::pin_project; use reth_eth_wire::{protocol::Protocol, DisconnectReason, HelloMessageWithProtocols}; use reth_network_api::{NetworkInfo, Peers}; -use reth_primitives::{PeerId, MAINNET}; +use 
reth_network_types::PeerId; +use reth_primitives::MAINNET; use reth_provider::{ test_utils::NoopProvider, BlockReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory, }; diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index cbec0f1e67fb5..e82a20a31628a 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -41,7 +41,8 @@ use reth_eth_wire::{ PartiallyValidData, RequestTxHashes, ValidAnnouncementData, }; use reth_interfaces::p2p::error::{RequestError, RequestResult}; -use reth_primitives::{PeerId, PooledTransactionsElement, TxHash}; +use reth_network_types::PeerId; +use reth_primitives::{PooledTransactionsElement, TxHash}; use schnellru::ByLength; #[cfg(debug_assertions)] use smallvec::{smallvec, SmallVec}; diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 83176c566f722..f7d03520ff2fe 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -26,9 +26,9 @@ use reth_interfaces::{ }; use reth_metrics::common::mpsc::UnboundedMeteredReceiver; use reth_network_api::{Peers, ReputationChangeKind}; +use reth_network_types::PeerId; use reth_primitives::{ - FromRecoveredPooledTransaction, PeerId, PooledTransactionsElement, TransactionSigned, TxHash, - B256, + FromRecoveredPooledTransaction, PooledTransactionsElement, TransactionSigned, TxHash, B256, }; use reth_transaction_pool::{ error::{PoolError, PoolResult}, diff --git a/crates/net/types/Cargo.toml b/crates/net/types/Cargo.toml new file mode 100644 index 0000000000000..841a76dfe5744 --- /dev/null +++ b/crates/net/types/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "reth-network-types" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Network types and utils" + 
+[lints] +workspace = true + +[dependencies] +# reth +reth-rpc-types.workspace = true +reth-primitives.workspace = true + +# eth +enr.workspace = true + +# crypto +secp256k1 = { workspace = true, features = ["global-context", "recovery", "rand"] } + +# misc +serde_with.workspace = true + diff --git a/crates/net/types/src/lib.rs b/crates/net/types/src/lib.rs new file mode 100644 index 0000000000000..ccd9757c94560 --- /dev/null +++ b/crates/net/types/src/lib.rs @@ -0,0 +1,239 @@ +//! Network Types and Utilities. +//! +//! This crate manages and converts Ethereum network entities such as node records, peer IDs, and +//! Ethereum Node Records (ENRs) + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +use secp256k1::{constants::UNCOMPRESSED_PUBLIC_KEY_SIZE, PublicKey, SecretKey}; +use std::{net::IpAddr, str::FromStr}; + +// Re-export PeerId for ease of use. +pub use enr::Enr; +pub use reth_rpc_types::{NodeRecord, PeerId}; + +/// This tag should be set to indicate to libsecp256k1 that the following bytes denote an +/// uncompressed pubkey. +/// +/// `SECP256K1_TAG_PUBKEY_UNCOMPRESSED` = `0x04` +/// +/// See: +const SECP256K1_TAG_PUBKEY_UNCOMPRESSED: u8 = 4; + +/// Converts a [secp256k1::PublicKey] to a [PeerId] by stripping the +/// `SECP256K1_TAG_PUBKEY_UNCOMPRESSED` tag and storing the rest of the slice in the [PeerId]. +#[inline] +pub fn pk2id(pk: &PublicKey) -> PeerId { + PeerId::from_slice(&pk.serialize_uncompressed()[1..]) +} + +/// Converts a [PeerId] to a [secp256k1::PublicKey] by prepending the [PeerId] bytes with the +/// SECP256K1_TAG_PUBKEY_UNCOMPRESSED tag. 
+#[inline] +pub fn id2pk(id: PeerId) -> Result { + // NOTE: B512 is used as a PeerId because 512 bits is enough to represent an uncompressed + // public key. + let mut s = [0u8; UNCOMPRESSED_PUBLIC_KEY_SIZE]; + s[0] = SECP256K1_TAG_PUBKEY_UNCOMPRESSED; + s[1..].copy_from_slice(id.as_slice()); + PublicKey::from_slice(&s) +} + +/// A peer that can come in ENR or [NodeRecord] form. +#[derive( + Debug, Clone, Eq, PartialEq, Hash, serde_with::SerializeDisplay, serde_with::DeserializeFromStr, +)] +pub enum AnyNode { + /// An "enode:" peer with full ip + NodeRecord(NodeRecord), + /// An "enr:" + Enr(Enr), + /// An incomplete "enode" with only a peer id + PeerId(PeerId), +} + +impl AnyNode { + /// Returns the peer id of the node. + pub fn peer_id(&self) -> PeerId { + match self { + AnyNode::NodeRecord(record) => record.id, + AnyNode::Enr(enr) => pk2id(&enr.public_key()), + AnyNode::PeerId(peer_id) => *peer_id, + } + } + + /// Returns the full node record if available. + pub fn node_record(&self) -> Option { + match self { + AnyNode::NodeRecord(record) => Some(*record), + AnyNode::Enr(enr) => { + let node_record = NodeRecord { + address: enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from))?, + tcp_port: enr.tcp4().or_else(|| enr.tcp6())?, + udp_port: enr.udp4().or_else(|| enr.udp6())?, + id: pk2id(&enr.public_key()), + } + .into_ipv4_mapped(); + Some(node_record) + } + _ => None, + } + } +} + +impl From for AnyNode { + fn from(value: NodeRecord) -> Self { + Self::NodeRecord(value) + } +} + +impl From> for AnyNode { + fn from(value: Enr) -> Self { + Self::Enr(value) + } +} + +impl FromStr for AnyNode { + type Err = String; + + fn from_str(s: &str) -> Result { + if let Some(rem) = s.strip_prefix("enode://") { + if let Ok(record) = NodeRecord::from_str(s) { + return Ok(AnyNode::NodeRecord(record)) + } + // incomplete enode + if let Ok(peer_id) = PeerId::from_str(rem) { + return Ok(AnyNode::PeerId(peer_id)) + } + return Err(format!("invalid public key: {rem}")) + 
} + if s.starts_with("enr:") { + return Enr::from_str(s).map(AnyNode::Enr) + } + Err("missing 'enr:' prefix for base64-encoded record".to_string()) + } +} + +impl std::fmt::Display for AnyNode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + AnyNode::NodeRecord(record) => write!(f, "{record}"), + AnyNode::Enr(enr) => write!(f, "{enr}"), + AnyNode::PeerId(peer_id) => { + write!(f, "enode://{}", reth_primitives::hex::encode(peer_id.as_slice())) + } + } + } +} + +/// Generic wrapper with peer id +#[derive(Debug)] +pub struct WithPeerId(PeerId, pub T); + +impl From<(PeerId, T)> for WithPeerId { + fn from(value: (PeerId, T)) -> Self { + Self(value.0, value.1) + } +} + +impl WithPeerId { + /// Wraps the value with the peerid. + pub fn new(peer: PeerId, value: T) -> Self { + Self(peer, value) + } + + /// Get the peer id + pub fn peer_id(&self) -> PeerId { + self.0 + } + + /// Get the underlying data + pub fn data(&self) -> &T { + &self.1 + } + + /// Returns ownership of the underlying data. + pub fn into_data(self) -> T { + self.1 + } + + /// Transform the data + pub fn transform>(self) -> WithPeerId { + WithPeerId(self.0, self.1.into()) + } + + /// Split the wrapper into [PeerId] and data tuple + pub fn split(self) -> (PeerId, T) { + (self.0, self.1) + } + + /// Maps the inner value to a new value using the given function. + pub fn map U>(self, op: F) -> WithPeerId { + WithPeerId(self.0, op(self.1)) + } +} + +impl WithPeerId> { + /// returns `None` if the inner value is `None`, otherwise returns `Some(WithPeerId)`. 
+ pub fn transpose(self) -> Option> { + self.1.map(|v| WithPeerId(self.0, v)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use secp256k1::SECP256K1; + + #[test] + fn test_node_record_parse() { + let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; + let node: AnyNode = url.parse().unwrap(); + assert_eq!(node, AnyNode::NodeRecord(NodeRecord { + address: IpAddr::V4([10,3,58,6].into()), + tcp_port: 30303, + udp_port: 30301, + id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), + })); + assert_eq!(node.to_string(), url) + } + + #[test] + fn test_peer_id_parse() { + let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0"; + let node: AnyNode = url.parse().unwrap(); + assert_eq!(node, AnyNode::PeerId("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap())); + assert_eq!(node.to_string(), url); + + let url = "enode://"; + let err = url.parse::().unwrap_err(); + assert_eq!(err, "invalid public key: "); + } + + // + #[test] + fn test_enr_parse() { + let url = "enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8"; + let node: AnyNode = url.parse().unwrap(); + assert_eq!( + node.peer_id(), + "0xca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f" + .parse::() + .unwrap() + ); + assert_eq!(node.to_string(), url); + } + + #[test] + fn pk2id2pk() { + let prikey = SecretKey::new(&mut secp256k1::rand::thread_rng()); + let pubkey = 
PublicKey::from_secret_key(SECP256K1, &prikey); + assert_eq!(pubkey, id2pk(pk2id(&pubkey)).unwrap()); + } +} diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index ab6b44303bc40..4fa5046654ce6 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -27,7 +27,6 @@ alloy-trie = { workspace = true, features = ["serde"] } nybbles = { workspace = true, features = ["serde", "rlp"] } alloy-genesis.workspace = true alloy-eips.workspace = true -enr.workspace = true # crypto secp256k1 = { workspace = true, features = ["global-context", "recovery", "rand"] } @@ -43,7 +42,6 @@ itertools.workspace = true modular-bitfield.workspace = true once_cell.workspace = true rayon.workspace = true -serde_with.workspace = true serde.workspace = true serde_json.workspace = true sha2 = { version = "0.10.7", optional = true } diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 9e77b4c15dbc4..1c88086288171 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -33,7 +33,6 @@ mod header; mod integer_list; mod log; mod net; -mod peer; pub mod proofs; mod prune; mod receipt; @@ -77,7 +76,6 @@ pub use net::{ NodeRecordParseError, GOERLI_BOOTNODES, HOLESKY_BOOTNODES, MAINNET_BOOTNODES, SEPOLIA_BOOTNODES, }; -pub use peer::{id2pk, pk2id, AnyNode, PeerId, WithPeerId}; pub use prune::{ PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneModes, PruneProgress, PrunePurpose, PruneSegment, PruneSegmentError, ReceiptsLogPruneConfig, diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index c2ada1e881927..fe22eae0f6a70 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -16,6 +16,7 @@ workspace = true reth-primitives.workspace = true reth-rpc-types.workspace = true reth-engine-primitives.workspace = true +reth-network-types.workspace = true # misc jsonrpsee = { workspace = true, features = ["server", "macros"] } diff --git 
a/crates/rpc/rpc-api/src/admin.rs b/crates/rpc/rpc-api/src/admin.rs index 7497d120519c9..4c31221cdfe2b 100644 --- a/crates/rpc/rpc-api/src/admin.rs +++ b/crates/rpc/rpc-api/src/admin.rs @@ -1,5 +1,6 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{AnyNode, NodeRecord}; +use reth_network_types::AnyNode; +use reth_primitives::NodeRecord; use reth_rpc_types::{admin::NodeInfo, PeerInfo}; /// Admin namespace rpc interface that gives access to several non-standard RPC methods. diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index d5bd324ae027c..81788f0a39117 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -27,6 +27,7 @@ reth-consensus-common.workspace = true reth-rpc-types-compat.workspace = true revm-inspectors.workspace = true reth-evm.workspace = true +reth-network-types.workspace = true # eth alloy-rlp.workspace = true diff --git a/crates/rpc/rpc/src/admin.rs b/crates/rpc/rpc/src/admin.rs index 3f55784333a26..6f3125e068f78 100644 --- a/crates/rpc/rpc/src/admin.rs +++ b/crates/rpc/rpc/src/admin.rs @@ -3,7 +3,8 @@ use alloy_primitives::B256; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_network_api::{NetworkInfo, PeerKind, Peers}; -use reth_primitives::{AnyNode, ChainSpec, NodeRecord}; +use reth_network_types::AnyNode; +use reth_primitives::{ChainSpec, NodeRecord}; use reth_rpc_api::AdminApiServer; use reth_rpc_types::{ admin::{EthProtocolInfo, NodeInfo, Ports, ProtocolInfo}, diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 2692c94103d05..f3bd16a5e2a18 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -51,6 +51,7 @@ reth-revm.workspace = true reth-static-file.workspace = true reth-trie = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } +reth-network-types.workspace = true alloy-rlp.workspace = true itertools.workspace = true diff --git a/crates/stages/src/lib.rs 
b/crates/stages/src/lib.rs index 4b6df93913306..3fea3e04df12b 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -17,7 +17,8 @@ //! # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloaderBuilder; //! # use reth_interfaces::test_utils::{TestBodiesClient, TestHeadersClient}; //! # use reth_revm::EvmProcessorFactory; -//! # use reth_primitives::{PeerId, MAINNET, B256, PruneModes}; +//! # use reth_primitives::{MAINNET, B256, PruneModes}; +//! # use reth_network_types::PeerId; //! # use reth_stages::Pipeline; //! # use reth_stages::sets::DefaultStages; //! # use tokio::sync::watch; diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 5b6b8548682ff..4365245cf79fa 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -20,6 +20,7 @@ reth-tasks.workspace = true revm.workspace = true alloy-rlp.workspace = true reth-revm = { workspace = true, optional = true } +reth-network-types.workspace = true # async/futures futures-util.workspace = true diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index c5603ec7bd01c..79b9af6984d1e 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -9,13 +9,13 @@ use crate::{ }; use futures_util::{ready, Stream}; use reth_eth_wire::HandleMempoolData; +use reth_network_types::PeerId; use reth_primitives::{ kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, AccessList, Address, BlobTransactionSidecar, BlobTransactionValidationError, FromRecoveredPooledTransaction, - IntoRecoveredTransaction, PeerId, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSignedEcRecovered, - TryFromRecoveredTransaction, TxEip4844, TxHash, TxKind, B256, EIP1559_TX_TYPE_ID, - EIP4844_TX_TYPE_ID, U256, + IntoRecoveredTransaction, PooledTransactionsElement, PooledTransactionsElementEcRecovered, + SealedBlock, 
Transaction, TransactionSignedEcRecovered, TryFromRecoveredTransaction, TxEip4844, + TxHash, TxKind, B256, EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, U256, }; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; diff --git a/examples/manual-p2p/Cargo.toml b/examples/manual-p2p/Cargo.toml index a9c7f251322b0..139cb0e1827ce 100644 --- a/examples/manual-p2p/Cargo.toml +++ b/examples/manual-p2p/Cargo.toml @@ -6,14 +6,17 @@ edition.workspace = true license.workspace = true [dependencies] -once_cell.workspace = true -eyre.workspace = true - reth-primitives.workspace = true reth-network.workspace = true reth-discv4.workspace = true reth-eth-wire.workspace = true reth-ecies.workspace = true -futures.workspace = true +reth-network-types.workspace = true + secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } + +futures.workspace = true tokio.workspace = true + +eyre.workspace = true +once_cell.workspace = true diff --git a/examples/manual-p2p/src/main.rs b/examples/manual-p2p/src/main.rs index 737daf728653a..e97cb3662d294 100644 --- a/examples/manual-p2p/src/main.rs +++ b/examples/manual-p2p/src/main.rs @@ -16,8 +16,9 @@ use reth_eth_wire::{ EthMessage, EthStream, HelloMessage, P2PStream, Status, UnauthedEthStream, UnauthedP2PStream, }; use reth_network::config::rng_secret_key; +use reth_network_types::pk2id; use reth_primitives::{ - mainnet_nodes, pk2id, Chain, Hardfork, Head, NodeRecord, MAINNET, MAINNET_GENESIS_HASH, + mainnet_nodes, Chain, Hardfork, Head, NodeRecord, MAINNET, MAINNET_GENESIS_HASH, }; use secp256k1::{SecretKey, SECP256K1}; use tokio::net::TcpStream; From 43f58f16dd23fc9e75de06e11222165ffe71b1f6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 27 Apr 2024 07:55:06 +0200 Subject: [PATCH 363/700] chore: get rid of compat call (#7930) --- crates/payload/validator/src/lib.rs | 22 +++-- crates/primitives/src/block.rs | 126 +++++++++++++++------------- 2 files changed, 80 insertions(+), 68 deletions(-) diff --git 
a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index 4f9f5150730ef..c3b25aef90ecd 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -10,7 +10,7 @@ use reth_primitives::{ChainSpec, SealedBlock}; use reth_rpc_types::{engine::MaybeCancunPayloadFields, ExecutionPayload, PayloadError}; -use reth_rpc_types_compat::engine::payload::{try_into_block, validate_block_hash}; +use reth_rpc_types_compat::engine::payload::try_into_block; use std::sync::Arc; /// Execution payload validator. @@ -100,21 +100,27 @@ impl ExecutionPayloadValidator { payload: ExecutionPayload, cancun_fields: MaybeCancunPayloadFields, ) -> Result { - let block_hash = payload.block_hash(); + let expected_hash = payload.block_hash(); // First parse the block - let block = try_into_block(payload, cancun_fields.parent_beacon_block_root())?; + let sealed_block = + try_into_block(payload, cancun_fields.parent_beacon_block_root())?.seal_slow(); - let cancun_active = self.is_cancun_active_at_timestamp(block.timestamp); + // Ensure the hash included in the payload matches the block hash + if expected_hash != sealed_block.hash() { + return Err(PayloadError::BlockHash { + execution: sealed_block.hash(), + consensus: expected_hash, + }) + } + + let cancun_active = self.is_cancun_active_at_timestamp(sealed_block.timestamp); - if !cancun_active && block.has_blob_transactions() { + if !cancun_active && sealed_block.has_blob_transactions() { // cancun not active but blob transactions present return Err(PayloadError::PreCancunBlockWithBlobTransactions) } - // Ensure the hash included in the payload matches the block hash - let sealed_block = validate_block_hash(block_hash, block)?; - // EIP-4844 checks self.ensure_matching_blob_versioned_hashes(&sealed_block, &cancun_fields)?; diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 06c08db1fb20e..864e7954f58bf 100644 --- a/crates/primitives/src/block.rs +++ 
b/crates/primitives/src/block.rs @@ -296,66 +296,6 @@ pub struct SealedBlock { pub withdrawals: Option, } -/// Generates a header which is valid __with respect to past and future forks__. This means, for -/// example, that if the withdrawals root is present, the base fee per gas is also present. -/// -/// If blob gas used were present, then the excess blob gas and parent beacon block root are also -/// present. In this example, the withdrawals root would also be present. -/// -/// This __does not, and should not guarantee__ that the header is valid with respect to __anything -/// else__. -#[cfg(any(test, feature = "arbitrary"))] -pub fn generate_valid_header( - mut header: Header, - eip_4844_active: bool, - blob_gas_used: u64, - excess_blob_gas: u64, - parent_beacon_block_root: B256, -) -> Header { - // EIP-1559 logic - if header.base_fee_per_gas.is_none() { - // If EIP-1559 is not active, clear related fields - header.withdrawals_root = None; - header.blob_gas_used = None; - header.excess_blob_gas = None; - header.parent_beacon_block_root = None; - } else if header.withdrawals_root.is_none() { - // If EIP-4895 is not active, clear related fields - header.blob_gas_used = None; - header.excess_blob_gas = None; - header.parent_beacon_block_root = None; - } else if eip_4844_active { - // Set fields based on EIP-4844 being active - header.blob_gas_used = Some(blob_gas_used); - header.excess_blob_gas = Some(excess_blob_gas); - header.parent_beacon_block_root = Some(parent_beacon_block_root); - } else { - // If EIP-4844 is not active, clear related fields - header.blob_gas_used = None; - header.excess_blob_gas = None; - header.parent_beacon_block_root = None; - } - - header -} - -#[cfg(any(test, feature = "arbitrary"))] -prop_compose! { - /// Generates a proptest strategy for constructing an instance of a header which is valid __with - /// respect to past and future forks__. - /// - /// See docs for [generate_valid_header] for more information. 
- pub fn valid_header_strategy()( - header in any::
(), - eip_4844_active in any::(), - blob_gas_used in any::(), - excess_blob_gas in any::(), - parent_beacon_block_root in any::() - ) -> Header { - generate_valid_header(header, eip_4844_active, blob_gas_used, excess_blob_gas, parent_beacon_block_root) - } -} - impl SealedBlock { /// Create a new sealed block instance using the sealed header and block body. #[inline] @@ -458,6 +398,12 @@ impl SealedBlock { self.blob_transactions().iter().filter_map(|tx| tx.blob_gas_used()).sum() } + /// Returns whether or not the block contains any blob transactions. + #[inline] + pub fn has_blob_transactions(&self) -> bool { + self.body.iter().any(|tx| tx.is_eip4844()) + } + /// Ensures that the transaction root in the block header is valid. /// /// The transaction root is the Keccak 256-bit hash of the root node of the trie structure @@ -653,6 +599,66 @@ impl From for BlockBody { } } +/// Generates a header which is valid __with respect to past and future forks__. This means, for +/// example, that if the withdrawals root is present, the base fee per gas is also present. +/// +/// If blob gas used were present, then the excess blob gas and parent beacon block root are also +/// present. In this example, the withdrawals root would also be present. +/// +/// This __does not, and should not guarantee__ that the header is valid with respect to __anything +/// else__. 
+#[cfg(any(test, feature = "arbitrary"))] +pub fn generate_valid_header( + mut header: Header, + eip_4844_active: bool, + blob_gas_used: u64, + excess_blob_gas: u64, + parent_beacon_block_root: B256, +) -> Header { + // EIP-1559 logic + if header.base_fee_per_gas.is_none() { + // If EIP-1559 is not active, clear related fields + header.withdrawals_root = None; + header.blob_gas_used = None; + header.excess_blob_gas = None; + header.parent_beacon_block_root = None; + } else if header.withdrawals_root.is_none() { + // If EIP-4895 is not active, clear related fields + header.blob_gas_used = None; + header.excess_blob_gas = None; + header.parent_beacon_block_root = None; + } else if eip_4844_active { + // Set fields based on EIP-4844 being active + header.blob_gas_used = Some(blob_gas_used); + header.excess_blob_gas = Some(excess_blob_gas); + header.parent_beacon_block_root = Some(parent_beacon_block_root); + } else { + // If EIP-4844 is not active, clear related fields + header.blob_gas_used = None; + header.excess_blob_gas = None; + header.parent_beacon_block_root = None; + } + + header +} + +#[cfg(any(test, feature = "arbitrary"))] +prop_compose! { + /// Generates a proptest strategy for constructing an instance of a header which is valid __with + /// respect to past and future forks__. + /// + /// See docs for [generate_valid_header] for more information. + pub fn valid_header_strategy()( + header in any::
(), + eip_4844_active in any::(), + blob_gas_used in any::(), + excess_blob_gas in any::(), + parent_beacon_block_root in any::() + ) -> Header { + generate_valid_header(header, eip_4844_active, blob_gas_used, excess_blob_gas, parent_beacon_block_root) + } +} + #[cfg(test)] mod tests { use super::{BlockNumberOrTag::*, *}; From 5f15af5401b8df52ebcc39d451e220c4022ee10b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 27 Apr 2024 17:08:37 +0200 Subject: [PATCH 364/700] chore: bump ratatui; rm unused (#7934) --- Cargo.lock | 76 +++++++++++++++++----------------- Cargo.toml | 1 + bin/reth/Cargo.toml | 9 +--- crates/node-builder/Cargo.toml | 2 +- 4 files changed, 41 insertions(+), 47 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a40fb4513bc63..478d38713c892 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -127,7 +127,7 @@ dependencies = [ "num_enum", "proptest", "serde", - "strum 0.26.2", + "strum", ] [[package]] @@ -1470,6 +1470,15 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +[[package]] +name = "castaway" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a17ed5635fc8536268e5d4de1e22e81ac34419e5f052d4d51f4e01dcc263fcc" +dependencies = [ + "rustversion", +] + [[package]] name = "cc" version = "1.0.95" @@ -1673,11 +1682,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b34115915337defe99b2aff5c2ce6771e5fbc4079f4b506301f5cf394c8452f7" dependencies = [ "crossterm", - "strum 0.26.2", - "strum_macros 0.26.2", + "strum", + "strum_macros", "unicode-width", ] +[[package]] +name = "compact_str" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f86b9c4c00838774a6d902ef931eff7470720c51d90c2e32cfe15dc304737b3f" +dependencies = [ + "castaway", + "cfg-if", + "itoa", + "ryu", + "static_assertions", +] + [[package]] name = 
"concat-kdf" version = "0.1.0" @@ -6017,19 +6039,20 @@ dependencies = [ [[package]] name = "ratatui" -version = "0.25.0" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5659e52e4ba6e07b2dad9f1158f578ef84a73762625ddb51536019f34d180eb" +checksum = "a564a852040e82671dc50a37d88f3aa83bbc690dfc6844cfe7a2591620206a80" dependencies = [ "bitflags 2.5.0", "cassowary", + "compact_str", "crossterm", "indoc", "itertools 0.12.1", "lru", "paste", "stability", - "strum 0.25.0", + "strum", "unicode-segmentation", "unicode-width", ] @@ -6262,7 +6285,6 @@ dependencies = [ "rand 0.8.5", "ratatui", "rayon", - "reth-auto-seal-consensus", "reth-basic-payload-builder", "reth-beacon-consensus", "reth-blockchain-tree", @@ -6294,7 +6316,6 @@ dependencies = [ "reth-rpc", "reth-rpc-api", "reth-rpc-builder", - "reth-rpc-engine-api", "reth-rpc-types", "reth-rpc-types-compat", "reth-stages", @@ -6307,7 +6328,6 @@ dependencies = [ "serde_json", "similar-asserts", "tempfile", - "thiserror", "tikv-jemallocator", "tokio", "toml", @@ -6537,7 +6557,7 @@ dependencies = [ "rustc-hash", "serde", "serde_json", - "strum 0.26.2", + "strum", "tempfile", "test-fuzz", "thiserror", @@ -7412,7 +7432,7 @@ dependencies = [ "serde", "serde_json", "sha2 0.10.8", - "strum 0.26.2", + "strum", "sucds", "tempfile", "test-fuzz", @@ -7446,7 +7466,7 @@ dependencies = [ "reth-rpc-types", "reth-trie", "revm", - "strum 0.26.2", + "strum", "tempfile", "tokio", "tokio-stream", @@ -7602,7 +7622,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "strum 0.26.2", + "strum", "thiserror", "tokio", "tower", @@ -8868,12 +8888,12 @@ checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" [[package]] name = "stability" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd1b177894da2a2d9120208c3386066af06a488255caabc5de8ddca22dbc3ce" +checksum = 
"2ff9eaf853dec4c8802325d8b6d3dffa86cc707fd7a1a4cdbf416e13b061787a" dependencies = [ "quote", - "syn 1.0.109", + "syn 2.0.60", ] [[package]] @@ -8912,35 +8932,13 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" -[[package]] -name = "strum" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" -dependencies = [ - "strum_macros 0.25.3", -] - [[package]] name = "strum" version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" dependencies = [ - "strum_macros 0.26.2", -] - -[[package]] -name = "strum_macros" -version = "0.25.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" -dependencies = [ - "heck 0.4.1", - "proc-macro2", - "quote", - "rustversion", - "syn 2.0.60", + "strum_macros", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d56392c1d3aa5..478c2b4536cb1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -310,6 +310,7 @@ bytes = "1.5" bitflags = "2.4" clap = "4" derive_more = "0.99.17" +fdlimit = "0.3.0" eyre = "0.6" tracing = "0.1.0" tracing-appender = "0.2" diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 5e47506db9bfc..995b296107508 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -24,11 +24,9 @@ reth-stages.workspace = true reth-interfaces = { workspace = true, features = ["clap"] } reth-transaction-pool.workspace = true reth-beacon-consensus.workspace = true -reth-auto-seal-consensus.workspace = true reth-cli-runner.workspace = true reth-consensus-common.workspace = true reth-blockchain-tree.workspace = true -reth-rpc-engine-api.workspace = true reth-rpc-builder.workspace = true reth-rpc.workspace = true 
reth-rpc-types.workspace = true @@ -65,7 +63,7 @@ alloy-rlp.workspace = true tracing.workspace = true # io -fdlimit = "0.3.0" +fdlimit.workspace = true serde.workspace = true serde_json.workspace = true confy.workspace = true @@ -81,7 +79,7 @@ rand.workspace = true # tui comfy-table = "7.0" crossterm = "0.27.0" -ratatui = "0.25.0" +ratatui = { version = "0.26", default-features = false, features = ["crossterm"] } human_bytes = "0.4.1" # async @@ -104,7 +102,6 @@ itertools.workspace = true rayon.workspace = true boyer-moore-magiclen = "0.2.16" ahash = "0.8" -thiserror.workspace = true # p2p discv5.workspace = true @@ -136,10 +133,8 @@ optimism = [ "reth-revm/optimism", "reth-interfaces/optimism", "reth-rpc/optimism", - "reth-rpc-engine-api/optimism", "reth-provider/optimism", "reth-beacon-consensus/optimism", - "reth-auto-seal-consensus/optimism", "reth-blockchain-tree/optimism", "dep:reth-node-optimism", "reth-node-core/optimism", diff --git a/crates/node-builder/Cargo.toml b/crates/node-builder/Cargo.toml index aae73f5a63f93..270b0dfe5b634 100644 --- a/crates/node-builder/Cargo.toml +++ b/crates/node-builder/Cargo.toml @@ -51,6 +51,6 @@ tokio = { workspace = true, features = [ ## misc aquamarine.workspace = true eyre.workspace = true -fdlimit = "0.3.0" +fdlimit.workspace = true confy.workspace = true rayon.workspace = true From 2deb259ead0793740d6b1dc0535ef7afa3c3f80f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 27 Apr 2024 17:09:28 +0200 Subject: [PATCH 365/700] feat: replace duplicate Withdrawal type with alloy (#7931) --- .../ethereum/engine-primitives/src/payload.rs | 14 +-- crates/payload/optimism/src/payload.rs | 11 +- crates/primitives/src/withdrawal.rs | 112 ++++++++++-------- crates/revm/src/state_change.rs | 2 +- crates/rpc/rpc-engine-api/tests/it/payload.rs | 10 +- crates/rpc/rpc-types-compat/src/engine/mod.rs | 5 +- .../rpc-types-compat/src/engine/payload.rs | 70 ++--------- 7 files changed, 82 insertions(+), 142 deletions(-) diff --git 
a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index a6c47ebde9105..a354e0588844f 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -11,8 +11,7 @@ use reth_rpc_types::engine::{ PayloadId, }; use reth_rpc_types_compat::engine::payload::{ - block_to_payload_v3, convert_block_to_payload_field_v2, - convert_standalone_withdraw_to_withdrawal, try_block_to_payload_v1, + block_to_payload_v3, convert_block_to_payload_field_v2, try_block_to_payload_v1, }; use revm_primitives::{BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; use std::convert::Infallible; @@ -159,22 +158,13 @@ impl EthPayloadBuilderAttributes { pub fn new(parent: B256, attributes: PayloadAttributes) -> Self { let id = payload_id(&parent, &attributes); - let withdraw = attributes.withdrawals.map(|withdrawals| { - Withdrawals::new( - withdrawals - .into_iter() - .map(convert_standalone_withdraw_to_withdrawal) // Removed the parentheses here - .collect(), - ) - }); - Self { id, parent, timestamp: attributes.timestamp, suggested_fee_recipient: attributes.suggested_fee_recipient, prev_randao: attributes.prev_randao, - withdrawals: withdraw.unwrap_or_default(), + withdrawals: attributes.withdrawals.unwrap_or_default().into(), parent_beacon_block_root: attributes.parent_beacon_block_root, } } diff --git a/crates/payload/optimism/src/payload.rs b/crates/payload/optimism/src/payload.rs index d753370fd2b73..b90d05d5f7e85 100644 --- a/crates/payload/optimism/src/payload.rs +++ b/crates/payload/optimism/src/payload.rs @@ -16,8 +16,7 @@ use reth_rpc_types::engine::{ OptimismPayloadAttributes, PayloadId, }; use reth_rpc_types_compat::engine::payload::{ - block_to_payload_v3, convert_block_to_payload_field_v2, - convert_standalone_withdraw_to_withdrawal, try_block_to_payload_v1, + block_to_payload_v3, convert_block_to_payload_field_v2, try_block_to_payload_v1, }; use 
revm::primitives::HandlerCfg; use std::sync::Arc; @@ -54,19 +53,13 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { (payload_id_optimism(&parent, &attributes, &transactions), transactions) }; - let withdraw = attributes.payload_attributes.withdrawals.map(|withdrawals| { - Withdrawals::new( - withdrawals.into_iter().map(convert_standalone_withdraw_to_withdrawal).collect(), - ) - }); - let payload_attributes = EthPayloadBuilderAttributes { id, parent, timestamp: attributes.payload_attributes.timestamp, suggested_fee_recipient: attributes.payload_attributes.suggested_fee_recipient, prev_randao: attributes.payload_attributes.prev_randao, - withdrawals: withdraw.unwrap_or_default(), + withdrawals: attributes.payload_attributes.withdrawals.unwrap_or_default().into(), parent_beacon_block_root: attributes.payload_attributes.parent_beacon_block_root, }; diff --git a/crates/primitives/src/withdrawal.rs b/crates/primitives/src/withdrawal.rs index a348b6a051c3e..e47b2816a80b9 100644 --- a/crates/primitives/src/withdrawal.rs +++ b/crates/primitives/src/withdrawal.rs @@ -1,51 +1,12 @@ -use crate::{constants::GWEI_TO_WEI, serde_helper::u64_via_ruint, Address}; -use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; -use reth_codecs::{main_codec, Compact}; -use std::{ - mem, - ops::{Deref, DerefMut}, -}; +//! [EIP-4895](https://eips.ethereum.org/EIPS/eip-4895) Withdrawal types. -/// Withdrawal represents a validator withdrawal from the consensus layer. -#[main_codec] -#[derive(Debug, Clone, PartialEq, Eq, Default, Hash, RlpEncodable, RlpDecodable)] -pub struct Withdrawal { - /// Monotonically increasing identifier issued by consensus layer. - #[serde(with = "u64_via_ruint")] - pub index: u64, - /// Index of validator associated with withdrawal. - #[serde(with = "u64_via_ruint", rename = "validatorIndex")] - pub validator_index: u64, - /// Target address for withdrawn ether. 
- pub address: Address, - /// Value of the withdrawal in gwei. - #[serde(with = "u64_via_ruint")] - pub amount: u64, -} +use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; +use reth_codecs::{main_codec, Compact}; +use std::ops::{Deref, DerefMut}; -impl Withdrawal { - /// Return the withdrawal amount in wei. - pub fn amount_wei(&self) -> u128 { - self.amount as u128 * GWEI_TO_WEI as u128 - } - - /// Calculate a heuristic for the in-memory size of the [Withdrawal]. - #[inline] - pub fn size(&self) -> usize { - mem::size_of::() - } -} - -impl From for Withdrawal { - fn from(withdrawal: reth_rpc_types::Withdrawal) -> Self { - Self { - index: withdrawal.index, - validator_index: withdrawal.index, - address: withdrawal.address, - amount: withdrawal.amount, - } - } -} +/// Re-export from `alloy_eips`. +#[doc(inline)] +pub use alloy_eips::eip4895::Withdrawal; /// Represents a collection of Withdrawals. #[main_codec] @@ -61,13 +22,13 @@ impl Withdrawals { /// Calculate the total size, including capacity, of the Withdrawals. #[inline] pub fn total_size(&self) -> usize { - self.size() + self.capacity() * std::mem::size_of::() + self.capacity() * std::mem::size_of::() } /// Calculate a heuristic for the in-memory size of the [Withdrawals]. #[inline] pub fn size(&self) -> usize { - self.iter().map(Withdrawal::size).sum() + self.len() * std::mem::size_of::() } /// Get an iterator over the Withdrawals. 
@@ -115,15 +76,45 @@ impl DerefMut for Withdrawals { } } -impl From> for Withdrawals { - fn from(withdrawals: Vec) -> Self { - Self(withdrawals.into_iter().map(Into::into).collect()) +impl From> for Withdrawals { + fn from(withdrawals: Vec) -> Self { + Self(withdrawals) } } #[cfg(test)] mod tests { use super::*; + use crate::{serde_helper::u64_via_ruint, Address}; + use alloy_rlp::{RlpDecodable, RlpEncodable}; + use proptest::proptest; + + /// This type is kept for compatibility tests after the codec support was added to alloy-eips + /// Withdrawal type natively + #[main_codec] + #[derive(Debug, Clone, PartialEq, Eq, Default, Hash, RlpEncodable, RlpDecodable)] + struct RethWithdrawal { + /// Monotonically increasing identifier issued by consensus layer. + #[serde(with = "u64_via_ruint")] + index: u64, + /// Index of validator associated with withdrawal. + #[serde(with = "u64_via_ruint", rename = "validatorIndex")] + validator_index: u64, + /// Target address for withdrawn ether. + address: Address, + /// Value of the withdrawal in gwei. 
+ #[serde(with = "u64_via_ruint")] + amount: u64, + } + + impl PartialEq for RethWithdrawal { + fn eq(&self, other: &Withdrawal) -> bool { + self.index == other.index && + self.validator_index == other.validator_index && + self.address == other.address && + self.amount == other.amount + } + } // #[test] @@ -134,4 +125,23 @@ mod tests { let s = serde_json::to_string(&withdrawals).unwrap(); assert_eq!(input, s); } + + proptest!( + #[test] + fn test_roundtrip_withdrawal_compat(withdrawal: RethWithdrawal) { + // Convert to buffer and then create alloy_access_list from buffer and + // compare + let mut compacted_reth_withdrawal = Vec::::new(); + let len = withdrawal.clone().to_compact(&mut compacted_reth_withdrawal); + + // decode the compacted buffer to AccessList + let alloy_withdrawal = Withdrawal::from_compact(&compacted_reth_withdrawal, len).0; + assert_eq!(withdrawal, alloy_withdrawal); + + let mut compacted_alloy_withdrawal = Vec::::new(); + let alloy_len = alloy_withdrawal.to_compact(&mut compacted_alloy_withdrawal); + assert_eq!(len, alloy_len); + assert_eq!(compacted_reth_withdrawal, compacted_alloy_withdrawal); + } + ); } diff --git a/crates/revm/src/state_change.rs b/crates/revm/src/state_change.rs index 5d38c656e35b2..d2b0a6b5b3805 100644 --- a/crates/revm/src/state_change.rs +++ b/crates/revm/src/state_change.rs @@ -152,7 +152,7 @@ pub fn insert_post_block_withdrawals_balance_increments( for withdrawal in withdrawals.iter() { if withdrawal.amount > 0 { *balance_increments.entry(withdrawal.address).or_default() += - withdrawal.amount_wei(); + withdrawal.amount_wei().to::(); } } } diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 8853b5c88b974..0979af400cca9 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -13,8 +13,8 @@ use reth_rpc_types::engine::{ ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadV1, PayloadError, }; use 
reth_rpc_types_compat::engine::payload::{ - convert_standalone_withdraw_to_withdrawal, convert_to_payload_body_v1, try_block_to_payload, - try_block_to_payload_v1, try_into_sealed_block, try_payload_v1_to_block, + convert_to_payload_body_v1, try_block_to_payload, try_block_to_payload_v1, + try_into_sealed_block, try_payload_v1_to_block, }; fn transform_block Block>(src: SealedBlock, f: F) -> ExecutionPayload { @@ -46,11 +46,7 @@ fn payload_body_roundtrip() { .map(|x| TransactionSigned::decode(&mut &x[..])) .collect::, _>>(), ); - let withdraw = payload_body.withdrawals.map(|withdrawals| { - Withdrawals::new( - withdrawals.into_iter().map(convert_standalone_withdraw_to_withdrawal).collect(), - ) - }); + let withdraw = payload_body.withdrawals.map(Withdrawals::new); assert_eq!(block.withdrawals, withdraw); } } diff --git a/crates/rpc/rpc-types-compat/src/engine/mod.rs b/crates/rpc/rpc-types-compat/src/engine/mod.rs index e03ba6f4cd8d4..e14b8350051ca 100644 --- a/crates/rpc/rpc-types-compat/src/engine/mod.rs +++ b/crates/rpc/rpc-types-compat/src/engine/mod.rs @@ -1,6 +1,3 @@ //! 
Standalone functions for engine specific rpc type conversions pub mod payload; -pub use payload::{ - convert_standalone_withdraw_to_withdrawal, convert_withdrawal_to_standalone_withdraw, - try_block_to_payload_v1, try_into_sealed_block, try_payload_v1_to_block, -}; +pub use payload::{try_block_to_payload_v1, try_into_sealed_block, try_payload_v1_to_block}; diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index 46947530103c6..fdacab4e6225a 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -4,7 +4,7 @@ use reth_primitives::{ constants::{EMPTY_OMMER_ROOT_HASH, MAXIMUM_EXTRA_DATA_SIZE, MIN_PROTOCOL_BASE_FEE_U256}, proofs::{self}, - Block, Header, SealedBlock, TransactionSigned, UintTryTo, Withdrawal, Withdrawals, B256, U256, + Block, Header, SealedBlock, TransactionSigned, UintTryTo, Withdrawals, B256, U256, }; use reth_rpc_types::engine::{ payload::{ExecutionPayloadBodyV1, ExecutionPayloadFieldV2, ExecutionPayloadInputV2}, @@ -65,11 +65,8 @@ pub fn try_payload_v2_to_block(payload: ExecutionPayloadV2) -> Result ExecutionPayloadV1 { /// Converts [SealedBlock] to [ExecutionPayloadV2] pub fn try_block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { let transactions = value.raw_transactions(); - let standalone_withdrawals: Vec = value - .withdrawals - .clone() - .unwrap_or_default() - .into_iter() - .map(convert_withdrawal_to_standalone_withdraw) - .collect(); ExecutionPayloadV2 { payload_inner: ExecutionPayloadV1 { @@ -149,7 +139,7 @@ pub fn try_block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { block_hash: value.hash(), transactions, }, - withdrawals: standalone_withdrawals, + withdrawals: value.withdrawals.unwrap_or_default().into_inner(), } } @@ -157,15 +147,9 @@ pub fn try_block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { let 
transactions = value.raw_transactions(); - let withdrawals: Vec = value - .withdrawals - .clone() - .unwrap_or_default() - .into_iter() - .map(convert_withdrawal_to_standalone_withdraw) - .collect(); - ExecutionPayloadV3 { + blob_gas_used: value.blob_gas_used.unwrap_or_default(), + excess_blob_gas: value.excess_blob_gas.unwrap_or_default(), payload_inner: ExecutionPayloadV2 { payload_inner: ExecutionPayloadV1 { parent_hash: value.parent_hash, @@ -183,11 +167,8 @@ pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { block_hash: value.hash(), transactions, }, - withdrawals, + withdrawals: value.withdrawals.unwrap_or_default().into_inner(), }, - - blob_gas_used: value.blob_gas_used.unwrap_or_default(), - excess_blob_gas: value.excess_blob_gas.unwrap_or_default(), } } @@ -222,11 +203,8 @@ pub fn convert_payload_input_v2_to_payload(value: ExecutionPayloadInputV2) -> Ex /// Converts [SealedBlock] to [ExecutionPayloadInputV2] pub fn convert_block_to_payload_input_v2(value: SealedBlock) -> ExecutionPayloadInputV2 { - let withdraw = value.withdrawals.clone().map(|withdrawals| { - withdrawals.into_iter().map(convert_withdrawal_to_standalone_withdraw).collect::>() - }); ExecutionPayloadInputV2 { - withdrawals: withdraw, + withdrawals: value.withdrawals.clone().map(Withdrawals::into_inner), execution_payload: try_block_to_payload_v1(value), } } @@ -295,30 +273,6 @@ pub fn validate_block_hash( Ok(sealed_block) } -/// Converts [Withdrawal] to [reth_rpc_types::Withdrawal] -pub fn convert_withdrawal_to_standalone_withdraw( - withdrawal: Withdrawal, -) -> reth_rpc_types::Withdrawal { - reth_rpc_types::Withdrawal { - index: withdrawal.index, - validator_index: withdrawal.validator_index, - address: withdrawal.address, - amount: withdrawal.amount, - } -} - -/// Converts [reth_rpc_types::Withdrawal] to [Withdrawal] -pub fn convert_standalone_withdraw_to_withdrawal( - standalone: reth_rpc_types::Withdrawal, -) -> Withdrawal { - Withdrawal { - index: standalone.index, 
- validator_index: standalone.validator_index, - address: standalone.address, - amount: standalone.amount, - } -} - /// Converts [Block] to [ExecutionPayloadBodyV1] pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { let transactions = value.body.into_iter().map(|tx| { @@ -326,10 +280,10 @@ pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { tx.encode_enveloped(&mut out); out.into() }); - let withdraw: Option> = value.withdrawals.map(|withdrawals| { - withdrawals.into_iter().map(convert_withdrawal_to_standalone_withdraw).collect::>() - }); - ExecutionPayloadBodyV1 { transactions: transactions.collect(), withdrawals: withdraw } + ExecutionPayloadBodyV1 { + transactions: transactions.collect(), + withdrawals: value.withdrawals.map(Withdrawals::into_inner), + } } /// Transforms a [SealedBlock] into a [ExecutionPayloadV1] From 2b6921b1614f1dc5f63211b5516e393ea3496258 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 27 Apr 2024 17:10:26 +0200 Subject: [PATCH 366/700] chore: replace fnv with fxhashmap (#7927) --- Cargo.lock | 2 +- Cargo.toml | 1 + crates/storage/db/Cargo.toml | 2 +- crates/transaction-pool/Cargo.toml | 2 +- crates/transaction-pool/src/identifier.rs | 4 ++-- crates/transaction-pool/src/pool/parked.rs | 4 ++-- crates/transaction-pool/src/pool/txpool.rs | 6 +++--- 7 files changed, 11 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 478d38713c892..3c1b2b70f8840 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7826,7 +7826,6 @@ dependencies = [ "auto_impl", "bitflags 2.5.0", "criterion", - "fnv", "futures-util", "itertools 0.12.1", "metrics", @@ -7844,6 +7843,7 @@ dependencies = [ "reth-tasks", "reth-tracing", "revm", + "rustc-hash", "schnellru", "serde", "serde_json", diff --git a/Cargo.toml b/Cargo.toml index 478c2b4536cb1..e66aa341314ea 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -321,6 +321,7 @@ serde_with = "3.3.0" humantime = "2.1" humantime-serde = "1.1" rand = "0.8.5" 
+rustc-hash = "1.1.0" schnellru = "0.2" strum = "0.26" rayon = "1.7" diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 461a84f3e89a5..f816cc2c0bfa7 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -37,7 +37,7 @@ tempfile = { workspace = true, optional = true } derive_more.workspace = true eyre.workspace = true paste.workspace = true -rustc-hash = "1.1.0" +rustc-hash.workspace = true # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 4365245cf79fa..ebb6e497f9aad 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -36,9 +36,9 @@ metrics.workspace = true aquamarine.workspace = true thiserror.workspace = true tracing.workspace = true +rustc-hash.workspace = true schnellru.workspace = true serde = { workspace = true, features = ["derive", "rc"], optional = true } -fnv = "1.0.7" bitflags.workspace = true auto_impl.workspace = true smallvec.workspace = true diff --git a/crates/transaction-pool/src/identifier.rs b/crates/transaction-pool/src/identifier.rs index 6ec1527bd1408..4e4bec4d1b1bf 100644 --- a/crates/transaction-pool/src/identifier.rs +++ b/crates/transaction-pool/src/identifier.rs @@ -1,5 +1,5 @@ -use fnv::FnvHashMap; use reth_primitives::Address; +use rustc_hash::FxHashMap; use std::collections::HashMap; /// An internal mapping of addresses. @@ -13,7 +13,7 @@ pub(crate) struct SenderIdentifiers { /// Assigned `SenderId` for an `Address`. address_to_id: HashMap, /// Reverse mapping of `SenderId` to `Address`. 
- sender_to_address: FnvHashMap, + sender_to_address: FxHashMap, } impl SenderIdentifiers { diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index 2815deaee7340..ef0766bed8d7f 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -3,7 +3,7 @@ use crate::{ pool::size::SizeTracker, PoolTransaction, SubPoolLimit, ValidPoolTransaction, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, }; -use fnv::FnvHashMap; +use rustc_hash::FxHashMap; use smallvec::SmallVec; use std::{ cmp::Ordering, @@ -40,7 +40,7 @@ pub struct ParkedPool { last_sender_submission: BTreeSet, /// Keeps track of the number of transactions in the pool by the sender and the last submission /// id. - sender_transaction_count: FnvHashMap, + sender_transaction_count: FxHashMap, /// Keeps track of the size of this pool. /// /// See also [`PoolTransaction::size`]. diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index cdd897448e8e8..44a90f1cf912c 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -18,7 +18,6 @@ use crate::{ PoolConfig, PoolResult, PoolTransaction, PriceBumpConfig, TransactionOrdering, ValidPoolTransaction, U256, }; -use fnv::FnvHashMap; use itertools::Itertools; use reth_primitives::{ constants::{ @@ -26,6 +25,7 @@ use reth_primitives::{ }, Address, TxHash, B256, }; +use rustc_hash::FxHashMap; use smallvec::SmallVec; use std::{ cmp::Ordering, @@ -44,7 +44,7 @@ use tracing::trace; /// include_mmd!("docs/mermaid/txpool.mmd") pub struct TxPool { /// Contains the currently known information about the senders. - sender_info: FnvHashMap, + sender_info: FxHashMap, /// pending subpool /// /// Holds transactions that are ready to be executed on the current state. @@ -903,7 +903,7 @@ pub(crate) struct AllTransactions { /// _All_ transaction in the pool sorted by their sender and nonce pair. 
txs: BTreeMap>, /// Tracks the number of transactions by sender that are currently in the pool. - tx_counter: FnvHashMap, + tx_counter: FxHashMap, /// The current block number the pool keeps track of. last_seen_block_number: u64, /// The current block hash the pool keeps track of. From b3bac08f6849de639fbff353f8f0daab1eb5b0ee Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 28 Apr 2024 06:11:58 +0200 Subject: [PATCH 367/700] chore(deps): weekly `cargo update` (#7937) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 72 ++++++++++++++++++++++++++---------------------------- 1 file changed, 35 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3c1b2b70f8840..e14f5db80463e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -160,7 +160,7 @@ dependencies = [ "itoa", "serde", "serde_json", - "winnow 0.6.6", + "winnow 0.6.7", ] [[package]] @@ -492,7 +492,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c8d6e74e4feeaa2bcfdecfd3da247ab53c67bd654ba1907270c32e02b142331" dependencies = [ - "winnow 0.6.6", + "winnow 0.6.7", ] [[package]] @@ -824,7 +824,7 @@ checksum = "136d4d23bcc79e27423727b36823d86233aad06dfea531837b038394d11e9928" dependencies = [ "concurrent-queue", "event-listener 5.3.0", - "event-listener-strategy 0.5.1", + "event-listener-strategy 0.5.2", "futures-core", "pin-project-lite", ] @@ -894,9 +894,9 @@ dependencies = [ [[package]] name = "async-task" -version = "4.7.0" +version = "4.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" @@ -959,7 +959,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d67782c3f868daa71d3533538e98a8e13713231969def7536e8039606fc46bf0" dependencies = [ - "fastrand 2.0.2", + "fastrand 2.1.0", "futures-core", "pin-project", "tokio", @@ -1164,18 +1164,16 @@ dependencies = [ [[package]] name = "blocking" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +checksum = "495f7104e962b7356f0aeb34247aca1fe7d2e783b346582db7f2904cb5717e88" dependencies = [ "async-channel 2.2.1", "async-lock", "async-task", - "fastrand 2.0.2", "futures-io", "futures-lite 2.3.0", "piper", - "tracing", ] [[package]] @@ -1711,9 +1709,9 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] @@ -2839,9 +2837,9 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332f51cb23d20b0de8458b86580878211da09bcd4503cb579c225b3d124cabb3" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ "event-listener 5.3.0", "pin-project-lite", @@ -2945,9 +2943,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "fastrlp" @@ -3013,9 +3011,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.28" +version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" +checksum = "4556222738635b7a3417ae6130d8f52201e45a0c4d1a907f0826383adb5f85e7" dependencies = [ "crc32fast", "miniz_oxide", @@ -3660,7 +3658,7 @@ dependencies = [ "http 0.2.12", "hyper 0.14.28", "log", - "rustls 0.21.11", + "rustls 0.21.12", "rustls-native-certs 0.6.3", "tokio", "tokio-rustls 0.24.1", @@ -5578,7 +5576,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" dependencies = [ "atomic-waker", - "fastrand 2.0.2", + "fastrand 2.1.0", "futures-io", ] @@ -8198,9 +8196,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.11" +version = "0.21.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", "ring 0.17.8", @@ -8486,9 +8484,9 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.198" +version = "1.0.199" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc" +checksum = "0c9f6e76df036c77cd94996771fb40db98187f096dd0b9af39c6c6e452ba966a" dependencies = [ "serde_derive", ] @@ -8504,9 +8502,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.198" +version = "1.0.199" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9" +checksum = "11bd257a6541e141e42ca6d24ae26f7714887b47e89aa739099104c7e4d3b7fc" dependencies = [ "proc-macro2", "quote", @@ -8589,9 +8587,9 @@ dependencies = [ [[package]] name = "serial_test" -version = "3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"adb86f9315df5df6a70eae0cc22395a44e544a0d8897586820770a35ede74449" +checksum = "4b4b487fe2acf240a021cf57c6b2b4903b1e78ca0ecd862a71b71d2a51fed77d" dependencies = [ "futures", "log", @@ -8603,9 +8601,9 @@ dependencies = [ [[package]] name = "serial_test_derive" -version = "3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9bb72430492e9549b0c4596725c0f82729bff861c45aa8099c0a8e67fc3b721" +checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", @@ -9091,7 +9089,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", - "fastrand 2.0.2", + "fastrand 2.1.0", "rustix", "windows-sys 0.52.0", ] @@ -9358,7 +9356,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.11", + "rustls 0.21.12", "tokio", ] @@ -9454,7 +9452,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.6", + "winnow 0.6.7", ] [[package]] @@ -9843,9 +9841,9 @@ checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" [[package]] name = "unicode-width" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" +checksum = "68f5e5f3158ecfd4b8ff6fe086db7c8467a2dfdac97fe420f2b7c4aa97af66d6" [[package]] name = "universal-hash" @@ -10320,9 +10318,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c976aaaa0e1f90dbb21e9587cdaf1d9679a1cde8875c0d6bd83ab96a208352" +checksum = "14b9415ee827af173ebb3f15f9083df5a122eb93572ec28741fb153356ea2578" dependencies = [ 
"memchr", ] From bf66a3dd27870e39ed4a99d1bbc6675e7bb4ff4d Mon Sep 17 00:00:00 2001 From: Elijah Hampton Date: Sun, 28 Apr 2024 05:44:18 -0400 Subject: [PATCH 368/700] Move network.rs example to its own folder (#7936) Co-authored-by: Elijah Hampton --- Cargo.lock | 34 ++++++------------ Cargo.toml | 2 +- examples/Cargo.toml | 30 ---------------- examples/README.md | 36 ++++++++++---------- examples/network/Cargo.toml | 13 +++++++ examples/{network.rs => network/src/main.rs} | 2 +- 6 files changed, 44 insertions(+), 73 deletions(-) delete mode 100644 examples/Cargo.toml create mode 100644 examples/network/Cargo.toml rename examples/{network.rs => network/src/main.rs} (96%) diff --git a/Cargo.lock b/Cargo.lock index e14f5db80463e..50fa59f017d3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2845,29 +2845,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "examples" -version = "0.0.0" -dependencies = [ - "async-trait", - "eyre", - "futures", - "reth-beacon-consensus", - "reth-blockchain-tree", - "reth-db", - "reth-network", - "reth-network-api", - "reth-primitives", - "reth-provider", - "reth-revm", - "reth-rpc-builder", - "reth-rpc-types", - "reth-rpc-types-compat", - "reth-tasks", - "reth-transaction-pool", - "tokio", -] - [[package]] name = "exex-minimal" version = "0.0.0" @@ -5066,6 +5043,17 @@ dependencies = [ "unsigned-varint 0.7.2", ] +[[package]] +name = "network" +version = "0.0.0" +dependencies = [ + "eyre", + "futures", + "reth-network", + "reth-provider", + "tokio", +] + [[package]] name = "network-txpool" version = "0.0.0" diff --git a/Cargo.toml b/Cargo.toml index e66aa341314ea..7edc963128415 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -72,7 +72,6 @@ members = [ "crates/transaction-pool/", "crates/trie/", "crates/trie-parallel/", - "examples/", "examples/node-custom-rpc/", "examples/beacon-api-sse/", "examples/node-event-hooks/", @@ -82,6 +81,7 @@ members = [ "examples/custom-dev-node/", "examples/custom-payload-builder/", 
"examples/manual-p2p/", + "examples/network/", "examples/network-txpool/", "examples/rpc-db/", "examples/txpool-tracing/", diff --git a/examples/Cargo.toml b/examples/Cargo.toml deleted file mode 100644 index 02c5717864f14..0000000000000 --- a/examples/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "examples" -version = "0.0.0" -publish = false -edition.workspace = true -license.workspace = true - -[dev-dependencies] -reth-primitives.workspace = true -reth-db.workspace = true -reth-provider.workspace = true -reth-rpc-builder.workspace = true -reth-rpc-types.workspace = true -reth-rpc-types-compat.workspace = true -reth-revm.workspace = true -reth-blockchain-tree.workspace = true -reth-beacon-consensus.workspace = true -reth-network-api.workspace = true -reth-network.workspace = true -reth-transaction-pool.workspace = true -reth-tasks.workspace = true - -eyre.workspace = true -futures.workspace = true -async-trait.workspace = true -tokio.workspace = true - -[[example]] -name = "network" -path = "network.rs" \ No newline at end of file diff --git a/examples/README.md b/examples/README.md index 574efe9618556..ea2c87c1bb355 100644 --- a/examples/README.md +++ b/examples/README.md @@ -10,16 +10,16 @@ to make a PR! 
## Node Builder -| Example | Description | -|---------------------------------------------------------------| ------------------------------------------------------------------------------------------------ | -| [Additional RPC namespace](./node-custom-rpc) | Illustrates how to add custom CLI parameters and set up a custom RPC namespace | -| [Custom event hooks](./node-event-hooks) | Illustrates how to hook to various node lifecycle events | -| [Custom dev node](./custom-dev-node) | Illustrates how to run a custom dev node programmatically and submit a transaction to it via RPC | -| [Custom EVM](./custom-evm) | Illustrates how to implement a node with a custom EVM | -| [Custom inspector](./custom-inspector) | Illustrates how to use a custom EVM inspector to trace new transactions | -| [Custom engine types](./custom-engine-types) | Illustrates how to create a node with custom engine types | -| [Custom node components](./custom-node-components) | Illustrates how to configure custom node components | -| [Custom payload builder](./custom-payload-builder) | Illustrates how to use a custom payload builder | +| Example | Description | +| -------------------------------------------------- | ------------------------------------------------------------------------------------------------ | +| [Additional RPC namespace](./node-custom-rpc) | Illustrates how to add custom CLI parameters and set up a custom RPC namespace | +| [Custom event hooks](./node-event-hooks) | Illustrates how to hook to various node lifecycle events | +| [Custom dev node](./custom-dev-node) | Illustrates how to run a custom dev node programmatically and submit a transaction to it via RPC | +| [Custom EVM](./custom-evm) | Illustrates how to implement a node with a custom EVM | +| [Custom inspector](./custom-inspector) | Illustrates how to use a custom EVM inspector to trace new transactions | +| [Custom engine types](./custom-engine-types) | Illustrates how to create a node with custom engine types | +| 
[Custom node components](./custom-node-components) | Illustrates how to configure custom node components | +| [Custom payload builder](./custom-payload-builder) | Illustrates how to use a custom payload builder | ## ExEx @@ -36,22 +36,22 @@ to make a PR! ## Database -| Example | Description | -| --------------------------- | --------------------------------------------------------------- | +| Example | Description | +| ------------------------ | --------------------------------------------------------------- | | [DB access](./db-access) | Illustrates how to access Reth's database in a separate process | ## Network -| Example | Description | -| ---------------------------------- | ------------------------------------------------------------ | -| [Standalone network](./network.rs) | Illustrates how to use the network as a standalone component | +| Example | Description | +| ------------------------------- | ------------------------------------------------------------ | +| [Standalone network](./network) | Illustrates how to use the network as a standalone component | ## Mempool -| Example | Description | -|------------------------------------------------------| -------------------------------------------------------------------------------------------------------------------------- | +| Example | Description | +| ---------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- | | [Trace pending transactions](./txpool-tracing) | Illustrates how to trace pending transactions as they arrive in the mempool | -| [Standalone txpool](./network-txpool) | Illustrates how to use the network as a standalone component together with a transaction pool with a custom pool validator | +| [Standalone txpool](./network-txpool) | Illustrates how to use the network as a standalone component together with a transaction pool with a custom pool validator | ## P2P diff --git 
a/examples/network/Cargo.toml b/examples/network/Cargo.toml new file mode 100644 index 0000000000000..b3b740dd8ff15 --- /dev/null +++ b/examples/network/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "network" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth-network.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } +futures.workspace = true +tokio.workspace = true +eyre.workspace = true \ No newline at end of file diff --git a/examples/network.rs b/examples/network/src/main.rs similarity index 96% rename from examples/network.rs rename to examples/network/src/main.rs index 18bf5cbcf9814..16482ca1fae27 100644 --- a/examples/network.rs +++ b/examples/network/src/main.rs @@ -3,7 +3,7 @@ //! Run with //! //! ```not_rust -//! cargo run --example network +//! cargo run --release -p network //! ``` use futures::StreamExt; From 1e94d9007e4761fd40e25b0a98aeb6fb77c7b912 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 28 Apr 2024 12:20:30 +0200 Subject: [PATCH 369/700] chore: rm redunant withdrawal fn (#7940) --- crates/rpc/rpc-types-compat/src/block.rs | 25 ++++++------------------ 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index 8c82686f91d08..b342f8a30b295 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -3,7 +3,7 @@ use crate::transaction::from_recovered_with_block_context; use alloy_rlp::Encodable; use reth_primitives::{ - Block as PrimitiveBlock, BlockWithSenders, Header as PrimitiveHeader, B256, U256, + Block as PrimitiveBlock, BlockWithSenders, Header as PrimitiveHeader, Withdrawals, B256, U256, }; use reth_rpc_types::{Block, BlockError, BlockTransactions, BlockTransactionsKind, Header}; @@ -144,17 +144,6 @@ pub fn from_primitive_with_hash(primitive_header: reth_primitives::SealedHeader) } } -fn 
from_primitive_withdrawal( - withdrawal: reth_primitives::Withdrawal, -) -> reth_rpc_types::Withdrawal { - reth_rpc_types::Withdrawal { - index: withdrawal.index, - address: withdrawal.address, - validator_index: withdrawal.validator_index, - amount: withdrawal.amount, - } -} - #[inline] fn from_block_with_transactions( block_length: usize, @@ -167,13 +156,11 @@ fn from_block_with_transactions( let mut header = from_primitive_with_hash(block.header.seal(block_hash)); header.total_difficulty = Some(total_difficulty); - let withdrawals = if header.withdrawals_root.is_some() { - block - .withdrawals - .map(|withdrawals| withdrawals.into_iter().map(from_primitive_withdrawal).collect()) - } else { - None - }; + let withdrawals = header + .withdrawals_root + .is_some() + .then(|| block.withdrawals.map(Withdrawals::into_inner)) + .flatten(); Block { header, From e18869f25b960feed2c5c776724dcbee7578731b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 28 Apr 2024 12:23:43 +0200 Subject: [PATCH 370/700] chore: rm redunant log fn (#7941) --- crates/rpc/rpc-types-compat/src/lib.rs | 1 - crates/rpc/rpc-types-compat/src/log.rs | 16 ---------------- 2 files changed, 17 deletions(-) delete mode 100644 crates/rpc/rpc-types-compat/src/log.rs diff --git a/crates/rpc/rpc-types-compat/src/lib.rs b/crates/rpc/rpc-types-compat/src/lib.rs index 7aabf43234d93..99eff4fa79300 100644 --- a/crates/rpc/rpc-types-compat/src/lib.rs +++ b/crates/rpc/rpc-types-compat/src/lib.rs @@ -12,6 +12,5 @@ pub mod block; pub mod engine; -pub mod log; pub mod proof; pub mod transaction; diff --git a/crates/rpc/rpc-types-compat/src/log.rs b/crates/rpc/rpc-types-compat/src/log.rs deleted file mode 100644 index 2b6d33c428acd..0000000000000 --- a/crates/rpc/rpc-types-compat/src/log.rs +++ /dev/null @@ -1,16 +0,0 @@ -//! Compatibility functions for rpc `Log` type. 
- -/// Creates a new rpc Log from a primitive log type from DB -#[inline] -pub fn from_primitive_log(log: reth_primitives::Log) -> reth_rpc_types::Log { - reth_rpc_types::Log { - inner: log, - block_hash: None, - block_number: None, - block_timestamp: None, - transaction_hash: None, - transaction_index: None, - log_index: None, - removed: false, - } -} From f3ba80093b3eae1a7ca0b2df84b4aca9d3a928f9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 28 Apr 2024 12:32:32 +0200 Subject: [PATCH 371/700] chore: simplify accesslist extraction (#7942) --- .../rpc-types-compat/src/transaction/mod.rs | 78 ++----------------- 1 file changed, 8 insertions(+), 70 deletions(-) diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index d0f4672a29f73..a441c4c299945 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -1,13 +1,10 @@ //! Compatibility functions for rpc `Transaction` type. 
-use alloy_rpc_types::request::{TransactionInput, TransactionRequest}; +use alloy_rpc_types::request::{TransactionInput, TransactionRequest}; use reth_primitives::{ - BlockNumber, Transaction as PrimitiveTransaction, TransactionSignedEcRecovered, - TxKind as PrimitiveTransactionKind, TxType, B256, + BlockNumber, TransactionSignedEcRecovered, TxKind as PrimitiveTransactionKind, TxType, B256, }; -#[cfg(feature = "optimism")] -use reth_rpc_types::optimism::OptimismTransactionFields; -use reth_rpc_types::{AccessList, AccessListItem, Transaction}; +use reth_rpc_types::Transaction; use signature::from_primitive_signature; pub use typed::*; @@ -45,7 +42,7 @@ fn fill( transaction_index: Option, ) -> Transaction { let signer = tx.signer(); - let mut signed_tx = tx.into_signed(); + let signed_tx = tx.into_signed(); let to = match signed_tx.kind() { PrimitiveTransactionKind::Create => None, @@ -77,51 +74,8 @@ fn fill( // let chain_id = signed_tx.chain_id().map(U64::from); let chain_id = signed_tx.chain_id(); - let mut blob_versioned_hashes = None; - - #[allow(unreachable_patterns)] - let access_list = match &mut signed_tx.transaction { - PrimitiveTransaction::Legacy(_) => None, - PrimitiveTransaction::Eip2930(tx) => Some(AccessList( - tx.access_list - .0 - .iter() - .map(|item| AccessListItem { - address: item.address.0.into(), - storage_keys: item.storage_keys.iter().map(|key| key.0.into()).collect(), - }) - .collect(), - )), - PrimitiveTransaction::Eip1559(tx) => Some(AccessList( - tx.access_list - .0 - .iter() - .map(|item| AccessListItem { - address: item.address.0.into(), - storage_keys: item.storage_keys.iter().map(|key| key.0.into()).collect(), - }) - .collect(), - )), - PrimitiveTransaction::Eip4844(tx) => { - // extract the blob hashes from the transaction - blob_versioned_hashes = Some(std::mem::take(&mut tx.blob_versioned_hashes)); - - Some(AccessList( - tx.access_list - .0 - .iter() - .map(|item| AccessListItem { - address: item.address.0.into(), - storage_keys: 
item.storage_keys.iter().map(|key| key.0.into()).collect(), - }) - .collect(), - )) - } - _ => { - // OP deposit tx - None - } - }; + let blob_versioned_hashes = signed_tx.blob_versioned_hashes(); + let access_list = signed_tx.access_list().cloned(); let signature = from_primitive_signature(*signed_tx.signature(), signed_tx.tx_type(), signed_tx.chain_id()); @@ -151,7 +105,7 @@ fn fill( blob_versioned_hashes, // Optimism fields #[cfg(feature = "optimism")] - other: OptimismTransactionFields { + other: reth_rpc_types::optimism::OptimismTransactionFields { source_hash: signed_tx.source_hash(), mint: signed_tx.mint().map(reth_primitives::U128::from), is_system_tx: signed_tx.is_deposit().then_some(signed_tx.is_system_transaction()), @@ -162,22 +116,6 @@ fn fill( } } -/// Convert [reth_primitives::AccessList] to [reth_rpc_types::AccessList] -pub fn from_primitive_access_list( - access_list: reth_primitives::AccessList, -) -> reth_rpc_types::AccessList { - reth_rpc_types::AccessList( - access_list - .0 - .into_iter() - .map(|item| reth_rpc_types::AccessListItem { - address: item.address.0.into(), - storage_keys: item.storage_keys.into_iter().map(|key| key.0.into()).collect(), - }) - .collect(), - ) -} - /// Convert [TransactionSignedEcRecovered] to [TransactionRequest] pub fn transaction_to_call_request(tx: TransactionSignedEcRecovered) -> TransactionRequest { let from = tx.signer(); @@ -187,7 +125,7 @@ pub fn transaction_to_call_request(tx: TransactionSignedEcRecovered) -> Transact let input = tx.transaction.input().clone(); let nonce = tx.transaction.nonce(); let chain_id = tx.transaction.chain_id(); - let access_list = tx.transaction.access_list().cloned().map(from_primitive_access_list); + let access_list = tx.transaction.access_list().cloned(); let max_fee_per_blob_gas = tx.transaction.max_fee_per_blob_gas(); let blob_versioned_hashes = tx.transaction.blob_versioned_hashes(); let tx_type = tx.transaction.tx_type(); From c535d59c8dada6029c9036560eadb5274e8fcd2e Mon 
Sep 17 00:00:00 2001 From: Qiwei Yang Date: Sun, 28 Apr 2024 21:15:35 +0800 Subject: [PATCH 372/700] refactor: extract GenesisAllocator type from primitives (#7932) Co-authored-by: Matthias Seitz --- Cargo.lock | 10 + Cargo.toml | 2 + crates/consensus/beacon/Cargo.toml | 1 + crates/consensus/beacon/src/engine/mod.rs | 6 +- crates/primitives/src/genesis.rs | 209 ------------------ testing/testing-utils/Cargo.toml | 17 ++ .../testing-utils/src/genesis_allocator.rs | 204 +++++++++++++++++ testing/testing-utils/src/lib.rs | 12 + 8 files changed, 248 insertions(+), 213 deletions(-) create mode 100644 testing/testing-utils/Cargo.toml create mode 100644 testing/testing-utils/src/genesis_allocator.rs create mode 100644 testing/testing-utils/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 50fa59f017d3e..54db17110766e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6395,6 +6395,7 @@ dependencies = [ "reth-stages-api", "reth-static-file", "reth-tasks", + "reth-testing-utils", "reth-tokio-util", "reth-tracing", "schnellru", @@ -7780,6 +7781,15 @@ dependencies = [ "tracing-futures", ] +[[package]] +name = "reth-testing-utils" +version = "0.2.0-beta.6" +dependencies = [ + "alloy-genesis", + "reth-primitives", + "secp256k1", +] + [[package]] name = "reth-tokio-util" version = "0.2.0-beta.6" diff --git a/Cargo.toml b/Cargo.toml index 7edc963128415..c970bf6a5392a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,6 +91,7 @@ members = [ "examples/exex/op-bridge/", "examples/db-access", "testing/ef-tests/", + "testing/testing-utils", ] default-members = ["bin/reth"] @@ -275,6 +276,7 @@ reth-trie = { path = "crates/trie" } reth-trie-parallel = { path = "crates/trie-parallel" } reth-optimism-consensus = { path = "crates/optimism/consensus" } reth-node-events = { path = "crates/node/events" } +reth-testing-utils = { path = "testing/testing-utils" } # revm revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } diff --git 
a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 38dd772af2cb2..4e35d06f0f49a 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -59,6 +59,7 @@ reth-downloaders.workspace = true reth-evm-ethereum.workspace = true reth-ethereum-engine-primitives.workspace = true reth-config.workspace = true +reth-testing-utils.workspace = true assert_matches.workspace = true diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 72fc972971cf7..428b95c0ba73c 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -2350,11 +2350,9 @@ mod tests { use super::*; use reth_db::test_utils::create_test_static_files_dir; use reth_interfaces::test_utils::generators::random_block; - use reth_primitives::{ - genesis::{Genesis, GenesisAllocator}, - Hardfork, U256, - }; + use reth_primitives::{genesis::Genesis, Hardfork, U256}; use reth_provider::test_utils::blocks::BlockchainTestData; + use reth_testing_utils::GenesisAllocator; #[tokio::test] async fn new_payload_before_forkchoice() { diff --git a/crates/primitives/src/genesis.rs b/crates/primitives/src/genesis.rs index 991b01bd7e653..c81d9488d7225 100644 --- a/crates/primitives/src/genesis.rs +++ b/crates/primitives/src/genesis.rs @@ -3,212 +3,3 @@ // re-export genesis types #[doc(inline)] pub use alloy_genesis::*; - -#[cfg(any(test, feature = "test-utils"))] -pub use allocator::GenesisAllocator; - -#[cfg(any(test, feature = "test-utils"))] -mod allocator { - use crate::{public_key_to_address, Address, Bytes, B256, U256}; - use alloy_genesis::GenesisAccount; - use secp256k1::{ - rand::{thread_rng, RngCore}, - Keypair, Secp256k1, - }; - use std::collections::{hash_map::Entry, BTreeMap, HashMap}; - - /// This helps create a custom genesis alloc by making it easy to add funded accounts with known - /// signers to the genesis block. 
- /// - /// # Example - /// ``` - /// # use reth_primitives::{ genesis::GenesisAllocator, Address, U256, hex, Bytes}; - /// # use std::str::FromStr; - /// let mut allocator = GenesisAllocator::default(); - /// - /// // This will add a genesis account to the alloc builder, with the provided balance. The - /// // signer for the account will be returned. - /// let (_signer, _addr) = allocator.new_funded_account(U256::from(100_000_000_000_000_000u128)); - /// - /// // You can also provide code for the account. - /// let code = Bytes::from_str("0x1234").unwrap(); - /// let (_second_signer, _second_addr) = - /// allocator.new_funded_account_with_code(U256::from(100_000_000_000_000_000u128), code); - /// - /// // You can also add an account with a specific address. - /// // This will not return a signer, since the address is provided by the user and the signer - /// // may be unknown. - /// let addr = "0Ac1dF02185025F65202660F8167210A80dD5086".parse::
().unwrap(); - /// allocator.add_funded_account_with_address(addr, U256::from(100_000_000_000_000_000u128)); - /// - /// // Once you're done adding accounts, you can build the alloc. - /// let alloc = allocator.build(); - /// ``` - #[derive(Debug)] - pub struct GenesisAllocator<'a> { - /// The genesis alloc to be built. - alloc: HashMap, - /// The rng to use for generating key pairs. - rng: Box, - } - - impl<'a> GenesisAllocator<'a> { - /// Initialize a new alloc builder with the provided rng. - pub fn new_with_rng(rng: &'a mut R) -> Self - where - R: RngCore + std::fmt::Debug, - { - Self { alloc: HashMap::default(), rng: Box::new(rng) } - } - - /// Use the provided rng for generating key pairs. - pub fn with_rng(mut self, rng: &'a mut R) -> Self - where - R: RngCore + std::fmt::Debug, - { - self.rng = Box::new(rng); - self - } - - /// Add a funded account to the genesis alloc. - /// - /// Returns the key pair for the account and the account's address. - pub fn new_funded_account(&mut self, balance: U256) -> (Keypair, Address) { - let secp = Secp256k1::new(); - let pair = Keypair::new(&secp, &mut self.rng); - let address = public_key_to_address(pair.public_key()); - - self.alloc.insert(address, GenesisAccount::default().with_balance(balance)); - - (pair, address) - } - - /// Add a funded account to the genesis alloc with the provided code. - /// - /// Returns the key pair for the account and the account's address. - pub fn new_funded_account_with_code( - &mut self, - balance: U256, - code: Bytes, - ) -> (Keypair, Address) { - let secp = Secp256k1::new(); - let pair = Keypair::new(&secp, &mut self.rng); - let address = public_key_to_address(pair.public_key()); - - self.alloc.insert( - address, - GenesisAccount::default().with_balance(balance).with_code(Some(code)), - ); - - (pair, address) - } - - /// Adds a funded account to the genesis alloc with the provided storage. - /// - /// Returns the key pair for the account and the account's address. 
- pub fn new_funded_account_with_storage( - &mut self, - balance: U256, - storage: BTreeMap, - ) -> (Keypair, Address) { - let secp = Secp256k1::new(); - let pair = Keypair::new(&secp, &mut self.rng); - let address = public_key_to_address(pair.public_key()); - - self.alloc.insert( - address, - GenesisAccount::default().with_balance(balance).with_storage(Some(storage)), - ); - - (pair, address) - } - - /// Adds an account with code and storage to the genesis alloc. - /// - /// Returns the key pair for the account and the account's address. - pub fn new_account_with_code_and_storage( - &mut self, - code: Bytes, - storage: BTreeMap, - ) -> (Keypair, Address) { - let secp = Secp256k1::new(); - let pair = Keypair::new(&secp, &mut self.rng); - let address = public_key_to_address(pair.public_key()); - - self.alloc.insert( - address, - GenesisAccount::default().with_code(Some(code)).with_storage(Some(storage)), - ); - - (pair, address) - } - - /// Adds an account with code to the genesis alloc. - /// - /// Returns the key pair for the account and the account's address. - pub fn new_account_with_code(&mut self, code: Bytes) -> (Keypair, Address) { - let secp = Secp256k1::new(); - let pair = Keypair::new(&secp, &mut self.rng); - let address = public_key_to_address(pair.public_key()); - - self.alloc.insert(address, GenesisAccount::default().with_code(Some(code))); - - (pair, address) - } - - /// Add a funded account to the genesis alloc with the provided address. - /// - /// Neither the key pair nor the account will be returned, since the address is provided by - /// the user and the signer may be unknown. - pub fn add_funded_account_with_address(&mut self, address: Address, balance: U256) { - self.alloc.insert(address, GenesisAccount::default().with_balance(balance)); - } - - /// Adds the given [GenesisAccount] to the genesis alloc. - /// - /// Returns the key pair for the account and the account's address. 
- pub fn add_account(&mut self, account: GenesisAccount) -> Address { - let secp = Secp256k1::new(); - let pair = Keypair::new(&secp, &mut self.rng); - let address = public_key_to_address(pair.public_key()); - - self.alloc.insert(address, account); - - address - } - - /// Gets the account for the provided address. - /// - /// If it does not exist, this returns `None`. - pub fn get_account(&self, address: &Address) -> Option<&GenesisAccount> { - self.alloc.get(address) - } - - /// Gets a mutable version of the account for the provided address, if it exists. - pub fn get_account_mut(&mut self, address: &Address) -> Option<&mut GenesisAccount> { - self.alloc.get_mut(address) - } - - /// Gets an [Entry] for the provided address. - pub fn account_entry(&mut self, address: Address) -> Entry<'_, Address, GenesisAccount> { - self.alloc.entry(address) - } - - /// Build the genesis alloc. - pub fn build(self) -> HashMap { - self.alloc - } - } - - impl Default for GenesisAllocator<'_> { - fn default() -> Self { - Self { alloc: HashMap::default(), rng: Box::new(thread_rng()) } - } - } - - /// Helper trait that encapsulates [RngCore], and [Debug](std::fmt::Debug) to get around rules - /// for auto traits (Opt-in built-in traits). - trait RngDebug: RngCore + std::fmt::Debug {} - - impl RngDebug for T where T: RngCore + std::fmt::Debug {} -} diff --git a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml new file mode 100644 index 0000000000000..97a4c78dfcc37 --- /dev/null +++ b/testing/testing-utils/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "reth-testing-utils" +description = "Testing utils for reth." 
+version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +secp256k1.workspace = true +alloy-genesis.workspace = true +reth-primitives.workspace = true diff --git a/testing/testing-utils/src/genesis_allocator.rs b/testing/testing-utils/src/genesis_allocator.rs new file mode 100644 index 0000000000000..33b7188d19119 --- /dev/null +++ b/testing/testing-utils/src/genesis_allocator.rs @@ -0,0 +1,204 @@ +//! Helps create a custom genesis alloc by making it easy to add funded accounts with known +//! signers to the genesis block. + +use alloy_genesis::GenesisAccount; +use reth_primitives::{public_key_to_address, Address, Bytes, B256, U256}; +use secp256k1::{ + rand::{thread_rng, RngCore}, + Keypair, Secp256k1, +}; +use std::collections::{hash_map::Entry, BTreeMap, HashMap}; + +/// This helps create a custom genesis alloc by making it easy to add funded accounts with known +/// signers to the genesis block. +/// +/// # Example +/// ``` +/// # use reth_primitives::{Address, U256, hex, Bytes}; +/// # use reth_testing_utils::GenesisAllocator; +/// # use std::str::FromStr; +/// let mut allocator = GenesisAllocator::default(); +/// +/// // This will add a genesis account to the alloc builder, with the provided balance. The +/// // signer for the account will be returned. +/// let (_signer, _addr) = allocator.new_funded_account(U256::from(100_000_000_000_000_000u128)); +/// +/// // You can also provide code for the account. +/// let code = Bytes::from_str("0x1234").unwrap(); +/// let (_second_signer, _second_addr) = +/// allocator.new_funded_account_with_code(U256::from(100_000_000_000_000_000u128), code); +/// +/// // You can also add an account with a specific address. +/// // This will not return a signer, since the address is provided by the user and the signer +/// // may be unknown. 
+/// let addr = "0Ac1dF02185025F65202660F8167210A80dD5086".parse::
().unwrap(); +/// allocator.add_funded_account_with_address(addr, U256::from(100_000_000_000_000_000u128)); +/// +/// // Once you're done adding accounts, you can build the alloc. +/// let alloc = allocator.build(); +/// ``` +#[derive(Debug)] +pub struct GenesisAllocator<'a> { + /// The genesis alloc to be built. + alloc: HashMap, + /// The rng to use for generating key pairs. + rng: Box, +} + +impl<'a> GenesisAllocator<'a> { + /// Initialize a new alloc builder with the provided rng. + pub fn new_with_rng(rng: &'a mut R) -> Self + where + R: RngCore + std::fmt::Debug, + { + Self { alloc: HashMap::default(), rng: Box::new(rng) } + } + + /// Use the provided rng for generating key pairs. + pub fn with_rng(mut self, rng: &'a mut R) -> Self + where + R: RngCore + std::fmt::Debug, + { + self.rng = Box::new(rng); + self + } + + /// Add a funded account to the genesis alloc. + /// + /// Returns the key pair for the account and the account's address. + pub fn new_funded_account(&mut self, balance: U256) -> (Keypair, Address) { + let secp = Secp256k1::new(); + let pair = Keypair::new(&secp, &mut self.rng); + let address = public_key_to_address(pair.public_key()); + + self.alloc.insert(address, GenesisAccount::default().with_balance(balance)); + + (pair, address) + } + + /// Add a funded account to the genesis alloc with the provided code. + /// + /// Returns the key pair for the account and the account's address. + pub fn new_funded_account_with_code( + &mut self, + balance: U256, + code: Bytes, + ) -> (Keypair, Address) { + let secp = Secp256k1::new(); + let pair = Keypair::new(&secp, &mut self.rng); + let address = public_key_to_address(pair.public_key()); + + self.alloc + .insert(address, GenesisAccount::default().with_balance(balance).with_code(Some(code))); + + (pair, address) + } + + /// Adds a funded account to the genesis alloc with the provided storage. + /// + /// Returns the key pair for the account and the account's address. 
+ pub fn new_funded_account_with_storage( + &mut self, + balance: U256, + storage: BTreeMap, + ) -> (Keypair, Address) { + let secp = Secp256k1::new(); + let pair = Keypair::new(&secp, &mut self.rng); + let address = public_key_to_address(pair.public_key()); + + self.alloc.insert( + address, + GenesisAccount::default().with_balance(balance).with_storage(Some(storage)), + ); + + (pair, address) + } + + /// Adds an account with code and storage to the genesis alloc. + /// + /// Returns the key pair for the account and the account's address. + pub fn new_account_with_code_and_storage( + &mut self, + code: Bytes, + storage: BTreeMap, + ) -> (Keypair, Address) { + let secp = Secp256k1::new(); + let pair = Keypair::new(&secp, &mut self.rng); + let address = public_key_to_address(pair.public_key()); + + self.alloc.insert( + address, + GenesisAccount::default().with_code(Some(code)).with_storage(Some(storage)), + ); + + (pair, address) + } + + /// Adds an account with code to the genesis alloc. + /// + /// Returns the key pair for the account and the account's address. + pub fn new_account_with_code(&mut self, code: Bytes) -> (Keypair, Address) { + let secp = Secp256k1::new(); + let pair = Keypair::new(&secp, &mut self.rng); + let address = public_key_to_address(pair.public_key()); + + self.alloc.insert(address, GenesisAccount::default().with_code(Some(code))); + + (pair, address) + } + + /// Add a funded account to the genesis alloc with the provided address. + /// + /// Neither the key pair nor the account will be returned, since the address is provided by + /// the user and the signer may be unknown. + pub fn add_funded_account_with_address(&mut self, address: Address, balance: U256) { + self.alloc.insert(address, GenesisAccount::default().with_balance(balance)); + } + + /// Adds the given [GenesisAccount] to the genesis alloc. + /// + /// Returns the key pair for the account and the account's address. 
+ pub fn add_account(&mut self, account: GenesisAccount) -> Address { + let secp = Secp256k1::new(); + let pair = Keypair::new(&secp, &mut self.rng); + let address = public_key_to_address(pair.public_key()); + + self.alloc.insert(address, account); + + address + } + + /// Gets the account for the provided address. + /// + /// If it does not exist, this returns `None`. + pub fn get_account(&self, address: &Address) -> Option<&GenesisAccount> { + self.alloc.get(address) + } + + /// Gets a mutable version of the account for the provided address, if it exists. + pub fn get_account_mut(&mut self, address: &Address) -> Option<&mut GenesisAccount> { + self.alloc.get_mut(address) + } + + /// Gets an [Entry] for the provided address. + pub fn account_entry(&mut self, address: Address) -> Entry<'_, Address, GenesisAccount> { + self.alloc.entry(address) + } + + /// Build the genesis alloc. + pub fn build(self) -> HashMap { + self.alloc + } +} + +impl Default for GenesisAllocator<'_> { + fn default() -> Self { + Self { alloc: HashMap::default(), rng: Box::new(thread_rng()) } + } +} + +/// Helper trait that encapsulates [RngCore], and [Debug](std::fmt::Debug) to get around rules +/// for auto traits (Opt-in built-in traits). +trait RngDebug: RngCore + std::fmt::Debug {} + +impl RngDebug for T where T: RngCore + std::fmt::Debug {} diff --git a/testing/testing-utils/src/lib.rs b/testing/testing-utils/src/lib.rs new file mode 100644 index 0000000000000..0cf98c6ff29a5 --- /dev/null +++ b/testing/testing-utils/src/lib.rs @@ -0,0 +1,12 @@ +//! Testing utilities. 
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +pub mod genesis_allocator; + +pub use genesis_allocator::GenesisAllocator; From ead0fbf8fc21b5983aa70f0e893d1c6254aa7995 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 28 Apr 2024 16:40:36 +0200 Subject: [PATCH 373/700] chore: rm anyhow dep (#7944) --- Cargo.lock | 1 - crates/primitives/Cargo.toml | 1 - crates/primitives/benches/integer_list.rs | 7 ++++--- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 54db17110766e..d20f04fa81d4e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7389,7 +7389,6 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-trie", - "anyhow", "arbitrary", "assert_matches", "byteorder", diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 4fa5046654ce6..deaee23006835 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -81,7 +81,6 @@ hash-db = "~0.15" plain_hasher = "0.2" sucds = "0.8.1" -anyhow = "1.0.75" # necessary so we don't hit a "undeclared 'std'": # https://github.com/paradigmxyz/reth/pull/177#discussion_r1021172198 diff --git a/crates/primitives/benches/integer_list.rs b/crates/primitives/benches/integer_list.rs index 3945d48c91966..56b0e9e383d00 100644 --- a/crates/primitives/benches/integer_list.rs +++ b/crates/primitives/benches/integer_list.rs @@ -121,7 +121,8 @@ mod elias_fano { let mut builder = EliasFanoBuilder::new( list.as_ref().iter().max().map_or(0, |max| max + 1), list.as_ref().len(), - )?; + ) + .map_err(|err| EliasFanoError::InvalidInput(err.to_string()))?; builder.extend(list.as_ref().iter().copied()); Ok(Self(builder.build())) } @@ -241,8 +242,8 @@ mod elias_fano { #[derive(Debug, 
thiserror::Error)] pub enum EliasFanoError { /// The provided input is invalid. - #[error(transparent)] - InvalidInput(#[from] anyhow::Error), + #[error("{0}")] + InvalidInput(String), /// Failed to deserialize data into type. #[error("failed to deserialize data into type")] FailedDeserialize, From cf6d34cf3da62704ebcbfe74d953249727c3d069 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Sun, 28 Apr 2024 09:18:46 -0600 Subject: [PATCH 374/700] expose `PayloadTaskGuard` from `reth_basic_payload_builder` (#7945) --- crates/payload/basic/src/lib.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 7903dfa8dea54..c89e8b94930a3 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -226,12 +226,13 @@ pub struct PrecachedState { /// Restricts how many generator tasks can be executed at once. #[derive(Debug, Clone)] -struct PayloadTaskGuard(Arc); +pub struct PayloadTaskGuard(Arc); // === impl PayloadTaskGuard === impl PayloadTaskGuard { - fn new(max_payload_tasks: usize) -> Self { + /// Constructs `Self` with a maximum task count of `max_payload_tasks`. 
+ pub fn new(max_payload_tasks: usize) -> Self { Self(Arc::new(Semaphore::new(max_payload_tasks))) } } From 1f6753b84a3ea3f75a6bcb9134e03f7b286ea130 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 29 Apr 2024 12:30:49 +0200 Subject: [PATCH 375/700] chore: mode node-builder to node dir (#7952) --- Cargo.toml | 4 ++-- crates/{node-builder => node/builder}/Cargo.toml | 0 crates/{node-builder => node/builder}/README.md | 0 .../{node-builder => node/builder}/docs/mermaid/builder.mmd | 0 crates/{node-builder => node/builder}/src/builder/mod.rs | 0 crates/{node-builder => node/builder}/src/builder/states.rs | 0 .../{node-builder => node/builder}/src/components/builder.rs | 0 crates/{node-builder => node/builder}/src/components/mod.rs | 0 .../{node-builder => node/builder}/src/components/network.rs | 0 .../{node-builder => node/builder}/src/components/payload.rs | 0 crates/{node-builder => node/builder}/src/components/pool.rs | 0 crates/{node-builder => node/builder}/src/exex.rs | 0 crates/{node-builder => node/builder}/src/handle.rs | 0 crates/{node-builder => node/builder}/src/hooks.rs | 0 crates/{node-builder => node/builder}/src/launch/common.rs | 0 crates/{node-builder => node/builder}/src/launch/mod.rs | 0 crates/{node-builder => node/builder}/src/lib.rs | 0 crates/{node-builder => node/builder}/src/node.rs | 0 crates/{node-builder => node/builder}/src/rpc.rs | 0 crates/{node-builder => node/builder}/src/setup.rs | 0 20 files changed, 2 insertions(+), 2 deletions(-) rename crates/{node-builder => node/builder}/Cargo.toml (100%) rename crates/{node-builder => node/builder}/README.md (100%) rename crates/{node-builder => node/builder}/docs/mermaid/builder.mmd (100%) rename crates/{node-builder => node/builder}/src/builder/mod.rs (100%) rename crates/{node-builder => node/builder}/src/builder/states.rs (100%) rename crates/{node-builder => node/builder}/src/components/builder.rs (100%) rename crates/{node-builder => node/builder}/src/components/mod.rs (100%) 
rename crates/{node-builder => node/builder}/src/components/network.rs (100%) rename crates/{node-builder => node/builder}/src/components/payload.rs (100%) rename crates/{node-builder => node/builder}/src/components/pool.rs (100%) rename crates/{node-builder => node/builder}/src/exex.rs (100%) rename crates/{node-builder => node/builder}/src/handle.rs (100%) rename crates/{node-builder => node/builder}/src/hooks.rs (100%) rename crates/{node-builder => node/builder}/src/launch/common.rs (100%) rename crates/{node-builder => node/builder}/src/launch/mod.rs (100%) rename crates/{node-builder => node/builder}/src/lib.rs (100%) rename crates/{node-builder => node/builder}/src/node.rs (100%) rename crates/{node-builder => node/builder}/src/rpc.rs (100%) rename crates/{node-builder => node/builder}/src/setup.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index c970bf6a5392a..70e36cd94e7d2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,7 +50,7 @@ members = [ "crates/engine-primitives/", "crates/ethereum/engine-primitives/", "crates/node-ethereum/", - "crates/node-builder/", + "crates/node/builder/", "crates/optimism/consensus", "crates/optimism/node/", "crates/optimism/evm/", @@ -223,7 +223,7 @@ reth-dns-discovery = { path = "crates/net/dns" } reth-e2e-test-utils = { path = "crates/e2e-test-utils" } reth-engine-primitives = { path = "crates/engine-primitives" } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } -reth-node-builder = { path = "crates/node-builder" } +reth-node-builder = { path = "crates/node/builder" } reth-node-ethereum = { path = "crates/node-ethereum" } reth-node-optimism = { path = "crates/optimism/node" } reth-evm-optimism = { path = "crates/optimism/evm" } diff --git a/crates/node-builder/Cargo.toml b/crates/node/builder/Cargo.toml similarity index 100% rename from crates/node-builder/Cargo.toml rename to crates/node/builder/Cargo.toml diff --git a/crates/node-builder/README.md b/crates/node/builder/README.md similarity 
index 100% rename from crates/node-builder/README.md rename to crates/node/builder/README.md diff --git a/crates/node-builder/docs/mermaid/builder.mmd b/crates/node/builder/docs/mermaid/builder.mmd similarity index 100% rename from crates/node-builder/docs/mermaid/builder.mmd rename to crates/node/builder/docs/mermaid/builder.mmd diff --git a/crates/node-builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs similarity index 100% rename from crates/node-builder/src/builder/mod.rs rename to crates/node/builder/src/builder/mod.rs diff --git a/crates/node-builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs similarity index 100% rename from crates/node-builder/src/builder/states.rs rename to crates/node/builder/src/builder/states.rs diff --git a/crates/node-builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs similarity index 100% rename from crates/node-builder/src/components/builder.rs rename to crates/node/builder/src/components/builder.rs diff --git a/crates/node-builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs similarity index 100% rename from crates/node-builder/src/components/mod.rs rename to crates/node/builder/src/components/mod.rs diff --git a/crates/node-builder/src/components/network.rs b/crates/node/builder/src/components/network.rs similarity index 100% rename from crates/node-builder/src/components/network.rs rename to crates/node/builder/src/components/network.rs diff --git a/crates/node-builder/src/components/payload.rs b/crates/node/builder/src/components/payload.rs similarity index 100% rename from crates/node-builder/src/components/payload.rs rename to crates/node/builder/src/components/payload.rs diff --git a/crates/node-builder/src/components/pool.rs b/crates/node/builder/src/components/pool.rs similarity index 100% rename from crates/node-builder/src/components/pool.rs rename to crates/node/builder/src/components/pool.rs diff --git 
a/crates/node-builder/src/exex.rs b/crates/node/builder/src/exex.rs similarity index 100% rename from crates/node-builder/src/exex.rs rename to crates/node/builder/src/exex.rs diff --git a/crates/node-builder/src/handle.rs b/crates/node/builder/src/handle.rs similarity index 100% rename from crates/node-builder/src/handle.rs rename to crates/node/builder/src/handle.rs diff --git a/crates/node-builder/src/hooks.rs b/crates/node/builder/src/hooks.rs similarity index 100% rename from crates/node-builder/src/hooks.rs rename to crates/node/builder/src/hooks.rs diff --git a/crates/node-builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs similarity index 100% rename from crates/node-builder/src/launch/common.rs rename to crates/node/builder/src/launch/common.rs diff --git a/crates/node-builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs similarity index 100% rename from crates/node-builder/src/launch/mod.rs rename to crates/node/builder/src/launch/mod.rs diff --git a/crates/node-builder/src/lib.rs b/crates/node/builder/src/lib.rs similarity index 100% rename from crates/node-builder/src/lib.rs rename to crates/node/builder/src/lib.rs diff --git a/crates/node-builder/src/node.rs b/crates/node/builder/src/node.rs similarity index 100% rename from crates/node-builder/src/node.rs rename to crates/node/builder/src/node.rs diff --git a/crates/node-builder/src/rpc.rs b/crates/node/builder/src/rpc.rs similarity index 100% rename from crates/node-builder/src/rpc.rs rename to crates/node/builder/src/rpc.rs diff --git a/crates/node-builder/src/setup.rs b/crates/node/builder/src/setup.rs similarity index 100% rename from crates/node-builder/src/setup.rs rename to crates/node/builder/src/setup.rs From 1fbcdeb065b2549bccb8b131abbd08d6a270df84 Mon Sep 17 00:00:00 2001 From: Luca Provini Date: Mon, 29 Apr 2024 13:15:38 +0200 Subject: [PATCH 376/700] Tracking current stage on pipeline unwind (#6558) --- crates/node/events/src/node.rs | 11 +++++++++++ 1 
file changed, 11 insertions(+) diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 32bf66e1b795d..2689226ea693e 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -217,6 +217,17 @@ impl NodeState { self.current_stage = None; } } + PipelineEvent::Unwind { stage_id, input } => { + let current_stage = CurrentStage { + stage_id, + eta: Eta::default(), + checkpoint: input.checkpoint, + target: Some(input.unwind_to), + entities_checkpoint: input.checkpoint.entities(), + }; + + self.current_stage = Some(current_stage); + } _ => (), } } From 0501a437110ef4b6ae3ca35b0270e1e51f2e8426 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Mon, 29 Apr 2024 05:34:11 -0600 Subject: [PATCH 377/700] feat: expose `PendingPayload` from `reth-basic-payload-builder` (#7946) --- crates/payload/basic/src/lib.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index c89e8b94930a3..4ee55b388e2fc 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -581,13 +581,23 @@ where /// A future that resolves to the result of the block building job. #[derive(Debug)] -struct PendingPayload

{ +pub struct PendingPayload

{ /// The marker to cancel the job on drop _cancel: Cancelled, /// The channel to send the result to. payload: oneshot::Receiver, PayloadBuilderError>>, } +impl

PendingPayload

{ + /// Constructs a `PendingPayload` future. + pub fn new( + cancel: Cancelled, + payload: oneshot::Receiver, PayloadBuilderError>>, + ) -> Self { + Self { _cancel: cancel, payload } + } +} + impl

Future for PendingPayload

{ type Output = Result, PayloadBuilderError>; From 3fc5cf646112e8d31d456f926fb07023af5829dc Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 29 Apr 2024 14:09:07 +0200 Subject: [PATCH 378/700] feat: use ComponentsBuilder as associated type in Node trait (#7957) --- crates/e2e-test-utils/src/lib.rs | 21 ++++---- crates/node-ethereum/src/node.rs | 19 +++----- crates/node-ethereum/tests/it/builder.rs | 8 +++ crates/node/builder/src/builder/mod.rs | 62 +++--------------------- crates/node/builder/src/node.rs | 24 +++------ crates/optimism/node/src/node.rs | 9 ++-- examples/custom-engine-types/src/main.rs | 15 +++--- 7 files changed, 51 insertions(+), 107 deletions(-) diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 8fdaa044b9012..aa7d46428bef8 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -6,8 +6,7 @@ use reth::{ }; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_node_builder::{ - components::{Components, NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, - FullNodeTypesAdapter, NodeAdapter, + components::NodeComponentsBuilder, FullNodeTypesAdapter, Node, NodeAdapter, RethFullAdapter, }; use reth_primitives::ChainSpec; use reth_provider::providers::BlockchainProvider; @@ -45,10 +44,7 @@ pub async fn setup( is_dev: bool, ) -> eyre::Result<(Vec>, TaskManager, Wallet)> where - N: Default + reth_node_builder::Node>, - N::PoolBuilder: PoolBuilder>, - N::NetworkBuilder: NetworkBuilder, TmpPool>, - N::PayloadBuilder: PayloadServiceBuilder, TmpPool>, + N: Default + Node>, { let tasks = TaskManager::current(); let exec = tasks.executor(); @@ -103,11 +99,14 @@ where // Type aliases type TmpDB = Arc>; -type TmpPool = <>>::PoolBuilder as PoolBuilder< - TmpNodeAdapter, ->>::Pool; type TmpNodeAdapter = FullNodeTypesAdapter>; +type Adapter = NodeAdapter< + RethFullAdapter, + <>>::ComponentsBuilder as NodeComponentsBuilder< + RethFullAdapter, + >>::Components, +>; + /// Type alias 
for a type of NodeHelper -pub type NodeHelperType = - NodeTestContext, Components, TmpPool>>>; +pub type NodeHelperType = NodeTestContext>; diff --git a/crates/node-ethereum/src/node.rs b/crates/node-ethereum/src/node.rs index e7caa927a5289..5a1a03554a0b1 100644 --- a/crates/node-ethereum/src/node.rs +++ b/crates/node-ethereum/src/node.rs @@ -5,8 +5,8 @@ use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGenera use reth_network::NetworkHandle; use reth_node_builder::{ components::{ComponentsBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, - node::{FullNodeTypes, Node, NodeTypes}, - BuilderContext, PayloadBuilderConfig, + node::{FullNodeTypes, NodeTypes}, + BuilderContext, Node, PayloadBuilderConfig, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_provider::CanonStateSubscriptions; @@ -50,18 +50,11 @@ impl Node for EthereumNode where N: FullNodeTypes, { - type PoolBuilder = EthereumPoolBuilder; - type NetworkBuilder = EthereumNetworkBuilder; - type PayloadBuilder = EthereumPayloadBuilder; + type ComponentsBuilder = + ComponentsBuilder; - fn components( - self, - ) -> ComponentsBuilder { - ComponentsBuilder::default() - .node_types::() - .pool(EthereumPoolBuilder::default()) - .payload(EthereumPayloadBuilder::default()) - .network(EthereumNetworkBuilder::default()) + fn components_builder(self) -> Self::ComponentsBuilder { + Self::components() } } diff --git a/crates/node-ethereum/tests/it/builder.rs b/crates/node-ethereum/tests/it/builder.rs index 7cfc0d705b03b..1f45792367405 100644 --- a/crates/node-ethereum/tests/it/builder.rs +++ b/crates/node-ethereum/tests/it/builder.rs @@ -33,3 +33,11 @@ fn test_basic_setup() { }) .check_launch(); } + +#[test] +fn test_node_setup() { + let config = NodeConfig::test(); + let db = create_test_rw_db(); + let _builder = + NodeBuilder::new(config).with_database(db).node(EthereumNode::default()).check_launch(); +} diff --git 
a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 9649360ebe919..6365fca4de118 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -3,7 +3,7 @@ #![allow(clippy::type_complexity, missing_debug_implementations)] use crate::{ - components::{Components, ComponentsBuilder, NodeComponentsBuilder, PoolBuilder}, + components::NodeComponentsBuilder, node::FullNode, rpc::{RethRpcServerHandles, RpcContext}, DefaultNodeLauncher, Node, NodeHandle, @@ -204,28 +204,11 @@ where pub fn node( self, node: N, - ) -> NodeBuilderWithComponents< - RethFullAdapter, - ComponentsBuilder< - RethFullAdapter, - N::PoolBuilder, - N::PayloadBuilder, - N::NetworkBuilder, - >, - > + ) -> NodeBuilderWithComponents, N::ComponentsBuilder> where N: Node>, - N::PoolBuilder: PoolBuilder>, - N::NetworkBuilder: crate::components::NetworkBuilder< - RethFullAdapter, - >>::Pool, - >, - N::PayloadBuilder: crate::components::PayloadServiceBuilder< - RethFullAdapter, - >>::Pool, - >, { - self.with_types(node.clone()).with_components(node.components()) + self.with_types(node.clone()).with_components(node.components_builder()) } } @@ -271,33 +254,16 @@ where } /// Preconfigures the node with a specific node implementation. + /// + /// This is a convenience method that sets the node's types and components in one call. 
pub fn node( self, node: N, - ) -> WithLaunchContext< - NodeBuilderWithComponents< - RethFullAdapter, - ComponentsBuilder< - RethFullAdapter, - N::PoolBuilder, - N::PayloadBuilder, - N::NetworkBuilder, - >, - >, - > + ) -> WithLaunchContext, N::ComponentsBuilder>> where N: Node>, - N::PoolBuilder: PoolBuilder>, - N::NetworkBuilder: crate::components::NetworkBuilder< - RethFullAdapter, - >>::Pool, - >, - N::PayloadBuilder: crate::components::PayloadServiceBuilder< - RethFullAdapter, - >>::Pool, - >, { - self.with_types(node.clone()).with_components(node.components()) + self.with_types(node.clone()).with_components(node.components_builder()) } /// Launches a preconfigured [Node] @@ -312,24 +278,12 @@ where NodeHandle< NodeAdapter< RethFullAdapter, - Components< - RethFullAdapter, - >>::Pool, - >, + >>::Components, >, >, > where N: Node>, - N::PoolBuilder: PoolBuilder>, - N::NetworkBuilder: crate::components::NetworkBuilder< - RethFullAdapter, - >>::Pool, - >, - N::PayloadBuilder: crate::components::PayloadServiceBuilder< - RethFullAdapter, - >>::Pool, - >, { self.node(node).launch().await } diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index 766bae14fe88c..7831f29d0b045 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -1,7 +1,4 @@ -use crate::{ - components::ComponentsBuilder, - rpc::{RethRpcServerHandles, RpcRegistry}, -}; +use crate::rpc::{RethRpcServerHandles, RpcRegistry}; use reth_network::NetworkHandle; use reth_node_api::FullNodeComponents; use reth_node_core::{ @@ -19,23 +16,18 @@ use reth_tasks::TaskExecutor; use std::sync::Arc; // re-export the node api types +use crate::components::NodeComponentsBuilder; pub use reth_node_api::{FullNodeTypes, NodeTypes}; -/// A [Node] is a [NodeTypes] that comes with preconfigured components. +/// A [crate::Node] is a [NodeTypes] that comes with preconfigured components. /// /// This can be used to configure the builder with a preset of components. 
-pub trait Node: NodeTypes + Clone { - /// The type that builds the node's pool. - type PoolBuilder; - /// The type that builds the node's network. - type NetworkBuilder; - /// The type that builds the node's payload service. - type PayloadBuilder; +pub trait Node: NodeTypes + Clone { + /// The type that builds the node's components. + type ComponentsBuilder: NodeComponentsBuilder; - /// Returns the [ComponentsBuilder] for the node. - fn components( - self, - ) -> ComponentsBuilder; + /// Returns a [NodeComponentsBuilder] for the node. + fn components_builder(self) -> Self::ComponentsBuilder; } /// The launched node with all components including RPC handlers. diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 0d6e4996a6010..8f6a3c19b47ad 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -55,13 +55,10 @@ impl Node for OptimismNode where N: FullNodeTypes, { - type PoolBuilder = OptimismPoolBuilder; - type NetworkBuilder = OptimismNetworkBuilder; - type PayloadBuilder = OptimismPayloadBuilder; + type ComponentsBuilder = + ComponentsBuilder; - fn components( - self, - ) -> ComponentsBuilder { + fn components_builder(self) -> Self::ComponentsBuilder { let Self { args } = self; Self::components(args) } diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index a2ade9cc10659..79639e1baa12b 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -202,13 +202,14 @@ impl Node for MyCustomNode where N: FullNodeTypes, { - type PoolBuilder = EthereumPoolBuilder; - type NetworkBuilder = EthereumNetworkBuilder; - type PayloadBuilder = CustomPayloadServiceBuilder; - - fn components( - self, - ) -> ComponentsBuilder { + type ComponentsBuilder = ComponentsBuilder< + N, + EthereumPoolBuilder, + CustomPayloadServiceBuilder, + EthereumNetworkBuilder, + >; + + fn components_builder(self) -> Self::ComponentsBuilder { 
ComponentsBuilder::default() .node_types::() .pool(EthereumPoolBuilder::default()) From 76e3aa9fa8db0f0e0c65821db95a712e0b5f4229 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 29 Apr 2024 14:54:15 +0200 Subject: [PATCH 379/700] feat: enable optimism hardforks on genesis parse (#7935) --- crates/primitives/src/chain/spec.rs | 97 +++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index a1ae18ad01ea3..4ce26f84b84e7 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -996,6 +996,9 @@ impl ChainSpec { impl From for ChainSpec { fn from(genesis: Genesis) -> Self { + #[cfg(feature = "optimism")] + let optimism_genesis_info = OptimismGenesisInfo::extract_from(&genesis); + // Block-based hardforks let hardfork_opts = [ (Hardfork::Homestead, genesis.config.homestead_block), @@ -1011,6 +1014,8 @@ impl From for ChainSpec { (Hardfork::London, genesis.config.london_block), (Hardfork::ArrowGlacier, genesis.config.arrow_glacier_block), (Hardfork::GrayGlacier, genesis.config.gray_glacier_block), + #[cfg(feature = "optimism")] + (Hardfork::Bedrock, optimism_genesis_info.bedrock_block), ]; let mut hardforks = hardfork_opts .iter() @@ -1037,6 +1042,12 @@ impl From for ChainSpec { let time_hardfork_opts = [ (Hardfork::Shanghai, genesis.config.shanghai_time), (Hardfork::Cancun, genesis.config.cancun_time), + #[cfg(feature = "optimism")] + (Hardfork::Regolith, optimism_genesis_info.regolith_time), + #[cfg(feature = "optimism")] + (Hardfork::Ecotone, optimism_genesis_info.ecotone_time), + #[cfg(feature = "optimism")] + (Hardfork::Canyon, optimism_genesis_info.canyon_time), ]; let time_hardforks = time_hardfork_opts @@ -1691,6 +1702,42 @@ impl DepositContract { } } +#[cfg(feature = "optimism")] +struct OptimismGenesisInfo { + bedrock_block: Option, + regolith_time: Option, + ecotone_time: Option, + canyon_time: Option, +} + +#[cfg(feature = 
"optimism")] +impl OptimismGenesisInfo { + fn extract_from(genesis: &Genesis) -> Self { + Self { + bedrock_block: genesis + .config + .extra_fields + .get("bedrockBlock") + .and_then(|value| value.as_u64()), + regolith_time: genesis + .config + .extra_fields + .get("regolithTime") + .and_then(|value| value.as_u64()), + ecotone_time: genesis + .config + .extra_fields + .get("ecotoneTime") + .and_then(|value| value.as_u64()), + canyon_time: genesis + .config + .extra_fields + .get("canyonTime") + .and_then(|value| value.as_u64()), + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -3272,4 +3319,54 @@ Post-merge hard forks (timestamp based): fn is_bedrock_active() { assert!(!OP_MAINNET.is_bedrock_active_at_block(1)) } + + #[cfg(feature = "optimism")] + #[test] + fn parse_optimism_hardforks() { + let geth_genesis = r#" + { + "config": { + "bedrockBlock": 10, + "regolithTime": 20, + "ecotoneTime": 30, + "canyonTime": 40, + "optimism": { + "eip1559Elasticity": 50, + "eip1559Denominator": 60, + "eip1559DenominatorCanyon": 70 + } + } + } + "#; + let genesis: Genesis = serde_json::from_str(geth_genesis).unwrap(); + + let actual_bedrock_block = genesis.config.extra_fields.get("bedrockBlock"); + assert_eq!(actual_bedrock_block, Some(serde_json::Value::from(10)).as_ref()); + let actual_regolith_timestamp = genesis.config.extra_fields.get("regolithTime"); + assert_eq!(actual_regolith_timestamp, Some(serde_json::Value::from(20)).as_ref()); + let actual_ecotone_timestamp = genesis.config.extra_fields.get("ecotoneTime"); + assert_eq!(actual_ecotone_timestamp, Some(serde_json::Value::from(30)).as_ref()); + let actual_canyon_timestamp = genesis.config.extra_fields.get("canyonTime"); + assert_eq!(actual_canyon_timestamp, Some(serde_json::Value::from(40)).as_ref()); + + let optimism_object = genesis.config.extra_fields.get("optimism").unwrap(); + assert_eq!( + optimism_object, + &serde_json::json!({ + "eip1559Elasticity": 50, + "eip1559Denominator": 60, + 
"eip1559DenominatorCanyon": 70 + }) + ); + let chain_spec: ChainSpec = genesis.into(); + assert!(!chain_spec.is_fork_active_at_block(Hardfork::Bedrock, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, 0)); + + assert!(chain_spec.is_fork_active_at_block(Hardfork::Bedrock, 10)); + assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, 20)); + assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, 30)); + assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, 40)); + } } From 14ed7196c258545992efa478739a361f2fde7f1e Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 29 Apr 2024 13:56:22 +0100 Subject: [PATCH 380/700] chore(storage): use `TAKE` const instead of `UNWIND` (#7959) --- .../provider/src/providers/database/provider.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index ba85a4a4005ad..c96a059379f47 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -387,7 +387,7 @@ impl DatabaseProvider { /// 1. Take the old value from the changeset /// 2. Take the new value from the local state /// 3. 
Set the local state to the value in the changeset - pub fn unwind_or_peek_state( + pub fn unwind_or_peek_state( &self, range: RangeInclusive, ) -> ProviderResult { @@ -408,8 +408,8 @@ impl DatabaseProvider { let storage_range = BlockNumberAddress::range(range.clone()); let storage_changeset = - self.get_or_take::(storage_range)?; - let account_changeset = self.get_or_take::(range)?; + self.get_or_take::(storage_range)?; + let account_changeset = self.get_or_take::(range)?; // iterate previous value and get plain state value to create changeset // Double option around Account represent if Account state is know (first option) and @@ -478,7 +478,7 @@ impl DatabaseProvider { .push(old_storage); } - if UNWIND { + if TAKE { // iterate over local plain state remove all account and all storages. for (address, (old_account, new_account, storage)) in state.iter() { // revert account if needed. @@ -515,7 +515,7 @@ impl DatabaseProvider { // iterate over block body and create ExecutionResult let mut receipt_iter = self - .get_or_take::(from_transaction_num..=to_transaction_num)? + .get_or_take::(from_transaction_num..=to_transaction_num)? .into_iter(); let mut receipts = Vec::new(); From af2da06a0dbc64ae9581ac3f1061eb6574c0158a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 29 Apr 2024 15:18:23 +0200 Subject: [PATCH 381/700] chore: add required trait bounds to DB type (#7960) --- crates/node/api/src/node.rs | 13 ++++++++----- crates/node/builder/src/builder/mod.rs | 2 +- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 3fd158b6a442c..db4bdd9617406 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -1,7 +1,10 @@ //! 
Traits for configuring a node use crate::{primitives::NodePrimitives, ConfigureEvm, EngineTypes}; -use reth_db::database::Database; +use reth_db::{ + database::Database, + database_metrics::{DatabaseMetadata, DatabaseMetrics}, +}; use reth_network::NetworkHandle; use reth_payload_builder::PayloadBuilderHandle; use reth_provider::FullProvider; @@ -25,11 +28,11 @@ pub trait NodeTypes: Send + Sync + 'static { fn evm_config(&self) -> Self::Evm; } -/// A helper type that is downstream of the [NodeTypes] trait and adds stateful components to the +/// A helper trait that is downstream of the [NodeTypes] trait and adds stateful components to the /// node. pub trait FullNodeTypes: NodeTypes + 'static { - /// Underlying database type. - type DB: Database + Clone + 'static; + /// Underlying database type used by the node to store and retrieve data. + type DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static; /// The provider type used to interact with the node. type Provider: FullProvider; } @@ -71,7 +74,7 @@ impl FullNodeTypes for FullNodeTypesAdapter, - DB: Database + Clone + 'static, + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, { type DB = DB; type Provider = Provider; diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 6365fca4de118..1da23fc04f794 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -187,7 +187,7 @@ impl NodeBuilder { impl NodeBuilder where - DB: Database + Unpin + Clone + 'static, + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, { /// Configures the types of the node. 
pub fn with_types(self, types: T) -> NodeBuilderWithTypes> From b2c3d0c0b341c8e5e2762ea5f6a1ae018a1c521c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 29 Apr 2024 15:21:07 +0200 Subject: [PATCH 382/700] chore: update codeowners (#7961) --- CODEOWNERS | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CODEOWNERS b/CODEOWNERS index be8243ea2c2c9..bd86e2e584412 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -7,7 +7,7 @@ crates/exex @onbjerg @shekhirin crates/metrics @onbjerg crates/net/ @emhane @mattsse @Rjected crates/net/downloaders/ @onbjerg @rkrasiuk -crates/node-builder/ @mattsse @Rjected @onbjerg +crates/node/ @mattsse @Rjected @onbjerg crates/node-core/ @mattsse @Rjected @onbjerg crates/node-ethereum/ @mattsse @Rjected crates/payload/ @mattsse @Rjected @@ -23,4 +23,5 @@ crates/tracing @onbjerg crates/transaction-pool/ @mattsse crates/trie @rkrasiuk crates/trie-parallel @rkrasiuk +crates/optimism @mattsse .github/ @onbjerg @gakonst @DaniPopes From ee70351751abe61da3bc9f3b52f2e47c839c2328 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 29 Apr 2024 15:25:53 +0200 Subject: [PATCH 383/700] test: rm redundant helper trait (#7962) --- .../testing-utils/src/genesis_allocator.rs | 20 ++++++++++--------- testing/testing-utils/src/lib.rs | 1 + 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/testing/testing-utils/src/genesis_allocator.rs b/testing/testing-utils/src/genesis_allocator.rs index 33b7188d19119..067f68343a4b0 100644 --- a/testing/testing-utils/src/genesis_allocator.rs +++ b/testing/testing-utils/src/genesis_allocator.rs @@ -7,7 +7,10 @@ use secp256k1::{ rand::{thread_rng, RngCore}, Keypair, Secp256k1, }; -use std::collections::{hash_map::Entry, BTreeMap, HashMap}; +use std::{ + collections::{hash_map::Entry, BTreeMap, HashMap}, + fmt, +}; /// This helps create a custom genesis alloc by making it easy to add funded accounts with known /// signers to the genesis block. 
@@ -37,19 +40,18 @@ use std::collections::{hash_map::Entry, BTreeMap, HashMap}; /// // Once you're done adding accounts, you can build the alloc. /// let alloc = allocator.build(); /// ``` -#[derive(Debug)] pub struct GenesisAllocator<'a> { /// The genesis alloc to be built. alloc: HashMap, /// The rng to use for generating key pairs. - rng: Box, + rng: Box, } impl<'a> GenesisAllocator<'a> { /// Initialize a new alloc builder with the provided rng. pub fn new_with_rng(rng: &'a mut R) -> Self where - R: RngCore + std::fmt::Debug, + R: RngCore, { Self { alloc: HashMap::default(), rng: Box::new(rng) } } @@ -197,8 +199,8 @@ impl Default for GenesisAllocator<'_> { } } -/// Helper trait that encapsulates [RngCore], and [Debug](std::fmt::Debug) to get around rules -/// for auto traits (Opt-in built-in traits). -trait RngDebug: RngCore + std::fmt::Debug {} - -impl RngDebug for T where T: RngCore + std::fmt::Debug {} +impl fmt::Debug for GenesisAllocator<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("GenesisAllocator").field("alloc", &self.alloc).finish_non_exhaustive() + } +} diff --git a/testing/testing-utils/src/lib.rs b/testing/testing-utils/src/lib.rs index 0cf98c6ff29a5..27b54b19e5ba4 100644 --- a/testing/testing-utils/src/lib.rs +++ b/testing/testing-utils/src/lib.rs @@ -6,6 +6,7 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] pub mod genesis_allocator; From a8cd1f71a03c773c24659fc28bfed2ba5f2bd97b Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Apr 2024 14:32:42 +0100 Subject: [PATCH 384/700] chore: add test to `Compact` derived types to avoid backwards incompatibilities (#7822) Co-authored-by: Oliver Nordbjerg Co-authored-by: Oliver Nordbjerg --- .../codecs/derive/src/compact/flags.rs | 25 ++++- .../storage/codecs/derive/src/compact/mod.rs 
| 32 ++++--- crates/storage/codecs/src/alloy/withdrawal.rs | 18 ++++ crates/storage/db/Cargo.toml | 12 ++- .../storage/db/src/tables/codecs/compact.rs | 92 +++++++++++++++++++ 5 files changed, 163 insertions(+), 16 deletions(-) diff --git a/crates/storage/codecs/derive/src/compact/flags.rs b/crates/storage/codecs/derive/src/compact/flags.rs index 650d97ea82412..24757d8e6b91c 100644 --- a/crates/storage/codecs/derive/src/compact/flags.rs +++ b/crates/storage/codecs/derive/src/compact/flags.rs @@ -36,7 +36,7 @@ pub(crate) fn generate_flag_struct( }; if total_bits == 0 { - return placeholder_flag_struct(&flags_ident) + return placeholder_flag_struct(ident, &flags_ident) } let (total_bytes, unused_bits) = pad_flag_struct(total_bits, &mut field_flags); @@ -51,9 +51,16 @@ pub(crate) fn generate_flag_struct( let docs = format!("Fieldset that facilitates compacting the parent type. Used bytes: {total_bytes} | Unused bits: {unused_bits}"); + let bitflag_encoded_bytes = format!("Used bytes by [`{flags_ident}`]"); // Generate the flag struct. quote! { + impl #ident { + #[doc = #bitflag_encoded_bytes] + pub const fn bitflag_encoded_bytes() -> usize { + #total_bytes as usize + } + } pub use #mod_flags_ident::#flags_ident; #[allow(non_snake_case)] mod #mod_flags_ident { @@ -146,8 +153,22 @@ fn pad_flag_struct(total_bits: u8, field_flags: &mut Vec) -> (u8, } /// Placeholder struct for when there are no bitfields to be added. -fn placeholder_flag_struct(flags: &Ident) -> TokenStream2 { +fn placeholder_flag_struct(ident: &Ident, flags: &Ident) -> TokenStream2 { + let bitflag_encoded_bytes = format!("Used bytes by [`{flags}`]"); + let bitflag_unused_bits = format!("Unused bits for new fields by [`{flags}`]"); quote! { + impl #ident { + #[doc = #bitflag_encoded_bytes] + pub const fn bitflag_encoded_bytes() -> usize { + 0 + } + + #[doc = #bitflag_unused_bits] + pub const fn bitflag_unused_bits() -> usize { + 0 + } + } + /// Placeholder struct for when there is no need for a fieldset. 
Doesn't actually write or read any data. #[derive(Debug, Default)] pub struct #flags { diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index 7614fa8328411..e67adb6fd99a1 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -185,18 +185,18 @@ mod tests { #[test] fn gen() { let f_struct = quote! { - #[derive(Debug, PartialEq, Clone)] - pub struct TestStruct { - f_u64: u64, - f_u256: U256, - f_bool_t: bool, - f_bool_f: bool, - f_option_none: Option, - f_option_some: Option, - f_option_some_u64: Option, - f_vec_empty: Vec, - f_vec_some: Vec

, - } + #[derive(Debug, PartialEq, Clone)] + pub struct TestStruct { + f_u64: u64, + f_u256: U256, + f_bool_t: bool, + f_bool_f: bool, + f_option_none: Option, + f_option_some: Option, + f_option_some_u64: Option, + f_vec_empty: Vec, + f_vec_some: Vec
, + } }; // Generate code that will impl the `Compact` trait. @@ -208,7 +208,15 @@ mod tests { // Expected output in a TokenStream format. Commas matter! let should_output = quote! { + impl TestStruct { + #[doc = "Used bytes by [`TestStructFlags`]"] + pub const fn bitflag_encoded_bytes() -> usize { + 2u8 as usize + } + } + pub use TestStruct_flags::TestStructFlags; + #[allow(non_snake_case)] mod TestStruct_flags { use bytes::Buf; diff --git a/crates/storage/codecs/src/alloy/withdrawal.rs b/crates/storage/codecs/src/alloy/withdrawal.rs index 0849b7e4a49fe..5cdc1a6675c3a 100644 --- a/crates/storage/codecs/src/alloy/withdrawal.rs +++ b/crates/storage/codecs/src/alloy/withdrawal.rs @@ -59,4 +59,22 @@ mod tests { assert_eq!(withdrawal, decoded) } } + + // each value in the database has an extra field named flags that encodes metadata about other + // fields in the value, e.g. offset and length. + // + // this check is to ensure we do not inadvertently add too many fields to a struct which would + // expand the flags field and break backwards compatibility + #[test] + fn test_ensure_backwards_compatibility() { + #[cfg(not(feature = "optimism"))] + { + assert_eq!(Withdrawal::bitflag_encoded_bytes(), 2); + } + + #[cfg(feature = "optimism")] + { + assert_eq!(Withdrawal::bitflag_encoded_bytes(), 2); + } + } } diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index f816cc2c0bfa7..97b556346d7c4 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -16,7 +16,10 @@ workspace = true reth-primitives.workspace = true reth-interfaces.workspace = true reth-codecs.workspace = true -reth-libmdbx = { workspace = true, optional = true, features = ["return-borrowed", "read-tx-timeouts"] } +reth-libmdbx = { workspace = true, optional = true, features = [ + "return-borrowed", + "read-tx-timeouts", +] } reth-nippy-jar.workspace = true reth-tracing.workspace = true @@ -58,7 +61,11 @@ serde_json.workspace = true tempfile.workspace = true 
test-fuzz.workspace = true -pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] } +pprof = { workspace = true, features = [ + "flamegraph", + "frame-pointer", + "criterion", +] } criterion.workspace = true iai-callgrind = "0.10.2" @@ -81,6 +88,7 @@ arbitrary = [ "dep:proptest", "dep:proptest-derive", ] +optimism = [] [[bench]] name = "hash_keys" diff --git a/crates/storage/db/src/tables/codecs/compact.rs b/crates/storage/db/src/tables/codecs/compact.rs index c302c6a4823b1..452f5c6324474 100644 --- a/crates/storage/db/src/tables/codecs/compact.rs +++ b/crates/storage/db/src/tables/codecs/compact.rs @@ -121,3 +121,95 @@ macro_rules! add_wrapper_struct { add_wrapper_struct!((U256, CompactU256)); add_wrapper_struct!((u64, CompactU64)); add_wrapper_struct!((ClientVersion, CompactClientVersion)); + +#[cfg(test)] +mod tests { + use crate::{ + codecs::{ + compact::{CompactClientVersion, CompactU64}, + CompactU256, + }, + models::{StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals}, + }; + use reth_primitives::{ + stage::{ + AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, + ExecutionCheckpoint, HeadersCheckpoint, IndexHistoryCheckpoint, StageCheckpoint, + StageUnitCheckpoint, StorageHashingCheckpoint, + }, + Account, Header, PruneCheckpoint, PruneMode, PruneSegment, Receipt, ReceiptWithBloom, + SealedHeader, TxEip1559, TxEip2930, TxEip4844, TxLegacy, Withdrawals, + }; + + // each value in the database has an extra field named flags that encodes metadata about other + // fields in the value, e.g. offset and length. 
+ // + // this check is to ensure we do not inadvertently add too many fields to a struct which would + // expand the flags field and break backwards compatibility + #[test] + fn test_ensure_backwards_compatibility() { + #[cfg(not(feature = "optimism"))] + { + assert_eq!(Account::bitflag_encoded_bytes(), 2); + assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1); + assert_eq!(CompactClientVersion::bitflag_encoded_bytes(), 0); + assert_eq!(CompactU256::bitflag_encoded_bytes(), 1); + assert_eq!(CompactU64::bitflag_encoded_bytes(), 1); + assert_eq!(EntitiesCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(ExecutionCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(Header::bitflag_encoded_bytes(), 4); + assert_eq!(HeadersCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(IndexHistoryCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); + assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); + assert_eq!(Receipt::bitflag_encoded_bytes(), 1); + assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); + assert_eq!(SealedHeader::bitflag_encoded_bytes(), 0); + assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); + assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); + assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(TxEip1559::bitflag_encoded_bytes(), 4); + assert_eq!(TxEip2930::bitflag_encoded_bytes(), 3); + assert_eq!(TxEip4844::bitflag_encoded_bytes(), 5); + assert_eq!(TxLegacy::bitflag_encoded_bytes(), 3); + assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); + } + + #[cfg(feature = "optimism")] + { + assert_eq!(Account::bitflag_encoded_bytes(), 2); + 
assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1); + assert_eq!(CompactClientVersion::bitflag_encoded_bytes(), 0); + assert_eq!(CompactU256::bitflag_encoded_bytes(), 1); + assert_eq!(CompactU64::bitflag_encoded_bytes(), 1); + assert_eq!(EntitiesCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(ExecutionCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(Header::bitflag_encoded_bytes(), 4); + assert_eq!(HeadersCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(IndexHistoryCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); + assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); + assert_eq!(Receipt::bitflag_encoded_bytes(), 2); + assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); + assert_eq!(SealedHeader::bitflag_encoded_bytes(), 0); + assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); + assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); + assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(TxEip1559::bitflag_encoded_bytes(), 4); + assert_eq!(TxEip2930::bitflag_encoded_bytes(), 3); + assert_eq!(TxEip4844::bitflag_encoded_bytes(), 5); + assert_eq!(TxLegacy::bitflag_encoded_bytes(), 3); + assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); + } + } +} From fd8fdcfd4ba2d830f2795c7c17f7f5bfa49ba388 Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Mon, 29 Apr 2024 23:30:42 +0800 Subject: [PATCH 385/700] refactor: remove futureUnordered in ipc (#7920) --- crates/rpc/ipc/src/server/connection.rs | 13 +- crates/rpc/ipc/src/server/future.rs | 27 +-- crates/rpc/ipc/src/server/ipc.rs | 3 +- crates/rpc/ipc/src/server/mod.rs | 237 
++++++++++++++++-------- 4 files changed, 162 insertions(+), 118 deletions(-) diff --git a/crates/rpc/ipc/src/server/connection.rs b/crates/rpc/ipc/src/server/connection.rs index 05f7a53a9d787..2aadc6e2bca0d 100644 --- a/crates/rpc/ipc/src/server/connection.rs +++ b/crates/rpc/ipc/src/server/connection.rs @@ -9,7 +9,7 @@ use std::{ pin::Pin, task::{Context, Poll}, }; -use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; +use tokio::io::{AsyncRead, AsyncWrite}; use tokio_util::codec::Framed; use tower::Service; @@ -18,17 +18,6 @@ pub(crate) type JsonRpcStream = Framed; #[pin_project::pin_project] pub(crate) struct IpcConn(#[pin] pub(crate) T); -impl IpcConn> -where - T: AsyncRead + AsyncWrite + Unpin, -{ - /// Create a response for when the server is busy and can't accept more requests. - pub(crate) async fn reject_connection(self) { - let mut parts = self.0.into_parts(); - let _ = parts.io.write_all(b"Too many connections. Please try again later.").await; - } -} - impl Stream for IpcConn> where T: AsyncRead + AsyncWrite, diff --git a/crates/rpc/ipc/src/server/future.rs b/crates/rpc/ipc/src/server/future.rs index f807af4499316..85c69c2a64b01 100644 --- a/crates/rpc/ipc/src/server/future.rs +++ b/crates/rpc/ipc/src/server/future.rs @@ -27,8 +27,7 @@ //! Utilities for handling async code. use std::sync::Arc; - -use tokio::sync::{watch, OwnedSemaphorePermit, Semaphore, TryAcquireError}; +use tokio::sync::watch; #[derive(Debug, Clone)] pub(crate) struct StopHandle(watch::Receiver<()>); @@ -59,27 +58,3 @@ impl ServerHandle { self.0.closed().await } } - -/// Limits the number of connections. 
-pub(crate) struct ConnectionGuard(Arc); - -impl ConnectionGuard { - pub(crate) fn new(limit: usize) -> Self { - Self(Arc::new(Semaphore::new(limit))) - } - - pub(crate) fn try_acquire(&self) -> Option { - match self.0.clone().try_acquire_owned() { - Ok(guard) => Some(guard), - Err(TryAcquireError::Closed) => { - unreachable!("Semaphore::Close is never called and can't be closed") - } - Err(TryAcquireError::NoPermits) => None, - } - } - - #[allow(dead_code)] - pub(crate) fn available_connections(&self) -> usize { - self.0.available_permits() - } -} diff --git a/crates/rpc/ipc/src/server/ipc.rs b/crates/rpc/ipc/src/server/ipc.rs index daf7d1dc0e626..c73d9bb93674b 100644 --- a/crates/rpc/ipc/src/server/ipc.rs +++ b/crates/rpc/ipc/src/server/ipc.rs @@ -1,7 +1,5 @@ //! IPC request handling adapted from [`jsonrpsee`] http request handling -use std::sync::Arc; - use futures::{stream::FuturesOrdered, StreamExt}; use jsonrpsee::{ batch_response_error, @@ -17,6 +15,7 @@ use jsonrpsee::{ }, BatchResponseBuilder, MethodResponse, ResponsePayload, }; +use std::sync::Arc; use tokio::sync::OwnedSemaphorePermit; use tokio_util::either::Either; use tracing::instrument; diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index 7239249e1c194..ed0eadb4a6df5 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -2,16 +2,17 @@ use crate::server::{ connection::{IpcConn, JsonRpcStream}, - future::{ConnectionGuard, StopHandle}, + future::StopHandle, }; use futures::StreamExt; -use futures_util::{future::Either, stream::FuturesUnordered}; +use futures_util::{future::Either, AsyncWriteExt}; use interprocess::local_socket::tokio::{LocalSocketListener, LocalSocketStream}; use jsonrpsee::{ core::TEN_MB_SIZE_BYTES, server::{ middleware::rpc::{RpcLoggerLayer, RpcServiceT}, - AlreadyStoppedError, IdProvider, RandomIntegerIdProvider, + AlreadyStoppedError, ConnectionGuard, ConnectionPermit, IdProvider, + RandomIntegerIdProvider, }, 
BoundedSubscriptions, MethodSink, Methods, }; @@ -24,10 +25,10 @@ use std::{ }; use tokio::{ io::{AsyncRead, AsyncWrite}, - sync::{oneshot, watch, OwnedSemaphorePermit}, + sync::{oneshot, watch}, }; use tower::{layer::util::Identity, Layer, Service}; -use tracing::{debug, trace, warn, Instrument}; +use tracing::{debug, instrument, trace, warn, Instrument}; // re-export so can be used during builder setup use crate::{ server::{ @@ -150,68 +151,44 @@ where // signal that we're ready to accept connections on_ready.send(Ok(())).ok(); - let message_buffer_capacity = self.cfg.message_buffer_capacity; - let max_request_body_size = self.cfg.max_request_body_size; - let max_response_body_size = self.cfg.max_response_body_size; - let max_log_length = self.cfg.max_log_length; - let id_provider = self.id_provider; - let max_subscriptions_per_connection = self.cfg.max_subscriptions_per_connection; - let mut id: u32 = 0; let connection_guard = ConnectionGuard::new(self.cfg.max_connections as usize); - let mut connections = FuturesUnordered::new(); let stopped = stop_handle.clone().shutdown(); tokio::pin!(stopped); + let (drop_on_completion, mut process_connection_awaiter) = mpsc::channel::<()>(1); + trace!("accepting ipc connections"); loop { match try_accept_conn(&listener, stopped).await { AcceptConnection::Established { local_socket_stream, stop } => { - trace!("established new connection"); - let ipc = IpcConn(tokio_util::codec::Decoder::framed( - StreamCodec::stream_incoming(), - local_socket_stream.compat(), - )); - - let conn = match connection_guard.try_acquire() { - Some(conn) => conn, - None => { - warn!("Too many IPC connections. Please try again later."); - connections.push(tokio::spawn(ipc.reject_connection().in_current_span())); - stopped = stop; - continue; - } + let Some(conn_permit) = connection_guard.try_acquire() else { + let (mut _reader, mut writer) = local_socket_stream.into_split(); + let _ = writer.write_all(b"Too many connections. 
Please try again later.").await; + drop((_reader, writer)); + stopped = stop; + continue; }; - let (tx, rx) = mpsc::channel::(message_buffer_capacity as usize); - let method_sink = MethodSink::new_with_limit(tx, max_response_body_size); - let tower_service = TowerServiceNoHttp { - inner: ServiceData { - methods: methods.clone(), - max_request_body_size, - max_response_body_size, - max_log_length, - id_provider: id_provider.clone(), - stop_handle: stop_handle.clone(), - max_subscriptions_per_connection, - conn_id: id, - conn: Arc::new(conn), - bounded_subscriptions: BoundedSubscriptions::new( - max_subscriptions_per_connection, - ), - method_sink, - }, - rpc_middleware: self.rpc_middleware.clone(), - }; + let max_conns = connection_guard.max_connections(); + let curr_conns = max_conns - connection_guard.available_connections(); + trace!("Accepting new connection {}/{}", curr_conns, max_conns); + + let conn_permit = Arc::new(conn_permit); - let service = self.http_middleware.service(tower_service); - connections.push(tokio::spawn(process_connection( - ipc, - service, - stop_handle.clone(), - rx, - ).in_current_span())); + process_connection(ProcessConnection{ + http_middleware: &self.http_middleware, + rpc_middleware: self.rpc_middleware.clone(), + conn_permit, + conn_id: id, + server_cfg: self.cfg.clone(), + stop_handle: stop_handle.clone(), + drop_on_completion: drop_on_completion.clone(), + methods: methods.clone(), + id_provider: self.id_provider.clone(), + local_socket_stream, + }); id = id.wrapping_add(1); stopped = stop; @@ -224,11 +201,14 @@ where } } - // FuturesUnordered won't poll anything until this line but because the - // tasks are spawned (so that they can progress independently) - // then this just makes sure that all tasks are completed before - // returning from this function. 
- while connections.next().await.is_some() {} + // Drop the last Sender + drop(drop_on_completion); + + // Once this channel is closed it is safe to assume that all connections have been gracefully shutdown + while process_connection_awaiter.recv().await.is_some() { + // Generally, messages should not be sent across this channel, + // but we'll loop here to wait for `None` just to be on the safe side + } } } @@ -279,30 +259,22 @@ pub struct IpcServerStartError { pub(crate) struct ServiceData { /// Registered server methods. pub(crate) methods: Methods, - /// Max request body size. - pub(crate) max_request_body_size: u32, - /// Max request body size. - pub(crate) max_response_body_size: u32, - /// Max length for logging for request and response - /// - /// Logs bigger than this limit will be truncated. - pub(crate) max_log_length: u32, /// Subscription ID provider. pub(crate) id_provider: Arc, /// Stop handle. pub(crate) stop_handle: StopHandle, - /// Max subscriptions per connection. - pub(crate) max_subscriptions_per_connection: u32, /// Connection ID pub(crate) conn_id: u32, - /// Handle to hold a `connection permit`. - pub(crate) conn: Arc, + /// Connection Permit. + pub(crate) conn_permit: Arc, /// Limits the number of subscriptions for this connection pub(crate) bounded_subscriptions: BoundedSubscriptions, /// Sink that is used to send back responses to the connection. /// /// This is used for subscriptions. 
pub(crate) method_sink: MethodSink, + /// ServerConfig + pub(crate) server_cfg: Settings, } /// Similar to [`tower::ServiceBuilder`] but doesn't @@ -407,21 +379,21 @@ where let cfg = RpcServiceCfg::CallsAndSubscriptions { bounded_subscriptions: BoundedSubscriptions::new( - self.inner.max_subscriptions_per_connection, + self.inner.server_cfg.max_subscriptions_per_connection, ), id_provider: self.inner.id_provider.clone(), sink: self.inner.method_sink.clone(), }; - let max_response_body_size = self.inner.max_response_body_size as usize; - let max_request_body_size = self.inner.max_request_body_size as usize; + let max_response_body_size = self.inner.server_cfg.max_response_body_size as usize; + let max_request_body_size = self.inner.server_cfg.max_request_body_size as usize; + let conn = self.inner.conn_permit.clone(); let rpc_service = self.rpc_middleware.service(RpcService::new( self.inner.methods.clone(), max_response_body_size, self.inner.conn_id as usize, cfg, )); - let conn = self.inner.conn.clone(); // an ipc connection needs to handle read+write concurrently // even if the underlying rpc handler spawns the actual work or is does a lot of async any // additional overhead performed by `handle_request` can result in I/O latencies, for @@ -443,9 +415,81 @@ where } } +struct ProcessConnection<'a, HttpMiddleware, RpcMiddleware> { + http_middleware: &'a tower::ServiceBuilder, + rpc_middleware: RpcServiceBuilder, + conn_permit: Arc, + conn_id: u32, + server_cfg: Settings, + stop_handle: StopHandle, + drop_on_completion: mpsc::Sender<()>, + methods: Methods, + id_provider: Arc, + local_socket_stream: LocalSocketStream, +} + /// Spawns the IPC connection onto a new task -async fn process_connection( - conn: IpcConn>, +#[instrument(name = "connection", skip_all, fields(conn_id = %params.conn_id), level = "INFO")] +fn process_connection<'b, RpcMiddleware, HttpMiddleware>( + params: ProcessConnection<'_, HttpMiddleware, RpcMiddleware>, +) where + RpcMiddleware: Layer + 
Clone + Send + 'static, + for<'a> >::Service: RpcServiceT<'a>, + HttpMiddleware: Layer> + Send + 'static, + >>::Service: Send + + Service< + String, + Response = Option, + Error = Box, + >, + <>>::Service as Service>::Future: + Send + Unpin, + { + let ProcessConnection { + http_middleware, + rpc_middleware, + conn_permit, + conn_id, + server_cfg, + stop_handle, + drop_on_completion, + id_provider, + methods, + local_socket_stream, + } = params; + + let ipc = IpcConn(tokio_util::codec::Decoder::framed( + StreamCodec::stream_incoming(), + local_socket_stream.compat(), + )); + + let (tx, rx) = mpsc::channel::(server_cfg.message_buffer_capacity as usize); + let method_sink = MethodSink::new_with_limit(tx, server_cfg.max_response_body_size); + let tower_service = TowerServiceNoHttp { + inner: ServiceData { + methods, + id_provider, + stop_handle: stop_handle.clone(), + server_cfg: server_cfg.clone(), + conn_id, + conn_permit, + bounded_subscriptions: BoundedSubscriptions::new( + server_cfg.max_subscriptions_per_connection, + ), + method_sink, + }, + rpc_middleware, + }; + + let service = http_middleware.service(tower_service); + tokio::spawn(async { + to_ipc_service(ipc, service, stop_handle, rx).in_current_span().await; + drop(drop_on_completion) + }); +} + +async fn to_ipc_service( + ipc: IpcConn>, service: S, stop_handle: StopHandle, rx: mpsc::Receiver, @@ -457,7 +501,7 @@ async fn process_connection( { let rx_item = ReceiverStream::new(rx); let conn = IpcConnDriver { - conn, + conn: ipc, service, pending_calls: Default::default(), items: Default::default(), @@ -799,6 +843,7 @@ mod tests { types::Request, PendingSubscriptionSink, RpcModule, SubscriptionMessage, }; + use reth_tracing::init_test_tracing; use tokio::sync::broadcast; use tokio_stream::wrappers::BroadcastStream; @@ -864,6 +909,7 @@ mod tests { #[tokio::test] async fn can_set_the_max_request_body_size() { + init_test_tracing(); let endpoint = dummy_endpoint(); let server = 
Builder::default().max_request_body_size(100).build(&endpoint); let mut module = RpcModule::new(()); @@ -888,8 +934,43 @@ mod tests { assert!(response.is_err()); } + #[tokio::test] + async fn can_set_max_connections() { + init_test_tracing(); + + let endpoint = dummy_endpoint(); + let server = Builder::default().max_connections(2).build(&endpoint); + let mut module = RpcModule::new(()); + module.register_method("anything", |_, _| "succeed").unwrap(); + let handle = server.start(module).await.unwrap(); + tokio::spawn(handle.stopped()); + + let client1 = IpcClientBuilder::default().build(endpoint.clone()).await.unwrap(); + let client2 = IpcClientBuilder::default().build(endpoint.clone()).await.unwrap(); + let client3 = IpcClientBuilder::default().build(endpoint.clone()).await.unwrap(); + + let response1: Result = client1.request("anything", rpc_params![]).await; + let response2: Result = client2.request("anything", rpc_params![]).await; + let response3: Result = client3.request("anything", rpc_params![]).await; + + assert!(response1.is_ok()); + assert!(response2.is_ok()); + // Third connection is rejected + assert!(response3.is_err()); + + // Decrement connection count + drop(client2); + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + // Can connect again + let client4 = IpcClientBuilder::default().build(endpoint.clone()).await.unwrap(); + let response4: Result = client4.request("anything", rpc_params![]).await; + assert!(response4.is_ok()); + } + #[tokio::test] async fn test_rpc_request() { + init_test_tracing(); let endpoint = dummy_endpoint(); let server = Builder::default().build(&endpoint); let mut module = RpcModule::new(()); From 12873d515a9cea30d553fe938dc42a12c072562b Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Mon, 29 Apr 2024 10:09:16 -0600 Subject: [PATCH 386/700] feat: expose fields of `ResolveBestPayload` for remote construction (#7947) Co-authored-by: Matthias Seitz --- crates/payload/basic/src/lib.rs | 6 +++--- 1 file 
changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 4ee55b388e2fc..6aa3ccbc18a1e 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -528,11 +528,11 @@ where #[derive(Debug)] pub struct ResolveBestPayload { /// Best payload so far. - best_payload: Option, + pub best_payload: Option, /// Regular payload job that's currently running that might produce a better payload. - maybe_better: Option>, + pub maybe_better: Option>, /// The empty payload building job in progress. - empty_payload: Option>>, + pub empty_payload: Option>>, } impl Future for ResolveBestPayload From 33f4c3fa91a423d33dbe9ec8f7adec5ac9364aa9 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 29 Apr 2024 20:49:41 +0200 Subject: [PATCH 387/700] fix(op): genesis (#7969) --- crates/primitives/res/genesis/optimism.json | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/crates/primitives/res/genesis/optimism.json b/crates/primitives/res/genesis/optimism.json index 2fb05781e9bcc..50c45b68eb9cc 100644 --- a/crates/primitives/res/genesis/optimism.json +++ b/crates/primitives/res/genesis/optimism.json @@ -12,10 +12,10 @@ "istanbulBlock": 0, "muirGlacierBlock": 0, "berlinBlock": 3950000, - "londonBlock": 3950000, - "arrowGlacierBlock": 3950000, - "grayGlacierBlock": 3950000, - "mergeNetsplitBlock": 3950000, + "londonBlock": 105235063, + "arrowGlacierBlock": 105235063, + "grayGlacierBlock": 105235063, + "mergeNetsplitBlock": 105235063, "bedrockBlock": 105235063, "terminalTotalDifficulty": 0, "terminalTotalDifficultyPassed": true, @@ -28,5 +28,6 @@ "difficulty": "1", "gasLimit": "15000000", "extradata": "0x000000000000000000000000000000000000000000000000000000000000000000000398232e2064f896018496b4b44b3d62751f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "stateRoot": 
"0xeddb4c1786789419153a27c4c80ff44a2226b6eda04f7e22ce5bae892ea568eb", "alloc": {} } \ No newline at end of file From 3754b1e1831b620a5eb35d396b8a2f4384a4c74a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 29 Apr 2024 21:22:01 +0200 Subject: [PATCH 388/700] feat(ci): add cfg check (#7965) Co-authored-by: Oliver Nordbjerg --- .github/workflows/lint.yml | 14 +++++++++++++- Makefile | 5 ++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index ff3dad495eabb..b939e159d4bcc 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -126,11 +126,23 @@ jobs: with: cmd: jq empty etc/grafana/dashboards/overview.json + check-cfg: + name: check-cfg + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@nightly + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - run: cargo +nightly -Zcheck-cfg c + lint-success: name: lint success runs-on: ubuntu-latest if: always() - needs: [clippy-binaries, clippy, crate-checks, docs, fmt, codespell, grafana] + needs: [clippy-binaries, clippy, crate-checks, docs, fmt, codespell, grafana, check-cfg] timeout-minutes: 30 steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/Makefile b/Makefile index 82994b3c282c3..fd5a252f17ca0 100644 --- a/Makefile +++ b/Makefile @@ -414,8 +414,11 @@ test: make test-doc && \ make test-other-targets +cfg-check: + cargo +nightly -Zcheck-cfg c + pr: - make fmt && \ + make cfg-check && \ make lint && \ make docs && \ make test From 55017ef028adc298eff1dda9b9136dfed73d1f59 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 29 Apr 2024 21:45:05 +0200 Subject: [PATCH 389/700] chore: less restrictive bounds (#7970) --- crates/node/builder/src/builder/mod.rs | 7 ++----- crates/node/builder/src/launch/mod.rs | 20 +++++++------------- 2 files changed, 9 insertions(+), 18 deletions(-) diff --git 
a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 1da23fc04f794..10977360aef4b 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -34,12 +34,9 @@ use std::{str::FromStr, sync::Arc}; mod states; -/// The builtin provider type of the reth node. +/// The adapter type for a reth node with the builtin provider type // Note: we need to hardcode this because custom components might depend on it in associated types. -pub type RethFullProviderType = BlockchainProvider; - -/// The adapter type for a reth node with the given types -pub type RethFullAdapter = FullNodeTypesAdapter>; +pub type RethFullAdapter = FullNodeTypesAdapter>; #[cfg_attr(doc, aquamarine::aquamarine)] /// Declaratively construct a node. diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 408e47cd7a29a..39c549e06e7bd 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -5,7 +5,7 @@ use crate::{ components::{NodeComponents, NodeComponentsBuilder}, hooks::NodeHooks, node::FullNode, - BuilderContext, NodeBuilderWithComponents, NodeHandle, RethFullAdapter, + BuilderContext, NodeBuilderWithComponents, NodeHandle, }; use futures::{future, future::Either, stream, stream_select, StreamExt}; use reth_auto_seal_consensus::AutoSealConsensus; @@ -17,14 +17,10 @@ use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; use reth_consensus::Consensus; -use reth_db::{ - database::Database, - database_metrics::{DatabaseMetadata, DatabaseMetrics}, -}; use reth_exex::{ExExContext, ExExHandle, ExExManager, ExExManagerHandle}; use reth_interfaces::p2p::either::EitherDownloader; use reth_network::NetworkEvents; -use reth_node_api::{FullNodeComponents, NodeTypes}; +use reth_node_api::{FullNodeComponents, FullNodeTypes}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, 
engine_api_store::EngineApiStore, @@ -74,18 +70,16 @@ impl DefaultNodeLauncher { } } -impl LaunchNode, CB>> - for DefaultNodeLauncher +impl LaunchNode> for DefaultNodeLauncher where - DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - T: NodeTypes, - CB: NodeComponentsBuilder>, + T: FullNodeTypes::DB>>, + CB: NodeComponentsBuilder, { - type Node = NodeHandle, CB::Components>>; + type Node = NodeHandle>; async fn launch_node( self, - target: NodeBuilderWithComponents, CB>, + target: NodeBuilderWithComponents, ) -> eyre::Result { let Self { ctx } = self; let NodeBuilderWithComponents { From 081978002738d17f08008357602dcabfb2cd67ca Mon Sep 17 00:00:00 2001 From: Darshan Kathiriya <8559992+lakshya-sky@users.noreply.github.com> Date: Mon, 29 Apr 2024 15:48:05 -0400 Subject: [PATCH 390/700] feat: add `extract_chain_id` method (#7921) --- crates/primitives/src/transaction/mod.rs | 17 +++-------- .../primitives/src/transaction/signature.rs | 29 ++++++++++++++----- 2 files changed, 25 insertions(+), 21 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 76d9b01978faa..f401b0ef631ff 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -32,7 +32,7 @@ pub use sidecar::generate_blob_sidecar; #[cfg(feature = "c-kzg")] pub use sidecar::{BlobTransaction, BlobTransactionSidecar, BlobTransactionValidationError}; -pub use signature::Signature; +pub use signature::{extract_chain_id, Signature}; pub use tx_type::{ TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; @@ -1740,18 +1740,9 @@ impl TryFrom for TransactionSignedEcRecovered { // If the transaction type is Legacy, adjust the v component of the // signature according to the Ethereum specification TxType::Legacy => { - // Calculate the new v value based on the EIP-155 formula: - // v = {0,1} + CHAIN_ID * 2 + 35 - !(signature.v - - U256::from(if let 
Some(chain_id) = transaction.chain_id() { - // If CHAIN_ID is available, calculate the new v value - // accordingly - chain_id.saturating_mul(2).saturating_add(35) - } else { - // If CHAIN_ID is not available, set v = {0,1} + 27 - 27 - })) - .is_zero() + extract_chain_id(signature.v.to()) + .map_err(|_| ConversionError::InvalidSignature)? + .0 } _ => !signature.v.is_zero(), } diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index 8cd57dc7f8bff..29db729e9bbb1 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -114,16 +114,11 @@ impl Signature { if v == 0 && r.is_zero() && s.is_zero() { return Ok((Self { r, s, odd_y_parity: false }, None)) } - return Err(RlpError::Custom("invalid Ethereum signature (V is not 27 or 28)")) } - let odd_y_parity = v == 28; - Ok((Self { r, s, odd_y_parity }, None)) - } else { - // EIP-155: v = {0, 1} + CHAIN_ID * 2 + 35 - let odd_y_parity = ((v - 35) % 2) != 0; - let chain_id = (v - 35) >> 1; - Ok((Self { r, s, odd_y_parity }, Some(chain_id))) } + + let (odd_y_parity, chain_id) = extract_chain_id(v)?; + Ok((Self { r, s, odd_y_parity }, chain_id)) } /// Output the length of the signature without the length of the RLP header @@ -201,6 +196,24 @@ impl Signature { } } +/// Outputs (odd_y_parity, chain_id) from the `v` value. +/// This doesn't check validity of the `v` value for optimism. 
+#[inline] +pub fn extract_chain_id(v: u64) -> alloy_rlp::Result<(bool, Option)> { + if v < 35 { + // non-EIP-155 legacy scheme, v = 27 for even y-parity, v = 28 for odd y-parity + if v != 27 && v != 28 { + return Err(RlpError::Custom("invalid Ethereum signature (V is not 27 or 28)")) + } + Ok((v == 28, None)) + } else { + // EIP-155: v = {0, 1} + CHAIN_ID * 2 + 35 + let odd_y_parity = ((v - 35) % 2) != 0; + let chain_id = (v - 35) >> 1; + Ok((odd_y_parity, Some(chain_id))) + } +} + #[cfg(test)] mod tests { use crate::{transaction::signature::SECP256K1N_HALF, Address, Signature, B256, U256}; From 593b2b6d04c6f8aac9c3e9cca8555ae738964f03 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 29 Apr 2024 22:20:51 +0200 Subject: [PATCH 391/700] feat(rlp): custom rlp encoding tx type (#7968) --- crates/primitives/src/receipt.rs | 5 +- crates/primitives/src/transaction/tx_type.rs | 55 ++++++++++++++++++++ 2 files changed, 58 insertions(+), 2 deletions(-) diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 2a25b2de81f15..63955a1d13b1f 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -2,7 +2,7 @@ use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; use crate::{logs_bloom, Bloom, Bytes, PruneSegmentError, TxType, B256}; use alloy_primitives::Log; -use alloy_rlp::{length_of_length, Decodable, Encodable}; +use alloy_rlp::{length_of_length, Decodable, Encodable, RlpDecodable, RlpEncodable}; use bytes::{Buf, BufMut}; #[cfg(any(test, feature = "arbitrary"))] use proptest::strategy::Strategy; @@ -18,7 +18,8 @@ use std::{ #[cfg_attr(feature = "zstd-codec", main_codec(no_arbitrary, zstd))] #[cfg_attr(not(feature = "zstd-codec"), main_codec(no_arbitrary))] #[add_arbitrary_tests] -#[derive(Clone, Debug, PartialEq, Eq, Default)] +#[derive(Clone, Debug, PartialEq, Eq, Default, RlpEncodable, RlpDecodable)] +#[rlp(trailing)] pub struct Receipt { /// Receipt type. 
pub tx_type: TxType, diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 11df417d4bd4e..d203ecf773273 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -1,4 +1,5 @@ use crate::{U64, U8}; +use alloy_rlp::{Decodable, Encodable}; use bytes::Buf; use reth_codecs::{derive_arbitrary, Compact}; use serde::{Deserialize, Serialize}; @@ -181,8 +182,30 @@ impl PartialEq for u8 { } } +impl Encodable for TxType { + fn encode(&self, out: &mut dyn bytes::BufMut) { + (*self as u8).encode(out); + } + + fn length(&self) -> usize { + 1 + } +} + +impl Decodable for TxType { + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let ty = u8::decode(buf)?; + + TxType::try_from(ty).map_err(alloy_rlp::Error::Custom) + } +} + #[cfg(test)] mod tests { + use rand::Rng; + + use crate::hex; + use super::*; #[test] @@ -249,4 +272,36 @@ mod tests { ); } } + + #[test] + fn decode_tx_type() { + // Test for Legacy transaction + let tx_type = TxType::decode(&mut &hex!("80")[..]).unwrap(); + assert_eq!(tx_type, TxType::Legacy); + + // Test for EIP2930 transaction + let tx_type = TxType::decode(&mut &[1u8][..]).unwrap(); + assert_eq!(tx_type, TxType::Eip2930); + + // Test for EIP1559 transaction + let tx_type = TxType::decode(&mut &[2u8][..]).unwrap(); + assert_eq!(tx_type, TxType::Eip1559); + + // Test for EIP4844 transaction + let tx_type = TxType::decode(&mut &[3u8][..]).unwrap(); + assert_eq!(tx_type, TxType::Eip4844); + + // Test random byte not in range + let buf = [rand::thread_rng().gen_range(4..=u8::MAX)]; + println!("{buf:?}"); + assert!(TxType::decode(&mut &buf[..]).is_err()); + + // Test for Deposit transaction + #[cfg(feature = "optimism")] + { + let buf = [126u8]; + let tx_type = TxType::decode(&mut &buf[..]).unwrap(); + assert_eq!(tx_type, TxType::Deposit); + } + } } From 6619faf42b7f2706d7bbf8df804e2c21139b1072 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: 
Mon, 29 Apr 2024 23:44:44 +0200 Subject: [PATCH 392/700] docs: correct `encode_for_signing` docs (#7973) --- crates/primitives/src/transaction/eip1559.rs | 2 +- crates/primitives/src/transaction/eip2930.rs | 2 +- crates/primitives/src/transaction/eip4844.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/primitives/src/transaction/eip1559.rs b/crates/primitives/src/transaction/eip1559.rs index 68da7d8d927df..06cbc129c4291 100644 --- a/crates/primitives/src/transaction/eip1559.rs +++ b/crates/primitives/src/transaction/eip1559.rs @@ -193,7 +193,7 @@ impl TxEip1559 { self.input.len() // input } - /// Encodes the legacy transaction in RLP for signing. + /// Encodes the EIP-1559 transaction in RLP for signing. /// /// This encodes the transaction as: /// `tx_type || rlp(chain_id, nonce, max_priority_fee_per_gas, max_fee_per_gas, gas_limit, to, diff --git a/crates/primitives/src/transaction/eip2930.rs b/crates/primitives/src/transaction/eip2930.rs index 86794a5126df0..b0d1291e810c3 100644 --- a/crates/primitives/src/transaction/eip2930.rs +++ b/crates/primitives/src/transaction/eip2930.rs @@ -157,7 +157,7 @@ impl TxEip2930 { TxType::Eip2930 } - /// Encodes the legacy transaction in RLP for signing. + /// Encodes the EIP-2930 transaction in RLP for signing. /// /// This encodes the transaction as: /// `tx_type || rlp(chain_id, nonce, gas_price, gas_limit, to, value, input, access_list)` diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs index 0a3790abeca2b..8356d678833c5 100644 --- a/crates/primitives/src/transaction/eip4844.rs +++ b/crates/primitives/src/transaction/eip4844.rs @@ -291,7 +291,7 @@ impl TxEip4844 { TxType::Eip4844 } - /// Encodes the legacy transaction in RLP for signing. + /// Encodes the EIP-4844 transaction in RLP for signing. 
/// /// This encodes the transaction as: /// `tx_type || rlp(chain_id, nonce, max_priority_fee_per_gas, max_fee_per_gas, gas_limit, to, From d9faaa80cfb58527d160dc1d65ca44ee851f5cea Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Mon, 29 Apr 2024 23:45:21 +0200 Subject: [PATCH 393/700] chore: rm unused consensus fns (#7972) --- crates/consensus/common/src/validation.rs | 238 +--------------------- 1 file changed, 6 insertions(+), 232 deletions(-) diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 06b2303a86a04..c6e4e0aee857b 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -7,12 +7,9 @@ use reth_primitives::{ eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, MAXIMUM_EXTRA_DATA_SIZE, }, - BlockNumber, ChainSpec, GotExpected, Hardfork, Header, InvalidTransactionError, SealedBlock, - SealedHeader, Transaction, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, - TxLegacy, + ChainSpec, GotExpected, Hardfork, Header, SealedBlock, SealedHeader, }; -use reth_provider::{AccountReader, HeaderProvider, WithdrawalsProvider}; -use std::collections::{hash_map::Entry, HashMap}; +use reth_provider::{HeaderProvider, WithdrawalsProvider}; /// Validate header standalone pub fn validate_header_standalone( @@ -59,148 +56,6 @@ pub fn validate_header_standalone( Ok(()) } -/// Validate a transaction with regard to a block header. -/// -/// The only parameter from the header that affects the transaction is `base_fee`. -pub fn validate_transaction_regarding_header( - transaction: &Transaction, - chain_spec: &ChainSpec, - at_block_number: BlockNumber, - at_timestamp: u64, - base_fee: Option, -) -> Result<(), ConsensusError> { - #[allow(unreachable_patterns)] - let chain_id = match transaction { - Transaction::Legacy(TxLegacy { chain_id, .. 
}) => { - // EIP-155: Simple replay attack protection: https://eips.ethereum.org/EIPS/eip-155 - if !chain_spec.fork(Hardfork::SpuriousDragon).active_at_block(at_block_number) && - chain_id.is_some() - { - return Err(InvalidTransactionError::OldLegacyChainId.into()) - } - *chain_id - } - Transaction::Eip2930(TxEip2930 { chain_id, .. }) => { - // EIP-2930: Optional access lists: https://eips.ethereum.org/EIPS/eip-2930 (New transaction type) - if !chain_spec.fork(Hardfork::Berlin).active_at_block(at_block_number) { - return Err(InvalidTransactionError::Eip2930Disabled.into()) - } - Some(*chain_id) - } - Transaction::Eip1559(TxEip1559 { - chain_id, - max_fee_per_gas, - max_priority_fee_per_gas, - .. - }) => { - // EIP-1559: Fee market change for ETH 1.0 chain https://eips.ethereum.org/EIPS/eip-1559 - if !chain_spec.fork(Hardfork::London).active_at_block(at_block_number) { - return Err(InvalidTransactionError::Eip1559Disabled.into()) - } - - // EIP-1559: add more constraints to the tx validation - // https://github.com/ethereum/EIPs/pull/3594 - if max_priority_fee_per_gas > max_fee_per_gas { - return Err(InvalidTransactionError::TipAboveFeeCap.into()) - } - - Some(*chain_id) - } - Transaction::Eip4844(TxEip4844 { - chain_id, - max_fee_per_gas, - max_priority_fee_per_gas, - .. 
- }) => { - // EIP-4844: Shard Blob Transactions https://eips.ethereum.org/EIPS/eip-4844 - if !chain_spec.is_cancun_active_at_timestamp(at_timestamp) { - return Err(InvalidTransactionError::Eip4844Disabled.into()) - } - - // EIP-1559: add more constraints to the tx validation - // https://github.com/ethereum/EIPs/pull/3594 - if max_priority_fee_per_gas > max_fee_per_gas { - return Err(InvalidTransactionError::TipAboveFeeCap.into()) - } - - Some(*chain_id) - } - _ => { - // Op Deposit - None - } - }; - if let Some(chain_id) = chain_id { - if chain_id != chain_spec.chain().id() { - return Err(InvalidTransactionError::ChainIdMismatch.into()) - } - } - // Check basefee and few checks that are related to that. - // https://github.com/ethereum/EIPs/pull/3594 - if let Some(base_fee_per_gas) = base_fee { - if transaction.max_fee_per_gas() < base_fee_per_gas as u128 { - return Err(InvalidTransactionError::FeeCapTooLow.into()) - } - } - - Ok(()) -} - -/// Iterate over all transactions, validate them against each other and against the block. -/// There is no gas check done as [REVM](https://github.com/bluealloy/revm/blob/fd0108381799662098b7ab2c429ea719d6dfbf28/crates/revm/src/evm_impl.rs#L113-L131) already checks that. -pub fn validate_all_transaction_regarding_block_and_nonces< - 'a, - Provider: HeaderProvider + AccountReader, ->( - transactions: impl Iterator, - header: &Header, - provider: Provider, - chain_spec: &ChainSpec, -) -> RethResult<()> { - let mut account_nonces = HashMap::new(); - - for transaction in transactions { - validate_transaction_regarding_header( - transaction, - chain_spec, - header.number, - header.timestamp, - header.base_fee_per_gas, - )?; - - // Get nonce, if there is previous transaction from same sender we need - // to take that nonce. 
- let nonce = match account_nonces.entry(transaction.signer()) { - Entry::Occupied(mut entry) => { - let nonce = *entry.get(); - *entry.get_mut() += 1; - nonce - } - Entry::Vacant(entry) => { - let account = provider.basic_account(transaction.signer())?.unwrap_or_default(); - // Signer account shouldn't have bytecode. Presence of bytecode means this is a - // smartcontract. - if account.has_bytecode() { - return Err(ConsensusError::from( - InvalidTransactionError::SignerAccountHasBytecode, - ) - .into()) - } - let nonce = account.nonce; - entry.insert(account.nonce + 1); - nonce - } - }; - - // check nonce - if transaction.nonce() != nonce { - return Err(ConsensusError::from(InvalidTransactionError::NonceNotConsistent).into()) - } - } - - Ok(()) -} - /// Validate a block without regard for state: /// /// - Compares the ommer hash in the block header to the block body @@ -345,10 +200,11 @@ mod tests { test_utils::generators::{self, Rng}, }; use reth_primitives::{ - hex_literal::hex, proofs, Account, Address, BlockBody, BlockHash, BlockHashOrNumber, Bytes, - ChainSpecBuilder, Signature, TransactionSigned, TxKind, Withdrawal, Withdrawals, MAINNET, - U256, + hex_literal::hex, proofs, Account, Address, BlockBody, BlockHash, BlockHashOrNumber, + BlockNumber, Bytes, ChainSpecBuilder, Signature, Transaction, TransactionSigned, TxEip4844, + TxKind, Withdrawal, Withdrawals, U256, }; + use reth_provider::AccountReader; use std::ops::RangeBounds; mock! 
{ @@ -382,15 +238,6 @@ mod tests { withdrawals_provider: MockWithdrawalsProvider::new(), } } - /// New provider where is_known is always true - fn new_known() -> Self { - Self { - is_known: true, - parent: None, - account: None, - withdrawals_provider: MockWithdrawalsProvider::new(), - } - } } impl AccountReader for Provider { @@ -457,25 +304,6 @@ mod tests { } } - fn mock_tx(nonce: u64) -> TransactionSignedEcRecovered { - let request = Transaction::Eip2930(TxEip2930 { - chain_id: 1u64, - nonce, - gas_price: 0x28f000fff, - gas_limit: 10, - to: TxKind::Call(Address::default()), - value: U256::from(3_u64), - input: Bytes::from(vec![1, 2]), - access_list: Default::default(), - }); - - let signature = Signature { odd_y_parity: true, r: U256::default(), s: U256::default() }; - - let tx = TransactionSigned::from_transaction_and_signature(request, signature); - let signer = Address::ZERO; - TransactionSignedEcRecovered::from_signed_transaction(tx, signer) - } - fn mock_blob_tx(nonce: u64, num_blobs: usize) -> TransactionSigned { let mut rng = generators::rng(); let request = Transaction::Eip4844(TxEip4844 { @@ -539,60 +367,6 @@ mod tests { (SealedBlock { header: header.seal_slow(), body, ommers, withdrawals: None }, parent) } - #[test] - fn sanity_tx_nonce_check() { - let (block, _) = mock_block(); - let tx1 = mock_tx(0); - let tx2 = mock_tx(1); - let provider = Provider::new_known(); - - let txs = vec![tx1, tx2]; - validate_all_transaction_regarding_block_and_nonces( - txs.iter(), - &block.header, - provider, - &MAINNET, - ) - .expect("To Pass"); - } - - #[test] - fn nonce_gap_in_first_transaction() { - let (block, _) = mock_block(); - let tx1 = mock_tx(1); - let provider = Provider::new_known(); - - let txs = vec![tx1]; - assert_eq!( - validate_all_transaction_regarding_block_and_nonces( - txs.iter(), - &block.header, - provider, - &MAINNET, - ), - Err(ConsensusError::from(InvalidTransactionError::NonceNotConsistent).into()) - ) - } - - #[test] - fn 
nonce_gap_on_second_tx_from_same_signer() { - let (block, _) = mock_block(); - let tx1 = mock_tx(0); - let tx2 = mock_tx(3); - let provider = Provider::new_known(); - - let txs = vec![tx1, tx2]; - assert_eq!( - validate_all_transaction_regarding_block_and_nonces( - txs.iter(), - &block.header, - provider, - &MAINNET, - ), - Err(ConsensusError::from(InvalidTransactionError::NonceNotConsistent).into()) - ); - } - #[test] fn valid_withdrawal_index() { let chain_spec = ChainSpecBuilder::mainnet().shanghai_activated().build(); From 4c01856e6d0c35423a7ccb9727961f6ecbb7883a Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 29 Apr 2024 23:45:43 +0200 Subject: [PATCH 394/700] chore: pool type must be unpin (#7974) --- crates/node/api/src/node.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index db4bdd9617406..2eb14011f2b12 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -83,7 +83,7 @@ where /// Encapsulates all types and components of the node. pub trait FullNodeComponents: FullNodeTypes + 'static { /// The transaction pool of the node. - type Pool: TransactionPool; + type Pool: TransactionPool + Unpin; /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; From 490fbb0f52e0b73eb65076912d42875093074694 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 30 Apr 2024 01:32:08 +0200 Subject: [PATCH 395/700] docs: minor doc nits in reth basic payload (#7977) --- crates/payload/basic/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 6aa3ccbc18a1e..c32961c72cad4 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -53,9 +53,9 @@ mod metrics; pub struct BasicPayloadJobGenerator { /// The client that can interact with the chain. 
client: Client, - /// txpool + /// The transaction pool to pull transactions from. pool: Pool, - /// How to spawn building tasks + /// The task executor to spawn payload building tasks on. executor: Tasks, /// The configuration for the job generator. config: BasicPayloadJobGeneratorConfig, From 9fc75c624783fe6fad4dd97dedd0ef152ddb234c Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 30 Apr 2024 10:55:34 +0200 Subject: [PATCH 396/700] chore: pin ethereum tests (#7986) --- .github/workflows/unit.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index 91a247fac5e28..05ff0960916c5 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -57,6 +57,7 @@ jobs: uses: actions/checkout@v4 with: repository: ethereum/tests + ref: 1c23e3c27ac53b794de0844d2d5e19cd2495b9d8 path: testing/ef-tests/ethereum-tests submodules: recursive fetch-depth: 1 From d281699c2a4df0c61d9459f11ec7157095dabee7 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 30 Apr 2024 10:55:50 +0200 Subject: [PATCH 397/700] chore: rm clap as dep of `reth-static-file` (#7980) --- Cargo.lock | 1 - bin/reth/Cargo.toml | 6 ++++-- crates/static-file/Cargo.toml | 2 -- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d20f04fa81d4e..b283e8eaf710b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7749,7 +7749,6 @@ name = "reth-static-file" version = "0.2.0-beta.6" dependencies = [ "assert_matches", - "clap", "parking_lot 0.12.2", "rayon", "reth-db", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 995b296107508..c323017d0ab33 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -43,7 +43,7 @@ reth-payload-validator.workspace = true reth-basic-payload-builder.workspace = true reth-discv4.workspace = true reth-prune.workspace = true -reth-static-file = { workspace = true, features = ["clap"] } +reth-static-file = { workspace = true } reth-trie = { workspace = true, 
features = ["metrics"] } reth-nippy-jar.workspace = true reth-node-api.workspace = true @@ -79,7 +79,9 @@ rand.workspace = true # tui comfy-table = "7.0" crossterm = "0.27.0" -ratatui = { version = "0.26", default-features = false, features = ["crossterm"] } +ratatui = { version = "0.26", default-features = false, features = [ + "crossterm", +] } human_bytes = "0.4.1" # async diff --git a/crates/static-file/Cargo.toml b/crates/static-file/Cargo.toml index b3fc1b93d3684..1345b2f232fac 100644 --- a/crates/static-file/Cargo.toml +++ b/crates/static-file/Cargo.toml @@ -25,7 +25,6 @@ tokio-stream.workspace = true # misc tracing.workspace = true -clap = { workspace = true, features = ["derive"], optional = true } rayon.workspace = true parking_lot = { workspace = true, features = ["send_guard", "arc_lock"] } @@ -37,4 +36,3 @@ assert_matches.workspace = true tempfile.workspace = true [features] -clap = ["dep:clap"] From 5d795b9342aef986adc7e6fcaf0b053104675fcf Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 30 Apr 2024 12:02:51 +0200 Subject: [PATCH 398/700] fix: handle Multiplex P2PStream.poll_ready errors (#7988) --- crates/net/eth-wire/src/multiplex.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index 27d0f0a0016df..8677ae77c1058 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -19,6 +19,7 @@ use std::{ use crate::{ capability::{Capability, SharedCapabilities, SharedCapability, UnsupportedCapabilityError}, errors::{EthStreamError, P2PStreamError}, + p2pstream::DisconnectP2P, CanDisconnect, DisconnectReason, EthStream, P2PStream, Status, UnauthedEthStream, }; use bytes::{Bytes, BytesMut}; @@ -465,7 +466,7 @@ where let mut conn_ready = true; loop { match this.inner.conn.poll_ready_unpin(cx) { - Poll::Ready(_) => { + Poll::Ready(Ok(())) => { if let Some(msg) = this.inner.out_buffer.pop_front() { if let 
Err(err) = this.inner.conn.start_send_unpin(msg) { return Poll::Ready(Some(Err(err.into()))) @@ -474,6 +475,14 @@ where break } } + Poll::Ready(Err(err)) => { + if let Err(disconnect_err) = + this.inner.conn.start_disconnect(DisconnectReason::DisconnectRequested) + { + return Poll::Ready(Some(Err(disconnect_err.into()))); + } + return Poll::Ready(Some(Err(err.into()))); + } Poll::Pending => { conn_ready = false; break From 053b14abdce7f3fe6cd205f6b34c3012e8a0d46e Mon Sep 17 00:00:00 2001 From: Jacob Kaufmann Date: Tue, 30 Apr 2024 04:12:11 -0600 Subject: [PATCH 399/700] feat: add method to EthBuiltPayload to get blob sidecars (#7979) Co-authored-by: Matthias Seitz --- crates/ethereum/engine-primitives/src/payload.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index a354e0588844f..264355ac22a83 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -57,6 +57,11 @@ impl EthBuiltPayload { self.fees } + /// Returns the blob sidecars. + pub fn sidecars(&self) -> &[BlobTransactionSidecar] { + &self.sidecars + } + /// Adds sidecars to the payload. 
pub fn extend_sidecars(&mut self, sidecars: Vec) { self.sidecars.extend(sidecars) From 1fe00a7c359d1b08bb381ef375f91d546d9da582 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 30 Apr 2024 12:23:02 +0200 Subject: [PATCH 400/700] feat: use `FnOnce` for node hooks (#7975) Co-authored-by: Roman Krasiuk --- crates/node/builder/src/builder/states.rs | 12 +++++++----- crates/node/builder/src/hooks.rs | 20 +++++++++---------- crates/node/builder/src/launch/mod.rs | 3 +-- crates/node/builder/src/rpc.rs | 24 +++++++++++++---------- 4 files changed, 32 insertions(+), 27 deletions(-) diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index 975590c5fe900..b77588df4955b 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -157,7 +157,7 @@ impl> NodeBuilderWithComponents(mut self, hook: F) -> Self where - F: Fn(NodeAdapter) -> eyre::Result<()> + Send + 'static, + F: FnOnce(NodeAdapter) -> eyre::Result<()> + Send + 'static, { self.add_ons.hooks.set_on_component_initialized(hook); self @@ -166,7 +166,7 @@ impl> NodeBuilderWithComponents(mut self, hook: F) -> Self where - F: Fn(FullNode>) -> eyre::Result<()> + Send + 'static, + F: FnOnce(FullNode>) -> eyre::Result<()> + Send + 'static, { self.add_ons.hooks.set_on_node_started(hook); self @@ -175,7 +175,7 @@ impl> NodeBuilderWithComponents(mut self, hook: F) -> Self where - F: Fn( + F: FnOnce( RpcContext<'_, NodeAdapter>, RethRpcServerHandles, ) -> eyre::Result<()> @@ -189,7 +189,9 @@ impl> NodeBuilderWithComponents(mut self, hook: F) -> Self where - F: Fn(RpcContext<'_, NodeAdapter>) -> eyre::Result<()> + Send + 'static, + F: FnOnce(RpcContext<'_, NodeAdapter>) -> eyre::Result<()> + + Send + + 'static, { self.add_ons.rpc.set_extend_rpc_modules(hook); self @@ -202,7 +204,7 @@ impl> NodeBuilderWithComponents(mut self, exex_id: impl Into, exex: F) -> Self where - F: Fn(ExExContext>) -> R + Send + 'static, + F: 
FnOnce(ExExContext>) -> R + Send + 'static, R: Future> + Send, E: Future> + Send, { diff --git a/crates/node/builder/src/hooks.rs b/crates/node/builder/src/hooks.rs index 9d2127f5a5822..468c84e85182b 100644 --- a/crates/node/builder/src/hooks.rs +++ b/crates/node/builder/src/hooks.rs @@ -77,15 +77,15 @@ pub trait OnComponentInitializedHook: Send { /// Consumes the event hook and runs it. /// /// If this returns an error, the node launch will be aborted. - fn on_event(&self, node: Node) -> eyre::Result<()>; + fn on_event(self: Box, node: Node) -> eyre::Result<()>; } impl OnComponentInitializedHook for F where - F: Fn(Node) -> eyre::Result<()> + Send, + F: FnOnce(Node) -> eyre::Result<()> + Send, { - fn on_event(&self, node: Node) -> eyre::Result<()> { - self(node) + fn on_event(self: Box, node: Node) -> eyre::Result<()> { + (*self)(node) } } @@ -94,27 +94,27 @@ pub trait OnNodeStartedHook: Send { /// Consumes the event hook and runs it. /// /// If this returns an error, the node launch will be aborted. 
- fn on_event(&self, node: FullNode) -> eyre::Result<()>; + fn on_event(self: Box, node: FullNode) -> eyre::Result<()>; } impl OnNodeStartedHook for F where Node: FullNodeComponents, - F: Fn(FullNode) -> eyre::Result<()> + Send, + F: FnOnce(FullNode) -> eyre::Result<()> + Send, { - fn on_event(&self, node: FullNode) -> eyre::Result<()> { - self(node) + fn on_event(self: Box, node: FullNode) -> eyre::Result<()> { + (*self)(node) } } impl OnComponentInitializedHook for () { - fn on_event(&self, _node: Node) -> eyre::Result<()> { + fn on_event(self: Box, _node: Node) -> eyre::Result<()> { Ok(()) } } impl OnNodeStartedHook for () { - fn on_event(&self, _node: FullNode) -> eyre::Result<()> { + fn on_event(self: Box, _node: FullNode) -> eyre::Result<()> { Ok(()) } } diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 39c549e06e7bd..4f1f00e4e5b7e 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -232,8 +232,7 @@ where async move { while let Ok(notification) = canon_state_notifications.recv().await { handle.send_async(notification.into()).await.expect( - "blockchain tree notification could not be sent to exex -manager", + "blockchain tree notification could not be sent to exex manager", ); } }, diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 3efeba7f5fa37..a65dcfce5f8c1 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -98,7 +98,7 @@ impl fmt::Debug for RpcHooks { pub trait OnRpcStarted: Send { /// The hook that is called once the rpc server is started. 
fn on_rpc_started( - &self, + self: Box, ctx: RpcContext<'_, Node>, handles: RethRpcServerHandles, ) -> eyre::Result<()>; @@ -106,20 +106,24 @@ pub trait OnRpcStarted: Send { impl OnRpcStarted for F where - F: Fn(RpcContext<'_, Node>, RethRpcServerHandles) -> eyre::Result<()> + Send, + F: FnOnce(RpcContext<'_, Node>, RethRpcServerHandles) -> eyre::Result<()> + Send, Node: FullNodeComponents, { fn on_rpc_started( - &self, + self: Box, ctx: RpcContext<'_, Node>, handles: RethRpcServerHandles, ) -> eyre::Result<()> { - self(ctx, handles) + (*self)(ctx, handles) } } impl OnRpcStarted for () { - fn on_rpc_started(&self, _: RpcContext<'_, Node>, _: RethRpcServerHandles) -> eyre::Result<()> { + fn on_rpc_started( + self: Box, + _: RpcContext<'_, Node>, + _: RethRpcServerHandles, + ) -> eyre::Result<()> { Ok(()) } } @@ -127,21 +131,21 @@ impl OnRpcStarted for () { /// Event hook that is called when the rpc server is started. pub trait ExtendRpcModules: Send { /// The hook that is called once the rpc server is started. 
- fn extend_rpc_modules(&self, ctx: RpcContext<'_, Node>) -> eyre::Result<()>; + fn extend_rpc_modules(self: Box, ctx: RpcContext<'_, Node>) -> eyre::Result<()>; } impl ExtendRpcModules for F where - F: Fn(RpcContext<'_, Node>) -> eyre::Result<()> + Send, + F: FnOnce(RpcContext<'_, Node>) -> eyre::Result<()> + Send, Node: FullNodeComponents, { - fn extend_rpc_modules(&self, ctx: RpcContext<'_, Node>) -> eyre::Result<()> { - self(ctx) + fn extend_rpc_modules(self: Box, ctx: RpcContext<'_, Node>) -> eyre::Result<()> { + (*self)(ctx) } } impl ExtendRpcModules for () { - fn extend_rpc_modules(&self, _: RpcContext<'_, Node>) -> eyre::Result<()> { + fn extend_rpc_modules(self: Box, _: RpcContext<'_, Node>) -> eyre::Result<()> { Ok(()) } } From b3d7b7d501d7a6b81da9c1f9643c5ec55786bb96 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 30 Apr 2024 12:30:04 +0200 Subject: [PATCH 401/700] feat: `StaticFileProviderFactory` (#7983) --- bin/reth/src/commands/db/clear.rs | 2 +- bin/reth/src/commands/db/get.rs | 1 + bin/reth/src/commands/debug_cmd/execution.rs | 5 ++++- .../src/commands/debug_cmd/in_memory_merkle.rs | 3 ++- bin/reth/src/commands/debug_cmd/replay_engine.rs | 5 ++++- bin/reth/src/commands/import.rs | 2 +- bin/reth/src/commands/stage/drop.rs | 2 +- bin/reth/src/commands/stage/run.rs | 4 +++- bin/reth/src/commands/stage/unwind.rs | 1 + crates/blockchain-tree/src/externals.rs | 2 +- crates/consensus/beacon/src/engine/sync.rs | 2 +- crates/consensus/beacon/src/engine/test_utils.rs | 1 + crates/node-core/src/init.rs | 2 +- crates/node/builder/src/launch/common.rs | 2 +- crates/prune/src/pruner.rs | 4 +++- crates/stages-api/src/pipeline/mod.rs | 1 + crates/stages/src/lib.rs | 1 + crates/stages/src/sets.rs | 1 + crates/stages/src/stages/bodies.rs | 4 +++- crates/stages/src/stages/execution.rs | 5 ++++- crates/stages/src/stages/headers.rs | 4 +++- crates/stages/src/stages/merkle.rs | 2 +- crates/stages/src/stages/sender_recovery.rs | 5 ++++- 
crates/stages/src/stages/tx_lookup.rs | 2 +- crates/stages/src/test_utils/macros.rs | 10 +++++----- crates/stages/src/test_utils/test_db.rs | 2 +- crates/static-file/src/static_file_producer.rs | 2 +- .../storage/provider/src/providers/database/mod.rs | 14 ++++++++------ crates/storage/provider/src/providers/mod.rs | 10 ++++++++-- .../provider/src/providers/state/historical.rs | 1 + crates/storage/provider/src/traits/full.rs | 3 +++ crates/storage/provider/src/traits/mod.rs | 3 +++ .../provider/src/traits/static_file_provider.rs | 7 +++++++ 33 files changed, 82 insertions(+), 33 deletions(-) create mode 100644 crates/storage/provider/src/traits/static_file_provider.rs diff --git a/bin/reth/src/commands/db/clear.rs b/bin/reth/src/commands/db/clear.rs index a7c32cac1b4e7..f985be8ab4aba 100644 --- a/bin/reth/src/commands/db/clear.rs +++ b/bin/reth/src/commands/db/clear.rs @@ -7,7 +7,7 @@ use reth_db::{ TableViewer, Tables, }; use reth_primitives::{static_file::find_fixed_range, StaticFileSegment}; -use reth_provider::ProviderFactory; +use reth_provider::{ProviderFactory, StaticFileProviderFactory}; /// The arguments for the `reth db clear` command #[derive(Parser, Debug)] diff --git a/bin/reth/src/commands/db/get.rs b/bin/reth/src/commands/db/get.rs index 958ced09fe5c6..80e3ae393d1b0 100644 --- a/bin/reth/src/commands/db/get.rs +++ b/bin/reth/src/commands/db/get.rs @@ -7,6 +7,7 @@ use reth_db::{ tables, RawKey, RawTable, Receipts, TableViewer, Transactions, }; use reth_primitives::{BlockHash, Header, StaticFileSegment}; +use reth_provider::StaticFileProviderFactory; use tracing::error; /// The arguments for the `reth db get` command diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 2384a9af07580..df6b4d1119570 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -29,7 +29,10 @@ use reth_node_ethereum::EthEvmConfig; use reth_primitives::{ fs, 
stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, PruneModes, B256, }; -use reth_provider::{BlockExecutionWriter, HeaderSyncMode, ProviderFactory, StageCheckpointReader}; +use reth_provider::{ + BlockExecutionWriter, HeaderSyncMode, ProviderFactory, StageCheckpointReader, + StaticFileProviderFactory, +}; use reth_stages::{ sets::DefaultStages, stages::{ExecutionStage, ExecutionStageThresholds, SenderRecoveryStage}, diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 3632f4cff6f92..f13b503f1f768 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -21,7 +21,8 @@ use reth_node_ethereum::EthEvmConfig; use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, ChainSpec}; use reth_provider::{ AccountExtReader, ExecutorFactory, HashingWriter, HeaderProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StorageReader, + OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StaticFileProviderFactory, + StorageReader, }; use reth_tasks::TaskExecutor; use reth_trie::{updates::TrieKey, StateRoot}; diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index d9b6e98659cbd..f59af621868d9 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -24,7 +24,10 @@ use reth_node_core::engine_api_store::{EngineApiStore, StoredEngineApiMessage}; use reth_node_ethereum::{EthEngineTypes, EthEvmConfig}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::{fs, ChainSpec, PruneModes}; -use reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions, ProviderFactory}; +use reth_provider::{ + providers::BlockchainProvider, CanonStateSubscriptions, ProviderFactory, + StaticFileProviderFactory, +}; use 
reth_revm::EvmProcessorFactory; use reth_stages::Pipeline; use reth_static_file::StaticFileProducer; diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 0136e0e5e67f0..4731bf56558fa 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -31,7 +31,7 @@ use reth_node_events::node::NodeEvent; use reth_primitives::{stage::StageId, ChainSpec, PruneModes, B256}; use reth_provider::{ BlockNumReader, HeaderProvider, HeaderSyncMode, ProviderError, ProviderFactory, - StageCheckpointReader, + StageCheckpointReader, StaticFileProviderFactory, }; use reth_stages::{ prelude::*, diff --git a/bin/reth/src/commands/stage/drop.rs b/bin/reth/src/commands/stage/drop.rs index e79a4c33b4f19..5c14406027e29 100644 --- a/bin/reth/src/commands/stage/drop.rs +++ b/bin/reth/src/commands/stage/drop.rs @@ -15,7 +15,7 @@ use reth_node_core::init::{insert_genesis_header, insert_genesis_history, insert use reth_primitives::{ fs, stage::StageId, static_file::find_fixed_range, ChainSpec, StaticFileSegment, }; -use reth_provider::{providers::StaticFileWriter, ProviderFactory}; +use reth_provider::{providers::StaticFileWriter, ProviderFactory, StaticFileProviderFactory}; use std::sync::Arc; /// `reth drop-stage` command diff --git a/bin/reth/src/commands/stage/run.rs b/bin/reth/src/commands/stage/run.rs index 32550718f4dc1..66fb25b477e2c 100644 --- a/bin/reth/src/commands/stage/run.rs +++ b/bin/reth/src/commands/stage/run.rs @@ -21,7 +21,9 @@ use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; use reth_exex::ExExManagerHandle; use reth_node_ethereum::EthEvmConfig; use reth_primitives::ChainSpec; -use reth_provider::{ProviderFactory, StageCheckpointReader, StageCheckpointWriter}; +use reth_provider::{ + ProviderFactory, StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory, +}; use reth_stages::{ stages::{ AccountHashingStage, BodyStage, ExecutionStage, ExecutionStageThresholds, diff --git 
a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index 2682683d4ceef..9ffaad97952ad 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -25,6 +25,7 @@ use reth_node_ethereum::EthEvmConfig; use reth_primitives::{BlockHashOrNumber, ChainSpec, PruneModes, B256}; use reth_provider::{ BlockExecutionWriter, BlockNumReader, ChainSpecProvider, HeaderSyncMode, ProviderFactory, + StaticFileProviderFactory, }; use reth_prune::PrunerBuilder; use reth_stages::{ diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index c3bda1ae21e9e..36f3041738592 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -6,7 +6,7 @@ use reth_db::{ }; use reth_interfaces::RethResult; use reth_primitives::{BlockHash, BlockNumber, StaticFileSegment}; -use reth_provider::{ProviderFactory, StatsReader}; +use reth_provider::{ProviderFactory, StaticFileProviderFactory, StatsReader}; use std::{collections::BTreeMap, sync::Arc}; /// A container for external components. 
diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 96163e9963380..9e206176a28f9 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -437,7 +437,7 @@ mod tests { }; use reth_provider::{ test_utils::{create_test_provider_factory_with_chain_spec, TestExecutorFactory}, - BundleStateWithReceipts, + BundleStateWithReceipts, StaticFileProviderFactory, }; use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; use reth_static_file::StaticFileProducer; diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 67225b7c71ce6..513987e7581f9 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -27,6 +27,7 @@ use reth_provider::{ providers::BlockchainProvider, test_utils::{create_test_provider_factory_with_chain_spec, TestExecutorFactory}, BundleStateWithReceipts, ExecutorFactory, HeaderSyncMode, PrunableBlockExecutor, + StaticFileProviderFactory, }; use reth_prune::Pruner; use reth_revm::EvmProcessorFactory; diff --git a/crates/node-core/src/init.rs b/crates/node-core/src/init.rs index eb513cc4004be..883bb437a5f0c 100644 --- a/crates/node-core/src/init.rs +++ b/crates/node-core/src/init.rs @@ -15,7 +15,7 @@ use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, BlockHashReader, BlockNumReader, BundleStateWithReceipts, ChainSpecProvider, DatabaseProviderRW, HashingWriter, HistoryWriter, OriginalValuesKnown, ProviderError, - ProviderFactory, + ProviderFactory, StaticFileProviderFactory, }; use reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress}; use serde::{Deserialize, Serialize}; diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index c57e12cf668b0..28453a047c3ab 100644 --- a/crates/node/builder/src/launch/common.rs +++ 
b/crates/node/builder/src/launch/common.rs @@ -17,7 +17,7 @@ use reth_node_core::{ node_config::NodeConfig, }; use reth_primitives::{BlockNumber, Chain, ChainSpec, Head, PruneModes, B256}; -use reth_provider::{providers::StaticFileProvider, ProviderFactory}; +use reth_provider::{providers::StaticFileProvider, ProviderFactory, StaticFileProviderFactory}; use reth_prune::PrunerBuilder; use reth_rpc::JwtSecret; use reth_static_file::StaticFileProducer; diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index f3bf963e0e13c..55a998709d8e1 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -10,7 +10,9 @@ use reth_primitives::{ BlockNumber, FinishedExExHeight, PruneLimiter, PruneMode, PruneProgress, PrunePurpose, PruneSegment, StaticFileSegment, }; -use reth_provider::{DatabaseProviderRW, ProviderFactory, PruneCheckpointReader}; +use reth_provider::{ + DatabaseProviderRW, ProviderFactory, PruneCheckpointReader, StaticFileProviderFactory, +}; use reth_tokio_util::EventListeners; use std::{ collections::BTreeMap, diff --git a/crates/stages-api/src/pipeline/mod.rs b/crates/stages-api/src/pipeline/mod.rs index 1b455a9395c25..89bb9107d55f8 100644 --- a/crates/stages-api/src/pipeline/mod.rs +++ b/crates/stages-api/src/pipeline/mod.rs @@ -13,6 +13,7 @@ use reth_primitives::{ }; use reth_provider::{ providers::StaticFileWriter, ProviderFactory, StageCheckpointReader, StageCheckpointWriter, + StaticFileProviderFactory, }; use reth_static_file::StaticFileProducer; use reth_tokio_util::EventListeners; diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index 3fea3e04df12b..92c2b3a09a6fd 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -24,6 +24,7 @@ //! # use tokio::sync::watch; //! # use reth_evm_ethereum::EthEvmConfig; //! # use reth_provider::ProviderFactory; +//! # use reth_provider::StaticFileProviderFactory; //! # use reth_provider::HeaderSyncMode; //! 
# use reth_provider::test_utils::create_test_provider_factory; //! # use reth_static_file::StaticFileProducer; diff --git a/crates/stages/src/sets.rs b/crates/stages/src/sets.rs index 4f04e9b10f208..99edf05b7212e 100644 --- a/crates/stages/src/sets.rs +++ b/crates/stages/src/sets.rs @@ -15,6 +15,7 @@ //! # use reth_revm::EvmProcessorFactory; //! # use reth_primitives::{PruneModes, MAINNET}; //! # use reth_evm_ethereum::EthEvmConfig; +//! # use reth_provider::StaticFileProviderFactory; //! # use reth_provider::test_utils::create_test_provider_factory; //! # use reth_static_file::StaticFileProducer; //! # use reth_config::config::EtlConfig; diff --git a/crates/stages/src/stages/bodies.rs b/crates/stages/src/stages/bodies.rs index 6dfe7a6a8d2cd..5080b9b9ee689 100644 --- a/crates/stages/src/stages/bodies.rs +++ b/crates/stages/src/stages/bodies.rs @@ -381,6 +381,7 @@ mod tests { use assert_matches::assert_matches; use reth_primitives::stage::StageUnitCheckpoint; + use reth_provider::StaticFileProviderFactory; use test_utils::*; use crate::test_utils::{ @@ -632,7 +633,8 @@ mod tests { StaticFileSegment, TxNumber, B256, }; use reth_provider::{ - providers::StaticFileWriter, HeaderProvider, ProviderFactory, TransactionsProvider, + providers::StaticFileWriter, HeaderProvider, ProviderFactory, + StaticFileProviderFactory, TransactionsProvider, }; use reth_stages_api::{ExecInput, ExecOutput, UnwindInput}; diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 6fb6f58e7fca2..1771e2570d6dd 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -616,7 +616,10 @@ mod tests { Bytecode, ChainSpecBuilder, PruneMode, ReceiptsLogPruneConfig, SealedBlock, StorageEntry, B256, U256, }; - use reth_provider::{test_utils::create_test_provider_factory, AccountReader, ReceiptProvider}; + use reth_provider::{ + test_utils::create_test_provider_factory, AccountReader, ReceiptProvider, + 
StaticFileProviderFactory, + }; use reth_revm::EvmProcessorFactory; use std::collections::BTreeMap; diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index 548048dd713a6..f0a8c181177b3 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -387,7 +387,9 @@ mod tests { use reth_primitives::{ stage::StageUnitCheckpoint, BlockBody, SealedBlock, SealedBlockWithSenders, B256, }; - use reth_provider::{BlockWriter, BundleStateWithReceipts, ProviderFactory}; + use reth_provider::{ + BlockWriter, BundleStateWithReceipts, ProviderFactory, StaticFileProviderFactory, + }; use reth_trie::{updates::TrieUpdates, HashedPostState}; use test_runner::HeadersTestRunner; diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index 562cff1830b75..186382e36c234 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -377,7 +377,7 @@ mod tests { use reth_primitives::{ keccak256, stage::StageUnitCheckpoint, SealedBlock, StaticFileSegment, StorageEntry, U256, }; - use reth_provider::providers::StaticFileWriter; + use reth_provider::{providers::StaticFileWriter, StaticFileProviderFactory}; use reth_trie::test_utils::{state_root, state_root_prehashed}; use std::collections::BTreeMap; diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index 5ddb2dfc08424..e078fd95421ca 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -292,7 +292,10 @@ mod tests { stage::StageUnitCheckpoint, BlockNumber, PruneCheckpoint, PruneMode, SealedBlock, TransactionSigned, B256, }; - use reth_provider::{providers::StaticFileWriter, PruneCheckpointWriter, TransactionsProvider}; + use reth_provider::{ + providers::StaticFileWriter, PruneCheckpointWriter, StaticFileProviderFactory, + TransactionsProvider, + }; use super::*; use crate::test_utils::{ diff --git 
a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index 918be21c5ba94..101c52258a8e8 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -248,7 +248,7 @@ mod tests { generators::{random_block, random_block_range}, }; use reth_primitives::{stage::StageUnitCheckpoint, BlockNumber, SealedBlock, B256}; - use reth_provider::providers::StaticFileWriter; + use reth_provider::{providers::StaticFileWriter, StaticFileProviderFactory}; use std::ops::Sub; // Implement stage test suite. diff --git a/crates/stages/src/test_utils/macros.rs b/crates/stages/src/test_utils/macros.rs index 0ce346d704f30..11fb46cde0b1c 100644 --- a/crates/stages/src/test_utils/macros.rs +++ b/crates/stages/src/test_utils/macros.rs @@ -13,7 +13,7 @@ macro_rules! stage_test_suite { // Run stage execution let result = runner.execute(input).await; - runner.db().factory.static_file_provider().commit().unwrap(); + reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap(); // Check that the result is returned and the stage does not panic. // The return result with empty db is stage-specific. @@ -46,7 +46,7 @@ macro_rules! stage_test_suite { // Assert the successful result let result = rx.await.unwrap(); - runner.db().factory.static_file_provider().commit().unwrap(); + reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap(); assert_matches::assert_matches!( result, @@ -76,7 +76,7 @@ macro_rules! stage_test_suite { // Run stage unwind let rx = runner.unwind(input).await; - runner.db().factory.static_file_provider().commit().unwrap(); + reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap(); assert_matches::assert_matches!( rx, @@ -110,7 +110,7 @@ macro_rules! 
stage_test_suite { // Assert the successful execution result let result = rx.await.unwrap(); - runner.db().factory.static_file_provider().commit().unwrap(); + reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap(); assert_matches::assert_matches!( result, @@ -179,7 +179,7 @@ macro_rules! stage_test_suite_ext { // Assert the successful result let result = rx.await.unwrap(); - runner.db().factory.static_file_provider().commit().unwrap(); + reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap(); assert_matches::assert_matches!( result, diff --git a/crates/stages/src/test_utils/test_db.rs b/crates/stages/src/test_utils/test_db.rs index a080c9c8f8e6c..5fe65a737349b 100644 --- a/crates/stages/src/test_utils/test_db.rs +++ b/crates/stages/src/test_utils/test_db.rs @@ -18,7 +18,7 @@ use reth_primitives::{ }; use reth_provider::{ providers::{StaticFileProviderRWRefMut, StaticFileWriter}, - HistoryWriter, ProviderError, ProviderFactory, + HistoryWriter, ProviderError, ProviderFactory, StaticFileProviderFactory, }; use std::{collections::BTreeMap, path::Path, sync::Arc}; use tempfile::TempDir; diff --git a/crates/static-file/src/static_file_producer.rs b/crates/static-file/src/static_file_producer.rs index 2af4f8cac0651..0b0720e21044a 100644 --- a/crates/static-file/src/static_file_producer.rs +++ b/crates/static-file/src/static_file_producer.rs @@ -255,7 +255,7 @@ mod tests { }; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, - ProviderFactory, + ProviderFactory, StaticFileProviderFactory, }; use reth_stages::test_utils::{StorageKind, TestStageDB}; use std::{ diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index a2bf883d5b177..1e2f73cbc9c64 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -5,7 +5,7 @@ 
use crate::{ BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory, EvmEnvProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HeaderSyncMode, ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProviderBox, - TransactionVariant, TransactionsProvider, WithdrawalsProvider, + StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use reth_db::{database::Database, init_db, models::StoredBlockBodyIndices, DatabaseEnv}; use reth_evm::ConfigureEvmEnv; @@ -69,11 +69,6 @@ impl ProviderFactory { &self.db } - /// Returns static file provider - pub fn static_file_provider(&self) -> StaticFileProvider { - self.static_file_provider.clone() - } - #[cfg(any(test, feature = "test-utils"))] /// Consumes Self and returns DB pub fn into_db(self) -> DB { @@ -161,6 +156,13 @@ impl DatabaseProviderFactory for ProviderFactory { } } +impl StaticFileProviderFactory for ProviderFactory { + /// Returns static file provider + fn static_file_provider(&self) -> StaticFileProvider { + self.static_file_provider.clone() + } +} + impl HeaderSyncGapProvider for ProviderFactory { fn sync_gap( &self, diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index c9ebd042cdd21..f58f77dd02240 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -4,8 +4,8 @@ use crate::{ CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, HeaderProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, - StateProviderFactory, TransactionVariant, TransactionsProvider, TreeViewer, - WithdrawalsProvider, + StateProviderFactory, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, + TreeViewer, WithdrawalsProvider, }; use reth_db::{ database::Database, @@ -142,6 
+142,12 @@ where } } +impl StaticFileProviderFactory for BlockchainProvider { + fn static_file_provider(&self) -> StaticFileProvider { + self.database.static_file_provider() + } +} + impl HeaderProvider for BlockchainProvider where DB: Database, diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index e87be25c969f7..ed64314aa2ba3 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -405,6 +405,7 @@ mod tests { providers::state::historical::{HistoryInfo, LowestAvailableBlocks}, test_utils::create_test_provider_factory, AccountReader, HistoricalStateProvider, HistoricalStateProviderRef, StateProvider, + StaticFileProviderFactory, }; use reth_db::{ models::{storage_sharded_key::StorageShardedKey, AccountBeforeTx, ShardedKey}, diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 78ef740852ff0..9214cc273ac8d 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -3,12 +3,14 @@ use crate::{ AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, StageCheckpointReader, StateProviderFactory, + StaticFileProviderFactory, }; use reth_db::database::Database; /// Helper trait to unify all provider traits for simplicity. 
pub trait FullProvider: DatabaseProviderFactory + + StaticFileProviderFactory + BlockReaderIdExt + AccountReader + StateProviderFactory @@ -25,6 +27,7 @@ pub trait FullProvider: impl FullProvider for T where T: DatabaseProviderFactory + + StaticFileProviderFactory + BlockReaderIdExt + AccountReader + StateProviderFactory diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 44884acb019a6..6d78cf5834855 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -75,6 +75,9 @@ pub use prune_checkpoint::{PruneCheckpointReader, PruneCheckpointWriter}; mod database_provider; pub use database_provider::DatabaseProviderFactory; +mod static_file_provider; +pub use static_file_provider::StaticFileProviderFactory; + mod stats; pub use stats::StatsReader; diff --git a/crates/storage/provider/src/traits/static_file_provider.rs b/crates/storage/provider/src/traits/static_file_provider.rs new file mode 100644 index 0000000000000..24d69569205c3 --- /dev/null +++ b/crates/storage/provider/src/traits/static_file_provider.rs @@ -0,0 +1,7 @@ +use crate::providers::StaticFileProvider; + +/// Static file provider factory. +pub trait StaticFileProviderFactory { + /// Create new instance of static file provider. 
+ fn static_file_provider(&self) -> StaticFileProvider; +} From 0f9658cfa17cc9dd97ccf8ee3b7ff855757c4b59 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 30 Apr 2024 13:27:56 +0200 Subject: [PATCH 402/700] chore: use `FnOnce` on `WithLaunchContext` methods (#7989) --- bin/reth/src/optimism.rs | 2 +- crates/node/builder/src/builder/mod.rs | 12 +++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/bin/reth/src/optimism.rs b/bin/reth/src/optimism.rs index 0c0a483dd41f5..a651314b8c5f8 100644 --- a/bin/reth/src/optimism.rs +++ b/bin/reth/src/optimism.rs @@ -31,7 +31,7 @@ fn main() { .node(OptimismNode::new(rollup_args.clone())) .extend_rpc_modules(move |ctx| { // register sequencer tx forwarder - if let Some(sequencer_http) = rollup_args.sequencer_http.clone() { + if let Some(sequencer_http) = rollup_args.sequencer_http { ctx.registry.set_eth_raw_transaction_forwarder(Arc::new(SequencerClient::new( sequencer_http, ))); diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 10977360aef4b..7f898ca210bbc 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -316,7 +316,7 @@ where /// Sets the hook that is run once the node's components are initialized. pub fn on_component_initialized(self, hook: F) -> Self where - F: Fn(NodeAdapter, CB::Components>) -> eyre::Result<()> + F: FnOnce(NodeAdapter, CB::Components>) -> eyre::Result<()> + Send + 'static, { @@ -330,7 +330,9 @@ where /// Sets the hook that is run once the node has started. pub fn on_node_started(self, hook: F) -> Self where - F: Fn(FullNode, CB::Components>>) -> eyre::Result<()> + F: FnOnce( + FullNode, CB::Components>>, + ) -> eyre::Result<()> + Send + 'static, { @@ -344,7 +346,7 @@ where /// Sets the hook that is run once the rpc server is started. 
pub fn on_rpc_started(self, hook: F) -> Self where - F: Fn( + F: FnOnce( RpcContext<'_, NodeAdapter, CB::Components>>, RethRpcServerHandles, ) -> eyre::Result<()> @@ -361,7 +363,7 @@ where /// Sets the hook that is run to configure the rpc modules. pub fn extend_rpc_modules(self, hook: F) -> Self where - F: Fn( + F: FnOnce( RpcContext<'_, NodeAdapter, CB::Components>>, ) -> eyre::Result<()> + Send @@ -381,7 +383,7 @@ where /// The ExEx ID must be unique. pub fn install_exex(self, exex_id: impl Into, exex: F) -> Self where - F: Fn(ExExContext, CB::Components>>) -> R + F: FnOnce(ExExContext, CB::Components>>) -> R + Send + 'static, R: Future> + Send, From c3cdd8c646f123c281c9035a180676b975a740d4 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 30 Apr 2024 13:00:18 +0100 Subject: [PATCH 403/700] feat(examples): sqlite rollup exex (#7826) --- Cargo.lock | 27 + Cargo.toml | 1 + crates/primitives/src/block.rs | 12 +- .../storage/provider/src/bundle_state/mod.rs | 2 +- .../src/bundle_state/state_reverts.rs | 6 +- examples/README.md | 1 + examples/exex/rollup/Cargo.toml | 38 ++ examples/exex/rollup/rollup_abi.json | 626 ++++++++++++++++++ examples/exex/rollup/src/db.rs | 460 +++++++++++++ examples/exex/rollup/src/main.rs | 586 ++++++++++++++++ 10 files changed, 1753 insertions(+), 6 deletions(-) create mode 100644 examples/exex/rollup/Cargo.toml create mode 100644 examples/exex/rollup/rollup_abi.json create mode 100644 examples/exex/rollup/src/db.rs create mode 100644 examples/exex/rollup/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index b283e8eaf710b..c7eca223b4862 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2881,6 +2881,33 @@ dependencies = [ "tokio", ] +[[package]] +name = "exex-rollup" +version = "0.0.0" +dependencies = [ + "alloy-rlp", + "alloy-sol-types", + "eyre", + "futures", + "once_cell", + "reth", + "reth-cli-runner", + "reth-exex", + "reth-interfaces", + "reth-node-api", + "reth-node-core", + "reth-node-ethereum", + "reth-primitives", + 
"reth-provider", + "reth-revm", + "reth-tracing", + "reth-trie", + "rusqlite", + "secp256k1", + "serde_json", + "tokio", +] + [[package]] name = "eyre" version = "0.6.12" diff --git a/Cargo.toml b/Cargo.toml index 70e36cd94e7d2..e7bf2ec5c382b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -89,6 +89,7 @@ members = [ "examples/custom-inspector/", "examples/exex/minimal/", "examples/exex/op-bridge/", + "examples/exex/rollup/", "examples/db-access", "testing/ef-tests/", "testing/testing-utils", diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 864e7954f58bf..4b4831b9c1ed8 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -49,7 +49,7 @@ pub struct Block { } impl Block { - /// Create SealedBLock that will create all header hashes. + /// Calculate the header hash and seal the block so that it can't be changed. pub fn seal_slow(self) -> SealedBlock { SealedBlock { header: self.header.seal_slow(), @@ -175,7 +175,7 @@ impl TryFrom for Block { .collect(), reth_rpc_types::BlockTransactions::Hashes(_) | reth_rpc_types::BlockTransactions::Uncle => { - return Err(ConversionError::MissingFullTransactions); + return Err(ConversionError::MissingFullTransactions) } }; transactions? @@ -214,6 +214,12 @@ impl BlockWithSenders { SealedBlockWithSenders { block: block.seal(hash), senders } } + /// Calculate the header hash and seal the block with senders so that it can't be changed. + #[inline] + pub fn seal_slow(self) -> SealedBlockWithSenders { + SealedBlockWithSenders { block: self.block.seal_slow(), senders: self.senders } + } + /// Split Structure to its components #[inline] pub fn into_components(self) -> (Block, Vec
) { @@ -456,7 +462,7 @@ impl std::ops::DerefMut for SealedBlock { } /// Sealed block with senders recovered from transactions. -#[derive(Debug, Clone, PartialEq, Eq, Default)] +#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct SealedBlockWithSenders { /// Sealed block pub block: SealedBlock, diff --git a/crates/storage/provider/src/bundle_state/mod.rs b/crates/storage/provider/src/bundle_state/mod.rs index 3f5da6ec62420..5df4a213aaf92 100644 --- a/crates/storage/provider/src/bundle_state/mod.rs +++ b/crates/storage/provider/src/bundle_state/mod.rs @@ -10,4 +10,4 @@ pub use bundle_state_with_receipts::{ }; pub use hashed_state_changes::HashedStateChanges; pub use state_changes::StateChanges; -pub use state_reverts::StateReverts; +pub use state_reverts::{StateReverts, StorageRevertsIter}; diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index e61572cf5b614..006f87b4053c2 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -108,7 +108,8 @@ impl StateReverts { /// Iterator over storage reverts. /// See [StorageRevertsIter::next] for more details. -struct StorageRevertsIter { +#[allow(missing_debug_implementations)] +pub struct StorageRevertsIter { reverts: Peekable, wiped: Peekable, } @@ -118,7 +119,8 @@ where R: Iterator, W: Iterator, { - fn new( + /// Create a new iterator over storage reverts. + pub fn new( reverts: impl IntoIterator, wiped: impl IntoIterator, ) -> Self { diff --git a/examples/README.md b/examples/README.md index ea2c87c1bb355..0885aa294e7ab 100644 --- a/examples/README.md +++ b/examples/README.md @@ -27,6 +27,7 @@ to make a PR! 
| ---------------------------------- | --------------------------------------------------------------------------------- | | [Minimal ExEx](./exex/minimal) | Illustrates how to build a simple ExEx | | [OP Bridge ExEx](./exex/op-bridge) | Illustrates an ExEx that decodes Optimism deposit and withdrawal receipts from L1 | +| [Rollup](./exex/rollup) | Illustrates a rollup ExEx that derives the state from L1 | ## RPC diff --git a/examples/exex/rollup/Cargo.toml b/examples/exex/rollup/Cargo.toml new file mode 100644 index 0000000000000..8d338c241e720 --- /dev/null +++ b/examples/exex/rollup/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "exex-rollup" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +# reth +reth.workspace = true +reth-cli-runner.workspace = true +reth-exex.workspace = true +reth-interfaces.workspace = true +reth-node-api.workspace = true +reth-node-core.workspace = true +reth-node-ethereum.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true +reth-revm.workspace = true +reth-tracing.workspace = true +reth-trie.workspace = true + +# async +tokio.workspace = true +futures.workspace = true + +# misc +alloy-sol-types = { workspace = true, features = ["json"] } +alloy-rlp.workspace = true +eyre.workspace = true +rusqlite = { version = "0.31.0", features = ["bundled"] } +serde_json.workspace = true +once_cell.workspace = true + +[dev-dependencies] +reth-interfaces = { workspace = true, features = ["test-utils"] } +secp256k1.workspace = true + diff --git a/examples/exex/rollup/rollup_abi.json b/examples/exex/rollup/rollup_abi.json new file mode 100644 index 0000000000000..08bc23f0e7829 --- /dev/null +++ b/examples/exex/rollup/rollup_abi.json @@ -0,0 +1,626 @@ +[ + { + "inputs": [ + { "internalType": "address", "name": "admin", "type": "address" } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { "inputs": [], "name": "AccessControlBadConfirmation", 
"type": "error" }, + { + "inputs": [ + { "internalType": "uint48", "name": "schedule", "type": "uint48" } + ], + "name": "AccessControlEnforcedDefaultAdminDelay", + "type": "error" + }, + { + "inputs": [], + "name": "AccessControlEnforcedDefaultAdminRules", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "defaultAdmin", + "type": "address" + } + ], + "name": "AccessControlInvalidDefaultAdmin", + "type": "error" + }, + { + "inputs": [ + { "internalType": "address", "name": "account", "type": "address" }, + { + "internalType": "bytes32", + "name": "neededRole", + "type": "bytes32" + } + ], + "name": "AccessControlUnauthorizedAccount", + "type": "error" + }, + { + "inputs": [ + { "internalType": "uint256", "name": "expected", "type": "uint256" } + ], + "name": "BadSequence", + "type": "error" + }, + { "inputs": [], "name": "BadSignature", "type": "error" }, + { "inputs": [], "name": "BlockExpired", "type": "error" }, + { + "inputs": [ + { + "internalType": "address", + "name": "sequencer", + "type": "address" + } + ], + "name": "NotSequencer", + "type": "error" + }, + { "inputs": [], "name": "OrderExpired", "type": "error" }, + { + "inputs": [ + { "internalType": "uint8", "name": "bits", "type": "uint8" }, + { "internalType": "uint256", "name": "value", "type": "uint256" } + ], + "name": "SafeCastOverflowedUintDowncast", + "type": "error" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "sequencer", + "type": "address" + }, + { + "components": [ + { + "internalType": "uint256", + "name": "rollupChainId", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "sequence", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "confirmBy", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "gasLimit", + "type": "uint256" + }, + { + "internalType": "address", + "name": "rewardAddress", + "type": "address" + } + ], + "indexed": true, + 
"internalType": "struct CalldataZenith.BlockHeader", + "name": "header", + "type": "tuple" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "blockData", + "type": "bytes" + } + ], + "name": "BlockSubmitted", + "type": "event" + }, + { + "anonymous": false, + "inputs": [], + "name": "DefaultAdminDelayChangeCanceled", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint48", + "name": "newDelay", + "type": "uint48" + }, + { + "indexed": false, + "internalType": "uint48", + "name": "effectSchedule", + "type": "uint48" + } + ], + "name": "DefaultAdminDelayChangeScheduled", + "type": "event" + }, + { + "anonymous": false, + "inputs": [], + "name": "DefaultAdminTransferCanceled", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "newAdmin", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint48", + "name": "acceptSchedule", + "type": "uint48" + } + ], + "name": "DefaultAdminTransferScheduled", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "rollupRecipient", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "Enter", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "hostRecipient", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "ExitFilled", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + 
}, + { + "indexed": true, + "internalType": "bytes32", + "name": "previousAdminRole", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "bytes32", + "name": "newAdminRole", + "type": "bytes32" + } + ], + "name": "RoleAdminChanged", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" + } + ], + "name": "RoleGranted", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" + } + ], + "name": "RoleRevoked", + "type": "event" + }, + { "stateMutability": "payable", "type": "fallback" }, + { + "inputs": [], + "name": "DEFAULT_ADMIN_ROLE", + "outputs": [ + { "internalType": "bytes32", "name": "", "type": "bytes32" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "SEQUENCER_ROLE", + "outputs": [ + { "internalType": "bytes32", "name": "", "type": "bytes32" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "acceptDefaultAdminTransfer", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { "internalType": "address", "name": "newAdmin", "type": "address" } + ], + "name": "beginDefaultAdminTransfer", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "rollupChainId", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "sequence", + 
"type": "uint256" + }, + { + "internalType": "uint256", + "name": "confirmBy", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "gasLimit", + "type": "uint256" + }, + { + "internalType": "address", + "name": "rewardAddress", + "type": "address" + } + ], + "internalType": "struct CalldataZenith.BlockHeader", + "name": "header", + "type": "tuple" + }, + { "internalType": "bytes", "name": "blockData", "type": "bytes" } + ], + "name": "blockCommitment", + "outputs": [ + { "internalType": "bytes32", "name": "commit", "type": "bytes32" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "cancelDefaultAdminTransfer", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { "internalType": "uint48", "name": "newDelay", "type": "uint48" } + ], + "name": "changeDefaultAdminDelay", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "defaultAdmin", + "outputs": [ + { "internalType": "address", "name": "", "type": "address" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "defaultAdminDelay", + "outputs": [{ "internalType": "uint48", "name": "", "type": "uint48" }], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "defaultAdminDelayIncreaseWait", + "outputs": [{ "internalType": "uint48", "name": "", "type": "uint48" }], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "rollupRecipient", + "type": "address" + } + ], + "name": "enter", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "address", + "name": "recipient", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, 
+ { + "internalType": "uint256", + "name": "deadline", + "type": "uint256" + } + ], + "internalType": "struct HostPassage.ExitOrder[]", + "name": "orders", + "type": "tuple[]" + } + ], + "name": "fulfillExits", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { "internalType": "bytes32", "name": "role", "type": "bytes32" } + ], + "name": "getRoleAdmin", + "outputs": [ + { "internalType": "bytes32", "name": "", "type": "bytes32" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { "internalType": "bytes32", "name": "role", "type": "bytes32" }, + { "internalType": "address", "name": "account", "type": "address" } + ], + "name": "grantRole", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { "internalType": "bytes32", "name": "role", "type": "bytes32" }, + { "internalType": "address", "name": "account", "type": "address" } + ], + "name": "hasRole", + "outputs": [{ "internalType": "bool", "name": "", "type": "bool" }], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { "internalType": "uint256", "name": "", "type": "uint256" } + ], + "name": "nextSequence", + "outputs": [ + { "internalType": "uint256", "name": "", "type": "uint256" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "owner", + "outputs": [ + { "internalType": "address", "name": "", "type": "address" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "pendingDefaultAdmin", + "outputs": [ + { + "internalType": "address", + "name": "newAdmin", + "type": "address" + }, + { "internalType": "uint48", "name": "schedule", "type": "uint48" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "pendingDefaultAdminDelay", + "outputs": [ + { "internalType": "uint48", "name": "newDelay", "type": "uint48" }, + { "internalType": "uint48", "name": 
"schedule", "type": "uint48" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { "internalType": "bytes32", "name": "role", "type": "bytes32" }, + { "internalType": "address", "name": "account", "type": "address" } + ], + "name": "renounceRole", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { "internalType": "bytes32", "name": "role", "type": "bytes32" }, + { "internalType": "address", "name": "account", "type": "address" } + ], + "name": "revokeRole", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "rollbackDefaultAdminDelay", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "rollupChainId", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "sequence", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "confirmBy", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "gasLimit", + "type": "uint256" + }, + { + "internalType": "address", + "name": "rewardAddress", + "type": "address" + } + ], + "internalType": "struct CalldataZenith.BlockHeader", + "name": "header", + "type": "tuple" + }, + { "internalType": "bytes", "name": "blockData", "type": "bytes" }, + { "internalType": "uint8", "name": "v", "type": "uint8" }, + { "internalType": "bytes32", "name": "r", "type": "bytes32" }, + { "internalType": "bytes32", "name": "s", "type": "bytes32" } + ], + "name": "submitBlock", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes4", + "name": "interfaceId", + "type": "bytes4" + } + ], + "name": "supportsInterface", + "outputs": [{ "internalType": "bool", "name": "", "type": "bool" }], + "stateMutability": "view", + "type": "function" + }, + { "stateMutability": "payable", "type": "receive" } +] diff 
--git a/examples/exex/rollup/src/db.rs b/examples/exex/rollup/src/db.rs new file mode 100644 index 0000000000000..39c2b418b81e9 --- /dev/null +++ b/examples/exex/rollup/src/db.rs @@ -0,0 +1,460 @@ +use std::{ + collections::{hash_map::Entry, HashMap}, + str::FromStr, + sync::{Arc, Mutex, MutexGuard}, +}; + +use reth_primitives::{ + revm_primitives::{AccountInfo, Bytecode}, + Address, Bytes, SealedBlockWithSenders, StorageEntry, B256, U256, +}; +use reth_provider::{bundle_state::StorageRevertsIter, OriginalValuesKnown}; +use reth_revm::db::{ + states::{PlainStorageChangeset, PlainStorageRevert}, + BundleState, +}; +use rusqlite::Connection; + +/// Type used to initialize revms bundle state. +type BundleStateInit = + HashMap, Option, HashMap)>; + +/// Types used inside RevertsInit to initialize revms reverts. +pub type AccountRevertInit = (Option>, Vec); + +/// Type used to initialize revms reverts. +pub type RevertsInit = HashMap; + +pub struct Database { + connection: Arc>, +} + +impl Database { + /// Create new database with the provided connection. 
+ pub fn new(connection: Connection) -> eyre::Result { + let database = Self { connection: Arc::new(Mutex::new(connection)) }; + database.create_tables()?; + Ok(database) + } + + fn connection(&self) -> MutexGuard<'_, Connection> { + self.connection.lock().expect("failed to acquire database lock") + } + + fn create_tables(&self) -> eyre::Result<()> { + self.connection().execute_batch( + "CREATE TABLE IF NOT EXISTS block ( + id INTEGER PRIMARY KEY, + number TEXT UNIQUE, + data TEXT + ); + CREATE TABLE IF NOT EXISTS account ( + id INTEGER PRIMARY KEY, + address TEXT UNIQUE, + data TEXT + ); + CREATE TABLE IF NOT EXISTS account_revert ( + id INTEGER PRIMARY KEY, + block_number TEXT, + address TEXT, + data TEXT, + UNIQUE (block_number, address) + ); + CREATE TABLE IF NOT EXISTS storage ( + id INTEGER PRIMARY KEY, + address TEXT, + key TEXT, + data TEXT, + UNIQUE (address, key) + ); + CREATE TABLE IF NOT EXISTS storage_revert ( + id INTEGER PRIMARY KEY, + block_number TEXT, + address TEXT, + key TEXT, + data TEXT, + UNIQUE (block_number, address, key) + ); + CREATE TABLE IF NOT EXISTS bytecode ( + id INTEGER PRIMARY KEY, + hash TEXT UNIQUE, + data TEXT + );", + )?; + Ok(()) + } + + /// Insert block with bundle into the database. + pub fn insert_block_with_bundle( + &self, + block: &SealedBlockWithSenders, + bundle: BundleState, + ) -> eyre::Result<()> { + let mut connection = self.connection(); + let tx = connection.transaction()?; + + tx.execute( + "INSERT INTO block (number, data) VALUES (?, ?)", + (block.header.number.to_string(), serde_json::to_string(block)?), + )?; + + let (changeset, reverts) = bundle.into_plain_state_and_reverts(OriginalValuesKnown::Yes); + + for (address, account) in changeset.accounts { + if let Some(account) = account { + tx.execute( + "INSERT INTO account (address, data) VALUES (?, ?) 
ON CONFLICT(address) DO UPDATE SET data = excluded.data", + (address.to_string(), serde_json::to_string(&account)?), + )?; + } else { + tx.execute("DELETE FROM account WHERE address = ?", (address.to_string(),))?; + } + } + + if reverts.accounts.len() > 1 { + eyre::bail!("too many blocks in account reverts"); + } + for (address, account) in + reverts.accounts.first().ok_or(eyre::eyre!("no account reverts"))? + { + tx.execute( + "INSERT INTO account_revert (block_number, address, data) VALUES (?, ?, ?) ON CONFLICT(block_number, address) DO UPDATE SET data = excluded.data", + (block.header.number.to_string(), address.to_string(), serde_json::to_string(account)?), + )?; + } + + for PlainStorageChangeset { address, wipe_storage, storage } in changeset.storage { + if wipe_storage { + tx.execute("DELETE FROM storage WHERE address = ?", (address.to_string(),))?; + } + + for (key, data) in storage { + tx.execute( + "INSERT INTO storage (address, key, data) VALUES (?, ?, ?) ON CONFLICT(address, key) DO UPDATE SET data = excluded.data", + (address.to_string(), B256::from(key).to_string(), data.to_string()), + )?; + } + } + + if reverts.storage.len() > 1 { + eyre::bail!("too many blocks in storage reverts"); + } + for PlainStorageRevert { address, wiped, storage_revert } in + reverts.storage.into_iter().next().ok_or(eyre::eyre!("no storage reverts"))? + { + let storage = storage_revert + .into_iter() + .map(|(k, v)| (B256::new(k.to_be_bytes()), v)) + .collect::>(); + let wiped_storage = if wiped { get_storages(&tx, address)? } else { Vec::new() }; + for (key, data) in StorageRevertsIter::new(storage, wiped_storage) { + tx.execute( + "INSERT INTO storage_revert (block_number, address, key, data) VALUES (?, ?, ?, ?) 
ON CONFLICT(block_number, address, key) DO UPDATE SET data = excluded.data", + (block.header.number.to_string(), address.to_string(), key.to_string(), data.to_string()), + )?; + } + } + + for (hash, bytecode) in changeset.contracts { + tx.execute( + "INSERT INTO bytecode (hash, data) VALUES (?, ?) ON CONFLICT(hash) DO NOTHING", + (hash.to_string(), bytecode.bytes().to_string()), + )?; + } + + tx.commit()?; + + Ok(()) + } + + /// Reverts the tip block from the database, checking it against the provided block number. + /// + /// The code is adapted from + pub fn revert_tip_block(&self, block_number: U256) -> eyre::Result<()> { + let mut connection = self.connection(); + let tx = connection.transaction()?; + + let tip_block_number = tx + .query_row::( + "SELECT number FROM block ORDER BY number DESC LIMIT 1", + [], + |row| row.get(0), + ) + .map(|data| U256::from_str(&data))??; + if block_number != tip_block_number { + eyre::bail!("Reverts can only be done from the tip. Attempted to revert block {} with tip block {}", block_number, tip_block_number); + } + + tx.execute("DELETE FROM block WHERE number = ?", (block_number.to_string(),))?; + + let mut state = BundleStateInit::new(); + let mut reverts = RevertsInit::new(); + + let account_reverts = tx + .prepare("SELECT address, data FROM account_revert WHERE block_number = ?")? + .query((block_number.to_string(),))? 
+ .mapped(|row| { + Ok(( + Address::from_str(row.get_ref(0)?.as_str()?), + serde_json::from_str::>(row.get_ref(1)?.as_str()?), + )) + }) + .map(|result| { + let (address, data) = result?; + Ok((address?, data?)) + }) + .collect::>>()?; + + for (address, old_info) in account_reverts { + // insert old info into reverts + reverts.entry(address).or_default().0 = Some(old_info.clone()); + + match state.entry(address) { + Entry::Vacant(entry) => { + let new_info = get_account(&tx, address)?; + entry.insert((old_info, new_info, HashMap::new())); + } + Entry::Occupied(mut entry) => { + // overwrite old account state + entry.get_mut().0 = old_info; + } + } + } + + let storage_reverts = tx + .prepare("SELECT address, key, data FROM storage_revert WHERE block_number = ?")? + .query((block_number.to_string(),))? + .mapped(|row| { + Ok(( + Address::from_str(row.get_ref(0)?.as_str()?), + B256::from_str(row.get_ref(1)?.as_str()?), + U256::from_str(row.get_ref(2)?.as_str()?), + )) + }) + .map(|result| { + let (address, key, data) = result?; + Ok((address?, key?, data?)) + }) + .collect::>>()?; + + for (address, key, old_data) in storage_reverts.into_iter().rev() { + let old_storage = StorageEntry { key, value: old_data }; + + // insert old info into reverts + reverts.entry(address).or_default().1.push(old_storage); + + // get account state or insert from plain state + let account_state = match state.entry(address) { + Entry::Vacant(entry) => { + let present_info = get_account(&tx, address)?; + entry.insert((present_info.clone(), present_info, HashMap::new())) + } + Entry::Occupied(entry) => entry.into_mut(), + }; + + // match storage + match account_state.2.entry(old_storage.key) { + Entry::Vacant(entry) => { + let new_value = get_storage(&tx, address, old_storage.key)?.unwrap_or_default(); + entry.insert((old_storage.value, new_value)); + } + Entry::Occupied(mut entry) => { + entry.get_mut().0 = old_storage.value; + } + }; + } + + // iterate over local plain state remove all 
account and all storages + for (address, (old_account, new_account, storage)) in state { + // revert account if needed + if old_account != new_account { + if let Some(account) = old_account { + upsert_account(&tx, address, |_| Ok(account))?; + } else { + delete_account(&tx, address)?; + } + } + + // revert storages + for (storage_key, (old_storage_value, _new_storage_value)) in storage { + // delete previous value + delete_storage(&tx, address, storage_key)?; + + // insert value if needed + if !old_storage_value.is_zero() { + upsert_storage(&tx, address, storage_key, old_storage_value)?; + } + } + } + + tx.commit()?; + + Ok(()) + } + + /// Get block by number. + pub fn get_block(&self, number: U256) -> eyre::Result> { + let block = self.connection().query_row::( + "SELECT data FROM block WHERE number = ?", + (number.to_string(),), + |row| row.get(0), + ); + match block { + Ok(data) => Ok(Some(serde_json::from_str(&data)?)), + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), + Err(e) => Err(e.into()), + } + } + + /// Insert new account if it does not exist, update otherwise. The provided closure is called + /// with the current account, if it exists. + pub fn upsert_account( + &self, + address: Address, + f: impl FnOnce(Option) -> eyre::Result, + ) -> eyre::Result<()> { + upsert_account(&self.connection(), address, f) + } + + /// Get account by address. + pub fn get_account(&self, address: Address) -> eyre::Result> { + get_account(&self.connection(), address) + } +} + +/// Insert new account if it does not exist, update otherwise. The provided closure is called +/// with the current account, if it exists. Connection can be either +/// [rusqlite::Transaction] or [rusqlite::Connection]. 
+fn upsert_account( + connection: &Connection, + address: Address, + f: impl FnOnce(Option) -> eyre::Result, +) -> eyre::Result<()> { + let account = get_account(connection, address)?; + let account = f(account)?; + connection.execute( + "INSERT INTO account (address, data) VALUES (?, ?) ON CONFLICT(address) DO UPDATE SET data = excluded.data", + (address.to_string(), serde_json::to_string(&account)?), + )?; + + Ok(()) +} + +/// Delete account by address. Connection can be either [rusqlite::Transaction] or +/// [rusqlite::Connection]. +fn delete_account(connection: &Connection, address: Address) -> eyre::Result<()> { + connection.execute("DELETE FROM account WHERE address = ?", (address.to_string(),))?; + Ok(()) +} + +/// Get account by address using the database connection. Connection can be either +/// [rusqlite::Transaction] or [rusqlite::Connection]. +fn get_account(connection: &Connection, address: Address) -> eyre::Result> { + match connection.query_row::( + "SELECT data FROM account WHERE address = ?", + (address.to_string(),), + |row| row.get(0), + ) { + Ok(account_info) => Ok(Some(serde_json::from_str(&account_info)?)), + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), + Err(e) => Err(e.into()), + } +} + +/// Insert new storage if it does not exist, update otherwise. Connection can be either +/// [rusqlite::Transaction] or [rusqlite::Connection]. +fn upsert_storage( + connection: &Connection, + address: Address, + key: B256, + data: U256, +) -> eyre::Result<()> { + connection.execute( + "INSERT INTO storage (address, key, data) VALUES (?, ?, ?) ON CONFLICT(address, key) DO UPDATE SET data = excluded.data", + (address.to_string(), key.to_string(), data.to_string()), + )?; + Ok(()) +} + +/// Delete storage by address and key. Connection can be either [rusqlite::Transaction] or +/// [rusqlite::Connection]. 
+fn delete_storage(connection: &Connection, address: Address, key: B256) -> eyre::Result<()> { + connection.execute( + "DELETE FROM storage WHERE address = ? AND key = ?", + (address.to_string(), key.to_string()), + )?; + Ok(()) +} + +/// Get all storages for the provided address using the database connection. Connection can be +/// either [rusqlite::Transaction] or [rusqlite::Connection]. +fn get_storages(connection: &Connection, address: Address) -> eyre::Result> { + connection + .prepare("SELECT key, data FROM storage WHERE address = ?")? + .query((address.to_string(),))? + .mapped(|row| { + Ok(( + B256::from_str(row.get_ref(0)?.as_str()?), + U256::from_str(row.get_ref(1)?.as_str()?), + )) + }) + .map(|result| { + let (key, data) = result?; + Ok((key?, data?)) + }) + .collect() +} + +/// Get storage for the provided address by key using the database connection. Connection can be +/// either [rusqlite::Transaction] or [rusqlite::Connection]. +fn get_storage(connection: &Connection, address: Address, key: B256) -> eyre::Result> { + match connection.query_row::( + "SELECT data FROM storage WHERE address = ? 
AND key = ?", + (address.to_string(), key.to_string()), + |row| row.get(0), + ) { + Ok(data) => Ok(Some(U256::from_str(&data)?)), + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), + Err(e) => Err(e.into()), + } +} + +impl reth_revm::Database for Database { + type Error = eyre::Report; + + fn basic(&mut self, address: Address) -> Result, Self::Error> { + self.get_account(address) + } + + fn code_by_hash(&mut self, code_hash: B256) -> Result { + let bytecode = self.connection().query_row::( + "SELECT data FROM bytecode WHERE hash = ?", + (code_hash.to_string(),), + |row| row.get(0), + ); + match bytecode { + Ok(data) => Ok(Bytecode::new_raw(Bytes::from_str(&data).unwrap())), + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(Bytecode::default()), + Err(err) => Err(err.into()), + } + } + + fn storage(&mut self, address: Address, index: U256) -> Result { + get_storage(&self.connection(), address, index.into()).map(|data| data.unwrap_or_default()) + } + + fn block_hash(&mut self, number: U256) -> Result { + let block_hash = self.connection().query_row::( + "SELECT hash FROM block WHERE number = ?", + (number.to_string(),), + |row| row.get(0), + ); + match block_hash { + Ok(data) => Ok(B256::from_str(&data).unwrap()), + // No special handling for `QueryReturnedNoRows` is needed, because revm does block + // number bound checks on its own. + // See https://github.com/bluealloy/revm/blob/1ca3d39f6a9e9778f8eb0fcb74fe529345a531b4/crates/interpreter/src/instructions/host.rs#L106-L123. + Err(err) => Err(err.into()), + } + } +} diff --git a/examples/exex/rollup/src/main.rs b/examples/exex/rollup/src/main.rs new file mode 100644 index 0000000000000..cd2b0c94d3445 --- /dev/null +++ b/examples/exex/rollup/src/main.rs @@ -0,0 +1,586 @@ +//! Example of a simple rollup that derives its state from the L1 chain by executing transactions, +//! processing deposits and storing all related data in an SQLite database. +//! +//! 
The rollup contract accepts blocks of transactions and deposits of ETH and is deployed on +//! Holesky at [ROLLUP_CONTRACT_ADDRESS], see . + +use alloy_rlp::Decodable; +use alloy_sol_types::{sol, SolEventInterface, SolInterface}; +use db::Database; +use eyre::OptionExt; +use once_cell::sync::Lazy; +use reth_exex::{ExExContext, ExExEvent}; +use reth_interfaces::executor::BlockValidationError; +use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeComponents}; +use reth_node_ethereum::{EthEvmConfig, EthereumNode}; +use reth_primitives::{ + address, constants, + revm::env::fill_tx_env, + revm_primitives::{CfgEnvWithHandlerCfg, EVMError, ExecutionResult, ResultAndState}, + Address, Block, BlockWithSenders, Bytes, ChainSpec, ChainSpecBuilder, Genesis, Hardfork, + Header, Receipt, SealedBlockWithSenders, TransactionSigned, U256, +}; +use reth_provider::Chain; +use reth_revm::{ + db::{states::bundle_state::BundleRetention, BundleState}, + DatabaseCommit, StateBuilder, +}; +use reth_tracing::tracing::{debug, error, info}; +use rusqlite::Connection; +use std::sync::Arc; + +mod db; + +sol!(RollupContract, "rollup_abi.json"); +use RollupContract::{RollupContractCalls, RollupContractEvents}; + +const DATABASE_PATH: &str = "rollup.db"; +const ROLLUP_CONTRACT_ADDRESS: Address = address!("74ae65DF20cB0e3BF8c022051d0Cdd79cc60890C"); +const ROLLUP_SUBMITTER_ADDRESS: Address = address!("B01042Db06b04d3677564222010DF5Bd09C5A947"); +const CHAIN_ID: u64 = 17001; +static CHAIN_SPEC: Lazy> = Lazy::new(|| { + Arc::new( + ChainSpecBuilder::default() + .chain(CHAIN_ID.into()) + .genesis(Genesis::clique_genesis(CHAIN_ID, ROLLUP_SUBMITTER_ADDRESS)) + .shanghai_activated() + .build(), + ) +}); + +struct Rollup { + ctx: ExExContext, + db: Database, +} + +impl Rollup { + fn new(ctx: ExExContext, connection: Connection) -> eyre::Result { + let db = Database::new(connection)?; + Ok(Self { ctx, db }) + } + + async fn start(mut self) -> eyre::Result<()> { + // Process all new chain state 
notifications + while let Some(notification) = self.ctx.notifications.recv().await { + if let Some(reverted_chain) = notification.reverted_chain() { + self.revert(&reverted_chain)?; + } + + if let Some(committed_chain) = notification.committed_chain() { + self.commit(&committed_chain)?; + self.ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + } + } + + Ok(()) + } + + /// Process a new chain commit. + /// + /// This function decodes all transactions to the rollup contract into events, executes the + /// corresponding actions and inserts the results into the database. + fn commit(&mut self, chain: &Chain) -> eyre::Result<()> { + let events = decode_chain_into_rollup_events(chain); + + for (_, tx, event) in events { + match event { + // A new block is submitted to the rollup contract. + // The block is executed on top of existing rollup state and committed into the + // database. + RollupContractEvents::BlockSubmitted(_) => { + let call = RollupContractCalls::abi_decode(tx.input(), true)?; + + if let RollupContractCalls::submitBlock(RollupContract::submitBlockCall { + header, + blockData, + .. + }) = call + { + match execute_block(&mut self.db, &header, blockData) { + Ok((block, bundle, _, _)) => { + let block = block.seal_slow(); + self.db.insert_block_with_bundle(&block, bundle)?; + info!( + tx_hash = %tx.hash, + chain_id = %header.rollupChainId, + sequence = %header.sequence, + transactions = block.body.len(), + "Block submitted, executed and inserted into database" + ); + } + Err(err) => { + error!( + %err, + tx_hash = %tx.hash, + chain_id = %header.rollupChainId, + sequence = %header.sequence, + "Failed to execute block" + ); + } + } + } + } + // A deposit of ETH to the rollup contract. The deposit is added to the recipient's + // balance and committed into the database. 
+ RollupContractEvents::Enter(RollupContract::Enter { + token, + rollupRecipient, + amount, + }) => { + if token != Address::ZERO { + error!(tx_hash = %tx.hash, "Only ETH deposits are supported"); + continue + } + + self.db.upsert_account(rollupRecipient, |account| { + let mut account = account.unwrap_or_default(); + account.balance += amount; + Ok(account) + })?; + + info!( + tx_hash = %tx.hash, + %amount, + recipient = %rollupRecipient, + "Deposit", + ); + } + _ => (), + } + } + + Ok(()) + } + + /// Process a chain revert. + /// + /// This function decodes all transactions to the rollup contract into events, reverts the + /// corresponding actions and updates the database. + fn revert(&mut self, chain: &Chain) -> eyre::Result<()> { + let mut events = decode_chain_into_rollup_events(chain); + // Reverse the order of events to start reverting from the tip + events.reverse(); + + for (_, tx, event) in events { + match event { + // The block is reverted from the database. + RollupContractEvents::BlockSubmitted(_) => { + let call = RollupContractCalls::abi_decode(tx.input(), true)?; + + if let RollupContractCalls::submitBlock(RollupContract::submitBlockCall { + header, + .. + }) = call + { + self.db.revert_tip_block(header.sequence)?; + info!( + tx_hash = %tx.hash, + chain_id = %header.rollupChainId, + sequence = %header.sequence, + "Block reverted" + ); + } + } + // The deposit is subtracted from the recipient's balance. 
+ RollupContractEvents::Enter(RollupContract::Enter { + token, + rollupRecipient, + amount, + }) => { + if token != Address::ZERO { + error!(tx_hash = %tx.hash, "Only ETH deposits are supported"); + continue + } + + self.db.upsert_account(rollupRecipient, |account| { + let mut account = account.ok_or(eyre::eyre!("account not found"))?; + account.balance -= amount; + Ok(account) + })?; + + info!( + tx_hash = %tx.hash, + %amount, + recipient = %rollupRecipient, + "Deposit reverted", + ); + } + _ => (), + } + } + + Ok(()) + } +} + +/// Decode chain of blocks into a flattened list of receipt logs, filter only transactions to the +/// Rollup contract [ROLLUP_CONTRACT_ADDRESS] and extract [RollupContractEvents]. +fn decode_chain_into_rollup_events( + chain: &Chain, +) -> Vec<(&SealedBlockWithSenders, &TransactionSigned, RollupContractEvents)> { + chain + // Get all blocks and receipts + .blocks_and_receipts() + // Get all receipts + .flat_map(|(block, receipts)| { + block + .body + .iter() + .zip(receipts.iter().flatten()) + .map(move |(tx, receipt)| (block, tx, receipt)) + }) + // Filter only transactions to the rollup contract + .filter(|(_, tx, _)| tx.to() == Some(ROLLUP_CONTRACT_ADDRESS)) + // Get all logs + .flat_map(|(block, tx, receipt)| receipt.logs.iter().map(move |log| (block, tx, log))) + // Decode and filter rollup events + .filter_map(|(block, tx, log)| { + RollupContractEvents::decode_raw_log(log.topics(), &log.data.data, true) + .ok() + .map(|event| (block, tx, event)) + }) + .collect() +} + +/// Execute a rollup block and return (block with recovered senders)[BlockWithSenders], (bundle +/// state)[BundleState] and list of (receipts)[Receipt]. 
+fn execute_block( + db: &mut Database, + header: &RollupContract::BlockHeader, + block_data: Bytes, +) -> eyre::Result<(BlockWithSenders, BundleState, Vec, Vec)> { + if header.rollupChainId != U256::from(CHAIN_ID) { + eyre::bail!("Invalid rollup chain ID") + } + + let block_number = u64::try_from(header.sequence)?; + let parent_block = if !header.sequence.is_zero() { + db.get_block(header.sequence - U256::from(1))? + } else { + None + }; + + // Calculate base fee per gas for EIP-1559 transactions + let base_fee_per_gas = if CHAIN_SPEC.fork(Hardfork::London).transitions_at_block(block_number) { + constants::EIP1559_INITIAL_BASE_FEE + } else { + parent_block + .as_ref() + .ok_or(eyre::eyre!("parent block not found"))? + .header + .next_block_base_fee(CHAIN_SPEC.base_fee_params_at_block(block_number)) + .ok_or(eyre::eyre!("failed to calculate base fee"))? + }; + + // Construct header + let header = Header { + parent_hash: parent_block.map(|block| block.header.hash()).unwrap_or_default(), + number: block_number, + gas_limit: u64::try_from(header.gasLimit)?, + timestamp: u64::try_from(header.confirmBy)?, + base_fee_per_gas: Some(base_fee_per_gas), + ..Default::default() + }; + + // Decode block data, filter only transactions with the correct chain ID and recover senders + let transactions = Vec::::decode(&mut block_data.as_ref())? + .into_iter() + .filter(|tx| tx.chain_id() == Some(CHAIN_ID)) + .map(|tx| { + let sender = tx.recover_signer().ok_or(eyre::eyre!("failed to recover signer"))?; + Ok((tx, sender)) + }) + .collect::>>()?; + + // Execute block + let state = StateBuilder::new_with_database( + Box::new(db) as Box + Send> + ) + .with_bundle_update() + .build(); + let mut evm = EthEvmConfig::default().evm(state); + + // Set state clear flag. 
+ evm.db_mut().set_state_clear_flag( + CHAIN_SPEC.fork(Hardfork::SpuriousDragon).active_at_block(header.number), + ); + + let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(evm.cfg().clone(), evm.spec_id()); + EthEvmConfig::fill_cfg_and_block_env( + &mut cfg, + evm.block_mut(), + &CHAIN_SPEC, + &header, + U256::ZERO, + ); + *evm.cfg_mut() = cfg.cfg_env; + + let mut receipts = Vec::with_capacity(transactions.len()); + let mut executed_txs = Vec::with_capacity(transactions.len()); + let mut results = Vec::with_capacity(transactions.len()); + if !transactions.is_empty() { + let mut cumulative_gas_used = 0; + for (transaction, sender) in transactions { + // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, + // must be no greater than the block’s gasLimit. + let block_available_gas = header.gas_limit - cumulative_gas_used; + if transaction.gas_limit() > block_available_gas { + // TODO(alexey): what to do here? + return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { + transaction_gas_limit: transaction.gas_limit(), + block_available_gas, + } + .into()) + } + // Execute transaction. + // Fill revm structure. + fill_tx_env(evm.tx_mut(), &transaction, sender); + + let ResultAndState { result, state } = match evm.transact() { + Ok(result) => result, + Err(err) => { + match err { + EVMError::Transaction(err) => { + // if the transaction is invalid, we can skip it + debug!(%err, ?transaction, "Skipping invalid transaction"); + continue + } + err => { + // this is an error that we should treat as fatal for this attempt + eyre::bail!(err) + } + } + } + }; + + debug!(?transaction, ?result, ?state, "Executed transaction"); + + evm.db_mut().commit(state); + + // append gas used + cumulative_gas_used += result.gas_used(); + + // Push transaction changeset and calculate header bloom filter for receipt. 
+ #[allow(clippy::needless_update)] // side-effect of optimism fields + receipts.push(Receipt { + tx_type: transaction.tx_type(), + success: result.is_success(), + cumulative_gas_used, + logs: result.logs().iter().cloned().map(Into::into).collect(), + ..Default::default() + }); + + // append transaction to the list of executed transactions + executed_txs.push(transaction); + results.push(result); + } + + evm.db_mut().merge_transitions(BundleRetention::Reverts); + } + + // Construct block and recover senders + let block = Block { header, body: executed_txs, ..Default::default() } + .with_recovered_senders() + .ok_or_eyre("failed to recover senders")?; + + let bundle = evm.db_mut().take_bundle(); + + Ok((block, bundle, receipts, results)) +} + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let handle = builder + .node(EthereumNode::default()) + .install_exex("Rollup", move |ctx| async { + let connection = Connection::open(DATABASE_PATH)?; + + Ok(Rollup::new(ctx, connection)?.start()) + }) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) +} + +#[cfg(test)] +mod tests { + use std::time::{SystemTime, UNIX_EPOCH}; + + use alloy_sol_types::{sol, SolCall}; + use reth_interfaces::test_utils::generators::{self, sign_tx_with_key_pair}; + use reth_primitives::{ + bytes, + constants::ETH_TO_WEI, + public_key_to_address, + revm_primitives::{AccountInfo, ExecutionResult, Output, TransactTo, TxEnv}, + BlockNumber, Receipt, SealedBlockWithSenders, Transaction, TxEip2930, TxKind, U256, + }; + use reth_revm::Evm; + use rusqlite::Connection; + use secp256k1::{Keypair, Secp256k1}; + + use crate::{ + db::Database, execute_block, RollupContract::BlockHeader, CHAIN_ID, + ROLLUP_SUBMITTER_ADDRESS, + }; + + sol!( + WETH, + r#" +[ + { + "constant":true, + "inputs":[ + { + "name":"", + "type":"address" + } + ], + "name":"balanceOf", + "outputs":[ + { + "name":"", + "type":"uint256" + } + ], + "payable":false, + 
"stateMutability":"view", + "type":"function" + } +] + "# + ); + + #[test] + fn test_execute_block() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let mut database = Database::new(Connection::open_in_memory()?)?; + + // Create key pair + let secp = Secp256k1::new(); + let key_pair = Keypair::new(&secp, &mut generators::rng()); + let sender_address = public_key_to_address(key_pair.public_key()); + + // Deposit some ETH to the sender and insert it into database + database.upsert_account(sender_address, |_| { + Ok(AccountInfo { balance: U256::from(ETH_TO_WEI), nonce: 1, ..Default::default() }) + })?; + + // WETH deployment transaction + let (_, _, results) = execute_transaction( + &mut database, + key_pair, + 0, + Transaction::Eip2930(TxEip2930 { + chain_id: CHAIN_ID, + nonce: 1, + gas_limit: 1_500_000, + gas_price: 1_500_000_000, + to: TxKind::Create, + // WETH9 bytecode + input: bytes!("60606040526040805190810160405280600d81526020017f57726170706564204574686572000000000000000000000000000000000000008152506000908051906020019061004f9291906100c8565b506040805190810160405280600481526020017f57455448000000000000000000000000000000000000000000000000000000008152506001908051906020019061009b9291906100c8565b506012600260006101000a81548160ff021916908360ff16021790555034156100c357600080fd5b61016d565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061010957805160ff1916838001178555610137565b82800160010185558215610137579182015b8281111561013657825182559160200191906001019061011b565b5b5090506101449190610148565b5090565b61016a91905b8082111561016657600081600090555060010161014e565b5090565b90565b610c348061017c6000396000f3006060604052600436106100af576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde03146100b9578063095ea7b31461014757806318160ddd146101a157806323b872dd146101ca5780632e1a7d4d14610243578063313ce5671461026657806370a082311461029557806395d89b41146102e2578063a9059cbb14610370578063d0e30db0146103
ca578063dd62ed3e146103d4575b6100b7610440565b005b34156100c457600080fd5b6100cc6104dd565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561010c5780820151818401526020810190506100f1565b50505050905090810190601f1680156101395780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015257600080fd5b610187600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061057b565b604051808215151515815260200191505060405180910390f35b34156101ac57600080fd5b6101b461066d565b6040518082815260200191505060405180910390f35b34156101d557600080fd5b610229600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061068c565b604051808215151515815260200191505060405180910390f35b341561024e57600080fd5b61026460048080359060200190919050506109d9565b005b341561027157600080fd5b610279610b05565b604051808260ff1660ff16815260200191505060405180910390f35b34156102a057600080fd5b6102cc600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610b18565b6040518082815260200191505060405180910390f35b34156102ed57600080fd5b6102f5610b30565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561033557808201518184015260208101905061031a565b50505050905090810190601f1680156103625780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561037b57600080fd5b6103b0600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610bce565b604051808215151515815260200191505060405180910390f35b6103d2610440565b005b34156103df57600080fd5b61042a600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610be3565b6040518082815260200191505060405180910390f35b34600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050
819055503373ffffffffffffffffffffffffffffffffffffffff167fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c346040518082815260200191505060405180910390a2565b60008054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156105735780601f1061054857610100808354040283529160200191610573565b820191906000526020600020905b81548152906001019060200180831161055657829003601f168201915b505050505081565b600081600460003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925846040518082815260200191505060405180910390a36001905092915050565b60003073ffffffffffffffffffffffffffffffffffffffff1631905090565b600081600360008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054101515156106dc57600080fd5b3373ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16141580156107b457507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205414155b156108cf5781600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561084457600080fd5b81600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffff
ffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055505b81600360008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555081600360008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055508273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a3600190509392505050565b80600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515610a2757600080fd5b80600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055503373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f193505050501515610ab457600080fd5b3373ffffffffffffffffffffffffffffffffffffffff167f7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65826040518082815260200191505060405180910390a250565b600260009054906101000a900460ff1681565b60036020528060005260406000206000915090505481565b60018054600181600116156101000203166002900480601f016020809104026020016040519081016040528092919081815260200182805460018160011615610100020316600290048015610bc65780601f10610b9b57610100808354040283529160200191610bc6565b820191906000526020600020905b815481529060010190602001808311610ba957829003601f168201915b505050505081565b6000610bdb33848461068c565b905092915050565b60046020528160005260406000206020528060005260406000206000915091505054815600a165627a7a72305820deb4c2ccab3c2fdca32ab3f46728389c2fe2c165d5fafa07661e4e004f6c344a0029"), + ..Default::default() + }) + )?; + + let weth_address = match 
results.first() { + Some(ExecutionResult::Success { output: Output::Create(_, Some(address)), .. }) => { + *address + } + _ => eyre::bail!("WETH contract address not found"), + }; + + // WETH deposit transaction + execute_transaction( + &mut database, + key_pair, + 1, + Transaction::Eip2930(TxEip2930 { + chain_id: CHAIN_ID, + nonce: 2, + gas_limit: 50000, + gas_price: 1_500_000_000, + to: TxKind::Call(weth_address), + value: U256::from(0.5 * ETH_TO_WEI as f64), + input: bytes!("d0e30db0"), + ..Default::default() + }), + )?; + + // Verify WETH balance + let mut evm = Evm::builder() + .with_db(&mut database) + .with_tx_env(TxEnv { + caller: sender_address, + gas_limit: 50_000_000, + transact_to: TransactTo::Call(weth_address), + data: WETH::balanceOfCall::new((sender_address,)).abi_encode().into(), + ..Default::default() + }) + .build(); + let result = evm.transact().map_err(|err| eyre::eyre!(err))?.result; + assert_eq!( + result.output(), + Some(&U256::from(0.5 * ETH_TO_WEI as f64).to_be_bytes_vec().into()) + ); + drop(evm); + + // Verify nonce + let account = database.get_account(sender_address)?.unwrap(); + assert_eq!(account.nonce, 3); + + // Revert block with WETH deposit transaction + database.revert_tip_block(U256::from(1))?; + + // Verify WETH balance after revert + let mut evm = Evm::builder() + .with_db(&mut database) + .with_tx_env(TxEnv { + caller: sender_address, + gas_limit: 50_000_000, + transact_to: TransactTo::Call(weth_address), + data: WETH::balanceOfCall::new((sender_address,)).abi_encode().into(), + ..Default::default() + }) + .build(); + let result = evm.transact().map_err(|err| eyre::eyre!(err))?.result; + assert_eq!(result.output(), Some(&U256::ZERO.to_be_bytes_vec().into())); + drop(evm); + + // Verify nonce after revert + let account = database.get_account(sender_address)?.unwrap(); + assert_eq!(account.nonce, 2); + + Ok(()) + } + + fn execute_transaction( + database: &mut Database, + key_pair: Keypair, + sequence: BlockNumber, + tx: 
Transaction, + ) -> eyre::Result<(SealedBlockWithSenders, Vec, Vec)> { + let signed_tx = sign_tx_with_key_pair(key_pair, tx); + + // Construct block header and data + let block_header = BlockHeader { + rollupChainId: U256::from(CHAIN_ID), + sequence: U256::from(sequence), + confirmBy: U256::from(SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs()), + gasLimit: U256::from(30_000_000), + rewardAddress: ROLLUP_SUBMITTER_ADDRESS, + }; + let block_data = alloy_rlp::encode(vec![signed_tx.envelope_encoded()]); + + // Execute block and insert into database + let (block, bundle, receipts, results) = + execute_block(database, &block_header, block_data.into())?; + let block = block.seal_slow(); + database.insert_block_with_bundle(&block, bundle)?; + + Ok((block, receipts, results)) + } +} From a6661d695324b17e4bb59d7af1e343953fbadcab Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 30 Apr 2024 13:19:39 +0100 Subject: [PATCH 404/700] chore(exex): display `exex_id` log field using `Display` (#7994) --- crates/exex/src/manager.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 81e523718cf8b..1037395b8544d 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -331,7 +331,7 @@ impl Future for ExExManager { // handle incoming exex events for exex in self.exex_handles.iter_mut() { while let Poll::Ready(Some(event)) = exex.receiver.poll_recv(cx) { - debug!(exex_id = exex.id, ?event, "Received event from exex"); + debug!(exex_id = %exex.id, ?event, "Received event from exex"); exex.metrics.events_sent_total.increment(1); match event { ExExEvent::FinishedHeight(height) => exex.finished_height = Some(height), From 996f1efb731473d7c4c0fe3b0b5e96777a26e376 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 30 Apr 2024 13:25:25 +0100 Subject: [PATCH 405/700] docs(storage): chain of blocks should not be empty (#7809) Co-authored-by: Oliver Nordbjerg --- 
crates/storage/provider/src/chain.rs | 35 +++++++++++++++++++++------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index 5acd845997483..a596d93eace4d 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -16,6 +16,10 @@ use std::{borrow::Cow, collections::BTreeMap, fmt, ops::RangeInclusive}; /// changesets for those blocks (and their transactions), as well as the blocks themselves. /// /// Used inside the BlockchainTree. +/// +/// # Warning +/// +/// A chain of blocks should not be empty. #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct Chain { /// All blocks in this chain. @@ -33,16 +37,19 @@ pub struct Chain { impl Chain { /// Create new Chain from blocks and state. + /// + /// # Warning + /// + /// A chain of blocks should not be empty. pub fn new( blocks: impl IntoIterator, state: BundleStateWithReceipts, trie_updates: Option, ) -> Self { - Self { - blocks: BTreeMap::from_iter(blocks.into_iter().map(|b| (b.number, b))), - state, - trie_updates, - } + let blocks = BTreeMap::from_iter(blocks.into_iter().map(|b| (b.number, b))); + debug_assert!(!blocks.is_empty(), "Chain should have at least one block"); + + Self { blocks, state, trie_updates } } /// Create new Chain from a single block and its state. @@ -158,16 +165,20 @@ impl Chain { } /// Get the first block in this chain. + /// + /// # Panics + /// + /// If chain doesn't have any blocks. #[track_caller] pub fn first(&self) -> &SealedBlockWithSenders { - self.blocks.first_key_value().expect("Chain has at least one block for first").1 + self.blocks.first_key_value().expect("Chain should have at least one block").1 } /// Get the tip of the chain. /// - /// # Note + /// # Panics /// - /// Chains always have at least one block. + /// If chain doesn't have any blocks. 
#[track_caller] pub fn tip(&self) -> &SealedBlockWithSenders { self.blocks.last_key_value().expect("Chain should have at least one block").1 @@ -179,6 +190,10 @@ impl Chain { } /// Returns the range of block numbers in the chain. + /// + /// # Panics + /// + /// If chain doesn't have any blocks. pub fn range(&self) -> RangeInclusive { self.first().number..=self.tip().number } @@ -255,6 +270,10 @@ impl Chain { /// The second chain only contains the changes that were reverted on the first chain; however, /// it retains the up to date state as if the chains were one, i.e. the second chain is an /// extension of the first. + /// + /// # Panics + /// + /// If chain doesn't have any blocks. #[track_caller] pub fn split(mut self, split_at: ChainSplitTarget) -> ChainSplit { let chain_tip = *self.blocks.last_entry().expect("chain is never empty").key(); From d03150e13c170725bea9045ff985372eeadcb796 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 30 Apr 2024 14:31:16 +0200 Subject: [PATCH 406/700] chore: rm more unused deps (#7995) --- Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index e7bf2ec5c382b..ab330d87e7b2b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -333,7 +333,6 @@ parking_lot = "0.12" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation metrics = "0.21.1" modular-bitfield = "0.11.2" -hex-literal = "0.4" once_cell = "1.17" syn = "2.0" nybbles = "0.2.1" From 29e0e8150c9b979b2672ad257a628a82ef31e68c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 30 Apr 2024 15:05:57 +0200 Subject: [PATCH 407/700] feat: add with_canon_state_notification_sender fn (#7997) --- crates/blockchain-tree/src/blockchain_tree.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index b98cc664ad70d..eee4163c7cae9 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ 
b/crates/blockchain-tree/src/blockchain_tree.rs @@ -156,6 +156,18 @@ where }) } + /// Replaces the canon state notification sender. + /// + /// Caution: this will close any existing subscriptions to the previous sender. + #[doc(hidden)] + pub fn with_canon_state_notification_sender( + mut self, + canon_state_notification_sender: CanonStateNotificationSender, + ) -> Self { + self.canon_state_notification_sender = canon_state_notification_sender; + self + } + /// Set the sync metric events sender. /// /// A transmitter for sending synchronization metrics. This is used for monitoring the node's From b99d367c1a6c4e5c339eb5e5fc16a2ad77f099e7 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 30 Apr 2024 16:17:31 +0200 Subject: [PATCH 408/700] fix: ensure peer exists b4 marking txs as received (#7998) --- crates/net/network/src/transactions/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index f7d03520ff2fe..ee14e4c82f08c 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -945,14 +945,13 @@ where return } + let Some(peer) = self.peers.get_mut(&peer_id) else { return }; let mut transactions = transactions.0; // mark the transactions as received self.transaction_fetcher .remove_hashes_from_transaction_fetcher(transactions.iter().map(|tx| *tx.hash())); - let Some(peer) = self.peers.get_mut(&peer_id) else { return }; - // track that the peer knows these transaction, but only if this is a new broadcast. 
// If we received the transactions as the response to our `GetPooledTransactions`` // requests (based on received `NewPooledTransactionHashes`) then we already From d0973bb6dcc434271bfb83fd6f3dd0bd3bff9d6f Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 30 Apr 2024 16:23:32 +0200 Subject: [PATCH 409/700] fix: correct expiration for discv4 lookup requests (#7996) --- crates/net/discv4/src/config.rs | 6 ++++++ crates/net/discv4/src/lib.rs | 5 +++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/crates/net/discv4/src/config.rs b/crates/net/discv4/src/config.rs index 8da6db4b7fd3b..c9007a910c614 100644 --- a/crates/net/discv4/src/config.rs +++ b/crates/net/discv4/src/config.rs @@ -197,6 +197,12 @@ impl Discv4ConfigBuilder { self } + /// Sets the expiration duration for lookup neighbor requests + pub fn lookup_neighbours_expiration(&mut self, duration: Duration) -> &mut Self { + self.config.neighbours_expiration = duration; + self + } + /// Sets the expiration duration for a bond with a peer pub fn bond_expiration(&mut self, duration: Duration) -> &mut Self { self.config.bond_expiration = duration; diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 061e4a33b0100..ddc9564c22826 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -1421,7 +1421,7 @@ impl Discv4Service { let mut failed_lookups = Vec::new(); self.pending_lookup.retain(|node_id, (lookup_sent_at, _)| { - if now.duration_since(*lookup_sent_at) > self.config.ping_expiration { + if now.duration_since(*lookup_sent_at) > self.config.request_timeout { failed_lookups.push(*node_id); return false } @@ -1441,7 +1441,7 @@ impl Discv4Service { fn evict_failed_neighbours(&mut self, now: Instant) { let mut failed_neighbours = Vec::new(); self.pending_find_nodes.retain(|node_id, find_node_request| { - if now.duration_since(find_node_request.sent_at) > self.config.request_timeout { + if now.duration_since(find_node_request.sent_at) > 
self.config.neighbours_expiration { if !find_node_request.answered { // node actually responded but with fewer entries than expected, but we don't // treat this as an hard error since it responded. @@ -2549,6 +2549,7 @@ mod tests { let config = Discv4Config::builder() .request_timeout(Duration::from_millis(200)) .ping_expiration(Duration::from_millis(200)) + .lookup_neighbours_expiration(Duration::from_millis(200)) .add_eip868_pair("eth", fork_id) .build(); let (_disv4, mut service) = create_discv4_with_config(config).await; From 6d7cd53ad25f0b79c89fd60a4db2a0f2fe097efe Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 30 Apr 2024 16:30:00 +0200 Subject: [PATCH 410/700] chore(discv4): limit number of queued pings (#7999) Co-authored-by: Matthias Seitz --- crates/net/discv4/src/lib.rs | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index ddc9564c22826..b6fb978270fdf 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -119,10 +119,21 @@ const MIN_PACKET_SIZE: usize = 32 + 65 + 1; /// Concurrency factor for `FindNode` requests to pick `ALPHA` closest nodes, const ALPHA: usize = 3; -/// Maximum number of nodes to ping at concurrently. 2 full `Neighbours` responses with 16 _new_ -/// nodes. This will apply some backpressure in recursive lookups. +/// Maximum number of nodes to ping at concurrently. +/// +/// This corresponds to 2 full `Neighbours` responses with 16 _new_ nodes. This will apply some +/// backpressure in recursive lookups. const MAX_NODES_PING: usize = 2 * MAX_NODES_PER_BUCKET; +/// Maximum number of pings to keep queued. +/// +/// If we are currently sending too many pings, any new pings will be queued. To prevent unbounded +/// growth of the queue, the queue has a maximum capacity, after which any additional pings will be +/// discarded. +/// +/// This corresponds to 2 full `Neighbours` responses with 16 new nodes. 
+const MAX_QUEUED_PINGS: usize = 2 * MAX_NODES_PER_BUCKET; + /// The size of the datagram is limited [`MAX_PACKET_SIZE`], 16 nodes, as the discv4 specifies don't /// fit in one datagram. The safe number of nodes that always fit in a datagram is 12, with worst /// case all of them being IPv6 nodes. This is calculated by `(MAX_PACKET_SIZE - (header + expire + @@ -570,7 +581,7 @@ impl Discv4Service { _tasks: tasks, ingress: ingress_rx, egress: egress_tx, - queued_pings: Default::default(), + queued_pings: VecDeque::with_capacity(MAX_QUEUED_PINGS), pending_pings: Default::default(), pending_lookup: Default::default(), pending_find_nodes: Default::default(), @@ -1131,7 +1142,7 @@ impl Discv4Service { if self.pending_pings.len() < MAX_NODES_PING { self.send_ping(node, reason); - } else { + } else if self.queued_pings.len() < MAX_QUEUED_PINGS { self.queued_pings.push_back((node, reason)); } } From e158542d31bf576e8a6b6e61337b62f9839734cf Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 30 Apr 2024 17:33:30 +0200 Subject: [PATCH 411/700] feat: ensure offset size is at most 8 bytes (#8000) --- crates/storage/nippy-jar/src/error.rs | 5 +++++ crates/storage/nippy-jar/src/lib.rs | 17 +++++++++-------- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index c769f0db8630b..3763be3dcfe1f 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -37,6 +37,11 @@ pub enum NippyJarError { PHFMissing, #[error("nippy jar was built without an index")] UnsupportedFilterQuery, + #[error("the size of an offset must be at most 8 bytes, got {offset_size}")] + OffsetSizeTooBig { + /// The read offset size in number of bytes. 
+ offset_size: u64, + }, #[error("compression or decompression requires a bigger destination output")] OutputTooSmall, #[error("dictionary is not loaded.")] diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index cc4f2b0f51471..59fc586e4b395 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -486,14 +486,15 @@ impl DataReader { // SAFETY: File is read-only and its descriptor is kept alive as long as the mmap handle. let offset_mmap = unsafe { Mmap::map(&offset_file)? }; - Ok(Self { - data_file, - data_mmap, - offset_file, - // First byte is the size of one offset in bytes - offset_size: offset_mmap[0] as u64, - offset_mmap, - }) + // First byte is the size of one offset in bytes + let offset_size = offset_mmap[0] as u64; + + // Ensure that the size of an offset is at most 8 bytes. + if offset_size > 8 { + return Err(NippyJarError::OffsetSizeTooBig { offset_size }) + } + + Ok(Self { data_file, data_mmap, offset_file, offset_size, offset_mmap }) } /// Returns the offset for the requested data index From 9153d8848f56e8651c0becd95e2e0e565132b1da Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Wed, 1 May 2024 00:14:25 +0800 Subject: [PATCH 412/700] chore: remove unnecessary Debug implmentation (#8001) --- crates/rpc/rpc-builder/src/auth.rs | 14 +------------- crates/rpc/rpc-builder/src/lib.rs | 14 ++------------ 2 files changed, 3 insertions(+), 25 deletions(-) diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index 3726172576f42..72345aca6b6af 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -34,7 +34,6 @@ use reth_rpc_api::servers::*; use reth_tasks::{pool::BlockingTaskPool, TaskSpawner}; use reth_transaction_pool::TransactionPool; use std::{ - fmt, net::{IpAddr, Ipv4Addr, SocketAddr}, time::{Duration, SystemTime, UNIX_EPOCH}, }; @@ -218,6 +217,7 @@ impl AuthServerConfig { } /// Builder type for configuring an 
`AuthServerConfig`. +#[derive(Debug)] pub struct AuthServerConfigBuilder { socket_addr: Option, secret: JwtSecret, @@ -226,18 +226,6 @@ pub struct AuthServerConfigBuilder { ipc_endpoint: Option, } -impl fmt::Debug for AuthServerConfigBuilder { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("AuthServerConfig") - .field("socket_addr", &self.socket_addr) - .field("secret", &self.secret) - .field("server_config", &self.server_config) - .field("ipc_server_config", &self.ipc_server_config) - .field("ipc_endpoint", &self.ipc_endpoint) - .finish() - } -} - // === impl AuthServerConfigBuilder === impl AuthServerConfigBuilder { diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 4bd367060dbe4..cea80398efa0f 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -2113,7 +2113,7 @@ impl fmt::Debug for RpcServer { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RpcServer") .field("http", &self.ws_http.http_local_addr.is_some()) - .field("ws", &self.ws_http.http_local_addr.is_some()) + .field("ws", &self.ws_http.ws_local_addr.is_some()) .field("ipc", &self.ipc.is_some()) .finish() } @@ -2122,7 +2122,7 @@ impl fmt::Debug for RpcServer { /// A handle to the spawned servers. /// /// When this type is dropped or [RpcServerHandle::stop] has been called the server will be stopped. 
-#[derive(Clone)] +#[derive(Clone, Debug)] #[must_use = "Server stops if dropped"] pub struct RpcServerHandle { /// The address of the http/ws server @@ -2225,16 +2225,6 @@ impl RpcServerHandle { } } -impl fmt::Debug for RpcServerHandle { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RpcServerHandle") - .field("http", &self.http.is_some()) - .field("ws", &self.ws.is_some()) - .field("ipc", &self.ipc.is_some()) - .finish() - } -} - #[cfg(test)] mod tests { use super::*; From 581682605cc86d2c3eeaa69ab829340422c8f93b Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 30 Apr 2024 18:38:52 +0200 Subject: [PATCH 413/700] fix(net, discv4): call find_node with valid endpoint (#8002) --- crates/net/discv4/src/lib.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index b6fb978270fdf..1a942a5b97e4a 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -1387,7 +1387,16 @@ impl Discv4Service { BucketEntry::SelfEntry => { // we received our own node entry } - _ => self.find_node(&closest, ctx.clone()), + BucketEntry::Present(mut entry, _) => { + if entry.value_mut().has_endpoint_proof { + self.find_node(&closest, ctx.clone()); + } + } + BucketEntry::Pending(mut entry, _) => { + if entry.value().has_endpoint_proof { + self.find_node(&closest, ctx.clone()); + } + } } } } From d04d9556fa4954340e0c2518cc93f095c36631ff Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 30 Apr 2024 18:56:34 +0200 Subject: [PATCH 414/700] feat: make nodetypes stateless and move evm to components (#7992) --- crates/blockchain-tree/src/noop.rs | 9 + crates/evm/src/lib.rs | 2 +- crates/node-ethereum/src/node.rs | 44 +++-- crates/node-ethereum/tests/it/builder.rs | 2 +- crates/node-ethereum/tests/it/exex.rs | 2 +- crates/node/api/src/node.rs | 38 ++-- crates/node/builder/src/builder/mod.rs | 41 ++--- crates/node/builder/src/builder/states.rs | 
23 +-- crates/node/builder/src/components/builder.rs | 166 ++++++++++++++---- crates/node/builder/src/components/execute.rs | 34 ++++ crates/node/builder/src/components/mod.rs | 27 ++- crates/node/builder/src/launch/mod.rs | 64 ++++--- crates/node/builder/src/rpc.rs | 2 +- crates/optimism/node/src/node.rs | 62 +++++-- crates/optimism/node/tests/it/builder.rs | 2 +- crates/storage/provider/src/providers/mod.rs | 7 + examples/custom-engine-types/src/main.rs | 13 +- examples/custom-evm/src/main.rs | 29 +-- examples/custom-node-components/src/main.rs | 2 +- examples/custom-payload-builder/src/main.rs | 2 +- 20 files changed, 401 insertions(+), 170 deletions(-) create mode 100644 crates/node/builder/src/components/execute.rs diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index 9fa82025511ae..bb99f9b55b2b8 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -27,6 +27,15 @@ pub struct NoopBlockchainTree { pub canon_state_notification_sender: Option, } +impl NoopBlockchainTree { + /// Create a new NoopBlockchainTree with a canon state notification sender. + pub fn with_canon_state_notifications( + canon_state_notification_sender: CanonStateNotificationSender, + ) -> Self { + Self { canon_state_notification_sender: Some(canon_state_notification_sender) } + } +} + impl BlockchainTreeEngine for NoopBlockchainTree { fn buffer_block(&self, _block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { Ok(()) diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 78a76e54ccd5f..9179abc3355ab 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -80,7 +80,7 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// This represents the set of methods used to configure the EVM's environment before block /// execution. 
-pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone { +pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { /// The type of the transaction metadata that should be used to fill fields in the transaction /// environment. /// diff --git a/crates/node-ethereum/src/node.rs b/crates/node-ethereum/src/node.rs index 5a1a03554a0b1..4f52027b4509c 100644 --- a/crates/node-ethereum/src/node.rs +++ b/crates/node-ethereum/src/node.rs @@ -4,7 +4,9 @@ use crate::{EthEngineTypes, EthEvmConfig}; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_network::NetworkHandle; use reth_node_builder::{ - components::{ComponentsBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, + components::{ + ComponentsBuilder, ExecutorBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder, + }, node::{FullNodeTypes, NodeTypes}, BuilderContext, Node, PayloadBuilderConfig, }; @@ -23,8 +25,13 @@ pub struct EthereumNode; impl EthereumNode { /// Returns a [ComponentsBuilder] configured for a regular Ethereum node. 
- pub fn components( - ) -> ComponentsBuilder + pub fn components() -> ComponentsBuilder< + Node, + EthereumPoolBuilder, + EthereumPayloadBuilder, + EthereumNetworkBuilder, + EthereumExecutorBuilder, + > where Node: FullNodeTypes, { @@ -33,31 +40,48 @@ impl EthereumNode { .pool(EthereumPoolBuilder::default()) .payload(EthereumPayloadBuilder::default()) .network(EthereumNetworkBuilder::default()) + .executor(EthereumExecutorBuilder::default()) } } impl NodeTypes for EthereumNode { type Primitives = (); type Engine = EthEngineTypes; - type Evm = EthEvmConfig; - - fn evm_config(&self) -> Self::Evm { - EthEvmConfig::default() - } } impl Node for EthereumNode where N: FullNodeTypes, { - type ComponentsBuilder = - ComponentsBuilder; + type ComponentsBuilder = ComponentsBuilder< + N, + EthereumPoolBuilder, + EthereumPayloadBuilder, + EthereumNetworkBuilder, + EthereumExecutorBuilder, + >; fn components_builder(self) -> Self::ComponentsBuilder { Self::components() } } +/// A regular ethereum evm and executor builder. +#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct EthereumExecutorBuilder; + +impl ExecutorBuilder for EthereumExecutorBuilder +where + Node: FullNodeTypes, +{ + type EVM = EthEvmConfig; + + async fn build_evm(self, _ctx: &BuilderContext) -> eyre::Result { + Ok(EthEvmConfig::default()) + } +} + /// A basic ethereum transaction pool. 
/// /// This contains various settings that can be configured and take precedence over the node's diff --git a/crates/node-ethereum/tests/it/builder.rs b/crates/node-ethereum/tests/it/builder.rs index 1f45792367405..b48e58679bd91 100644 --- a/crates/node-ethereum/tests/it/builder.rs +++ b/crates/node-ethereum/tests/it/builder.rs @@ -13,7 +13,7 @@ fn test_basic_setup() { let msg = "On components".to_string(); let _builder = NodeBuilder::new(config) .with_database(db) - .with_types(EthereumNode::default()) + .with_types::() .with_components(EthereumNode::components()) .on_component_initialized(move |ctx| { let _provider = ctx.provider(); diff --git a/crates/node-ethereum/tests/it/exex.rs b/crates/node-ethereum/tests/it/exex.rs index bbab6d9dc744e..80366ba23db54 100644 --- a/crates/node-ethereum/tests/it/exex.rs +++ b/crates/node-ethereum/tests/it/exex.rs @@ -31,7 +31,7 @@ fn basic_exex() { let db = create_test_rw_db(); let _builder = NodeBuilder::new(config) .with_database(db) - .with_types(EthereumNode::default()) + .with_types::() .with_components(EthereumNode::components()) .install_exex("dummy", move |ctx| future::ok(DummyExEx { _ctx: ctx })) .check_launch(); diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 2eb14011f2b12..0a76f75046696 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -1,4 +1,4 @@ -//! Traits for configuring a node +//! Traits for configuring a node. use crate::{primitives::NodePrimitives, ConfigureEvm, EngineTypes}; use reth_db::{ @@ -15,21 +15,20 @@ use std::marker::PhantomData; /// The type that configures the essential types of an ethereum like node. /// /// This includes the primitive types of a node, the engine API types for communication with the -/// consensus layer, and the EVM configuration type for setting up the Ethereum Virtual Machine. +/// consensus layer. +/// +/// This trait is intended to be stateless and only define the types of the node. 
pub trait NodeTypes: Send + Sync + 'static { /// The node's primitive types, defining basic operations and structures. type Primitives: NodePrimitives; /// The node's engine types, defining the interaction with the consensus engine. type Engine: EngineTypes; - /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. - type Evm: ConfigureEvm; - - /// Returns the node's evm config. - fn evm_config(&self) -> Self::Evm; } /// A helper trait that is downstream of the [NodeTypes] trait and adds stateful components to the /// node. +/// +/// Its types are configured by node internally and are not intended to be user configurable. pub trait FullNodeTypes: NodeTypes + 'static { /// Underlying database type used by the node to store and retrieve data. type DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static; @@ -41,7 +40,7 @@ pub trait FullNodeTypes: NodeTypes + 'static { #[derive(Debug)] pub struct FullNodeTypesAdapter { /// An instance of the user configured node types. - pub types: Types, + pub types: PhantomData, /// The database type used by the node. pub db: PhantomData, /// The provider type used by the node. @@ -49,9 +48,15 @@ pub struct FullNodeTypesAdapter { } impl FullNodeTypesAdapter { - /// Create a new adapter from the given node types. - pub fn new(types: Types) -> Self { - Self { types, db: Default::default(), provider: Default::default() } + /// Create a new adapter with the configured types. 
+ pub fn new() -> Self { + Self { types: Default::default(), db: Default::default(), provider: Default::default() } + } +} + +impl Default for FullNodeTypesAdapter { + fn default() -> Self { + Self::new() } } @@ -63,11 +68,6 @@ where { type Primitives = Types::Primitives; type Engine = Types::Engine; - type Evm = Types::Evm; - - fn evm_config(&self) -> Self::Evm { - self.types.evm_config() - } } impl FullNodeTypes for FullNodeTypesAdapter @@ -85,9 +85,15 @@ pub trait FullNodeComponents: FullNodeTypes + 'static { /// The transaction pool of the node. type Pool: TransactionPool + Unpin; + /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. + type Evm: ConfigureEvm; + /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; + /// Returns the node's evm config. + fn evm_config(&self) -> &Self::Evm; + /// Returns the provider of the node. fn provider(&self) -> &Self::Provider; diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 7f898ca210bbc..815b138587be9 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -50,10 +50,10 @@ pub type RethFullAdapter = FullNodeTypesAdapter = FullNodeTypesAdapter = FullNodeTypesAdapter { @@ -187,12 +188,11 @@ where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, { /// Configures the types of the node. - pub fn with_types(self, types: T) -> NodeBuilderWithTypes> + pub fn with_types(self) -> NodeBuilderWithTypes> where T: NodeTypes, { - let types = FullNodeTypesAdapter::new(types); - NodeBuilderWithTypes::new(self.config, types, self.database) + NodeBuilderWithTypes::new(self.config, self.database) } /// Preconfigures the node with a specific node implementation. 
@@ -205,7 +205,7 @@ where where N: Node>, { - self.with_types(node.clone()).with_components(node.components_builder()) + self.with_types().with_components(node.components_builder()) } } @@ -236,15 +236,12 @@ where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, { /// Configures the types of the node. - pub fn with_types( - self, - types: T, - ) -> WithLaunchContext>> + pub fn with_types(self) -> WithLaunchContext>> where T: NodeTypes, { WithLaunchContext { - builder: self.builder.with_types(types), + builder: self.builder.with_types(), task_executor: self.task_executor, data_dir: self.data_dir, } @@ -260,7 +257,7 @@ where where N: Node>, { - self.with_types(node.clone()).with_components(node.components_builder()) + self.with_types().with_components(node.components_builder()) } /// Launches a preconfigured [Node] @@ -428,8 +425,6 @@ pub struct BuilderContext { pub(crate) config: NodeConfig, /// loaded config pub(crate) reth_config: reth_config::Config, - /// EVM config of the node - pub(crate) evm_config: Node::Evm, } impl BuilderContext { @@ -441,9 +436,8 @@ impl BuilderContext { data_dir: ChainPath, config: NodeConfig, reth_config: reth_config::Config, - evm_config: Node::Evm, ) -> Self { - Self { head, provider, executor, data_dir, config, reth_config, evm_config } + Self { head, provider, executor, data_dir, config, reth_config } } /// Returns the configured provider to interact with the blockchain. @@ -451,11 +445,6 @@ impl BuilderContext { &self.provider } - /// Returns the configured evm. - pub fn evm_config(&self) -> &Node::Evm { - &self.evm_config - } - /// Returns the current head of the blockchain at launch. 
pub fn head(&self) -> Head { self.head diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index b77588df4955b..753978de1968a 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -31,8 +31,8 @@ pub struct NodeBuilderWithTypes { impl NodeBuilderWithTypes { /// Creates a new instance of the node builder with the given configuration and types. - pub fn new(config: NodeConfig, types: T, database: T::DB) -> Self { - Self { config, adapter: NodeTypesAdapter::new(types, database) } + pub fn new(config: NodeConfig, database: T::DB) -> Self { + Self { config, adapter: NodeTypesAdapter::new(database) } } /// Advances the state of the node builder to the next state where all components are configured @@ -59,14 +59,12 @@ impl NodeBuilderWithTypes { pub(crate) struct NodeTypesAdapter { /// The database type used by the node. pub(crate) database: T::DB, - // TODO(mattsse): make this stateless - pub(crate) types: T, } impl NodeTypesAdapter { /// Create a new adapter from the given node types. - pub(crate) fn new(types: T, database: T::DB) -> Self { - Self { types, database } + pub(crate) fn new(database: T::DB) -> Self { + Self { database } } } @@ -85,18 +83,11 @@ pub struct NodeAdapter> { pub task_executor: TaskExecutor, /// The provider of the node. 
pub provider: T::Provider, - /// EVM config - pub evm: T::Evm, } impl> NodeTypes for NodeAdapter { type Primitives = T::Primitives; type Engine = T::Engine; - type Evm = T::Evm; - - fn evm_config(&self) -> Self::Evm { - self.evm.clone() - } } impl> FullNodeTypes for NodeAdapter { @@ -106,11 +97,16 @@ impl> FullNodeTypes for NodeAdapter impl> FullNodeComponents for NodeAdapter { type Pool = C::Pool; + type Evm = C::Evm; fn pool(&self) -> &Self::Pool { self.components.pool() } + fn evm_config(&self) -> &Self::Evm { + self.components.evm_config() + } + fn provider(&self) -> &Self::Provider { &self.provider } @@ -134,7 +130,6 @@ impl> Clone for NodeAdapter { components: self.components.clone(), task_executor: self.task_executor.clone(), provider: self.provider.clone(), - evm: self.evm.clone(), } } } diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index 1c963f0241449..d17cdc8eea88d 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -1,13 +1,16 @@ //! A generic [NodeComponentsBuilder] use crate::{ - components::{Components, NetworkBuilder, NodeComponents, PayloadServiceBuilder, PoolBuilder}, - BuilderContext, FullNodeTypes, + components::{ + Components, ExecutorBuilder, NetworkBuilder, NodeComponents, PayloadServiceBuilder, + PoolBuilder, + }, + BuilderContext, ConfigureEvm, FullNodeTypes, }; use reth_transaction_pool::TransactionPool; use std::{future::Future, marker::PhantomData}; -/// A generic, customizable [`NodeComponentsBuilder`]. +/// A generic, general purpose and customizable [`NodeComponentsBuilder`] implementation. /// /// This type is stateful and captures the configuration of the node's components. /// @@ -27,21 +30,31 @@ use std::{future::Future, marker::PhantomData}; /// All component builders are captured in the builder state and will be consumed once the node is /// launched. 
#[derive(Debug)] -pub struct ComponentsBuilder { +pub struct ComponentsBuilder { pool_builder: PoolB, payload_builder: PayloadB, network_builder: NetworkB, + executor_builder: ExecB, _marker: PhantomData, } -impl ComponentsBuilder { +impl + ComponentsBuilder +{ /// Configures the node types. - pub fn node_types(self) -> ComponentsBuilder + pub fn node_types(self) -> ComponentsBuilder where Types: FullNodeTypes, { - let Self { pool_builder, payload_builder, network_builder, _marker } = self; + let Self { + pool_builder, + payload_builder, + network_builder, + executor_builder: evm_builder, + _marker, + } = self; ComponentsBuilder { + executor_builder: evm_builder, pool_builder, payload_builder, network_builder, @@ -55,6 +68,7 @@ impl ComponentsBuilder ComponentsBuilder ComponentsBuilder ExecB) -> Self { + Self { + pool_builder: self.pool_builder, + payload_builder: self.payload_builder, + network_builder: self.network_builder, + executor_builder: f(self.executor_builder), _marker: self._marker, } } } -impl ComponentsBuilder +impl + ComponentsBuilder where Node: FullNodeTypes, { @@ -88,16 +116,32 @@ where /// /// This accepts a [PoolBuilder] instance that will be used to create the node's transaction /// pool. 
- pub fn pool(self, pool_builder: PB) -> ComponentsBuilder + pub fn pool( + self, + pool_builder: PB, + ) -> ComponentsBuilder where PB: PoolBuilder, { - let Self { pool_builder: _, payload_builder, network_builder, _marker } = self; - ComponentsBuilder { pool_builder, payload_builder, network_builder, _marker } + let Self { + pool_builder: _, + payload_builder, + network_builder, + executor_builder: evm_builder, + _marker, + } = self; + ComponentsBuilder { + pool_builder, + payload_builder, + network_builder, + executor_builder: evm_builder, + _marker, + } } } -impl ComponentsBuilder +impl + ComponentsBuilder where Node: FullNodeTypes, PoolB: PoolBuilder, @@ -106,57 +150,118 @@ where /// /// This accepts a [NetworkBuilder] instance that will be used to create the node's network /// stack. - pub fn network(self, network_builder: NB) -> ComponentsBuilder + pub fn network( + self, + network_builder: NB, + ) -> ComponentsBuilder where NB: NetworkBuilder, { - let Self { pool_builder, payload_builder, network_builder: _, _marker } = self; - ComponentsBuilder { pool_builder, payload_builder, network_builder, _marker } + let Self { + pool_builder, + payload_builder, + network_builder: _, + executor_builder: evm_builder, + _marker, + } = self; + ComponentsBuilder { + pool_builder, + payload_builder, + network_builder, + executor_builder: evm_builder, + _marker, + } } /// Configures the payload builder. /// /// This accepts a [PayloadServiceBuilder] instance that will be used to create the node's /// payload builder service. 
- pub fn payload(self, payload_builder: PB) -> ComponentsBuilder + pub fn payload( + self, + payload_builder: PB, + ) -> ComponentsBuilder where PB: PayloadServiceBuilder, { - let Self { pool_builder, payload_builder: _, network_builder, _marker } = self; - ComponentsBuilder { pool_builder, payload_builder, network_builder, _marker } + let Self { + pool_builder, + payload_builder: _, + network_builder, + executor_builder: evm_builder, + _marker, + } = self; + ComponentsBuilder { + pool_builder, + payload_builder, + network_builder, + executor_builder: evm_builder, + _marker, + } + } + + /// Configures the executor builder. + /// + /// This accepts a [ExecutorBuilder] instance that will be used to create the node's components + /// for execution. + pub fn executor( + self, + executor_builder: EB, + ) -> ComponentsBuilder + where + EB: ExecutorBuilder, + { + let Self { pool_builder, payload_builder, network_builder, executor_builder: _, _marker } = + self; + ComponentsBuilder { + pool_builder, + payload_builder, + network_builder, + executor_builder, + _marker, + } } } -impl NodeComponentsBuilder - for ComponentsBuilder +impl NodeComponentsBuilder + for ComponentsBuilder where Node: FullNodeTypes, PoolB: PoolBuilder, NetworkB: NetworkBuilder, PayloadB: PayloadServiceBuilder, + ExecB: ExecutorBuilder, { - type Components = Components; + type Components = Components; async fn build_components( self, context: &BuilderContext, ) -> eyre::Result { - let Self { pool_builder, payload_builder, network_builder, _marker } = self; + let Self { + pool_builder, + payload_builder, + network_builder, + executor_builder: evm_builder, + _marker, + } = self; + let evm_config = evm_builder.build_evm(context).await?; let pool = pool_builder.build_pool(context).await?; let network = network_builder.build_network(context, pool.clone()).await?; let payload_builder = payload_builder.spawn_payload_service(context, pool.clone()).await?; - Ok(Components { transaction_pool: pool, network, 
payload_builder }) + Ok(Components { transaction_pool: pool, evm_config, network, payload_builder }) } } -impl Default for ComponentsBuilder<(), (), (), ()> { +impl Default for ComponentsBuilder<(), (), (), (), ()> { fn default() -> Self { Self { pool_builder: (), payload_builder: (), network_builder: (), + executor_builder: (), _marker: Default::default(), } } @@ -167,9 +272,9 @@ impl Default for ComponentsBuilder<(), (), (), ()> { /// Implementers of this trait are responsible for building all the components of the node: See /// [NodeComponents]. /// -/// The [ComponentsBuilder] is a generic implementation of this trait that can be used to customize -/// certain components of the node using the builder pattern and defaults, e.g. Ethereum and -/// Optimism. +/// The [ComponentsBuilder] is a generic, general purpose implementation of this trait that can be +/// used to customize certain components of the node using the builder pattern and defaults, e.g. +/// Ethereum and Optimism. /// A type that's responsible for building the components of the node. pub trait NodeComponentsBuilder: Send { /// The components for the node with the given types @@ -182,14 +287,15 @@ pub trait NodeComponentsBuilder: Send { ) -> impl Future> + Send; } -impl NodeComponentsBuilder for F +impl NodeComponentsBuilder for F where Node: FullNodeTypes, F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: Future>> + Send, + Fut: Future>> + Send, Pool: TransactionPool + Unpin + 'static, + EVM: ConfigureEvm, { - type Components = Components; + type Components = Components; fn build_components( self, diff --git a/crates/node/builder/src/components/execute.rs b/crates/node/builder/src/components/execute.rs new file mode 100644 index 0000000000000..417423d5454d1 --- /dev/null +++ b/crates/node/builder/src/components/execute.rs @@ -0,0 +1,34 @@ +//! EVM component for the node builder. 
+use crate::{BuilderContext, FullNodeTypes}; +use reth_node_api::ConfigureEvm; +use std::future::Future; + +/// A type that knows how to build the executor types. +pub trait ExecutorBuilder: Send { + /// The EVM config to build. + type EVM: ConfigureEvm; + // TODO(mattsse): integrate `Executor` + + /// Creates the transaction pool. + fn build_evm( + self, + ctx: &BuilderContext, + ) -> impl Future> + Send; +} + +impl ExecutorBuilder for F +where + Node: FullNodeTypes, + EVM: ConfigureEvm, + F: FnOnce(&BuilderContext) -> Fut + Send, + Fut: Future> + Send, +{ + type EVM = EVM; + + fn build_evm( + self, + ctx: &BuilderContext, + ) -> impl Future> { + self(ctx) + } +} diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index ea087ece23b28..24d83da0da57b 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -7,8 +7,9 @@ //! //! Components depend on a fully type configured node: [FullNodeTypes](crate::node::FullNodeTypes). -use crate::FullNodeTypes; +use crate::{ConfigureEvm, FullNodeTypes}; pub use builder::*; +pub use execute::*; pub use network::*; pub use payload::*; pub use pool::*; @@ -17,11 +18,13 @@ use reth_payload_builder::PayloadBuilderHandle; use reth_transaction_pool::TransactionPool; mod builder; +mod execute; mod network; mod payload; mod pool; /// An abstraction over the components of a node, consisting of: +/// - evm and executor /// - transaction pool /// - network /// - payload builder. @@ -29,9 +32,15 @@ pub trait NodeComponents: Clone + Send + Sync + 'stati /// The transaction pool of the node. type Pool: TransactionPool + Unpin; + /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. + type Evm: ConfigureEvm; + /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; + /// Returns the node's evm config. 
+ fn evm_config(&self) -> &Self::Evm; + /// Returns the handle to the network fn network(&self) -> &NetworkHandle; @@ -43,26 +52,34 @@ pub trait NodeComponents: Clone + Send + Sync + 'stati /// /// This provides access to all the components of the node. #[derive(Debug)] -pub struct Components { +pub struct Components { /// The transaction pool of the node. pub transaction_pool: Pool, + /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. + pub evm_config: EVM, /// The network implementation of the node. pub network: NetworkHandle, /// The handle to the payload builder service. pub payload_builder: PayloadBuilderHandle, } -impl NodeComponents for Components +impl NodeComponents for Components where Node: FullNodeTypes, Pool: TransactionPool + Unpin + 'static, + EVM: ConfigureEvm, { type Pool = Pool; + type Evm = EVM; fn pool(&self) -> &Self::Pool { &self.transaction_pool } + fn evm_config(&self) -> &Self::Evm { + &self.evm_config + } + fn network(&self) -> &NetworkHandle { &self.network } @@ -72,14 +89,16 @@ where } } -impl Clone for Components +impl Clone for Components where Node: FullNodeTypes, Pool: TransactionPool, + EVM: ConfigureEvm, { fn clone(&self) -> Self { Self { transaction_pool: self.transaction_pool.clone(), + evm_config: self.evm_config.clone(), network: self.network.clone(), payload_builder: self.payload_builder.clone(), } diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 4f1f00e4e5b7e..bd81f83864907 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -40,6 +40,7 @@ use tokio::sync::{mpsc::unbounded_channel, oneshot}; pub mod common; pub use common::LaunchContext; +use reth_blockchain_tree::noop::NoopBlockchainTree; /// A general purpose trait that launches a new node of any kind. 
/// @@ -83,7 +84,7 @@ where ) -> eyre::Result { let Self { ctx } = self; let NodeBuilderWithComponents { - adapter: NodeTypesAdapter { types, database }, + adapter: NodeTypesAdapter { database }, components_builder, add_ons: NodeAddOns { hooks, rpc, exexs: installed_exex }, config, @@ -124,27 +125,22 @@ where let sync_metrics_listener = reth_stages::MetricsListener::new(sync_metrics_rx); ctx.task_executor().spawn_critical("stages metrics listener task", sync_metrics_listener); + // fetch the head block from the database + let head = ctx.lookup_head()?; + // Configure the blockchain tree for the node - let evm_config = types.evm_config(); let tree_config = BlockchainTreeConfig::default(); - let tree_externals = TreeExternals::new( - ctx.provider_factory().clone(), - consensus.clone(), - EvmProcessorFactory::new(ctx.chain_spec(), evm_config.clone()), - ); - let tree = BlockchainTree::new(tree_externals, tree_config, ctx.prune_modes())? - .with_sync_metrics_tx(sync_metrics_tx.clone()); - let canon_state_notification_sender = tree.canon_state_notification_sender(); - let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); - debug!(target: "reth::cli", "configured blockchain tree"); - - // fetch the head block from the database - let head = ctx.lookup_head()?; + // NOTE: This is a temporary workaround to provide the canon state notification sender to the components builder because there's a cyclic dependency between the blockchain provider and the tree component. 
This will be removed once the Blockchain provider no longer depends on an instance of the tree: + let (canon_state_notification_sender, _receiver) = + tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2); - // setup the blockchain provider - let blockchain_db = - BlockchainProvider::new(ctx.provider_factory().clone(), blockchain_tree.clone())?; + let blockchain_db = BlockchainProvider::new( + ctx.provider_factory().clone(), + Arc::new(NoopBlockchainTree::with_canon_state_notifications( + canon_state_notification_sender.clone(), + )), + )?; let builder_ctx = BuilderContext::new( head, @@ -153,19 +149,37 @@ where ctx.data_dir().clone(), ctx.node_config().clone(), ctx.toml_config().clone(), - evm_config.clone(), ); debug!(target: "reth::cli", "creating components"); let components = components_builder.build_components(&builder_ctx).await?; + let tree_externals = TreeExternals::new( + ctx.provider_factory().clone(), + consensus.clone(), + EvmProcessorFactory::new(ctx.chain_spec(), components.evm_config().clone()), + ); + let tree = BlockchainTree::new(tree_externals, tree_config, ctx.prune_modes())? + .with_sync_metrics_tx(sync_metrics_tx.clone()) + // Note: This is required because we need to ensure that both the components and the + // tree are using the same channel for canon state notifications. This will be removed + // once the Blockchain provider no longer depends on an instance of the tree + .with_canon_state_notification_sender(canon_state_notification_sender); + + let canon_state_notification_sender = tree.canon_state_notification_sender(); + let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); + + // Replace the tree component with the actual tree + let blockchain_db = blockchain_db.with_tree(blockchain_tree); + + debug!(target: "reth::cli", "configured blockchain tree"); + let NodeHooks { on_component_initialized, on_node_started, .. 
} = hooks; let node_adapter = NodeAdapter { components, task_executor: ctx.task_executor().clone(), provider: blockchain_db.clone(), - evm: evm_config.clone(), }; debug!(target: "reth::cli", "calling on_component_initialized hook"); @@ -225,7 +239,7 @@ where }); // send notifications from the blockchain tree to exex manager - let mut canon_state_notifications = blockchain_tree.subscribe_to_canonical_state(); + let mut canon_state_notifications = blockchain_db.subscribe_to_canonical_state(); let mut handle = exex_manager_handle.clone(); ctx.task_executor().spawn_critical( "exex manager blockchain tree notifications", @@ -305,7 +319,7 @@ address.to_string(), format_ether(alloc.balance)); consensus_engine_tx.clone(), canon_state_notification_sender, mining_mode, - evm_config.clone(), + node_adapter.components.evm_config().clone(), ) .build(); @@ -320,7 +334,7 @@ address.to_string(), format_ether(alloc.balance)); ctx.prune_config(), max_block, static_file_producer, - evm_config, + node_adapter.components.evm_config().clone(), pipeline_exex_handle, ) .await?; @@ -343,7 +357,7 @@ address.to_string(), format_ether(alloc.balance)); ctx.prune_config(), max_block, static_file_producer, - evm_config, + node_adapter.components.evm_config().clone(), pipeline_exex_handle, ) .await?; @@ -447,7 +461,7 @@ address.to_string(), format_ether(alloc.balance)); }); let full_node = FullNode { - evm_config: node_adapter.evm.clone(), + evm_config: node_adapter.components.evm_config().clone(), pool: node_adapter.components.pool().clone(), network: node_adapter.components.network().clone(), provider: node_adapter.provider.clone(), diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index a65dcfce5f8c1..3ac553fa3a544 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -274,7 +274,7 @@ where .with_network(node.network().clone()) .with_events(node.provider().clone()) .with_executor(node.task_executor().clone()) - 
.with_evm_config(node.evm_config()) + .with_evm_config(node.evm_config().clone()) .build_with_auth_server(module_config, engine_api); let mut registry = RpcRegistry { registry }; diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 8f6a3c19b47ad..a7b195f482779 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -6,10 +6,13 @@ use crate::{ OptimismEngineTypes, }; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; +use reth_evm::ConfigureEvm; use reth_evm_optimism::OptimismEvmConfig; use reth_network::{NetworkHandle, NetworkManager}; use reth_node_builder::{ - components::{ComponentsBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, + components::{ + ComponentsBuilder, ExecutorBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder, + }, node::{FullNodeTypes, NodeTypes}, BuilderContext, Node, PayloadBuilderConfig, }; @@ -38,7 +41,13 @@ impl OptimismNode { /// Returns the components for the given [RollupArgs]. 
pub fn components( args: RollupArgs, - ) -> ComponentsBuilder + ) -> ComponentsBuilder< + Node, + OptimismPoolBuilder, + OptimismPayloadBuilder, + OptimismNetworkBuilder, + OptimismExecutorBuilder, + > where Node: FullNodeTypes, { @@ -46,8 +55,12 @@ impl OptimismNode { ComponentsBuilder::default() .node_types::() .pool(OptimismPoolBuilder::default()) - .payload(OptimismPayloadBuilder::new(compute_pending_block)) + .payload(OptimismPayloadBuilder::new( + compute_pending_block, + OptimismEvmConfig::default(), + )) .network(OptimismNetworkBuilder { disable_txpool_gossip }) + .executor(OptimismExecutorBuilder::default()) } } @@ -55,8 +68,13 @@ impl Node for OptimismNode where N: FullNodeTypes, { - type ComponentsBuilder = - ComponentsBuilder; + type ComponentsBuilder = ComponentsBuilder< + N, + OptimismPoolBuilder, + OptimismPayloadBuilder, + OptimismNetworkBuilder, + OptimismExecutorBuilder, + >; fn components_builder(self) -> Self::ComponentsBuilder { let Self { args } = self; @@ -67,10 +85,21 @@ where impl NodeTypes for OptimismNode { type Primitives = (); type Engine = OptimismEngineTypes; - type Evm = OptimismEvmConfig; +} + +/// A regular optimism evm and executor builder. 
+#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct OptimismExecutorBuilder; + +impl ExecutorBuilder for OptimismExecutorBuilder +where + Node: FullNodeTypes, +{ + type EVM = OptimismEvmConfig; - fn evm_config(&self) -> Self::Evm { - OptimismEvmConfig::default() + async fn build_evm(self, _ctx: &BuilderContext) -> eyre::Result { + Ok(OptimismEvmConfig::default()) } } @@ -151,7 +180,7 @@ where /// A basic optimism payload service builder #[derive(Debug, Default, Clone)] -pub struct OptimismPayloadBuilder { +pub struct OptimismPayloadBuilder { /// By default the pending block equals the latest block /// to save resources and not leak txs from the tx-pool, /// this flag enables computing of the pending block @@ -161,19 +190,22 @@ pub struct OptimismPayloadBuilder { /// will use the payload attributes from the latest block. Note /// that this flag is not yet functional. pub compute_pending_block: bool, + /// The EVM configuration to use for the payload builder. + pub evm_config: EVM, } -impl OptimismPayloadBuilder { - /// Create a new instance with the given `compute_pending_block` flag. - pub const fn new(compute_pending_block: bool) -> Self { - Self { compute_pending_block } +impl OptimismPayloadBuilder { + /// Create a new instance with the given `compute_pending_block` flag and evm config. 
+ pub const fn new(compute_pending_block: bool, evm_config: EVM) -> Self { + Self { compute_pending_block, evm_config } } } -impl PayloadServiceBuilder for OptimismPayloadBuilder +impl PayloadServiceBuilder for OptimismPayloadBuilder where Node: FullNodeTypes, Pool: TransactionPool + Unpin + 'static, + EVM: ConfigureEvm, { async fn spawn_payload_service( self, @@ -182,7 +214,7 @@ where ) -> eyre::Result> { let payload_builder = reth_optimism_payload_builder::OptimismPayloadBuilder::new( ctx.chain_spec(), - ctx.evm_config().clone(), + self.evm_config, ) .set_compute_pending_block(self.compute_pending_block); let conf = ctx.payload_builder_config(); diff --git a/crates/optimism/node/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs index 64f96bd2d96c1..5d26e8bda850f 100644 --- a/crates/optimism/node/tests/it/builder.rs +++ b/crates/optimism/node/tests/it/builder.rs @@ -12,7 +12,7 @@ fn test_basic_setup() { let db = create_test_rw_db(); let _builder = NodeBuilder::new(config) .with_database(db) - .with_types(OptimismNode::default()) + .with_types::() .with_components(OptimismNode::components(Default::default())) .on_component_initialized(move |ctx| { let _provider = ctx.provider(); diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index f58f77dd02240..b0f43ba9f7189 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -89,6 +89,13 @@ impl BlockchainProvider { ) -> Self { Self { database, tree, chain_info: ChainInfoTracker::new(latest) } } + + /// Sets the treeviewer for the provider. 
+ #[doc(hidden)] + pub fn with_tree(mut self, tree: Arc) -> Self { + self.tree = tree; + self + } } impl BlockchainProvider diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 79639e1baa12b..ada28c0f3e7c9 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -37,9 +37,8 @@ use reth_node_api::{ EngineTypes, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, }; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; -use reth_node_ethereum::{ - node::{EthereumNetworkBuilder, EthereumPoolBuilder}, - EthEvmConfig, +use reth_node_ethereum::node::{ + EthereumExecutorBuilder, EthereumNetworkBuilder, EthereumPoolBuilder, }; use reth_payload_builder::{ error::PayloadBuilderError, EthBuiltPayload, EthPayloadBuilderAttributes, PayloadBuilderHandle, @@ -187,12 +186,6 @@ impl NodeTypes for MyCustomNode { type Primitives = (); // use the custom engine types type Engine = CustomEngineTypes; - // use the default ethereum EVM config - type Evm = EthEvmConfig; - - fn evm_config(&self) -> Self::Evm { - Self::Evm::default() - } } /// Implement the Node trait for the custom node @@ -207,6 +200,7 @@ where EthereumPoolBuilder, CustomPayloadServiceBuilder, EthereumNetworkBuilder, + EthereumExecutorBuilder, >; fn components_builder(self) -> Self::ComponentsBuilder { @@ -215,6 +209,7 @@ where .pool(EthereumPoolBuilder::default()) .payload(CustomPayloadServiceBuilder::default()) .network(EthereumNetworkBuilder::default()) + .executor(EthereumExecutorBuilder::default()) } } diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 6c80c9a7419a7..e5362c8081cbf 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -3,7 +3,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] use reth::{ - builder::{node::NodeTypes, NodeBuilder}, + builder::{components::ExecutorBuilder, BuilderContext, NodeBuilder}, 
primitives::{ address, revm_primitives::{CfgEnvWithHandlerCfg, Env, PrecompileResult, TxEnv}, @@ -17,9 +17,9 @@ use reth::{ }, tasks::TaskManager, }; -use reth_node_api::{ConfigureEvm, ConfigureEvmEnv}; +use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeTypes}; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; -use reth_node_ethereum::{EthEngineTypes, EthEvmConfig, EthereumNode}; +use reth_node_ethereum::{EthEvmConfig, EthereumNode}; use reth_primitives::{Chain, ChainSpec, Genesis, Header, Transaction}; use reth_tracing::{RethTracer, Tracer}; use std::sync::Arc; @@ -104,18 +104,19 @@ impl ConfigureEvm for MyEvmConfig { } } -#[derive(Debug, Clone, Default)] +/// A regular ethereum evm and executor builder. +#[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] -struct MyCustomNode; +pub struct MyExecutorBuilder; -/// Configure the node types -impl NodeTypes for MyCustomNode { - type Primitives = (); - type Engine = EthEngineTypes; - type Evm = MyEvmConfig; +impl ExecutorBuilder for MyExecutorBuilder +where + Node: FullNodeTypes, +{ + type EVM = MyEvmConfig; - fn evm_config(&self) -> Self::Evm { - Self::Evm::default() + async fn build_evm(self, _ctx: &BuilderContext) -> eyre::Result { + Ok(MyEvmConfig::default()) } } @@ -140,8 +141,8 @@ async fn main() -> eyre::Result<()> { let handle = NodeBuilder::new(node_config) .testing_node(tasks.executor()) - .with_types(MyCustomNode::default()) - .with_components(EthereumNode::components()) + .with_types::() + .with_components(EthereumNode::components().executor(MyExecutorBuilder::default())) .launch() .await .unwrap(); diff --git a/examples/custom-node-components/src/main.rs b/examples/custom-node-components/src/main.rs index 96672807dc751..a6db90674d142 100644 --- a/examples/custom-node-components/src/main.rs +++ b/examples/custom-node-components/src/main.rs @@ -19,7 +19,7 @@ fn main() { .run(|builder, _| async move { let handle = builder // use the default ethereum node types - 
.with_types(EthereumNode::default()) + .with_types::() // Configure the components of the node // use default ethereum components but use our custom pool .with_components(EthereumNode::components().pool(CustomPoolBuilder::default())) diff --git a/examples/custom-payload-builder/src/main.rs b/examples/custom-payload-builder/src/main.rs index 8e028771b9757..2c468c34af183 100644 --- a/examples/custom-payload-builder/src/main.rs +++ b/examples/custom-payload-builder/src/main.rs @@ -73,7 +73,7 @@ fn main() { Cli::parse_args() .run(|builder, _| async move { let handle = builder - .with_types(EthereumNode::default()) + .with_types::() // Configure the components of the node // use default ethereum components but use our custom payload builder .with_components( From afbb265b474cc4eb256f7bde2889a01e1a2ea33d Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 30 Apr 2024 18:57:00 +0200 Subject: [PATCH 415/700] refactor: use `reth_rpc_types` `BlockId` in optimism rpc (#8003) --- crates/rpc/rpc-api/src/optimism.rs | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/crates/rpc/rpc-api/src/optimism.rs b/crates/rpc/rpc-api/src/optimism.rs index 80d60415d4737..3ff7c6ce3431a 100644 --- a/crates/rpc/rpc-api/src/optimism.rs +++ b/crates/rpc/rpc-api/src/optimism.rs @@ -2,18 +2,10 @@ #![allow(unreachable_pub)] use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_primitives::{Address, BlockNumber, ChainId, B256}; -use reth_rpc_types::BlockNumberOrTag; +use reth_rpc_types::{BlockId, BlockNumberOrTag}; use serde::{Deserialize, Serialize}; use std::{collections::HashMap, net::IpAddr}; -/// todo: move to reth_rpc_types - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct BlockId { - pub hash: B256, - pub number: BlockNumber, -} - // https://github.com/ethereum-optimism/optimism/blob/develop/op-service/eth/id.go#L33 #[derive(Clone, Debug, PartialEq, Eq, Serialize, 
Deserialize)] #[serde(rename_all = "camelCase")] @@ -329,19 +321,19 @@ mod tests { #[test] fn test_output_response() { - let output_response_json = r#"{"version":"0x0000000000000000000000000000000000000000000000000000000000000000","outputRoot":"0xf1119e7d0fef8c54ab799be80fc61f503cea4e5c0aa1cf7ac104ef3a104f3bd1","blockRef":{"hash":"0x6d39c46aabc847f5f2664a22bbc5f65a57286603095a9ebc946d1ed19ef4925c","number":118818299,"parentHash":"0x8a0876a165da864c223d30e444b1c003fb59920c88dfb12157c0f83826e0f8ed","timestamp":1713235375,"l1origin":{"hash":"0x807da416f5aaa26fa228e0cf53e76fab783b56d7996c717663335b40e0b28824","number":19665136},"sequenceNumber":4},"withdrawalStorageRoot":"0x5c9a29a8ad2ecf97fb4bdea74c715fd2c13fa87d4861414478bc4579601c3585","stateRoot":"0x16849c0a93d00bb2d7ceacda11a1478854d2bbb0a377b4d6793b67a3f05eb6fe","syncStatus":{"current_l1":{"hash":"0x2f0f186d0fece338aa563f5dfc49a73cba5607445ff87aca833fd1d6833c5e05","number":19661406,"parentHash":"0x2c7c564d2960c8035fa6962ebf071668fdcdf8ca004bca5adfd04166ce32aacc","timestamp":1713190115},"current_l1_finalized":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"head_l1":{"hash":"0xf98493dcc3d82fe9af339c0a81b0f96172a56764f9abcff464c740e0cb3ccee7","number":19665175,"parentHash":"0xfbab86e5b807916c7ddfa395db794cdf4162128b9770eb8eb829679d81d74328","timestamp":1713235763},"safe_l1":{"hash":"0xfb8f07e551eb65c3282aaefe9a4954c15672e0077b2a5a1db18fcd2126cbc922","number":19665115,"parentHash":"0xfc0d62788fb9cda1cacb54a0e53ca398289436a6b68d1ba69db2942500b4ce5f","timestamp":1713235031},"finalized_l1":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"unsafe_l2":{"hash":"0x3540517a260316758a4872f7626e8b9e009968b6d8cfa9c11bfd3a03e7656
bd5","number":118818499,"parentHash":"0x09f30550e6d6f217691e185bf1a2b4665b83f43fc8dbcc68c0bfd513e6805590","timestamp":1713235775,"l1origin":{"hash":"0x036003c1c6561123a2f6573b7a34e9598bd023199e259d91765ee2c8677d9c07","number":19665170},"sequenceNumber":0},"safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1},"finalized_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1},"pending_safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1}}}"#; + let output_response_json = 
r#"{"version":"0x0000000000000000000000000000000000000000000000000000000000000000","outputRoot":"0xf1119e7d0fef8c54ab799be80fc61f503cea4e5c0aa1cf7ac104ef3a104f3bd1","blockRef":{"hash":"0x6d39c46aabc847f5f2664a22bbc5f65a57286603095a9ebc946d1ed19ef4925c","number":118818299,"parentHash":"0x8a0876a165da864c223d30e444b1c003fb59920c88dfb12157c0f83826e0f8ed","timestamp":1713235375,"l1origin":{"blockHash":"0x807da416f5aaa26fa228e0cf53e76fab783b56d7996c717663335b40e0b28824"},"sequenceNumber":4},"withdrawalStorageRoot":"0x5c9a29a8ad2ecf97fb4bdea74c715fd2c13fa87d4861414478bc4579601c3585","stateRoot":"0x16849c0a93d00bb2d7ceacda11a1478854d2bbb0a377b4d6793b67a3f05eb6fe","syncStatus":{"current_l1":{"hash":"0x2f0f186d0fece338aa563f5dfc49a73cba5607445ff87aca833fd1d6833c5e05","number":19661406,"parentHash":"0x2c7c564d2960c8035fa6962ebf071668fdcdf8ca004bca5adfd04166ce32aacc","timestamp":1713190115},"current_l1_finalized":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"head_l1":{"hash":"0xf98493dcc3d82fe9af339c0a81b0f96172a56764f9abcff464c740e0cb3ccee7","number":19665175,"parentHash":"0xfbab86e5b807916c7ddfa395db794cdf4162128b9770eb8eb829679d81d74328","timestamp":1713235763},"safe_l1":{"hash":"0xfb8f07e551eb65c3282aaefe9a4954c15672e0077b2a5a1db18fcd2126cbc922","number":19665115,"parentHash":"0xfc0d62788fb9cda1cacb54a0e53ca398289436a6b68d1ba69db2942500b4ce5f","timestamp":1713235031},"finalized_l1":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"unsafe_l2":{"hash":"0x3540517a260316758a4872f7626e8b9e009968b6d8cfa9c11bfd3a03e7656bd5","number":118818499,"parentHash":"0x09f30550e6d6f217691e185bf1a2b4665b83f43fc8dbcc68c0bfd513e6805590","timestamp":1713235775,"l1origin":{"blockHash":"0x03600
3c1c6561123a2f6573b7a34e9598bd023199e259d91765ee2c8677d9c07"},"sequenceNumber":0},"safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"blockHash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce"},"sequenceNumber":1},"finalized_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"blockHash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce"},"sequenceNumber":1},"pending_safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"blockHash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce"},"sequenceNumber":1}}}"#; test_helper::(output_response_json); } #[test] fn serialize_sync_status() { - let sync_status_json = 
r#"{"current_l1":{"hash":"0x2f0f186d0fece338aa563f5dfc49a73cba5607445ff87aca833fd1d6833c5e05","number":19661406,"parentHash":"0x2c7c564d2960c8035fa6962ebf071668fdcdf8ca004bca5adfd04166ce32aacc","timestamp":1713190115},"current_l1_finalized":{"hash":"0x4d769506bbfe27051715225af5ec4189f6bbd235b6d32db809dd8f5a03737b03","number":19665052,"parentHash":"0xc6324687f2baf8cc48eebd15df3a461b2b2838b5f5b16615531fc31788edb8c4","timestamp":1713234263},"head_l1":{"hash":"0xfc5ab77c6c08662a3b4d85b8c86010b7aecfc2c0369e4458f80357530db8e919","number":19665141,"parentHash":"0x099792a293002b987f3507524b28614f399b2b5ed607788520963c251844113c","timestamp":1713235355},"safe_l1":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"finalized_l1":{"hash":"0x4d769506bbfe27051715225af5ec4189f6bbd235b6d32db809dd8f5a03737b03","number":19665052,"parentHash":"0xc6324687f2baf8cc48eebd15df3a461b2b2838b5f5b16615531fc31788edb8c4","timestamp":1713234263},"unsafe_l2":{"hash":"0x6d39c46aabc847f5f2664a22bbc5f65a57286603095a9ebc946d1ed19ef4925c","number":118818299,"parentHash":"0x8a0876a165da864c223d30e444b1c003fb59920c88dfb12157c0f83826e0f8ed","timestamp":1713235375,"l1origin":{"hash":"0x807da416f5aaa26fa228e0cf53e76fab783b56d7996c717663335b40e0b28824","number":19665136},"sequenceNumber":4},"safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1},"finalized_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6
ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1},"pending_safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1}}"#; + let sync_status_json = r#"{"current_l1":{"hash":"0x2f0f186d0fece338aa563f5dfc49a73cba5607445ff87aca833fd1d6833c5e05","number":19661406,"parentHash":"0x2c7c564d2960c8035fa6962ebf071668fdcdf8ca004bca5adfd04166ce32aacc","timestamp":1713190115},"current_l1_finalized":{"hash":"0x4d769506bbfe27051715225af5ec4189f6bbd235b6d32db809dd8f5a03737b03","number":19665052,"parentHash":"0xc6324687f2baf8cc48eebd15df3a461b2b2838b5f5b16615531fc31788edb8c4","timestamp":1713234263},"head_l1":{"hash":"0xfc5ab77c6c08662a3b4d85b8c86010b7aecfc2c0369e4458f80357530db8e919","number":19665141,"parentHash":"0x099792a293002b987f3507524b28614f399b2b5ed607788520963c251844113c","timestamp":1713235355},"safe_l1":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"finalized_l1":{"hash":"0x4d769506bbfe27051715225af5ec4189f6bbd235b6d32db809dd8f5a03737b03","number":19665052,"parentHash":"0xc6324687f2baf8cc48eebd15df3a461b2b2838b5f5b16615531fc31788edb8c4","timestamp":1713234263},"unsafe_l2":{"hash":"0x6d39c46aabc847f5f2664a22bbc5f65a57286603095a9ebc946d1ed19ef4925c","number":118818299,"parentHash":"0x8a0876a165da864c223d30e444b1c003fb59920c88dfb12157c0f83826e0f8ed","timestamp":1713235375,"l1origin":{"blockHash":"0x807da416f5aaa26fa228e0cf53e76fab783b56d7996c717663335b40e0b28824"},"sequenceNumber":4},"safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash
":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"blockHash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce"},"sequenceNumber":1},"finalized_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"blockHash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce"},"sequenceNumber":1},"pending_safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"blockHash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce"},"sequenceNumber":1}}"#; test_helper::(sync_status_json); } #[test] fn test_rollup_config() { - let rollup_config_json = r#"{"genesis":{"l1":{"hash":"0x438335a20d98863a4c0c97999eb2481921ccd28553eac6f913af7c12aec04108","number":17422590},"l2":{"hash":"0xdbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3","number":105235063},"l2_time":1686068903,"system_config":{"batcherAddr":"0x6887246668a3b87f54deb3b94ba47a6f63f32985","overhead":"0x00000000000000000000000000000000000000000000000000000000000000bc","scalar":"0x00000000000000000000000000000000000000000000000000000000000a6fe0","gasLimit":30000000}},"block_time":2,"max_sequencer_drift":600,"seq_window_size":3600,"channel_timeout":300,"l1_chain_id":1,"l2_chain_id":10,"regolith_time":0,"canyon_time":1704992401,"delta_time":1708560000,"ecotone_time":1710374401,"batch_inbox_address":"0xff00000000000000000000000000000000000010","deposit_contract_address":"0xbeb5fc579115071764c7423a4f12edde41f106ed","l1_system_config_address":"0x229047fed2591dbec1ef1118d64f7af3db9eb290","protocol_versions_address":"0x8062abc286f5e7d9428a0ccb9abd71e50d93b935","da_challenge_address":"0x000000000000
0000000000000000000000000000","da_challenge_window":0,"da_resolve_window":0,"use_plasma":false}"#; + let rollup_config_json = r#"{"genesis":{"l1":{"blockHash":"0x438335a20d98863a4c0c97999eb2481921ccd28553eac6f913af7c12aec04108"},"l2":{"blockHash":"0xdbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3"},"l2_time":1686068903,"system_config":{"batcherAddr":"0x6887246668a3b87f54deb3b94ba47a6f63f32985","overhead":"0x00000000000000000000000000000000000000000000000000000000000000bc","scalar":"0x00000000000000000000000000000000000000000000000000000000000a6fe0","gasLimit":30000000}},"block_time":2,"max_sequencer_drift":600,"seq_window_size":3600,"channel_timeout":300,"l1_chain_id":1,"l2_chain_id":10,"regolith_time":0,"canyon_time":1704992401,"delta_time":1708560000,"ecotone_time":1710374401,"batch_inbox_address":"0xff00000000000000000000000000000000000010","deposit_contract_address":"0xbeb5fc579115071764c7423a4f12edde41f106ed","l1_system_config_address":"0x229047fed2591dbec1ef1118d64f7af3db9eb290","protocol_versions_address":"0x8062abc286f5e7d9428a0ccb9abd71e50d93b935","da_challenge_address":"0x0000000000000000000000000000000000000000","da_challenge_window":0,"da_resolve_window":0,"use_plasma":false}"#; test_helper::(rollup_config_json); } From e09895257a0bb30d74139a88fbc850b0a655812e Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Tue, 30 Apr 2024 11:41:01 -0600 Subject: [PATCH 416/700] feat: add `Deref` impl for `PayloadTaskGuard` (#7971) --- crates/payload/basic/src/lib.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index c32961c72cad4..8da9163d0cd8b 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -35,6 +35,7 @@ use revm::{ }; use std::{ future::Future, + ops::Deref, pin::Pin, sync::{atomic::AtomicBool, Arc}, task::{Context, Poll}, @@ -228,6 +229,14 @@ pub struct PrecachedState { #[derive(Debug, Clone)] pub struct 
PayloadTaskGuard(Arc); +impl Deref for PayloadTaskGuard { + type Target = Semaphore; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + // === impl PayloadTaskGuard === impl PayloadTaskGuard { @@ -385,7 +394,7 @@ where let builder = this.builder.clone(); this.executor.spawn_blocking(Box::pin(async move { // acquire the permit for executing the task - let _permit = guard.0.acquire().await; + let _permit = guard.acquire().await; let args = BuildArguments { client, pool, From d532217afbf201bc4cb0c4557e742721af53dcab Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 30 Apr 2024 21:18:19 +0200 Subject: [PATCH 417/700] fix(op): discv5 ENR (#7991) --- crates/net/discv5/src/config.rs | 21 +- crates/net/discv5/src/error.rs | 3 + crates/net/discv5/src/filter.rs | 15 +- crates/net/discv5/src/lib.rs | 39 +- crates/net/discv5/src/metrics.rs | 23 +- crates/net/discv5/src/network_key.rs | 11 - crates/net/discv5/src/network_stack_id.rs | 33 + crates/net/network/src/config.rs | 15 +- crates/primitives/src/chain/mod.rs | 2 +- crates/primitives/src/chain/spec.rs | 23 +- crates/primitives/src/lib.rs | 2 +- etc/grafana/dashboards/reth-discovery.json | 1959 +++++++++++--------- 12 files changed, 1181 insertions(+), 965 deletions(-) delete mode 100644 crates/net/discv5/src/network_key.rs create mode 100644 crates/net/discv5/src/network_stack_id.rs diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 266b530ef07a9..371d40953f5ba 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -9,9 +9,9 @@ use std::{ use derive_more::Display; use discv5::ListenConfig; use multiaddr::{Multiaddr, Protocol}; -use reth_primitives::{Bytes, EnrForkIdEntry, ForkId, NodeRecord, MAINNET}; +use reth_primitives::{Bytes, EnrForkIdEntry, ForkId, NodeRecord}; -use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys, network_key}; +use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys, NetworkStackId}; /// 
Default interval in seconds at which to run a lookup up query. /// @@ -50,7 +50,7 @@ impl ConfigBuilder { let Config { discv5_config, bootstrap_nodes, - fork: (network_key, fork_id), + fork, tcp_port, other_enr_kv_pairs, lookup_interval, @@ -60,7 +60,7 @@ impl ConfigBuilder { Self { discv5_config: Some(discv5_config), bootstrap_nodes, - fork: Some((network_key, fork_id.fork_id)), + fork: fork.map(|(key, fork_id)| (key, fork_id.fork_id)), tcp_port, other_enr_kv_pairs, lookup_interval: Some(lookup_interval), @@ -117,8 +117,8 @@ impl ConfigBuilder { /// Set fork ID kv-pair to set in local [`Enr`](discv5::enr::Enr). This lets peers on discovery /// network know which chain this node belongs to. - pub fn fork(mut self, network_key: &'static [u8], fork_id: ForkId) -> Self { - self.fork = Some((network_key, fork_id)); + pub fn fork(mut self, fork_key: &'static [u8], fork_id: ForkId) -> Self { + self.fork = Some((fork_key, fork_id)); self } @@ -160,13 +160,12 @@ impl ConfigBuilder { let discv5_config = discv5_config .unwrap_or_else(|| discv5::ConfigBuilder::new(ListenConfig::default()).build()); - let (network_key, fork_id) = fork.unwrap_or((network_key::ETH, MAINNET.latest_fork_id())); - let fork = (network_key, fork_id.into()); + let fork = fork.map(|(key, fork_id)| (key, fork_id.into())); let lookup_interval = lookup_interval.unwrap_or(DEFAULT_SECONDS_LOOKUP_INTERVAL); - let discovered_peer_filter = - discovered_peer_filter.unwrap_or_else(|| MustNotIncludeKeys::new(&[network_key::ETH2])); + let discovered_peer_filter = discovered_peer_filter + .unwrap_or_else(|| MustNotIncludeKeys::new(&[NetworkStackId::ETH2])); Config { discv5_config, @@ -190,7 +189,7 @@ pub struct Config { pub(super) bootstrap_nodes: HashSet, /// Fork kv-pair to set in local node record. Identifies which network/chain/fork the node /// belongs, e.g. `(b"opstack", ChainId)` or `(b"eth", [ForkId])`. 
- pub(super) fork: (&'static [u8], EnrForkIdEntry), + pub(super) fork: Option<(&'static [u8], EnrForkIdEntry)>, /// RLPx TCP port to advertise. pub(super) tcp_port: u16, /// Additional kv-pairs (besides tcp port, udp port and fork) that should be advertised to diff --git a/crates/net/discv5/src/error.rs b/crates/net/discv5/src/error.rs index 7e4fa86530092..1656208986950 100644 --- a/crates/net/discv5/src/error.rs +++ b/crates/net/discv5/src/error.rs @@ -11,6 +11,9 @@ pub enum Error { /// Node record has incompatible key type. #[error("incompatible key type (not secp256k1)")] IncompatibleKeyType, + /// No key used to identify rlpx network is configured. + #[error("network stack identifier is not configured")] + NetworkStackIdNotConfigured, /// Missing key used to identify rlpx network. #[error("fork missing on enr, key missing")] ForkMissing(&'static [u8]), diff --git a/crates/net/discv5/src/filter.rs b/crates/net/discv5/src/filter.rs index f2f2f2fd6e196..2e20e2fbdabae 100644 --- a/crates/net/discv5/src/filter.rs +++ b/crates/net/discv5/src/filter.rs @@ -96,7 +96,7 @@ mod tests { use alloy_rlp::Bytes; use discv5::enr::{CombinedKey, Enr}; - use crate::network_key::{ETH, ETH2}; + use crate::NetworkStackId; use super::*; @@ -104,16 +104,21 @@ mod tests { fn must_not_include_key_filter() { // rig test - let filter = MustNotIncludeKeys::new(&[ETH, ETH2]); + let filter = MustNotIncludeKeys::new(&[NetworkStackId::ETH, NetworkStackId::ETH2]); // enr_1 advertises a fork from one of the keys configured in filter let sk = CombinedKey::generate_secp256k1(); - let enr_1 = - Enr::builder().add_value_rlp(ETH as &[u8], Bytes::from("cancun")).build(&sk).unwrap(); + let enr_1 = Enr::builder() + .add_value_rlp(NetworkStackId::ETH as &[u8], Bytes::from("cancun")) + .build(&sk) + .unwrap(); // enr_2 advertises a fork from one the other key configured in filter let sk = CombinedKey::generate_secp256k1(); - let enr_2 = Enr::builder().add_value_rlp(ETH2, 
Bytes::from("deneb")).build(&sk).unwrap(); + let enr_2 = Enr::builder() + .add_value_rlp(NetworkStackId::ETH2, Bytes::from("deneb")) + .build(&sk) + .unwrap(); // test diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 5275956bfa54c..14793fab056bc 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -33,7 +33,7 @@ pub mod enr; pub mod error; pub mod filter; pub mod metrics; -pub mod network_key; +pub mod network_stack_id; pub use discv5::{self, IpMode}; @@ -41,6 +41,7 @@ pub use config::{BootNode, Config, ConfigBuilder}; pub use enr::enr_to_discv4_id; pub use error::Error; pub use filter::{FilterOutcome, MustNotIncludeKeys}; +pub use network_stack_id::NetworkStackId; use metrics::{DiscoveredPeersMetrics, Discv5Metrics}; @@ -75,7 +76,7 @@ pub struct Discv5 { /// [`IpMode`] of the the node. ip_mode: IpMode, /// Key used in kv-pair to ID chain, e.g. 'opstack' or 'eth'. - fork_key: &'static [u8], + fork_key: Option<&'static [u8]>, /// Filter applied to a discovered peers before passing it up to app. discovered_peer_filter: MustNotIncludeKeys, /// Metrics for underlying [`discv5::Discv5`] node and filtered discovered peers. @@ -217,7 +218,7 @@ impl Discv5 { fn build_local_enr( sk: &SecretKey, config: &Config, - ) -> (Enr, NodeRecord, &'static [u8], IpMode) { + ) -> (Enr, NodeRecord, Option<&'static [u8]>, IpMode) { let mut builder = discv5::enr::Enr::builder(); let Config { discv5_config, fork, tcp_port, other_enr_kv_pairs, .. 
} = config; @@ -258,8 +259,10 @@ impl Discv5 { }; // identifies which network node is on - let (network, fork_value) = fork; - builder.add_value_rlp(network, alloy_rlp::encode(fork_value).into()); + let network_stack_id = fork.as_ref().map(|(network_stack_id, fork_value)| { + builder.add_value_rlp(network_stack_id, alloy_rlp::encode(fork_value).into()); + *network_stack_id + }); // add other data for (key, value) in other_enr_kv_pairs { @@ -273,7 +276,7 @@ impl Discv5 { // backwards compatible enr let bc_enr = NodeRecord::from_secret_key(socket, sk); - (enr, bc_enr, network, ip_mode) + (enr, bc_enr, network_stack_id, ip_mode) } /// Bootstraps underlying [`discv5::Discv5`] node with configured peers. @@ -438,8 +441,10 @@ impl Discv5 { return None } - let fork_id = - (self.fork_key == network_key::ETH).then(|| self.get_fork_id(enr).ok()).flatten(); + // todo: extend for all network stacks in reth-network rlpx logic + let fork_id = (self.fork_key == Some(NetworkStackId::ETH)) + .then(|| self.get_fork_id(enr).ok()) + .flatten(); trace!(target: "net::discovery::discv5", ?fork_id, @@ -483,12 +488,13 @@ impl Discv5 { self.discovered_peer_filter.filter(enr) } - /// Returns the [`ForkId`] of the given [`Enr`](discv5::Enr), if field is set. + /// Returns the [`ForkId`] of the given [`Enr`](discv5::Enr) w.r.t. the local node's network + /// stack, if field is set. fn get_fork_id( &self, enr: &discv5::enr::Enr, ) -> Result { - let key = self.fork_key; + let Some(key) = self.fork_key else { return Err(Error::NetworkStackIdNotConfigured) }; let fork_id = enr .get_decodable::(key) .ok_or(Error::ForkMissing(key))? @@ -519,7 +525,7 @@ impl Discv5 { } /// Returns the key to use to identify the [`ForkId`] kv-pair on the [`Enr`](discv5::Enr). 
- pub fn fork_key(&self) -> &[u8] { + pub fn fork_key(&self) -> Option<&[u8]> { self.fork_key } } @@ -625,7 +631,7 @@ mod tests { .unwrap(), ), ip_mode: IpMode::Ip4, - fork_key: b"noop", + fork_key: None, discovered_peer_filter: MustNotIncludeKeys::default(), metrics: Discv5Metrics::default(), } @@ -831,13 +837,16 @@ mod tests { const TCP_PORT: u16 = 30303; let fork_id = MAINNET.latest_fork_id(); - let config = Config::builder(TCP_PORT).fork(network_key::ETH, fork_id).build(); + let config = Config::builder(TCP_PORT).fork(NetworkStackId::ETH, fork_id).build(); let sk = SecretKey::new(&mut thread_rng()); let (enr, _, _, _) = Discv5::build_local_enr(&sk, &config); - let decoded_fork_id = - enr.get_decodable::(network_key::ETH).unwrap().map(Into::into).unwrap(); + let decoded_fork_id = enr + .get_decodable::(NetworkStackId::ETH) + .unwrap() + .map(Into::into) + .unwrap(); assert_eq!(fork_id, decoded_fork_id); assert_eq!(TCP_PORT, enr.tcp4().unwrap()); // listen config is defaulting to ip mode ipv4 diff --git a/crates/net/discv5/src/metrics.rs b/crates/net/discv5/src/metrics.rs index 12b024a2fbcd3..7bd3572f7aed5 100644 --- a/crates/net/discv5/src/metrics.rs +++ b/crates/net/discv5/src/metrics.rs @@ -2,7 +2,7 @@ use metrics::{Counter, Gauge}; use reth_metrics::Metrics; -use crate::network_key::{ETH, ETH2, OPSTACK}; +use crate::NetworkStackId; /// Information tracked by [`Discv5`](crate::Discv5). #[derive(Debug, Default, Clone)] @@ -91,27 +91,34 @@ impl DiscoveredPeersMetrics { #[derive(Metrics, Clone)] #[metrics(scope = "discv5")] pub struct AdvertisedChainMetrics { - /// Frequency of node records with a kv-pair with [`OPSTACK`](crate::network_key) as + /// Frequency of node records with a kv-pair with [`OPEL`](NetworkStackId::OPEL) as + /// key. + opel: Counter, + + /// Frequency of node records with a kv-pair with [`OPSTACK`](NetworkStackId::OPSTACK) as /// key. opstack: Counter, - /// Frequency of node records with a kv-pair with [`ETH`](crate::network_key) as key. 
+ /// Frequency of node records with a kv-pair with [`ETH`](NetworkStackId::ETH) as key. eth: Counter, - /// Frequency of node records with a kv-pair with [`ETH2`](crate::network_key) as key. + /// Frequency of node records with a kv-pair with [`ETH2`](NetworkStackId::ETH2) as key. eth2: Counter, } impl AdvertisedChainMetrics { - /// Counts each recognised network type that is advertised on node record, once. + /// Counts each recognised network stack type that is advertised on node record, once. pub fn increment_once_by_network_type(&self, enr: &discv5::Enr) { - if enr.get_raw_rlp(OPSTACK).is_some() { + if enr.get_raw_rlp(NetworkStackId::OPEL).is_some() { + self.opel.increment(1u64) + } + if enr.get_raw_rlp(NetworkStackId::OPSTACK).is_some() { self.opstack.increment(1u64) } - if enr.get_raw_rlp(ETH).is_some() { + if enr.get_raw_rlp(NetworkStackId::ETH).is_some() { self.eth.increment(1u64) } - if enr.get_raw_rlp(ETH2).is_some() { + if enr.get_raw_rlp(NetworkStackId::ETH2).is_some() { self.eth2.increment(1u64) } } diff --git a/crates/net/discv5/src/network_key.rs b/crates/net/discv5/src/network_key.rs deleted file mode 100644 index 47576e5b2384d..0000000000000 --- a/crates/net/discv5/src/network_key.rs +++ /dev/null @@ -1,11 +0,0 @@ -//! Keys of ENR [`ForkId`](reth_primitives::ForkId) kv-pair. Identifies which network a node -//! belongs to. - -/// ENR fork ID kv-pair key, for an Ethereum L1 EL node. -pub const ETH: &[u8] = b"eth"; - -/// ENR fork ID kv-pair key, for an Ethereum L1 CL node. -pub const ETH2: &[u8] = b"eth2"; - -/// ENR fork ID kv-pair key, for an Optimism CL node. -pub const OPSTACK: &[u8] = b"opstack"; diff --git a/crates/net/discv5/src/network_stack_id.rs b/crates/net/discv5/src/network_stack_id.rs new file mode 100644 index 0000000000000..7bfeff517f2d8 --- /dev/null +++ b/crates/net/discv5/src/network_stack_id.rs @@ -0,0 +1,33 @@ +//! Keys of ENR [`ForkId`](reth_primitives::ForkId) kv-pair. Identifies which network stack a node +//! belongs to. 
+ +use reth_primitives::ChainSpec; + +/// Identifies which Ethereum network stack a node belongs to, on the discovery network. +#[derive(Debug)] +pub struct NetworkStackId; + +impl NetworkStackId { + /// ENR fork ID kv-pair key, for an Ethereum L1 EL node. + pub const ETH: &'static [u8] = b"eth"; + + /// ENR fork ID kv-pair key, for an Ethereum L1 CL node. + pub const ETH2: &'static [u8] = b"eth2"; + + /// ENR fork ID kv-pair key, for an Optimism EL node. + pub const OPEL: &'static [u8] = b"opel"; + + /// ENR fork ID kv-pair key, for an Optimism CL node. + pub const OPSTACK: &'static [u8] = b"opstack"; + + /// Returns the [`NetworkStackId`] that matches the given [`ChainSpec`]. + pub fn id(chain: &ChainSpec) -> Option<&'static [u8]> { + if chain.is_optimism() { + return Some(Self::OPEL) + } else if chain.is_eth() { + return Some(Self::ETH) + } + + None + } +} diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 463bde78d9ff0..9e898014fe539 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -9,17 +9,18 @@ use crate::{ NetworkHandle, NetworkManager, }; use reth_discv4::{Discv4Config, Discv4ConfigBuilder, DEFAULT_DISCOVERY_ADDRESS}; -use reth_discv5::network_key; +use reth_discv5::NetworkStackId; use reth_dns_discovery::DnsDiscoveryConfig; use reth_eth_wire::{HelloMessage, HelloMessageWithProtocols, Status}; use reth_network_types::{pk2id, PeerId}; use reth_primitives::{ - mainnet_nodes, sepolia_nodes, ChainSpec, ForkFilter, Head, NamedChain, NodeRecord, MAINNET, + mainnet_nodes, sepolia_nodes, ChainSpec, ForkFilter, Head, NodeRecord, MAINNET, }; use reth_provider::{BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use secp256k1::SECP256K1; use std::{collections::HashSet, net::SocketAddr, sync::Arc}; + // re-export for convenience use crate::protocol::{IntoRlpxSubProtocol, RlpxSubProtocols}; pub use secp256k1::SecretKey; @@ -121,20 +122,16 @@ impl NetworkConfig { f: 
impl FnOnce(reth_discv5::ConfigBuilder) -> reth_discv5::Config, ) -> Self { let rlpx_port = self.listener_addr.port(); - let chain = self.chain_spec.chain; + let network_stack_id = NetworkStackId::id(&self.chain_spec); let fork_id = self.chain_spec.latest_fork_id(); let boot_nodes = self.boot_nodes.clone(); let mut builder = reth_discv5::Config::builder(rlpx_port).add_unsigned_boot_nodes(boot_nodes.into_iter()); - if chain.named() == Some(NamedChain::Mainnet) { - builder = builder.fork(network_key::ETH, fork_id) + if let Some(id) = network_stack_id { + builder = builder.fork(id, fork_id); } - // todo: set op EL fork id - /*if chain.is_optimism() { - builder = builder.fork(network_key::, fork_id) - }*/ self.set_discovery_v5(f(builder)) } diff --git a/crates/primitives/src/chain/mod.rs b/crates/primitives/src/chain/mod.rs index f8425f95e6631..bf60392cd8c63 100644 --- a/crates/primitives/src/chain/mod.rs +++ b/crates/primitives/src/chain/mod.rs @@ -1,4 +1,4 @@ -pub use alloy_chains::{Chain, NamedChain}; +pub use alloy_chains::{Chain, ChainKind, NamedChain}; pub use info::ChainInfo; pub use spec::{ AllGenesisFormats, BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 4ce26f84b84e7..cf45cceea62d9 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -4,8 +4,9 @@ use crate::{ net::{goerli_nodes, mainnet_nodes, sepolia_nodes}, proofs::state_root_ref_unhashed, revm_primitives::{address, b256}, - Address, BlockNumber, Chain, ForkFilter, ForkFilterKey, ForkHash, ForkId, Genesis, Hardfork, - Head, Header, NamedChain, NodeRecord, SealedHeader, B256, EMPTY_OMMER_ROOT_HASH, U256, + Address, BlockNumber, Chain, ChainKind, ForkFilter, ForkFilterKey, ForkHash, ForkId, Genesis, + Hardfork, Head, Header, NamedChain, NodeRecord, SealedHeader, B256, EMPTY_OMMER_ROOT_HASH, + U256, }; use once_cell::sync::Lazy; use serde::{Deserialize, 
Serialize}; @@ -577,6 +578,24 @@ impl ChainSpec { self.chain } + /// Returns `true` if this chain contains Ethereum configuration. + #[inline] + pub fn is_eth(&self) -> bool { + matches!( + self.chain.kind(), + ChainKind::Named( + NamedChain::Mainnet | + NamedChain::Morden | + NamedChain::Ropsten | + NamedChain::Rinkeby | + NamedChain::Goerli | + NamedChain::Kovan | + NamedChain::Holesky | + NamedChain::Sepolia + ) + ) + } + /// Returns `true` if this chain contains Optimism configuration. #[inline] pub fn is_optimism(&self) -> bool { diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 1c88086288171..d20a35164ddb2 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -55,7 +55,7 @@ pub use block::{ ForkBlock, RpcBlockHash, SealedBlock, SealedBlockWithSenders, }; pub use chain::{ - AllGenesisFormats, BaseFeeParams, BaseFeeParamsKind, Chain, ChainInfo, ChainSpec, + AllGenesisFormats, BaseFeeParams, BaseFeeParamsKind, Chain, ChainInfo, ChainKind, ChainSpec, ChainSpecBuilder, DisplayHardforks, ForkBaseFeeParams, ForkCondition, ForkTimestamps, NamedChain, DEV, GOERLI, HOLESKY, MAINNET, SEPOLIA, }; diff --git a/etc/grafana/dashboards/reth-discovery.json b/etc/grafana/dashboards/reth-discovery.json index 53d71cd766439..787913e651003 100644 --- a/etc/grafana/dashboards/reth-discovery.json +++ b/etc/grafana/dashboards/reth-discovery.json @@ -1,976 +1,1131 @@ { - "__inputs": [ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.3.3" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time 
series", + "version": "" + } + ], + "annotations": { + "list": [ { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" } - ], - "__elements": {}, - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "10.3.3" + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" + "id": 96, + "panels": [], + "repeat": "instance", + "repeatDirection": "h", + "title": "Overview", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "type": "panel", - "id": "stat", - "name": "Stat", - "version": "" + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true + }, + "overrides": [] }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], - "annotations": { - "list": [ + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 1 + }, + "id": 22, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ { - "builtIn": 1, 
"datasource": { - "type": "grafana", - "uid": "-- Grafana --" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{version}}", + "range": false, + "refId": "A" } - ] + ], + "title": "Version", + "transparent": true, + "type": "stat" }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "id": null, - "links": [], - "liveNow": false, - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 96, - "panels": [], - "repeat": "instance", - "repeatDirection": "h", - "title": "Overview", - "type": "row" + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 0, - "y": 1 - }, - "id": 22, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "valueSize": 20 - }, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "10.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - 
"editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{version}}", - "range": false, - "refId": "A" - } - ], - "title": "Version", - "transparent": true, - "type": "stat" + "overrides": [] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 3, + "w": 6, + "x": 3, + "y": 1 + }, + "id": 192, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 6, - "x": 3, - "y": 1 - }, - "id": 192, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "valueSize": 20 - }, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "10.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{build_timestamp}}", - "range": false, - "refId": "A" - } - ], - "title": "Build Timestamp", - "transparent": true, - "type": "stat" + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": 
false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{build_timestamp}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Timestamp", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 9, - "y": 1 - }, - "id": 193, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "valueSize": 20 - }, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "10.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{git_sha}}", - "range": false, - "refId": "A" - } - ], - "title": "Git SHA", - "transparent": true, - "type": "stat" + "overrides": [] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 1 + }, + "id": 193, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - 
}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 2, - "x": 12, - "y": 1 - }, - "id": 195, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "valueSize": 20 - }, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "10.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{build_profile}}", - "range": false, - "refId": "A" - } - ], - "title": "Build Profile", - "transparent": true, - "type": "stat" + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{git_sha}}", + "range": false, + "refId": "A" + } + ], + "title": "Git SHA", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", 
- "value": null - } - ] - }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 5, - "x": 14, - "y": 1 - }, - "id": 196, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "valueSize": 20 - }, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "10.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{target_triple}}", - "range": false, - "refId": "A" - } - ], - "title": "Target Triple", - "transparent": true, - "type": "stat" + "overrides": [] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 3, + "w": 2, + "x": 12, + "y": 1 + }, + "id": 195, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 5, - "x": 19, - "y": 1 - }, - "id": 197, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "valueSize": 20 - }, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "10.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - 
"editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{cargo_features}}", - "range": false, - "refId": "A" - } - ], - "title": "Cargo Features", - "transparent": true, - "type": "stat" + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 4 - }, - "id": 89, - "panels": [], - "repeat": "instance", - "repeatDirection": "h", - "title": "Discv5", - "type": "row" + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{build_profile}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Profile", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true }, - "description": "Peers managed by underlying sigp/discv5 node. \n\nOnly peers in the kbuckets are queried in FINDNODE lookups, and included in NODES responses to other peers.\n\nNot all peers with an established session will make it into the kbuckets, due to e.g. 
reachability issues (NAT) and capacity of kbuckets furthest log2distance away from local node (XOR metrics).", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 14, + "y": 1 + }, + "id": 196, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{target_triple}}", + "range": false, + "refId": "A" + } + ], + "title": "Target Triple", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 19, + "y": 1 + }, + "id": 197, + 
"options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{cargo_features}}", + "range": false, + "refId": "A" + } + ], + "title": "Cargo Features", + "transparent": true, + "type": "stat" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 4 + }, + "id": 89, + "panels": [], + "repeat": "instance", + "repeatDirection": "h", + "title": "Discv5", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Peers managed by underlying sigp/discv5 node. \n\nOnly peers in the kbuckets are queried in FINDNODE lookups, and included in NODES responses to other peers.\n\nNot all peers with an established session will make it into the kbuckets, due to e.g. 
reachability issues (NAT) and capacity of kbuckets furthest log2distance away from local node (XOR metrics).", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 5 - }, - "id": 198, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true }, - "targets": [ + "overrides": [ { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Total peers kbuckets" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_discv5_total_kbucket_peers_raw{instance=\"$instance\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Total peers kbuckets", - "range": true, - "refId": "A", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "purple", + "mode": 
"fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Total connected sessions" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_discv5_total_sessions_raw{instance=\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Total connected sessions", - "range": true, - "refId": "B", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "super-light-red", + "mode": "fixed" + } + } + ] } - ], - "title": "Peers", - "type": "timeseries" + ] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 5 + }, + "id": 198, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, - "description": "Frequency of session establishment and kbuckets insertions.\n\nSince discv5 favours long-lived connections, kbuckets insertions are expected to be less frequent the longer the node stays online.\n\nSome incoming connections may be from peers with unreachable ENRs, ENRs that don't advertise a UDP socket. These peers are not useful for the discv5 node, nor for RLPx.\n\nDiscovered peers are filtered w.r.t. what they advertise in their ENR. By default peers advertising 'eth2' are filtered out. Unreachable ENRs are also filtered out. Only peers that pass the filter are useful. 
These peers get passed up the node, to attempt an RLPx connection.\n\n", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_discv5_total_kbucket_peers_raw{instance=\"$instance\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Total peers kbuckets", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_discv5_total_sessions_raw{instance=\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Total connected sessions", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Frequency of session establishment and kbuckets insertions.\n\nSince discv5 favours long-lived connections, kbuckets insertions are expected to be less frequent the longer the node stays online.\n\nSome incoming connections may be from peers with unreachable ENRs, ENRs that don't advertise a UDP socket. These peers are not useful for the discv5 node, nor for RLPx.\n\nDiscovered peers are filtered w.r.t. what they advertise in their ENR. By default peers advertising 'eth2' are filtered out. Unreachable ENRs are also filtered out. Only peers that pass the filter are useful. 
These peers get passed up the node, to attempt an RLPx connection.\n\n", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" }, - "unit": "cps", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 5 - }, - "id": 199, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "cps", + 
"unitScale": true }, - "targets": [ + "overrides": [ { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Total Session Establishments" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_discv5_total_inserted_kbucket_peers_raw{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Total KBucket Insertions", - "range": true, - "refId": "A", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "purple", + "mode": "fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Total KBucket Insertions" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Total Session Establishments", - "range": true, - "refId": "B", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "super-light-red", + "mode": "fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Session Establishments (pass filter)" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_discv5_total_established_sessions_unreachable_enr{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Session Establishments (unreachable ENR)", - "range": true, - "refId": "C", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#ff0ada", + "mode": "fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": 
"${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Session Establishments (unreachable ENR)" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval]) - rate(reth_discv5_total_established_sessions_custom_filtered{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Session Establishments (pass filter)", - "range": true, - "refId": "D", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-orange", + "mode": "fixed" + } + } + ] } - ], - "title": "Peer Churn", - "type": "timeseries" + ] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 5 + }, + "id": 199, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, - "description": "Frequency of discovering peers from some popular networks.\n\nSome nodes miss advertising a fork ID kv-pair in their ENR. 
They will be counted as 'unknown', but may belong to a popular network.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_total_inserted_kbucket_peers_raw{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Total KBucket Insertions", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Total Session Establishments", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_total_established_sessions_unreachable_enr{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + 
"includeNullMetadata": false, + "instant": false, + "legendFormat": "Session Establishments (unreachable ENR)", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval]) - rate(reth_discv5_total_established_sessions_custom_filtered{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Session Establishments (pass filter)", + "range": true, + "refId": "D", + "useBackend": false + } + ], + "title": "Peer Churn", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Frequency of discovering peers from some popular network stacks.\n\nSome nodes miss advertising a fork ID kv-pair in their ENR. 
They will be counted as 'unknown', but may belong to a popular network.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" }, - "unit": "cps", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 13 - }, - "id": 200, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "rate(reth_discv5_eth{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Eth", - "range": true, - "refId": "A", - "useBackend": false + "thresholdsStyle": { + "mode": "off" + } }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "cps", + "unitScale": true + }, + "overrides": [ { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Eth" }, - "disableTextWrap": false, - "editorMode": 
"code", - "expr": "rate(reth_discv5_eth2{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Eth2", - "range": true, - "refId": "B", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "purple", + "mode": "fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Eth2" }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "rate(reth_discv5_opstack{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "OP", - "range": true, - "refId": "C", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "super-light-red", + "mode": "fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Unknown" }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval]) - (rate(reth_discv5_eth{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_eth2{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_opstack{instance=\"$instance\"}[$__rate_interval]))", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Unknown", - "range": true, - "refId": "D", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#ff0ae5", + "mode": "fixed" + } + } + ] } - ], - "title": "Advertised Networks", - "type": "timeseries" - } - ], - "refresh": "30s", - "schemaVersion": 39, - "tags": [], - "templating": { - "list": [ + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 13 + }, + "id": 200, + "options": { + "legend": { + "calcs": [], + 
"displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_discv5_eth{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Eth", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_discv5_eth2{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Eth2", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_discv5_opel{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "OP EL", + "range": true, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_discv5_opstack{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "OP CL", + "range": true, + "refId": "C", + "useBackend": false + }, { - "current": {}, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "definition": "query_result(reth_info)", - "hide": 0, - "includeAll": false, - "multi": false, - "name": "instance", - "options": [], - "query": { - "query": "query_result(reth_info)", - "refId": 
"PrometheusVariableQueryEditor-VariableQuery" - }, - "refresh": 1, - "regex": "/.*instance=\\\"([^\\\"]*).*/", - "skipUrlSync": false, - "sort": 0, - "type": "query" + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval]) - (rate(reth_discv5_eth{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_eth2{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_opstack{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_opel{instance=\"$instance\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Unknown", + "range": true, + "refId": "D", + "useBackend": false } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": {}, - "timezone": "", - "title": "reth - discovery", - "uid": "de6e87b2-7630-40b2-b2c4-a500476e799d", - "version": 11, - "weekStart": "" - } \ No newline at end of file + ], + "title": "Advertised Network Stacks", + "type": "timeseries" + } + ], + "refresh": "30s", + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [ + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "query_result(reth_info)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "instance", + "options": [], + "query": { + "query": "query_result(reth_info)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "/.*instance=\\\"([^\\\"]*).*/", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "reth - discovery", + "uid": "de6e87b2-7630-40b2-b2c4-a500476e799d", + "version": 1, + "weekStart": "" +} \ No newline at end of file From 8e65cb3aa52ef1d621b390d0b1f834f77db9ac60 Mon Sep 17 00:00:00 2001 From: Thomas Coratger 
<60488569+tcoratger@users.noreply.github.com> Date: Tue, 30 Apr 2024 23:07:41 +0200 Subject: [PATCH 418/700] Bump `evm-inspectors` and `alloy` (#8006) --- Cargo.lock | 36 +++++++++---------- Cargo.toml | 26 +++++++------- crates/blockchain-tree/src/blockchain_tree.rs | 6 ++-- crates/consensus/common/src/validation.rs | 4 +-- crates/net/network/tests/it/connect.rs | 22 +++++------- crates/optimism/evm/src/execute.rs | 10 +++--- crates/primitives/src/transaction/eip1559.rs | 2 +- crates/primitives/src/transaction/eip2930.rs | 2 +- crates/primitives/src/transaction/legacy.rs | 2 +- crates/primitives/src/transaction/mod.rs | 28 ++++----------- crates/revm/src/optimism/processor.rs | 10 +++--- crates/revm/src/processor.rs | 4 +-- .../rpc-types-compat/src/transaction/mod.rs | 8 ++--- crates/rpc/rpc/src/eth/api/call.rs | 1 + crates/rpc/rpc/src/eth/api/server.rs | 3 +- crates/storage/codecs/src/alloy/txkind.rs | 2 +- crates/transaction-pool/src/test_utils/gen.rs | 2 +- .../transaction-pool/src/test_utils/mock.rs | 8 ++--- 18 files changed, 78 insertions(+), 98 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c7eca223b4862..f1ee55cb34577 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -133,7 +133,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-eips", "alloy-primitives", @@ -166,7 +166,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -185,7 +185,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", "alloy-serde", @@ -208,7 +208,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", "serde", @@ -220,7 +220,7 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-consensus", "alloy-eips", @@ -237,7 +237,7 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -279,7 +279,7 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -329,7 +329,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ 
-349,7 +349,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-consensus", "alloy-eips", @@ -371,7 +371,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", "alloy-serde", @@ -381,7 +381,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-consensus", "alloy-eips", @@ -401,7 +401,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -413,7 +413,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", "serde", @@ -423,7 +423,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = 
"git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", "async-trait", @@ -436,7 +436,7 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-consensus", "alloy-network", @@ -511,7 +511,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-json-rpc", "base64 0.22.0", @@ -529,7 +529,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -7944,7 +7944,7 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=848d568#848d5688d0c499c538b9a78b423a7061525aa580" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=3d2077e#3d2077ee665046c256448a8bd90d8e93ea85de56" dependencies = [ "alloy-primitives", "alloy-rpc-types", diff --git a/Cargo.toml b/Cargo.toml index ab330d87e7b2b..28b0692dd7f22 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -282,7 +282,7 @@ reth-testing-utils = { path = "testing/testing-utils" } # revm revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } -revm-inspectors = { git = 
"https://github.com/paradigmxyz/evm-inspectors", rev = "848d568" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "3d2077e" } # eth alloy-chains = "0.1.15" @@ -291,20 +291,20 @@ alloy-dyn-abi = "0.7.1" alloy-sol-types = "0.7.1" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "4e22b9e" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev 
= "4e22b9e" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "ca54552" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } # misc auto_impl = "1" diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index eee4163c7cae9..09f829c7e9917 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1285,8 +1285,8 @@ mod tests { revm_primitives::AccountInfo, stage::StageCheckpoint, Account, Address, ChainSpecBuilder, Genesis, GenesisAccount, Header, Signature, - Transaction, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, TxKind, - Withdrawals, B256, MAINNET, + Transaction, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, Withdrawals, B256, + MAINNET, }; use reth_provider::{ test_utils::{ @@ -1465,7 +1465,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce, gas_limit: 21_000, - to: TxKind::Call(Address::ZERO), + to: Address::ZERO.into(), max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, ..Default::default() }), diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index c6e4e0aee857b..b67d40e98533e 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -202,7 +202,7 @@ mod tests { use reth_primitives::{ hex_literal::hex, proofs, Account, Address, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, Bytes, ChainSpecBuilder, Signature, Transaction, TransactionSigned, TxEip4844, - TxKind, Withdrawal, Withdrawals, U256, + Withdrawal, Withdrawals, U256, }; use reth_provider::AccountReader; use std::ops::RangeBounds; @@ -313,7 +313,7 
@@ mod tests { max_priority_fee_per_gas: 0x28f000fff, max_fee_per_blob_gas: 0x7, gas_limit: 10, - to: TxKind::Call(Address::default()), + to: Address::default().into(), value: U256::from(3_u64), input: Bytes::from(vec![1, 2]), access_list: Default::default(), diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index 1ff59bf406805..7b9c785ebdbe2 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -1,7 +1,7 @@ //! Connection tests use alloy_node_bindings::Geth; -use alloy_provider::{admin::AdminApi, ProviderBuilder}; +use alloy_provider::{ext::AdminApi, ProviderBuilder}; use futures::StreamExt; use reth_discv4::Discv4Config; use reth_eth_wire::DisconnectReason; @@ -320,9 +320,8 @@ async fn test_incoming_node_id_blacklist() { let geth = Geth::new().data_dir(temp_dir).disable_discovery().authrpc_port(0).spawn(); let geth_endpoint = SocketAddr::new([127, 0, 0, 1].into(), geth.port()); - let provider = ProviderBuilder::new() - .on_http(format!("http://{geth_endpoint}").parse().unwrap()) - .unwrap(); + let provider = + ProviderBuilder::new().on_http(format!("http://{geth_endpoint}").parse().unwrap()); // get the peer id we should be expecting let enr = provider.node_info().await.unwrap().enr; @@ -375,9 +374,8 @@ async fn test_incoming_connect_with_single_geth() { let temp_dir = tempfile::tempdir().unwrap().into_path(); let geth = Geth::new().data_dir(temp_dir).disable_discovery().authrpc_port(0).spawn(); let geth_endpoint = SocketAddr::new([127, 0, 0, 1].into(), geth.port()); - let provider = ProviderBuilder::new() - .on_http(format!("http://{geth_endpoint}").parse().unwrap()) - .unwrap(); + let provider = + ProviderBuilder::new().on_http(format!("http://{geth_endpoint}").parse().unwrap()); // get the peer id we should be expecting let enr = provider.node_info().await.unwrap().enr; @@ -438,9 +436,8 @@ async fn test_outgoing_connect_with_single_geth() { let geth_socket = 
SocketAddr::new([127, 0, 0, 1].into(), geth_p2p_port); let geth_endpoint = SocketAddr::new([127, 0, 0, 1].into(), geth.port()).to_string(); - let provider = ProviderBuilder::new() - .on_http(format!("http://{geth_endpoint}").parse().unwrap()) - .unwrap(); + let provider = + ProviderBuilder::new().on_http(format!("http://{geth_endpoint}").parse().unwrap()); // get the peer id we should be expecting let enr = provider.node_info().await.unwrap().enr; @@ -485,9 +482,8 @@ async fn test_geth_disconnect() { let geth_socket = SocketAddr::new([127, 0, 0, 1].into(), geth_p2p_port); let geth_endpoint = SocketAddr::new([127, 0, 0, 1].into(), geth.port()).to_string(); - let provider = ProviderBuilder::new() - .on_http(format!("http://{geth_endpoint}").parse().unwrap()) - .unwrap(); + let provider = + ProviderBuilder::new().on_http(format!("http://{geth_endpoint}").parse().unwrap()); // get the peer id we should be expecting let enr = provider.node_info().await.unwrap().enr; diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index c56c7622e622b..a77f422055f57 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -539,7 +539,7 @@ mod tests { use super::*; use reth_primitives::{ b256, Account, Address, Block, ChainSpecBuilder, Signature, StorageKey, StorageValue, - Transaction, TransactionSigned, TxEip1559, TxKind, BASE_MAINNET, + Transaction, TransactionSigned, TxEip1559, BASE_MAINNET, }; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT, @@ -609,7 +609,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce: 0, gas_limit: 21_000, - to: TxKind::Call(addr), + to: addr.into(), ..Default::default() }), Signature::default(), @@ -618,7 +618,7 @@ mod tests { let tx_deposit = TransactionSigned::from_transaction_and_signature( Transaction::Deposit(reth_primitives::TxDeposit { from: addr, - to: TxKind::Call(addr), + to: addr.into(), gas_limit: 21_000, 
..Default::default() }), @@ -689,7 +689,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce: 0, gas_limit: 21_000, - to: TxKind::Call(addr), + to: addr.into(), ..Default::default() }), Signature::default(), @@ -698,7 +698,7 @@ mod tests { let tx_deposit = TransactionSigned::from_transaction_and_signature( Transaction::Deposit(reth_primitives::TxDeposit { from: addr, - to: TxKind::Call(addr), + to: addr.into(), gas_limit: 21_000, ..Default::default() }), diff --git a/crates/primitives/src/transaction/eip1559.rs b/crates/primitives/src/transaction/eip1559.rs index 06cbc129c4291..5da0cd881adef 100644 --- a/crates/primitives/src/transaction/eip1559.rs +++ b/crates/primitives/src/transaction/eip1559.rs @@ -243,7 +243,7 @@ mod tests { chain_id: 1, nonce: 0x42, gas_limit: 44386, - to: TxKind::Call( hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), + to: TxKind::Call(hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), value: U256::ZERO, input: hex!("a22cb4650000000000000000000000005eee75727d804a2b13038928d36f8b188945a57a0000000000000000000000000000000000000000000000000000000000000000").into(), max_fee_per_gas: 0x4a817c800, diff --git a/crates/primitives/src/transaction/eip2930.rs b/crates/primitives/src/transaction/eip2930.rs index b0d1291e810c3..0604a7888fda7 100644 --- a/crates/primitives/src/transaction/eip2930.rs +++ b/crates/primitives/src/transaction/eip2930.rs @@ -225,7 +225,7 @@ mod tests { nonce: 0, gas_price: 1, gas_limit: 2, - to: TxKind::Call(Address::default()), + to: Address::default().into(), value: U256::from(3), input: Bytes::from(vec![1, 2]), access_list: Default::default(), diff --git a/crates/primitives/src/transaction/legacy.rs b/crates/primitives/src/transaction/legacy.rs index f2440e13cd48c..448662a243cf3 100644 --- a/crates/primitives/src/transaction/legacy.rs +++ b/crates/primitives/src/transaction/legacy.rs @@ -190,7 +190,7 @@ mod tests { nonce: 0x18, gas_price: 0xfa56ea00, gas_limit: 119902, - to: TxKind::Call( 
hex!("06012c8cf97bead5deae237070f9587f8e7a266d").into()), + to: TxKind::Call(hex!("06012c8cf97bead5deae237070f9587f8e7a266d").into()), value: U256::from(0x1c6bf526340000u64), input: hex!("f7d8c88300000000000000000000000000000000000000000000000000000000000cee6100000000000000000000000000000000000000000000000000000000000ac3e1").into(), }); diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index f401b0ef631ff..7b79a85a21a96 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1866,9 +1866,7 @@ mod tests { nonce: 2, gas_price: 1000000000, gas_limit: 100000, - to: TxKind::Call( - Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap(), - ), + to: Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap().into(), value: U256::from(1000000000000000u64), input: Bytes::default(), }); @@ -1888,9 +1886,7 @@ mod tests { nonce: 1u64, gas_price: 1000000000, gas_limit: 100000u64, - to: TxKind::Call(Address::from_slice( - &hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046")[..], - )), + to: Address::from_slice(&hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046")[..]).into(), value: U256::from(693361000000000u64), input: Default::default(), }); @@ -1909,9 +1905,7 @@ mod tests { nonce: 3, gas_price: 2000000000, gas_limit: 10000000, - to: TxKind::Call(Address::from_slice( - &hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046")[..], - )), + to: Address::from_slice(&hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046")[..]).into(), value: U256::from(1000000000000000u64), input: Bytes::default(), }); @@ -1931,9 +1925,7 @@ mod tests { max_priority_fee_per_gas: 1500000000, max_fee_per_gas: 1500000013, gas_limit: 21000, - to: TxKind::Call(Address::from_slice( - &hex!("61815774383099e24810ab832a5b2a5425c154d5")[..], - )), + to: Address::from_slice(&hex!("61815774383099e24810ab832a5b2a5425c154d5")[..]).into(), value: U256::from(3000000000000000000u64), input: Default::default(), 
access_list: Default::default(), @@ -1953,9 +1945,7 @@ mod tests { nonce: 15, gas_price: 2200000000, gas_limit: 34811, - to: TxKind::Call(Address::from_slice( - &hex!("cf7f9e66af820a19257a2108375b180b0ec49167")[..], - )), + to: Address::from_slice(&hex!("cf7f9e66af820a19257a2108375b180b0ec49167")[..]).into(), value: U256::from(1234), input: Bytes::default(), }); @@ -2242,9 +2232,7 @@ mod tests { nonce: 2, gas_price: 1000000000, gas_limit: 100000, - to: TxKind::Call( - Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap(), - ), + to: Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap().into(), value: U256::from(1000000000000000u64), input: Bytes::from(input), }); @@ -2291,9 +2279,7 @@ mod tests { nonce: 2, gas_price: 1000000000, gas_limit: 100000, - to: TxKind::Call( - Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap(), - ), + to: Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap().into(), value: U256::from(1000000000000000u64), input: Bytes::from(vec![3u8; 64]), }); diff --git a/crates/revm/src/optimism/processor.rs b/crates/revm/src/optimism/processor.rs index bd68023bee5d1..01ec2efde179a 100644 --- a/crates/revm/src/optimism/processor.rs +++ b/crates/revm/src/optimism/processor.rs @@ -206,7 +206,7 @@ mod tests { }; use reth_primitives::{ b256, Account, Address, Block, ChainSpecBuilder, Header, Signature, StorageKey, - StorageValue, Transaction, TransactionSigned, TxEip1559, TxKind, BASE_MAINNET, + StorageValue, Transaction, TransactionSigned, TxEip1559, BASE_MAINNET, }; use revm::L1_BLOCK_CONTRACT; use std::{collections::HashMap, str::FromStr, sync::Arc}; @@ -278,7 +278,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce: 0, gas_limit: 21_000, - to: TxKind::Call(addr), + to: addr.into(), ..Default::default() }), Signature::default(), @@ -287,7 +287,7 @@ mod tests { let tx_deposit = TransactionSigned::from_transaction_and_signature( Transaction::Deposit(reth_primitives::TxDeposit { from: 
addr, - to: TxKind::Call(addr), + to: addr.into(), gas_limit: 21_000, ..Default::default() }), @@ -352,7 +352,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce: 0, gas_limit: 21_000, - to: TxKind::Call(addr), + to: addr.into(), ..Default::default() }), Signature::default(), @@ -361,7 +361,7 @@ mod tests { let tx_deposit = TransactionSigned::from_transaction_and_signature( Transaction::Deposit(reth_primitives::TxDeposit { from: addr, - to: TxKind::Call(addr), + to: addr.into(), gas_limit: 21_000, ..Default::default() }), diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs index e6a85b77d397c..c22272abc11bb 100644 --- a/crates/revm/src/processor.rs +++ b/crates/revm/src/processor.rs @@ -466,7 +466,7 @@ mod tests { bytes, constants::{BEACON_ROOTS_ADDRESS, EIP1559_INITIAL_BASE_FEE, SYSTEM_ADDRESS}, keccak256, Account, Bytes, ChainSpecBuilder, ForkCondition, Signature, Transaction, - TxEip1559, TxKind, MAINNET, + TxEip1559, MAINNET, }; use revm::{Database, TransitionState}; use std::collections::HashMap; @@ -855,7 +855,7 @@ mod tests { chain_id, nonce: 1, gas_limit: 21_000, - to: TxKind::Call(Address::ZERO), + to: Address::ZERO.into(), max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, ..Default::default() }), diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index a441c4c299945..6a35429c53d61 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -1,9 +1,7 @@ //! Compatibility functions for rpc `Transaction` type. 
use alloy_rpc_types::request::{TransactionInput, TransactionRequest}; -use reth_primitives::{ - BlockNumber, TransactionSignedEcRecovered, TxKind as PrimitiveTransactionKind, TxType, B256, -}; +use reth_primitives::{BlockNumber, TransactionSignedEcRecovered, TxKind, TxType, B256}; use reth_rpc_types::Transaction; use signature::from_primitive_signature; pub use typed::*; @@ -45,8 +43,8 @@ fn fill( let signed_tx = tx.into_signed(); let to = match signed_tx.kind() { - PrimitiveTransactionKind::Create => None, - PrimitiveTransactionKind::Call(to) => Some(*to), + TxKind::Create => None, + TxKind::Call(to) => Some(*to), }; #[allow(unreachable_patterns)] diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index 7066f73729823..191406f96a90d 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ b/crates/rpc/rpc/src/eth/api/call.rs @@ -443,6 +443,7 @@ where Ok(AccessListWithGasUsed { access_list, gas_used }) } + /// Executes the requests again after an out of gas error to check if the error is gas related /// or not #[inline] diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index c2be79a10e5be..7ba1539b812f4 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -429,8 +429,6 @@ where #[cfg(test)] mod tests { - use jsonrpsee::types::error::INVALID_PARAMS_CODE; - use crate::{ eth::{ cache::EthStateCache, gas_oracle::GasPriceOracle, FeeHistoryCache, @@ -438,6 +436,7 @@ mod tests { }, EthApi, }; + use jsonrpsee::types::error::INVALID_PARAMS_CODE; use reth_evm_ethereum::EthEvmConfig; use reth_interfaces::test_utils::{generators, generators::Rng}; use reth_network_api::noop::NoopNetwork; diff --git a/crates/storage/codecs/src/alloy/txkind.rs b/crates/storage/codecs/src/alloy/txkind.rs index 220384bdde730..e1dffa15be55a 100644 --- a/crates/storage/codecs/src/alloy/txkind.rs +++ b/crates/storage/codecs/src/alloy/txkind.rs @@ -21,7 +21,7 @@ impl Compact for TxKind { 0 => 
(TxKind::Create, buf), 1 => { let (addr, buf) = Address::from_compact(buf, buf.len()); - (TxKind::Call(addr), buf) + (addr.into(), buf) } _ => { unreachable!("Junk data in database: unknown TransactionKind variant",) diff --git a/crates/transaction-pool/src/test_utils/gen.rs b/crates/transaction-pool/src/test_utils/gen.rs index 2e3c71828dcd5..5c335e5d6edab 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -306,7 +306,7 @@ impl TransactionBuilder { /// Sets the recipient or contract address for the transaction, mutable reference version. pub fn set_to(&mut self, to: Address) -> &mut Self { - self.to = TxKind::Call(to); + self.to = to.into(); self } diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 7eda40e58e706..8e265e7ba36c2 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -213,7 +213,7 @@ impl MockTransaction { nonce: 0, gas_price: 0, gas_limit: 0, - to: TxKind::Call(Address::random()), + to: Address::random().into(), value: Default::default(), input: Default::default(), size: Default::default(), @@ -229,7 +229,7 @@ impl MockTransaction { max_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, max_priority_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, gas_limit: 0, - to: TxKind::Call(Address::random()), + to: Address::random().into(), value: Default::default(), input: Bytes::new(), accesslist: Default::default(), @@ -247,7 +247,7 @@ impl MockTransaction { max_priority_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, max_fee_per_blob_gas: DATA_GAS_PER_BLOB as u128, gas_limit: 0, - to: TxKind::Call(Address::random()), + to: Address::random().into(), value: Default::default(), input: Bytes::new(), accesslist: Default::default(), @@ -272,7 +272,7 @@ impl MockTransaction { hash: B256::random(), sender: Address::random(), nonce: 0, - to: TxKind::Call(Address::random()), + to: 
Address::random().into(), gas_limit: 0, input: Bytes::new(), value: Default::default(), From bf9d9745edcdfedf226ef9e6b93ae93ca256a86e Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 30 Apr 2024 20:20:23 -0400 Subject: [PATCH 419/700] fix(op): use canyon,ecotone fork timestamps for op mainnet (#8011) --- crates/primitives/src/chain/spec.rs | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index cf45cceea62d9..d0a5b84334d6f 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -296,6 +296,8 @@ pub static OP_MAINNET: Lazy> = Lazy::new(|| { ), (Hardfork::Bedrock, ForkCondition::Block(105235063)), (Hardfork::Regolith, ForkCondition::Timestamp(0)), + (Hardfork::Canyon, ForkCondition::Timestamp(1704992401)), + (Hardfork::Ecotone, ForkCondition::Timestamp(1710374401)), ]), base_fee_params: BaseFeeParamsKind::Variable( vec![ @@ -2469,6 +2471,25 @@ Post-merge hard forks (timestamp based): ); } + #[cfg(feature = "optimism")] + #[test] + fn op_mainnet_forkids() { + test_fork_ids( + &OP_MAINNET, + &[ + ( + Head { number: 0, ..Default::default() }, + ForkId { hash: ForkHash([0xca, 0xf5, 0x17, 0xed]), next: 3950000 }, + ), + // TODO: complete these, see https://github.com/paradigmxyz/reth/issues/8012 + ( + Head { number: 105235063, timestamp: 1710374401, ..Default::default() }, + ForkId { hash: ForkHash([0x19, 0xda, 0x4c, 0x52]), next: 0 }, + ), + ], + ); + } + #[cfg(feature = "optimism")] #[test] fn base_sepolia_forkids() { From 074c5c301373cec1a0b755e213abf973293a0014 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 30 Apr 2024 22:06:37 -0400 Subject: [PATCH 420/700] feat: introduce external context GAT in ConfigureEvm (#7842) --- crates/ethereum/evm/src/lib.rs | 12 +++++++++++- crates/evm/src/lib.rs | 11 +++++++---- crates/optimism/evm/src/lib.rs | 6 
++++-- crates/revm/src/test_utils.rs | 17 +++++++++++------ examples/custom-evm/src/main.rs | 4 +++- 5 files changed, 36 insertions(+), 14 deletions(-) diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index a320a2b3c62a8..adcfd700db0d7 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -14,6 +14,7 @@ use reth_primitives::{ revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, Address, ChainSpec, Head, Header, Transaction, U256, }; +use reth_revm::{Database, EvmBuilder}; pub mod execute; /// Ethereum-related EVM configuration. @@ -55,7 +56,16 @@ impl ConfigureEvmEnv for EthEvmConfig { } } -impl ConfigureEvm for EthEvmConfig {} +impl ConfigureEvm for EthEvmConfig { + type DefaultExternalContext<'a> = (); + + fn evm<'a, DB: Database + 'a>( + &self, + db: DB, + ) -> reth_revm::Evm<'a, Self::DefaultExternalContext<'a>, DB> { + EvmBuilder::default().with_db(db).build() + } +} #[cfg(test)] mod tests { diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 9179abc3355ab..154aac2d7bb87 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -16,14 +16,15 @@ pub mod execute; /// Trait for configuring the EVM for executing full blocks. pub trait ConfigureEvm: ConfigureEvmEnv { + /// Associated type for the default external context that should be configured for the EVM. + type DefaultExternalContext<'a>; + /// Returns new EVM with the given database /// /// This does not automatically configure the EVM with [ConfigureEvmEnv] methods. It is up to /// the caller to call an appropriate method to fill the transaction and block environment /// before executing any transactions using the provided EVM. 
- fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> { - EvmBuilder::default().with_db(db).build() - } + fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, Self::DefaultExternalContext<'a>, DB>; /// Returns a new EVM with the given database configured with the given environment settings, /// including the spec id. @@ -33,7 +34,7 @@ pub trait ConfigureEvm: ConfigureEvmEnv { &self, db: DB, env: EnvWithHandlerCfg, - ) -> Evm<'a, (), DB> { + ) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { let mut evm = self.evm(db); evm.modify_spec_id(env.spec_id()); evm.context.evm.env = env.env; @@ -43,6 +44,8 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// Returns a new EVM with the given database configured with the given environment settings, /// including the spec id. /// + /// This will use the given external inspector as the EVM external context. + /// /// This will preserve any handler modifications fn evm_with_env_and_inspector<'a, DB, I>( &self, diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 6a6324302e10c..8ab6fd4269c86 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -61,7 +61,9 @@ impl ConfigureEvmEnv for OptimismEvmConfig { } impl ConfigureEvm for OptimismEvmConfig { - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> { + type DefaultExternalContext<'a> = (); + + fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { EvmBuilder::default().with_db(db).optimism().build() } @@ -83,7 +85,7 @@ impl ConfigureEvm for OptimismEvmConfig { mod tests { use super::*; use reth_primitives::revm_primitives::{BlockEnv, CfgEnv}; - use reth_revm::primitives::SpecId; + use revm_primitives::SpecId; #[test] #[ignore] diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 1937369872321..73df4ea4b4dc5 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -18,11 +18,12 @@ use 
std::collections::HashMap; #[cfg(feature = "optimism")] use { reth_primitives::revm::env::fill_op_tx_env, - revm::{ - inspector_handle_register, - primitives::{HandlerCfg, SpecId}, - Database, Evm, EvmBuilder, GetInspector, - }, + revm::{inspector_handle_register, GetInspector}, +}; + +use revm::{ + primitives::{HandlerCfg, SpecId}, + Database, Evm, EvmBuilder, }; /// Mock state for testing @@ -158,9 +159,13 @@ impl ConfigureEvmEnv for TestEvmConfig { } impl ConfigureEvm for TestEvmConfig { - #[cfg(feature = "optimism")] + type DefaultExternalContext<'a> = (); + fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> { + #[cfg(feature = "optimism")] let handler_cfg = HandlerCfg { spec_id: SpecId::LATEST, is_optimism: true }; + #[cfg(not(feature = "optimism"))] + let handler_cfg = HandlerCfg { spec_id: SpecId::LATEST }; EvmBuilder::default().with_db(db).with_handler_cfg(handler_cfg).build() } diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index e5362c8081cbf..31edf4f039528 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -81,7 +81,9 @@ impl ConfigureEvmEnv for MyEvmConfig { } impl ConfigureEvm for MyEvmConfig { - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> { + type DefaultExternalContext<'a> = (); + + fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { EvmBuilder::default() .with_db(db) // add additional precompiles From fb960fb3e45e11c24125ccb4bd93f2e2e21ce271 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5vard=20Anda=20Estensen?= Date: Wed, 1 May 2024 10:20:53 +0200 Subject: [PATCH 421/700] refactor: remove _args prefix from modules in node_core (#8015) --- .../args/{database_args.rs => database.rs} | 0 .../src/args/{debug_args.rs => debug.rs} | 0 .../src/args/{dev_args.rs => dev.rs} | 0 ...ice_oracle_args.rs => gas_price_oracle.rs} | 0 .../src/args/{log_args.rs => log.rs} | 0 crates/node-core/src/args/mod.rs | 48 
+++++++++---------- .../src/args/{network_args.rs => network.rs} | 0 ...oad_builder_args.rs => payload_builder.rs} | 0 .../src/args/{pruning_args.rs => pruning.rs} | 0 .../{rpc_server_args.rs => rpc_server.rs} | 0 ...state_cache_args.rs => rpc_state_cache.rs} | 0 .../src/args/{stage_args.rs => stage.rs} | 0 .../src/args/{txpool_args.rs => txpool.rs} | 0 13 files changed, 24 insertions(+), 24 deletions(-) rename crates/node-core/src/args/{database_args.rs => database.rs} (100%) rename crates/node-core/src/args/{debug_args.rs => debug.rs} (100%) rename crates/node-core/src/args/{dev_args.rs => dev.rs} (100%) rename crates/node-core/src/args/{gas_price_oracle_args.rs => gas_price_oracle.rs} (100%) rename crates/node-core/src/args/{log_args.rs => log.rs} (100%) rename crates/node-core/src/args/{network_args.rs => network.rs} (100%) rename crates/node-core/src/args/{payload_builder_args.rs => payload_builder.rs} (100%) rename crates/node-core/src/args/{pruning_args.rs => pruning.rs} (100%) rename crates/node-core/src/args/{rpc_server_args.rs => rpc_server.rs} (100%) rename crates/node-core/src/args/{rpc_state_cache_args.rs => rpc_state_cache.rs} (100%) rename crates/node-core/src/args/{stage_args.rs => stage.rs} (100%) rename crates/node-core/src/args/{txpool_args.rs => txpool.rs} (100%) diff --git a/crates/node-core/src/args/database_args.rs b/crates/node-core/src/args/database.rs similarity index 100% rename from crates/node-core/src/args/database_args.rs rename to crates/node-core/src/args/database.rs diff --git a/crates/node-core/src/args/debug_args.rs b/crates/node-core/src/args/debug.rs similarity index 100% rename from crates/node-core/src/args/debug_args.rs rename to crates/node-core/src/args/debug.rs diff --git a/crates/node-core/src/args/dev_args.rs b/crates/node-core/src/args/dev.rs similarity index 100% rename from crates/node-core/src/args/dev_args.rs rename to crates/node-core/src/args/dev.rs diff --git a/crates/node-core/src/args/gas_price_oracle_args.rs 
b/crates/node-core/src/args/gas_price_oracle.rs similarity index 100% rename from crates/node-core/src/args/gas_price_oracle_args.rs rename to crates/node-core/src/args/gas_price_oracle.rs diff --git a/crates/node-core/src/args/log_args.rs b/crates/node-core/src/args/log.rs similarity index 100% rename from crates/node-core/src/args/log_args.rs rename to crates/node-core/src/args/log.rs diff --git a/crates/node-core/src/args/mod.rs b/crates/node-core/src/args/mod.rs index 14b63dd743f28..bce63917b7de2 100644 --- a/crates/node-core/src/args/mod.rs +++ b/crates/node-core/src/args/mod.rs @@ -1,55 +1,55 @@ //! Parameters for configuring the rpc more granularity via CLI /// NetworkArg struct for configuring the network -mod network_args; -pub use network_args::{DiscoveryArgs, NetworkArgs}; +mod network; +pub use network::{DiscoveryArgs, NetworkArgs}; /// RpcServerArg struct for configuring the RPC -mod rpc_server_args; -pub use rpc_server_args::RpcServerArgs; +mod rpc_server; +pub use rpc_server::RpcServerArgs; /// RpcStateCacheArgs struct for configuring RPC state cache -mod rpc_state_cache_args; -pub use rpc_state_cache_args::RpcStateCacheArgs; +mod rpc_state_cache; +pub use rpc_state_cache::RpcStateCacheArgs; /// DebugArgs struct for debugging purposes -mod debug_args; -pub use debug_args::DebugArgs; +mod debug; +pub use debug::DebugArgs; /// DatabaseArgs struct for configuring the database -mod database_args; -pub use database_args::DatabaseArgs; +mod database; +pub use database::DatabaseArgs; /// LogArgs struct for configuring the logger -mod log_args; -pub use log_args::{ColorMode, LogArgs}; +mod log; +pub use log::{ColorMode, LogArgs}; mod secret_key; pub use secret_key::{get_secret_key, SecretKeyError}; /// PayloadBuilderArgs struct for configuring the payload builder -mod payload_builder_args; -pub use payload_builder_args::PayloadBuilderArgs; +mod payload_builder; +pub use payload_builder::PayloadBuilderArgs; /// Stage related arguments -mod stage_args; -pub 
use stage_args::StageEnum; +mod stage; +pub use stage::StageEnum; /// Gas price oracle related arguments -mod gas_price_oracle_args; -pub use gas_price_oracle_args::GasPriceOracleArgs; +mod gas_price_oracle; +pub use gas_price_oracle::GasPriceOracleArgs; /// TxPoolArgs for configuring the transaction pool -mod txpool_args; -pub use txpool_args::TxPoolArgs; +mod txpool; +pub use txpool::TxPoolArgs; /// DevArgs for configuring the dev testnet -mod dev_args; -pub use dev_args::DevArgs; +mod dev; +pub use dev::DevArgs; /// PruneArgs for configuring the pruning and full node -mod pruning_args; -pub use pruning_args::PruningArgs; +mod pruning; +pub use pruning::PruningArgs; pub mod utils; diff --git a/crates/node-core/src/args/network_args.rs b/crates/node-core/src/args/network.rs similarity index 100% rename from crates/node-core/src/args/network_args.rs rename to crates/node-core/src/args/network.rs diff --git a/crates/node-core/src/args/payload_builder_args.rs b/crates/node-core/src/args/payload_builder.rs similarity index 100% rename from crates/node-core/src/args/payload_builder_args.rs rename to crates/node-core/src/args/payload_builder.rs diff --git a/crates/node-core/src/args/pruning_args.rs b/crates/node-core/src/args/pruning.rs similarity index 100% rename from crates/node-core/src/args/pruning_args.rs rename to crates/node-core/src/args/pruning.rs diff --git a/crates/node-core/src/args/rpc_server_args.rs b/crates/node-core/src/args/rpc_server.rs similarity index 100% rename from crates/node-core/src/args/rpc_server_args.rs rename to crates/node-core/src/args/rpc_server.rs diff --git a/crates/node-core/src/args/rpc_state_cache_args.rs b/crates/node-core/src/args/rpc_state_cache.rs similarity index 100% rename from crates/node-core/src/args/rpc_state_cache_args.rs rename to crates/node-core/src/args/rpc_state_cache.rs diff --git a/crates/node-core/src/args/stage_args.rs b/crates/node-core/src/args/stage.rs similarity index 100% rename from 
crates/node-core/src/args/stage_args.rs rename to crates/node-core/src/args/stage.rs diff --git a/crates/node-core/src/args/txpool_args.rs b/crates/node-core/src/args/txpool.rs similarity index 100% rename from crates/node-core/src/args/txpool_args.rs rename to crates/node-core/src/args/txpool.rs From f832b66f996dbf38106b68666c79bc02cd6902ad Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 1 May 2024 13:24:46 +0200 Subject: [PATCH 422/700] chore: rm uneccessary trait bounds (#8019) --- crates/rpc/rpc/src/layers/auth_layer.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/crates/rpc/rpc/src/layers/auth_layer.rs b/crates/rpc/rpc/src/layers/auth_layer.rs index 0137fcd0c9d2a..ed22d607c1009 100644 --- a/crates/rpc/rpc/src/layers/auth_layer.rs +++ b/crates/rpc/rpc/src/layers/auth_layer.rs @@ -44,11 +44,7 @@ pub struct AuthLayer { validator: V, } -impl AuthLayer -where - V: AuthValidator, - V::ResponseBody: Body, -{ +impl AuthLayer { /// Creates an instance of [`AuthLayer`]. /// `validator` is a generic trait able to validate requests (see [`AuthValidator`]). 
pub fn new(validator: V) -> Self { From 99db2b352fc3a1279229b7fb8fc9689c2c098be8 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 1 May 2024 15:44:50 +0200 Subject: [PATCH 423/700] feat(op): nonce replay (#7781) --- bin/reth/src/cli/mod.rs | 7 +- bin/reth/src/commands/import.rs | 265 +++++++++-------- bin/reth/src/commands/import_op.rs | 274 ++++++++++++++++++ bin/reth/src/commands/mod.rs | 1 + crates/net/downloaders/src/file_client.rs | 17 ++ crates/primitives/src/stage/id.rs | 5 + crates/stages/src/stages/tx_lookup.rs | 17 +- .../src/providers/database/provider.rs | 5 + 8 files changed, 460 insertions(+), 131 deletions(-) create mode 100644 bin/reth/src/commands/import_op.rs diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 9c81b0aec17c9..e7d2789646b44 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -6,7 +6,8 @@ use crate::{ LogArgs, }, commands::{ - config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, init_state, node, node::NoArgs, + config_cmd, db, debug_cmd, dump_genesis, import, import_op, init_cmd, init_state, + node::{self, NoArgs}, p2p, recover, stage, test_vectors, }, version::{LONG_VERSION, SHORT_VERSION}, @@ -147,6 +148,7 @@ impl Cli { Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::InitState(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Import(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::ImportOp(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Stage(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), @@ -183,6 +185,9 @@ pub enum Commands { /// This syncs RLP encoded blocks from a file. 
#[command(name = "import")] Import(import::ImportCommand), + /// This syncs RLP encoded OP blocks below Bedrock from a file, without executing. + #[command(name = "import-op")] + ImportOp(import_op::ImportOpCommand), /// Dumps genesis block JSON configuration to stdout. DumpGenesis(dump_genesis::DumpGenesisCommand), /// Database debugging utilities diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 4731bf56558fa..e1851f51e0130 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -14,7 +14,7 @@ use futures::{Stream, StreamExt}; use reth_beacon_consensus::BeaconConsensus; use reth_config::{config::EtlConfig, Config}; use reth_consensus::Consensus; -use reth_db::{database::Database, init_db}; +use reth_db::{database::Database, init_db, tables, transaction::DbTx}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, @@ -30,8 +30,8 @@ use reth_node_ethereum::EthEvmConfig; use reth_node_events::node::NodeEvent; use reth_primitives::{stage::StageId, ChainSpec, PruneModes, B256}; use reth_provider::{ - BlockNumReader, HeaderProvider, HeaderSyncMode, ProviderError, ProviderFactory, - StageCheckpointReader, StaticFileProviderFactory, + BlockNumReader, ChainSpecProvider, HeaderProvider, HeaderSyncMode, ProviderError, + ProviderFactory, StageCheckpointReader, StaticFileProviderFactory, }; use reth_stages::{ prelude::*, @@ -41,7 +41,7 @@ use reth_stages::{ use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; use tokio::sync::watch; -use tracing::{debug, info}; +use tracing::{debug, error, info}; /// Stages that require state. const STATE_STAGES: &[StageId] = &[ @@ -87,11 +87,6 @@ pub struct ImportCommand { #[arg(long, verbatim_doc_comment)] no_state: bool, - /// Import OP Mainnet chain below Bedrock. Caution! 
Flag must be set as env var, since the env - /// var is read by another process too, in order to make below Bedrock import work. - #[arg(long, verbatim_doc_comment, env = "OP_RETH_MAINNET_BELOW_BEDROCK")] - op_mainnet_below_bedrock: bool, - /// Chunk byte length. #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] chunk_len: Option, @@ -109,27 +104,23 @@ pub struct ImportCommand { impl ImportCommand { /// Execute `import` command - pub async fn execute(mut self) -> eyre::Result<()> { + pub async fn execute(self) -> eyre::Result<()> { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); - if self.op_mainnet_below_bedrock { - self.no_state = true; - debug!(target: "reth::cli", "Importing OP mainnet below bedrock"); - } - if self.no_state { - debug!(target: "reth::cli", "Stages requiring state disabled"); + info!(target: "reth::cli", "Disabled stages requiring state"); } debug!(target: "reth::cli", - chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), "Chunking chain import" + chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), + "Chunking chain import" ); // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path()); - let mut config: Config = self.load_config(config_path.clone())?; + let mut config: Config = load_config(config_path.clone())?; info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to @@ -155,6 +146,9 @@ impl ImportCommand { // open file let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?; + let mut total_decoded_blocks = 0; + let mut total_decoded_txns = 0; + while let Some(file_client) = reader.next_chunk().await? 
{ // create a new FileClient from chunk read from file info!(target: "reth::cli", @@ -164,20 +158,22 @@ impl ImportCommand { let tip = file_client.tip().ok_or(eyre::eyre!("file client has no tip"))?; info!(target: "reth::cli", "Chain file chunk read"); - let (mut pipeline, events) = self - .build_import_pipeline( - &config, + total_decoded_blocks += file_client.headers_len(); + total_decoded_txns += file_client.total_transactions(); + + let (mut pipeline, events) = build_import_pipeline( + &config, + provider_factory.clone(), + &consensus, + Arc::new(file_client), + StaticFileProducer::new( provider_factory.clone(), - &consensus, - Arc::new(file_client), - StaticFileProducer::new( - provider_factory.clone(), - provider_factory.static_file_provider(), - PruneModes::default(), - ), - self.no_state, - ) - .await?; + provider_factory.static_file_provider(), + PruneModes::default(), + ), + true, + ) + .await?; // override the tip pipeline.set_tip(tip); @@ -202,104 +198,129 @@ impl ImportCommand { } } - info!(target: "reth::cli", "Chain file imported"); - Ok(()) - } + let provider = provider_factory.provider()?; - async fn build_import_pipeline( - &self, - config: &Config, - provider_factory: ProviderFactory, - consensus: &Arc, - file_client: Arc, - static_file_producer: StaticFileProducer, - no_state: bool, - ) -> eyre::Result<(Pipeline, impl Stream)> - where - DB: Database + Clone + Unpin + 'static, - C: Consensus + 'static, - { - if !file_client.has_canonical_blocks() { - eyre::bail!("unable to import non canonical blocks"); - } + let total_imported_blocks = provider.tx_ref().entries::()?; + let total_imported_txns = provider.tx_ref().entries::()?; - // Retrieve latest header found in the database. - let last_block_number = provider_factory.last_block_number()?; - let local_head = provider_factory - .sealed_header(last_block_number)? 
- .ok_or(ProviderError::HeaderNotFound(last_block_number.into()))?; - - let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) - .build(file_client.clone(), consensus.clone()) - .into_task(); - // TODO: The pipeline should correctly configure the downloader on its own. - // Find the possibility to remove unnecessary pre-configuration. - header_downloader.update_local_head(local_head); - header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap())); - - let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) - .build(file_client.clone(), consensus.clone(), provider_factory.clone()) - .into_task(); - // TODO: The pipeline should correctly configure the downloader on its own. - // Find the possibility to remove unnecessary pre-configuration. - body_downloader - .set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap()) - .expect("failed to set download range"); - - let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let factory = - reth_revm::EvmProcessorFactory::new(self.chain.clone(), EthEvmConfig::default()); - - let max_block = file_client.max_block().unwrap_or(0); - - let mut pipeline = Pipeline::builder() - .with_tip_sender(tip_tx) - // we want to sync all blocks the file client provides or 0 if empty - .with_max_block(max_block) - .add_stages( - DefaultStages::new( - provider_factory.clone(), - HeaderSyncMode::Tip(tip_rx), - consensus.clone(), - header_downloader, - body_downloader, - factory.clone(), - config.stages.etl.clone(), - ) - .set(SenderRecoveryStage { - commit_threshold: config.stages.sender_recovery.commit_threshold, - }) - .set(ExecutionStage::new( - factory, - ExecutionStageThresholds { - max_blocks: config.stages.execution.max_blocks, - max_changes: config.stages.execution.max_changes, - max_cumulative_gas: config.stages.execution.max_cumulative_gas, - max_duration: config.stages.execution.max_duration, - }, - config - .stages - .merkle - 
.clean_threshold - .max(config.stages.account_hashing.clean_threshold) - .max(config.stages.storage_hashing.clean_threshold), - config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(), - ExExManagerHandle::empty(), - )) - .disable_all_if(STATE_STAGES, || no_state), - ) - .build(provider_factory, static_file_producer); + if total_decoded_blocks != total_imported_blocks || + total_decoded_txns != total_imported_txns + { + error!(target: "reth::cli", + total_decoded_blocks, + total_imported_blocks, + total_decoded_txns, + total_imported_txns, + "Chain was partially imported" + ); + } - let events = pipeline.events().map(Into::into); + info!(target: "reth::cli", + total_imported_blocks, + total_imported_txns, + "Chain file imported" + ); - Ok((pipeline, events)) + Ok(()) } +} - /// Loads the reth config - fn load_config(&self, config_path: PathBuf) -> eyre::Result { - confy::load_path::(config_path.clone()) - .wrap_err_with(|| format!("Could not load config file {config_path:?}")) +/// Builds import pipeline. +/// +/// If configured to execute, all stages will run. Otherwise, only stages that don't require state +/// will run. +pub async fn build_import_pipeline( + config: &Config, + provider_factory: ProviderFactory, + consensus: &Arc, + file_client: Arc, + static_file_producer: StaticFileProducer, + should_exec: bool, +) -> eyre::Result<(Pipeline, impl Stream)> +where + DB: Database + Clone + Unpin + 'static, + C: Consensus + 'static, +{ + if !file_client.has_canonical_blocks() { + eyre::bail!("unable to import non canonical blocks"); } + + // Retrieve latest header found in the database. + let last_block_number = provider_factory.last_block_number()?; + let local_head = provider_factory + .sealed_header(last_block_number)? 
+ .ok_or(ProviderError::HeaderNotFound(last_block_number.into()))?; + + let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) + .build(file_client.clone(), consensus.clone()) + .into_task(); + // TODO: The pipeline should correctly configure the downloader on its own. + // Find the possibility to remove unnecessary pre-configuration. + header_downloader.update_local_head(local_head); + header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap())); + + let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) + .build(file_client.clone(), consensus.clone(), provider_factory.clone()) + .into_task(); + // TODO: The pipeline should correctly configure the downloader on its own. + // Find the possibility to remove unnecessary pre-configuration. + body_downloader + .set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap()) + .expect("failed to set download range"); + + let (tip_tx, tip_rx) = watch::channel(B256::ZERO); + let factory = + reth_revm::EvmProcessorFactory::new(provider_factory.chain_spec(), EthEvmConfig::default()); + + let max_block = file_client.max_block().unwrap_or(0); + + let mut pipeline = Pipeline::builder() + .with_tip_sender(tip_tx) + // we want to sync all blocks the file client provides or 0 if empty + .with_max_block(max_block) + .add_stages( + DefaultStages::new( + provider_factory.clone(), + HeaderSyncMode::Tip(tip_rx), + consensus.clone(), + header_downloader, + body_downloader, + factory.clone(), + config.stages.etl.clone(), + ) + .set(SenderRecoveryStage { + commit_threshold: config.stages.sender_recovery.commit_threshold, + }) + .set(ExecutionStage::new( + factory, + ExecutionStageThresholds { + max_blocks: config.stages.execution.max_blocks, + max_changes: config.stages.execution.max_changes, + max_cumulative_gas: config.stages.execution.max_cumulative_gas, + max_duration: config.stages.execution.max_duration, + }, + config + .stages + 
.merkle + .clean_threshold + .max(config.stages.account_hashing.clean_threshold) + .max(config.stages.storage_hashing.clean_threshold), + config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(), + ExExManagerHandle::empty(), + )) + .disable_all_if(STATE_STAGES, || should_exec), + ) + .build(provider_factory, static_file_producer); + + let events = pipeline.events().map(Into::into); + + Ok((pipeline, events)) +} + +/// Loads the reth config +pub fn load_config(config_path: PathBuf) -> eyre::Result { + confy::load_path::(config_path.clone()) + .wrap_err_with(|| format!("Could not load config file {config_path:?}")) } #[cfg(test)] diff --git a/bin/reth/src/commands/import_op.rs b/bin/reth/src/commands/import_op.rs new file mode 100644 index 0000000000000..02620f47f570d --- /dev/null +++ b/bin/reth/src/commands/import_op.rs @@ -0,0 +1,274 @@ +//! Command that initializes the node by importing a chain from a file. + +use crate::{ + args::{ + utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, + DatabaseArgs, + }, + commands::import::{build_import_pipeline, load_config}, + dirs::{DataDirPath, MaybePlatformPath}, + version::SHORT_VERSION, +}; +use clap::Parser; +use reth_beacon_consensus::BeaconConsensus; +use reth_config::{config::EtlConfig, Config}; + +use reth_db::{init_db, tables, transaction::DbTx}; +use reth_downloaders::file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}; + +use reth_node_core::init::init_genesis; + +use reth_primitives::{hex, stage::StageId, ChainSpec, PruneModes, TxHash}; +use reth_provider::{ProviderFactory, StageCheckpointReader, StaticFileProviderFactory}; +use reth_static_file::StaticFileProducer; +use std::{path::PathBuf, sync::Arc}; + +use tracing::{debug, error, info}; + +/// Syncs RLP encoded blocks from a file. +#[derive(Debug, Parser)] +pub struct ImportOpCommand { + /// The path to the configuration file to use. 
+ #[arg(long, value_name = "FILE", verbatim_doc_comment)] + config: Option, + + /// The path to the data dir for all reth files and subdirectories. + /// + /// Defaults to the OS-specific data directory: + /// + /// - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + /// - Windows: `{FOLDERID_RoamingAppData}/reth/` + /// - macOS: `$HOME/Library/Application Support/reth/` + #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)] + datadir: MaybePlatformPath, + + /// The chain this node is running. + /// + /// Possible values are either a built-in chain or the path to a chain specification file. + #[arg( + long, + value_name = "CHAIN_OR_PATH", + long_help = chain_help(), + default_value = SUPPORTED_CHAINS[0], + value_parser = genesis_value_parser + )] + chain: Arc, + + /// Chunk byte length. + #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] + chunk_len: Option, + + #[command(flatten)] + db: DatabaseArgs, + + /// The path to a block file for import. + /// + /// The online stages (headers and bodies) are replaced by a file import, after which the + /// remaining stages are executed. 
+ #[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)] + path: PathBuf, +} + +impl ImportOpCommand { + /// Execute `import` command + pub async fn execute(self) -> eyre::Result<()> { + info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); + + info!(target: "reth::cli", + "Disabled stages requiring state, since cannot execute OVM state changes" + ); + + debug!(target: "reth::cli", + chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), + "Chunking chain import" + ); + + // add network name to data dir + let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); + let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path()); + + let mut config: Config = load_config(config_path.clone())?; + info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); + + // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to + if config.stages.etl.dir.is_none() { + config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); + } + + let db_path = data_dir.db_path(); + + info!(target: "reth::cli", path = ?db_path, "Opening database"); + let db = Arc::new(init_db(db_path, self.db.database_args())?); + info!(target: "reth::cli", "Database opened"); + let provider_factory = + ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?; + + debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); + + init_genesis(provider_factory.clone())?; + + let consensus = Arc::new(BeaconConsensus::new(self.chain.clone())); + info!(target: "reth::cli", "Consensus engine initialized"); + + // open file + let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?; + + let mut total_decoded_blocks = 0; + let mut total_decoded_txns = 0; + let mut total_filtered_out_dup_txns = 0; + + while let Some(mut file_client) = reader.next_chunk().await? 
{ + // create a new FileClient from chunk read from file + info!(target: "reth::cli", + "Importing chain file chunk" + ); + + let tip = file_client.tip().ok_or(eyre::eyre!("file client has no tip"))?; + info!(target: "reth::cli", "Chain file chunk read"); + + total_decoded_blocks += file_client.headers_len(); + total_decoded_txns += file_client.bodies_len(); + + for (block_number, body) in file_client.bodies_iter_mut() { + body.transactions.retain(|tx| { + if is_duplicate(tx.hash, *block_number) { + total_filtered_out_dup_txns += 1; + return false + } + true + }) + } + + let (mut pipeline, events) = build_import_pipeline( + &config, + provider_factory.clone(), + &consensus, + Arc::new(file_client), + StaticFileProducer::new( + provider_factory.clone(), + provider_factory.static_file_provider(), + PruneModes::default(), + ), + false, + ) + .await?; + + // override the tip + pipeline.set_tip(tip); + debug!(target: "reth::cli", ?tip, "Tip manually set"); + + let provider = provider_factory.provider()?; + + let latest_block_number = + provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); + tokio::spawn(reth_node_events::node::handle_events( + None, + latest_block_number, + events, + db.clone(), + )); + + // Run pipeline + info!(target: "reth::cli", "Starting sync pipeline"); + tokio::select! 
{ + res = pipeline.run() => res?, + _ = tokio::signal::ctrl_c() => {}, + } + } + + let provider = provider_factory.provider()?; + + let total_imported_blocks = provider.tx_ref().entries::()?; + let total_imported_txns = provider.tx_ref().entries::()?; + + if total_decoded_blocks != total_imported_blocks || + total_decoded_txns != total_imported_txns + { + error!(target: "reth::cli", + total_decoded_blocks, + total_imported_blocks, + total_decoded_txns, + total_imported_txns, + "Chain was partially imported" + ); + } + + info!(target: "reth::cli", + total_imported_blocks, + total_imported_txns, + "Chain file imported" + ); + + Ok(()) + } +} + +/// A transaction that has been replayed in chain below Bedrock. +#[derive(Debug)] +pub struct ReplayedTx { + tx_hash: TxHash, + original_block: u64, +} + +impl ReplayedTx { + /// Returns a new instance. + pub const fn new(tx_hash: TxHash, original_block: u64) -> Self { + Self { tx_hash, original_block } + } +} + +/// Transaction 0x9ed8..9cb9, first seen in block 985. +pub const TX_BLOCK_985: ReplayedTx = ReplayedTx::new( + TxHash::new(hex!("9ed8f713b2cc6439657db52dcd2fdb9cc944915428f3c6e2a7703e242b259cb9")), + 985, +); + +/// Transaction 0xc033..6cb6, first seen in block 123 322. +pub const TX_BLOCK_123_322: ReplayedTx = ReplayedTx::new( + TxHash::new(hex!("c033250c5a45f9d104fc28640071a776d146d48403cf5e95ed0015c712e26cb6")), + 123_322, +); + +/// Transaction 0x86f8..76e5, first seen in block 1 133 328. +pub const TX_BLOCK_1_133_328: ReplayedTx = ReplayedTx::new( + TxHash::new(hex!("86f8c77cfa2b439e9b4e92a10f6c17b99fce1220edf4001e4158b57f41c576e5")), + 1_133_328, +); + +/// Transaction 0x3cc2..cd4e, first seen in block 1 244 152. +pub const TX_BLOCK_1_244_152: ReplayedTx = ReplayedTx::new( + TxHash::new(hex!("3cc27e7cc8b7a9380b2b2f6c224ea5ef06ade62a6af564a9dd0bcca92131cd4e")), + 1_244_152, +); + +/// List of original occurrences of all duplicate transactions below Bedrock. 
+pub const TX_DUP_ORIGINALS: [ReplayedTx; 4] = + [TX_BLOCK_985, TX_BLOCK_123_322, TX_BLOCK_1_133_328, TX_BLOCK_1_244_152]; + +/// Returns `true` if transaction is the second or third appearance of the transaction. +pub fn is_duplicate(tx_hash: TxHash, block_number: u64) -> bool { + for ReplayedTx { tx_hash: dup_tx_hash, original_block } in TX_DUP_ORIGINALS { + if tx_hash == dup_tx_hash && block_number != original_block { + return true + } + } + false +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_common_import_command_chain_args() { + for chain in SUPPORTED_CHAINS { + let args: ImportOpCommand = + ImportOpCommand::parse_from(["reth", "--chain", chain, "."]); + assert_eq!( + Ok(args.chain.chain), + chain.parse::(), + "failed to parse chain {chain}" + ); + } + } +} diff --git a/bin/reth/src/commands/mod.rs b/bin/reth/src/commands/mod.rs index 03d5a8287ed00..a005d5e8b9425 100644 --- a/bin/reth/src/commands/mod.rs +++ b/bin/reth/src/commands/mod.rs @@ -5,6 +5,7 @@ pub mod db; pub mod debug_cmd; pub mod dump_genesis; pub mod import; +pub mod import_op; pub mod init_cmd; pub mod init_state; diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 362ed3c402483..ee783a1a4f164 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -222,6 +222,23 @@ impl FileClient { pub fn bodies_len(&self) -> usize { self.bodies.len() } + + /// Returns an iterator over headers in the client. + pub fn headers_iter(&mut self) -> impl Iterator { + self.headers.values() + } + + /// Returns a mutable iterator over bodies in the client. + pub fn bodies_iter_mut(&mut self) -> impl Iterator { + let bodies = &mut self.bodies; + let headers = &self.headers; + headers.keys().zip(bodies.values_mut()) + } + + /// Returns the current number of transactions in the client. 
+ pub fn total_transactions(&self) -> usize { + self.bodies.iter().flat_map(|(_, body)| &body.transactions).count() + } } impl HeadersClient for FileClient { diff --git a/crates/primitives/src/stage/id.rs b/crates/primitives/src/stage/id.rs index 2f5de34eec20a..d4926fea15b5b 100644 --- a/crates/primitives/src/stage/id.rs +++ b/crates/primitives/src/stage/id.rs @@ -79,6 +79,11 @@ impl StageId { matches!(self, StageId::Headers | StageId::Bodies) } + /// Returns `true` if it's [TransactionLookup](StageId::TransactionLookup) stage. + pub fn is_tx_lookup(&self) -> bool { + matches!(self, StageId::TransactionLookup) + } + /// Returns true indicating if it's the finish stage [StageId::Finish] pub fn is_finish(&self) -> bool { matches!(self, StageId::Finish) diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index 101c52258a8e8..342183905ba18 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -153,18 +153,19 @@ impl Stage for TransactionLookupStage { ); } + let key = RawKey::::from_vec(hash); if append_only { - txhash_cursor.append( - RawKey::::from_vec(hash), - RawValue::::from_vec(number), - )?; + txhash_cursor.append(key, RawValue::::from_vec(number))? } else { - txhash_cursor.insert( - RawKey::::from_vec(hash), - RawValue::::from_vec(number), - )?; + txhash_cursor.insert(key, RawValue::::from_vec(number))? } } + + trace!(target: "sync::stages::transaction_lookup", + total_hashes, + "Transaction hashes inserted" + ); + break } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index c96a059379f47..2cae000ce8720 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -354,6 +354,11 @@ impl DatabaseProvider { |_| true, ) } + + /// Returns a reference to the [`ChainSpec`]. 
+ pub fn chain_spec(&self) -> &ChainSpec { + &self.chain_spec + } } impl DatabaseProvider { From 99924e4244cc829b00866d73f398f720daee78b5 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 1 May 2024 15:59:46 +0200 Subject: [PATCH 424/700] perf(build): make maxperf-op (#7967) --- Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Makefile b/Makefile index fd5a252f17ca0..5ac3bb4682759 100644 --- a/Makefile +++ b/Makefile @@ -263,6 +263,10 @@ update-book-cli: ## Update book cli documentation. maxperf: ## Builds `reth` with the most aggressive optimisations. RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf --features jemalloc,asm-keccak +.PHONY: maxperf-op +maxperf-op: ## Builds `op-reth` with the most aggressive optimisations. + RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf --features jemalloc,asm-keccak,optimism --bin op-reth + .PHONY: maxperf-no-asm maxperf-no-asm: ## Builds `reth` with the most aggressive optimisations, minus the "asm-keccak" feature. 
RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf --features jemalloc From c1f5b45bbd07ef21f97f23be65b84f18bf2e0647 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 1 May 2024 16:46:50 +0200 Subject: [PATCH 425/700] fix(grafana): tx pool dashboard units (#8020) --- etc/grafana/dashboards/reth-mempool.json | 46 +++++++++--------------- 1 file changed, 17 insertions(+), 29 deletions(-) diff --git a/etc/grafana/dashboards/reth-mempool.json b/etc/grafana/dashboards/reth-mempool.json index 07212ac3bdbfa..3ba499a9a4fb0 100644 --- a/etc/grafana/dashboards/reth-mempool.json +++ b/etc/grafana/dashboards/reth-mempool.json @@ -573,7 +573,7 @@ } ] }, - "unit": "decbytes", + "unit": "bytes", "unitScale": true }, "overrides": [] @@ -726,7 +726,7 @@ } ] }, - "unit": "decbytes", + "unit": "bytes", "unitScale": true }, "overrides": [] @@ -858,7 +858,7 @@ } ] }, - "unit": "decbytes", + "unit": "bytes", "unitScale": true }, "overrides": [] @@ -1638,8 +1638,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1773,8 +1772,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1904,8 +1902,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2024,8 +2021,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2144,8 +2140,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2367,8 +2362,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2487,8 +2481,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2624,8 +2617,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": 
"red", @@ -2793,8 +2785,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2889,8 +2880,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3032,8 +3022,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3340,8 +3329,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3425,8 +3413,8 @@ }, "timepicker": {}, "timezone": "", - "title": "reth - mempool", + "title": "Reth - Transaction Pool", "uid": "bee34f59-c79c-4669-a000-198057b3703d", - "version": 1, + "version": 3, "weekStart": "" } \ No newline at end of file From f157ec83b6ead4dc7bebf41a080193dca2ccd8fd Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Wed, 1 May 2024 16:59:42 +0200 Subject: [PATCH 426/700] chore: remove redundant suffix in ChainPath methods (#8025) --- bin/reth/src/commands/db/mod.rs | 4 +- bin/reth/src/commands/db/static_files/mod.rs | 5 +-- bin/reth/src/commands/db/stats.rs | 4 +- .../src/commands/debug_cmd/build_block.rs | 6 +-- bin/reth/src/commands/debug_cmd/execution.rs | 12 +++--- .../commands/debug_cmd/in_memory_merkle.rs | 10 ++--- bin/reth/src/commands/debug_cmd/merkle.rs | 10 ++--- .../src/commands/debug_cmd/replay_engine.rs | 10 ++--- bin/reth/src/commands/import.rs | 8 ++-- bin/reth/src/commands/import_op.rs | 8 ++-- bin/reth/src/commands/init_cmd.rs | 4 +- bin/reth/src/commands/init_state.rs | 4 +- bin/reth/src/commands/node/mod.rs | 10 ++--- bin/reth/src/commands/p2p/mod.rs | 6 +-- .../src/commands/recover/storage_tries.rs | 4 +- bin/reth/src/commands/stage/drop.rs | 4 +- bin/reth/src/commands/stage/dump/execution.rs | 8 +--- .../commands/stage/dump/hashing_account.rs | 8 +--- .../commands/stage/dump/hashing_storage.rs | 8 +--- bin/reth/src/commands/stage/dump/merkle.rs | 8 
+--- bin/reth/src/commands/stage/dump/mod.rs | 4 +- bin/reth/src/commands/stage/run.rs | 21 ++++------ bin/reth/src/commands/stage/unwind.rs | 10 ++--- crates/node-core/src/dirs.rs | 40 ++++++++++--------- crates/node-core/src/node_config.rs | 4 +- crates/node-ethereum/src/node.rs | 4 +- crates/node/builder/src/builder/mod.rs | 2 +- crates/node/builder/src/launch/common.rs | 8 ++-- crates/optimism/node/src/node.rs | 4 +- examples/custom-node-components/src/main.rs | 2 +- 30 files changed, 110 insertions(+), 130 deletions(-) diff --git a/bin/reth/src/commands/db/mod.rs b/bin/reth/src/commands/db/mod.rs index f28f8375f0c02..aeaf1d7e883cf 100644 --- a/bin/reth/src/commands/db/mod.rs +++ b/bin/reth/src/commands/db/mod.rs @@ -108,9 +108,9 @@ impl Command { pub async fn execute(self) -> eyre::Result<()> { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); let db_args = self.db.database_args(); - let static_files_path = data_dir.static_files_path(); + let static_files_path = data_dir.static_files(); match self.command { // TODO: We'll need to add this on the DB trait. 
diff --git a/bin/reth/src/commands/db/static_files/mod.rs b/bin/reth/src/commands/db/static_files/mod.rs index 9391db76c6fc3..8f5930e108355 100644 --- a/bin/reth/src/commands/db/static_files/mod.rs +++ b/bin/reth/src/commands/db/static_files/mod.rs @@ -96,11 +96,10 @@ impl Command { }); let db = open_db_read_only( - data_dir.db_path().as_path(), + data_dir.db().as_path(), db_args.with_max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)), )?; - let provider_factory = - Arc::new(ProviderFactory::new(db, chain, data_dir.static_files_path())?); + let provider_factory = Arc::new(ProviderFactory::new(db, chain, data_dir.static_files())?); { if !self.only_bench { diff --git a/bin/reth/src/commands/db/stats.rs b/bin/reth/src/commands/db/stats.rs index 5ffc136dd16ee..474603c746468 100644 --- a/bin/reth/src/commands/db/stats.rs +++ b/bin/reth/src/commands/db/stats.rs @@ -174,8 +174,8 @@ impl Command { ]); } - let static_files = iter_static_files(data_dir.static_files_path())?; - let static_file_provider = StaticFileProvider::new(data_dir.static_files_path())?; + let static_files = iter_static_files(data_dir.static_files())?; + let static_file_provider = StaticFileProvider::new(data_dir.static_files())?; let mut total_data_size = 0; let mut total_index_size = 0; diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index aee51ee793da6..9d5942ae152de 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -114,7 +114,7 @@ impl Command { let factory = ProviderFactory::new( db, self.chain.clone(), - self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(), + self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(), )?; let provider = factory.provider()?; @@ -148,7 +148,7 @@ impl Command { pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { // add network name to data dir let data_dir = 
self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); fs::create_dir_all(&db_path)?; // initialize the database @@ -156,7 +156,7 @@ impl Command { let provider_factory = ProviderFactory::new( Arc::clone(&db), Arc::clone(&self.chain), - data_dir.static_files_path(), + data_dir.static_files(), )?; let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index df6b4d1119570..a83ea19fde267 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -173,7 +173,7 @@ impl Command { .build(ProviderFactory::new( db, self.chain.clone(), - self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(), + self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(), )?) .start_network() .await?; @@ -206,17 +206,17 @@ impl Command { let mut config = Config::default(); let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to if config.stages.etl.dir.is_none() { - config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); + config.stages.etl.dir = Some(EtlConfig::from_datadir(data_dir.data_dir())); } fs::create_dir_all(&db_path)?; let db = Arc::new(init_db(db_path, self.db.database_args())?); let provider_factory = - ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); init_genesis(provider_factory.clone())?; @@ -225,14 +225,14 @@ impl Command { // Configure and build network let network_secret_path = - 
self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); let network = self .build_network( &config, ctx.task_executor.clone(), db.clone(), network_secret_path, - data_dir.known_peers_path(), + data_dir.known_peers(), ) .await?; diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index f13b503f1f768..e68231a768782 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -94,7 +94,7 @@ impl Command { .build(ProviderFactory::new( db, self.chain.clone(), - self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(), + self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(), )?) .start_network() .await?; @@ -109,12 +109,12 @@ impl Command { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); fs::create_dir_all(&db_path)?; // initialize the database let db = Arc::new(init_db(db_path, self.db.database_args())?); - let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?; + let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files())?; let provider = factory.provider()?; // Look up merkle checkpoint @@ -126,14 +126,14 @@ impl Command { // Configure and build network let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); let network = self .build_network( &config, ctx.task_executor.clone(), db.clone(), network_secret_path, - data_dir.known_peers_path(), + data_dir.known_peers(), ) .await?; diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 
d806306d4160a..07075ff26768a 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -104,7 +104,7 @@ impl Command { .build(ProviderFactory::new( db, self.chain.clone(), - self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(), + self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(), )?) .start_network() .await?; @@ -119,24 +119,24 @@ impl Command { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); fs::create_dir_all(&db_path)?; // initialize the database let db = Arc::new(init_db(db_path, self.db.database_args())?); - let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?; + let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files())?; let provider_rw = factory.provider_rw()?; // Configure and build network let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); let network = self .build_network( &config, ctx.task_executor.clone(), db.clone(), network_secret_path, - data_dir.known_peers_path(), + data_dir.known_peers(), ) .await?; diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index f59af621868d9..1360c2f1ba11b 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -101,7 +101,7 @@ impl Command { .build(ProviderFactory::new( db, self.chain.clone(), - self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(), + self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(), )?) 
.start_network() .await?; @@ -116,13 +116,13 @@ impl Command { // Add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); fs::create_dir_all(&db_path)?; // Initialize the database let db = Arc::new(init_db(db_path, self.db.database_args())?); let provider_factory = - ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); @@ -146,14 +146,14 @@ impl Command { // Set up network let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); let network = self .build_network( &config, ctx.task_executor.clone(), db.clone(), network_secret_path, - data_dir.known_peers_path(), + data_dir.known_peers(), ) .await?; diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index e1851f51e0130..3c191d8bbe79d 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -118,23 +118,23 @@ impl ImportCommand { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path()); + let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); let mut config: Config = load_config(config_path.clone())?; info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to if config.stages.etl.dir.is_none() { - config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); + config.stages.etl.dir = Some(EtlConfig::from_datadir(data_dir.data_dir())); } - let db_path = data_dir.db_path(); + let 
db_path = data_dir.db(); info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(db_path, self.db.database_args())?); info!(target: "reth::cli", "Database opened"); let provider_factory = - ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); diff --git a/bin/reth/src/commands/import_op.rs b/bin/reth/src/commands/import_op.rs index 02620f47f570d..1c5a74015d5a0 100644 --- a/bin/reth/src/commands/import_op.rs +++ b/bin/reth/src/commands/import_op.rs @@ -85,23 +85,23 @@ impl ImportOpCommand { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path()); + let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); let mut config: Config = load_config(config_path.clone())?; info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to if config.stages.etl.dir.is_none() { - config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); + config.stages.etl.dir = Some(EtlConfig::from_datadir(data_dir.data_dir())); } - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(db_path, self.db.database_args())?); info!(target: "reth::cli", "Database opened"); let provider_factory = - ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); diff --git 
a/bin/reth/src/commands/init_cmd.rs b/bin/reth/src/commands/init_cmd.rs index 7a2988ebd5efb..bdd8acb52d1f2 100644 --- a/bin/reth/src/commands/init_cmd.rs +++ b/bin/reth/src/commands/init_cmd.rs @@ -51,12 +51,12 @@ impl InitCommand { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(&db_path, self.db.database_args())?); info!(target: "reth::cli", "Database opened"); - let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files_path())?; + let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files())?; info!(target: "reth::cli", "Writing genesis block"); diff --git a/bin/reth/src/commands/init_state.rs b/bin/reth/src/commands/init_state.rs index c05f064b31c5e..fa70264e55afb 100644 --- a/bin/reth/src/commands/init_state.rs +++ b/bin/reth/src/commands/init_state.rs @@ -72,12 +72,12 @@ impl InitStateCommand { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(&db_path, self.db.database_args())?); info!(target: "reth::cli", "Database opened"); - let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files_path())?; + let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files())?; info!(target: "reth::cli", "Writing genesis block"); diff --git a/bin/reth/src/commands/node/mod.rs b/bin/reth/src/commands/node/mod.rs index 5f95c534d2a7d..9f2a4d67af6a7 100644 --- a/bin/reth/src/commands/node/mod.rs +++ b/bin/reth/src/commands/node/mod.rs @@ -180,7 +180,7 @@ impl NodeCommand { let _ = node_config.install_prometheus_recorder()?; let data_dir = 
datadir.unwrap_or_chain_default(node_config.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); tracing::info!(target: "reth::cli", path = ?db_path, "Opening database"); let database = Arc::new(init_db(db_path.clone(), self.db.database_args())?.with_metrics()); @@ -280,14 +280,14 @@ mod tests { NodeCommand::try_parse_args_from(["reth", "--config", "my/path/to/reth.toml"]).unwrap(); // always store reth.toml in the data dir, not the chain specific data dir let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); - let config_path = cmd.config.unwrap_or_else(|| data_dir.config_path()); + let config_path = cmd.config.unwrap_or_else(|| data_dir.config()); assert_eq!(config_path, Path::new("my/path/to/reth.toml")); let cmd = NodeCommand::try_parse_args_from(["reth"]).unwrap(); // always store reth.toml in the data dir, not the chain specific data dir let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); - let config_path = cmd.config.clone().unwrap_or_else(|| data_dir.config_path()); + let config_path = cmd.config.clone().unwrap_or_else(|| data_dir.config()); let end = format!("reth/{}/reth.toml", SUPPORTED_CHAINS[0]); assert!(config_path.ends_with(end), "{:?}", cmd.config); } @@ -296,14 +296,14 @@ mod tests { fn parse_db_path() { let cmd = NodeCommand::try_parse_args_from(["reth"]).unwrap(); let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); let end = format!("reth/{}/db", SUPPORTED_CHAINS[0]); assert!(db_path.ends_with(end), "{:?}", cmd.config); let cmd = NodeCommand::try_parse_args_from(["reth", "--datadir", "my/custom/path"]).unwrap(); let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); assert_eq!(db_path, Path::new("my/custom/path/db")); } diff --git a/bin/reth/src/commands/p2p/mod.rs b/bin/reth/src/commands/p2p/mod.rs index 
b67881e64ec65..35d111e57d2fe 100644 --- a/bin/reth/src/commands/p2p/mod.rs +++ b/bin/reth/src/commands/p2p/mod.rs @@ -105,7 +105,7 @@ impl Command { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path()); + let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); let mut config: Config = confy::load_path(&config_path).unwrap_or_default(); @@ -119,7 +119,7 @@ impl Command { config.peers.trusted_nodes_only = self.trusted_only; - let default_secret_key_path = data_dir.p2p_secret_path(); + let default_secret_key_path = data_dir.p2p_secret(); let secret_key_path = self.p2p_secret_key.clone().unwrap_or(default_secret_key_path); let p2p_secret_key = get_secret_key(&secret_key_path)?; @@ -133,7 +133,7 @@ impl Command { let mut network_config = network_config_builder.build(Arc::new(ProviderFactory::new( noop_db, self.chain.clone(), - data_dir.static_files_path(), + data_dir.static_files(), )?)); if self.discovery.enable_discv5_discovery { diff --git a/bin/reth/src/commands/recover/storage_tries.rs b/bin/reth/src/commands/recover/storage_tries.rs index 7a1c2ccc2747c..025a170a035c1 100644 --- a/bin/reth/src/commands/recover/storage_tries.rs +++ b/bin/reth/src/commands/recover/storage_tries.rs @@ -50,11 +50,11 @@ impl Command { /// Execute `storage-tries` recovery command pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); fs::create_dir_all(&db_path)?; let db = Arc::new(init_db(db_path, self.db.database_args())?); - let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?; + let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files())?; debug!(target: "reth::cli", chain=%self.chain.chain, 
genesis=?self.chain.genesis_hash(), "Initializing genesis"); init_genesis(factory.clone())?; diff --git a/bin/reth/src/commands/stage/drop.rs b/bin/reth/src/commands/stage/drop.rs index 5c14406027e29..625a3f36b8078 100644 --- a/bin/reth/src/commands/stage/drop.rs +++ b/bin/reth/src/commands/stage/drop.rs @@ -54,12 +54,12 @@ impl Command { pub async fn execute(self) -> eyre::Result<()> { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); fs::create_dir_all(&db_path)?; let db = open_db(db_path.as_ref(), self.db.database_args())?; let provider_factory = - ProviderFactory::new(db, self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db, self.chain.clone(), data_dir.static_files())?; let static_file_provider = provider_factory.static_file_provider(); let tool = DbTool::new(provider_factory, self.chain.clone())?; diff --git a/bin/reth/src/commands/stage/dump/execution.rs b/bin/reth/src/commands/stage/dump/execution.rs index 7d2d8f0bab5f3..571ce486a678c 100644 --- a/bin/reth/src/commands/stage/dump/execution.rs +++ b/bin/reth/src/commands/stage/dump/execution.rs @@ -20,7 +20,7 @@ pub(crate) async fn dump_execution_stage( output_datadir: ChainPath, should_run: bool, ) -> Result<()> { - let (output_db, tip_block_number) = setup(from, to, &output_datadir.db_path(), db_tool)?; + let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; import_tables_with_range(&output_db, db_tool, from, to)?; @@ -28,11 +28,7 @@ pub(crate) async fn dump_execution_stage( if should_run { dry_run( - ProviderFactory::new( - output_db, - db_tool.chain.clone(), - output_datadir.static_files_path(), - )?, + ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?, to, from, ) diff --git a/bin/reth/src/commands/stage/dump/hashing_account.rs b/bin/reth/src/commands/stage/dump/hashing_account.rs index 
35bbfa4d74c94..2f28ba129a107 100644 --- a/bin/reth/src/commands/stage/dump/hashing_account.rs +++ b/bin/reth/src/commands/stage/dump/hashing_account.rs @@ -15,7 +15,7 @@ pub(crate) async fn dump_hashing_account_stage( output_datadir: ChainPath, should_run: bool, ) -> Result<()> { - let (output_db, tip_block_number) = setup(from, to, &output_datadir.db_path(), db_tool)?; + let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; // Import relevant AccountChangeSets output_db.update(|tx| { @@ -30,11 +30,7 @@ pub(crate) async fn dump_hashing_account_stage( if should_run { dry_run( - ProviderFactory::new( - output_db, - db_tool.chain.clone(), - output_datadir.static_files_path(), - )?, + ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?, to, from, ) diff --git a/bin/reth/src/commands/stage/dump/hashing_storage.rs b/bin/reth/src/commands/stage/dump/hashing_storage.rs index a7e3878806822..7d38892dc8bd7 100644 --- a/bin/reth/src/commands/stage/dump/hashing_storage.rs +++ b/bin/reth/src/commands/stage/dump/hashing_storage.rs @@ -15,17 +15,13 @@ pub(crate) async fn dump_hashing_storage_stage( output_datadir: ChainPath, should_run: bool, ) -> Result<()> { - let (output_db, tip_block_number) = setup(from, to, &output_datadir.db_path(), db_tool)?; + let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; unwind_and_copy(db_tool, from, tip_block_number, &output_db)?; if should_run { dry_run( - ProviderFactory::new( - output_db, - db_tool.chain.clone(), - output_datadir.static_files_path(), - )?, + ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?, to, from, ) diff --git a/bin/reth/src/commands/stage/dump/merkle.rs b/bin/reth/src/commands/stage/dump/merkle.rs index 2dfd0172b8c74..55a8ec76d1e4d 100644 --- a/bin/reth/src/commands/stage/dump/merkle.rs +++ b/bin/reth/src/commands/stage/dump/merkle.rs @@ -24,7 +24,7 @@ pub(crate) async fn 
dump_merkle_stage( output_datadir: ChainPath, should_run: bool, ) -> Result<()> { - let (output_db, tip_block_number) = setup(from, to, &output_datadir.db_path(), db_tool)?; + let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; output_db.update(|tx| { tx.import_table_with_range::( @@ -46,11 +46,7 @@ pub(crate) async fn dump_merkle_stage( if should_run { dry_run( - ProviderFactory::new( - output_db, - db_tool.chain.clone(), - output_datadir.static_files_path(), - )?, + ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?, to, from, ) diff --git a/bin/reth/src/commands/stage/dump/mod.rs b/bin/reth/src/commands/stage/dump/mod.rs index 4e1cace6e5a2f..fa41843565589 100644 --- a/bin/reth/src/commands/stage/dump/mod.rs +++ b/bin/reth/src/commands/stage/dump/mod.rs @@ -102,11 +102,11 @@ impl Command { pub async fn execute(self) -> eyre::Result<()> { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(db_path, self.db.database_args())?); let provider_factory = - ProviderFactory::new(db, self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db, self.chain.clone(), data_dir.static_files())?; info!(target: "reth::cli", "Database opened"); diff --git a/bin/reth/src/commands/stage/run.rs b/bin/reth/src/commands/stage/run.rs index 66fb25b477e2c..d798c87d1f143 100644 --- a/bin/reth/src/commands/stage/run.rs +++ b/bin/reth/src/commands/stage/run.rs @@ -130,23 +130,20 @@ impl Command { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path()); + let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); let config: Config = 
confy::load_path(config_path).unwrap_or_default(); info!(target: "reth::cli", "reth {} starting stage {:?}", SHORT_VERSION, self.stage); // use the overridden db path if specified - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(db_path, self.db.database_args())?); info!(target: "reth::cli", "Database opened"); - let factory = ProviderFactory::new( - Arc::clone(&db), - self.chain.clone(), - data_dir.static_files_path(), - )?; + let factory = + ProviderFactory::new(Arc::clone(&db), self.chain.clone(), data_dir.static_files())?; let mut provider_rw = factory.provider_rw()?; if let Some(listen_addr) = self.metrics { @@ -165,9 +162,7 @@ impl Command { let batch_size = self.batch_size.unwrap_or(self.to.saturating_sub(self.from) + 1); let etl_config = EtlConfig::new( - Some( - self.etl_dir.unwrap_or_else(|| EtlConfig::from_datadir(&data_dir.data_dir_path())), - ), + Some(self.etl_dir.unwrap_or_else(|| EtlConfig::from_datadir(data_dir.data_dir()))), self.etl_file_size.unwrap_or(EtlConfig::default_file_size()), ); @@ -188,15 +183,15 @@ impl Command { .network .p2p_secret_key .clone() - .unwrap_or_else(|| data_dir.p2p_secret_path()); + .unwrap_or_else(|| data_dir.p2p_secret()); let p2p_secret_key = get_secret_key(&network_secret_path)?; - let default_peers_path = data_dir.known_peers_path(); + let default_peers_path = data_dir.known_peers(); let provider_factory = Arc::new(ProviderFactory::new( db.clone(), self.chain.clone(), - data_dir.static_files_path(), + data_dir.static_files(), )?); let network = self diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index 9ffaad97952ad..0c4260c0c6d3f 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -83,16 +83,16 @@ impl Command { pub async fn execute(self) -> eyre::Result<()> { // add network name to data dir let data_dir = 
self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); if !db_path.exists() { eyre::bail!("Database {db_path:?} does not exist.") } - let config_path = data_dir.config_path(); + let config_path = data_dir.config(); let config: Config = confy::load_path(config_path).unwrap_or_default(); let db = Arc::new(open_db(db_path.as_ref(), self.db.database_args())?); let provider_factory = - ProviderFactory::new(db, self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db, self.chain.clone(), data_dir.static_files())?; let range = self.command.unwind_range(provider_factory.clone())?; if *range.start() == 0 { @@ -148,9 +148,9 @@ impl Command { // Even though we are not planning to download anything, we need to initialize Body and // Header stage with a network client let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); let p2p_secret_key = get_secret_key(&network_secret_path)?; - let default_peers_path = data_dir.known_peers_path(); + let default_peers_path = data_dir.known_peers(); let network = self .network .network_config( diff --git a/crates/node-core/src/dirs.rs b/crates/node-core/src/dirs.rs index 223e65bb2b623..75919f6f0fcae 100644 --- a/crates/node-core/src/dirs.rs +++ b/crates/node-core/src/dirs.rs @@ -271,63 +271,65 @@ impl ChainPath { /// Returns the path to the reth data directory for this chain. /// /// `/` - pub fn data_dir_path(&self) -> PathBuf { - self.0.as_ref().into() + pub fn data_dir(&self) -> &Path { + self.0.as_ref() } /// Returns the path to the db directory for this chain. /// /// `//db` - pub fn db_path(&self) -> PathBuf { - self.0.join("db").into() + pub fn db(&self) -> PathBuf { + self.data_dir().join("db") } /// Returns the path to the static_files directory for this chain. 
- pub fn static_files_path(&self) -> PathBuf { - self.0.join("static_files").into() + /// + /// `//static_files` + pub fn static_files(&self) -> PathBuf { + self.data_dir().join("static_files") } /// Returns the path to the reth p2p secret key for this chain. /// /// `//discovery-secret` - pub fn p2p_secret_path(&self) -> PathBuf { - self.0.join("discovery-secret").into() + pub fn p2p_secret(&self) -> PathBuf { + self.data_dir().join("discovery-secret") } /// Returns the path to the known peers file for this chain. /// /// `//known-peers.json` - pub fn known_peers_path(&self) -> PathBuf { - self.0.join("known-peers.json").into() + pub fn known_peers(&self) -> PathBuf { + self.data_dir().join("known-peers.json") } /// Returns the path to the blobstore directory for this chain where blobs of unfinalized /// transactions are stored. /// /// `//blobstore` - pub fn blobstore_path(&self) -> PathBuf { - self.0.join("blobstore").into() + pub fn blobstore(&self) -> PathBuf { + self.data_dir().join("blobstore") } /// Returns the path to the local transactions backup file /// /// `//txpool-transactions-backup.rlp` - pub fn txpool_transactions_path(&self) -> PathBuf { - self.0.join("txpool-transactions-backup.rlp").into() + pub fn txpool_transactions(&self) -> PathBuf { + self.data_dir().join("txpool-transactions-backup.rlp") } /// Returns the path to the config file for this chain. /// /// `//reth.toml` - pub fn config_path(&self) -> PathBuf { - self.0.join("reth.toml").into() + pub fn config(&self) -> PathBuf { + self.data_dir().join("reth.toml") } /// Returns the path to the jwtsecret file for this chain. 
/// /// `//jwt.hex` - pub fn jwt_path(&self) -> PathBuf { - self.0.join("jwt.hex").into() + pub fn jwt(&self) -> PathBuf { + self.data_dir().join("jwt.hex") } } @@ -359,7 +361,7 @@ mod tests { let path = path.unwrap_or_chain_default(Chain::mainnet()); assert!(path.as_ref().ends_with("reth/mainnet"), "{path:?}"); - let db_path = path.db_path(); + let db_path = path.db(); assert!(db_path.ends_with("reth/mainnet/db"), "{db_path:?}"); let path = MaybePlatformPath::::from_str("my/path/to/datadir").unwrap(); diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index c25395e07246e..411a8b447fb6f 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -234,7 +234,7 @@ impl NodeConfig { /// Get the network secret from the given data dir pub fn network_secret(&self, data_dir: &ChainPath) -> eyre::Result { let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); debug!(target: "reth::cli", ?network_secret_path, "Loading p2p key file"); let secret_key = get_secret_key(&network_secret_path)?; Ok(secret_key) @@ -299,7 +299,7 @@ impl NodeConfig { ) -> eyre::Result> { info!(target: "reth::cli", "Connecting to P2P network"); let secret_key = self.network_secret(data_dir)?; - let default_peers_path = data_dir.known_peers_path(); + let default_peers_path = data_dir.known_peers(); Ok(self.load_network_config(config, client, executor, head, secret_key, default_peers_path)) } diff --git a/crates/node-ethereum/src/node.rs b/crates/node-ethereum/src/node.rs index 4f52027b4509c..9de0cbe6ee5ef 100644 --- a/crates/node-ethereum/src/node.rs +++ b/crates/node-ethereum/src/node.rs @@ -100,7 +100,7 @@ where async fn build_pool(self, ctx: &BuilderContext) -> eyre::Result { let data_dir = ctx.data_dir(); - let blob_store = DiskFileBlobStore::open(data_dir.blobstore_path(), Default::default())?; 
+ let blob_store = DiskFileBlobStore::open(data_dir.blobstore(), Default::default())?; let validator = TransactionValidationTaskExecutor::eth_builder(ctx.chain_spec()) .with_head_timestamp(ctx.head().timestamp) .kzg_settings(ctx.kzg_settings()?) @@ -114,7 +114,7 @@ where let transaction_pool = reth_transaction_pool::Pool::eth_pool(validator, blob_store, ctx.pool_config()); info!(target: "reth::cli", "Transaction pool initialized"); - let transactions_path = data_dir.txpool_transactions_path(); + let transactions_path = data_dir.txpool_transactions(); // spawn txpool maintenance task { diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 815b138587be9..8c899df155fc6 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -533,7 +533,7 @@ impl BuilderContext { self.executor.spawn_critical("p2p txpool", txpool); self.executor.spawn_critical("p2p eth request handler", eth); - let default_peers_path = self.data_dir().known_peers_path(); + let default_peers_path = self.data_dir().known_peers(); let known_peers_file = self.config.network.persistent_peers_file(default_peers_path); self.executor.spawn_critical_with_graceful_shutdown_signal( "p2p network task", diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 28453a047c3ab..63060f64759ff 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -61,7 +61,7 @@ impl LaunchContext { /// Loads the reth config with the configured `data_dir` and overrides settings according to the /// `config`. 
pub fn load_toml_config(&self, config: &NodeConfig) -> eyre::Result { - let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config_path()); + let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config()); let mut toml_config = confy::load_path::(&config_path) .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?; @@ -192,7 +192,7 @@ impl LaunchContextWith> { pub fn ensure_etl_datadir(mut self) -> Self { if self.toml_config_mut().stages.etl.dir.is_none() { self.toml_config_mut().stages.etl.dir = - Some(EtlConfig::from_datadir(&self.data_dir().data_dir_path())) + Some(EtlConfig::from_datadir(self.data_dir().data_dir())) } self @@ -273,7 +273,7 @@ impl LaunchContextWith> { /// Loads the JWT secret for the engine API pub fn auth_jwt_secret(&self) -> eyre::Result { - let default_jwt_path = self.data_dir().jwt_path(); + let default_jwt_path = self.data_dir().jwt(); let secret = self.node_config().rpc.auth_jwt_secret(default_jwt_path)?; Ok(secret) } @@ -299,7 +299,7 @@ where let factory = ProviderFactory::new( self.right().clone(), self.chain_spec(), - self.data_dir().static_files_path(), + self.data_dir().static_files(), )? 
.with_static_files_metrics(); diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index a7b195f482779..c95f3dd9587c2 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -119,7 +119,7 @@ where async fn build_pool(self, ctx: &BuilderContext) -> eyre::Result { let data_dir = ctx.data_dir(); - let blob_store = DiskFileBlobStore::open(data_dir.blobstore_path(), Default::default())?; + let blob_store = DiskFileBlobStore::open(data_dir.blobstore(), Default::default())?; let validator = TransactionValidationTaskExecutor::eth_builder(ctx.chain_spec()) .with_head_timestamp(ctx.head().timestamp) @@ -139,7 +139,7 @@ where ctx.pool_config(), ); info!(target: "reth::cli", "Transaction pool initialized"); - let transactions_path = data_dir.txpool_transactions_path(); + let transactions_path = data_dir.txpool_transactions(); // spawn txpool maintenance task { diff --git a/examples/custom-node-components/src/main.rs b/examples/custom-node-components/src/main.rs index a6db90674d142..ac98de7afbb32 100644 --- a/examples/custom-node-components/src/main.rs +++ b/examples/custom-node-components/src/main.rs @@ -64,7 +64,7 @@ where let transaction_pool = reth_transaction_pool::Pool::eth_pool(validator, blob_store, self.pool_config); info!(target: "reth::cli", "Transaction pool initialized"); - let transactions_path = data_dir.txpool_transactions_path(); + let transactions_path = data_dir.txpool_transactions(); // spawn txpool maintenance task { From 399afd802c2ed17dcbc8fb7b0219185ca73bde3f Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Wed, 1 May 2024 17:22:49 +0200 Subject: [PATCH 427/700] feat(node_builder): allow borrowing self in `ConfigureEvm::evm` (#8024) --- crates/consensus/auto-seal/src/lib.rs | 2 +- crates/consensus/auto-seal/src/task.rs | 2 +- crates/ethereum/evm/src/execute.rs | 2 +- crates/evm/src/lib.rs | 11 ++-- crates/node/builder/src/builder/mod.rs | 5 ++ 
crates/node/builder/src/components/execute.rs | 2 +- crates/optimism/evm/src/execute.rs | 2 +- crates/revm/src/factory.rs | 7 +-- crates/revm/src/optimism/processor.rs | 3 +- crates/revm/src/processor.rs | 58 ++++++++----------- examples/exex/rollup/src/main.rs | 3 +- 11 files changed, 46 insertions(+), 51 deletions(-) diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index c09dcbcc816e1..402a6c9834133 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -426,7 +426,7 @@ impl StorageInner { withdrawals: Option, client: &impl StateProviderFactory, chain_spec: Arc, - evm_config: EvmConfig, + evm_config: &EvmConfig, ) -> Result<(SealedHeader, BundleStateWithReceipts), BlockExecutionError> where EvmConfig: ConfigureEvm, diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index 6009cd810a06d..7e2a700ef4b44 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -145,7 +145,7 @@ where withdrawals.clone(), &client, chain_spec, - evm_config, + &evm_config, ) { Ok((new_header, bundle_state)) => { // clear all transactions from pool diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 64b69d1e577c4..4239fe44924a2 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -137,7 +137,7 @@ where /// /// It does __not__ apply post-execution changes. fn execute_pre_and_transactions( - &mut self, + &self, block: &BlockWithSenders, mut evm: Evm<'_, Ext, &mut State>, ) -> Result<(Vec, u64), BlockExecutionError> diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 154aac2d7bb87..d8e50b759ed2a 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -24,14 +24,17 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// This does not automatically configure the EVM with [ConfigureEvmEnv] methods. 
It is up to /// the caller to call an appropriate method to fill the transaction and block environment /// before executing any transactions using the provided EVM. - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, Self::DefaultExternalContext<'a>, DB>; + fn evm<'a, DB: Database + 'a>( + &'a self, + db: DB, + ) -> Evm<'a, Self::DefaultExternalContext<'a>, DB>; /// Returns a new EVM with the given database configured with the given environment settings, /// including the spec id. /// /// This will preserve any handler modifications fn evm_with_env<'a, DB: Database + 'a>( - &self, + &'a self, db: DB, env: EnvWithHandlerCfg, ) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { @@ -48,7 +51,7 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// /// This will preserve any handler modifications fn evm_with_env_and_inspector<'a, DB, I>( - &self, + &'a self, db: DB, env: EnvWithHandlerCfg, inspector: I, @@ -68,7 +71,7 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// Caution: This does not automatically configure the EVM with [ConfigureEvmEnv] methods. It is /// up to the caller to call an appropriate method to fill the transaction and block /// environment before executing any transactions using the provided EVM. - fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> + fn evm_with_inspector<'a, DB, I>(&'a self, db: DB, inspector: I) -> Evm<'a, I, DB> where DB: Database + 'a, I: GetInspector, diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 8c899df155fc6..b6f0a191e3e53 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -235,6 +235,11 @@ impl WithLaunchContext> where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, { + /// Returns a reference to the node builder's config. + pub fn config(&self) -> &NodeConfig { + self.builder.config() + } + /// Configures the types of the node. 
pub fn with_types(self) -> WithLaunchContext>> where diff --git a/crates/node/builder/src/components/execute.rs b/crates/node/builder/src/components/execute.rs index 417423d5454d1..01684e9c2bccb 100644 --- a/crates/node/builder/src/components/execute.rs +++ b/crates/node/builder/src/components/execute.rs @@ -9,7 +9,7 @@ pub trait ExecutorBuilder: Send { type EVM: ConfigureEvm; // TODO(mattsse): integrate `Executor` - /// Creates the transaction pool. + /// Creates the EVM config. fn build_evm( self, ctx: &BuilderContext, diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index a77f422055f57..0a5e057806f49 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -137,7 +137,7 @@ where /// /// It does __not__ apply post-execution changes. fn execute_pre_and_transactions( - &mut self, + &self, block: &BlockWithSenders, mut evm: Evm<'_, Ext, &mut State>, ) -> Result<(Vec, u64), BlockExecutionError> diff --git a/crates/revm/src/factory.rs b/crates/revm/src/factory.rs index 61e43cc1809cc..fdaae52c0e787 100644 --- a/crates/revm/src/factory.rs +++ b/crates/revm/src/factory.rs @@ -46,11 +46,8 @@ where sp: SP, ) -> Box + 'a> { let database_state = StateProviderDatabase::new(sp); - let mut evm = EVMProcessor::new_with_db( - self.chain_spec.clone(), - database_state, - self.evm_config.clone(), - ); + let mut evm = + EVMProcessor::new_with_db(self.chain_spec.clone(), database_state, &self.evm_config); if let Some(stack) = &self.stack { evm.set_stack(stack.clone()); } diff --git a/crates/revm/src/optimism/processor.rs b/crates/revm/src/optimism/processor.rs index 01ec2efde179a..9fe51d059cfa3 100644 --- a/crates/revm/src/optimism/processor.rs +++ b/crates/revm/src/optimism/processor.rs @@ -242,10 +242,11 @@ mod tests { chain_spec: Arc, db: StateProviderTest, ) -> EVMProcessor<'a, TestEvmConfig> { + static CONFIG: std::sync::OnceLock = std::sync::OnceLock::new(); let mut executor = EVMProcessor::new_with_db( 
chain_spec, StateProviderDatabase::new(db), - TestEvmConfig::default(), + CONFIG.get_or_init(TestEvmConfig::default), ); executor.evm.context.evm.db.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); executor diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs index c22272abc11bb..487cec52805d2 100644 --- a/crates/revm/src/processor.rs +++ b/crates/revm/src/processor.rs @@ -7,7 +7,7 @@ use revm::{ primitives::{CfgEnvWithHandlerCfg, ResultAndState}, Evm, State, }; -use std::{sync::Arc, time::Instant}; +use std::{marker::PhantomData, sync::Arc, time::Instant}; #[cfg(not(feature = "optimism"))] use tracing::{debug, trace}; @@ -59,7 +59,7 @@ pub struct EVMProcessor<'a, EvmConfig> { /// Execution stats pub(crate) stats: BlockExecutorStats, /// The type that is able to configure the EVM environment. - _evm_config: EvmConfig, + _phantom: PhantomData, } impl<'a, EvmConfig> EVMProcessor<'a, EvmConfig> @@ -75,7 +75,7 @@ where pub fn new_with_db( chain_spec: Arc, db: StateProviderDatabase, - evm_config: EvmConfig, + evm_config: &'a EvmConfig, ) -> Self { let state = State::builder() .with_database_boxed(Box::new(db)) @@ -89,7 +89,7 @@ where pub fn new_with_state( chain_spec: Arc, revm_state: StateDBBox<'a, ProviderError>, - evm_config: EvmConfig, + evm_config: &'a EvmConfig, ) -> Self { let stack = InspectorStack::new(InspectorStackConfig::default()); let evm = evm_config.evm_with_inspector(revm_state, stack); @@ -98,7 +98,7 @@ where evm, batch_record: BlockBatchRecord::default(), stats: BlockExecutorStats::default(), - _evm_config: evm_config, + _phantom: PhantomData, } } @@ -507,11 +507,9 @@ mod tests { ); // execute invalid header (no parent beacon block root) - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); + let evm_config = TestEvmConfig::default(); + let mut executor = + EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); // attempt to 
execute a block without parent beacon block root, expect err let err = executor @@ -599,11 +597,9 @@ mod tests { .build(), ); - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); + let evm_config = TestEvmConfig::default(); + let mut executor = + EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); executor.init_env(&header, U256::ZERO); // get the env @@ -648,11 +644,9 @@ mod tests { .build(), ); - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); + let evm_config = TestEvmConfig::default(); + let mut executor = + EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); // construct the header for block one let header = Header { @@ -702,11 +696,9 @@ mod tests { let mut header = chain_spec.genesis_header(); - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); + let evm_config = TestEvmConfig::default(); + let mut executor = + EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); executor.init_env(&header, U256::ZERO); // attempt to execute the genesis block with non-zero parent beacon block root, expect err @@ -781,11 +773,9 @@ mod tests { ); // execute header - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); + let evm_config = TestEvmConfig::default(); + let mut executor = + EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); executor.init_env(&header, U256::ZERO); // ensure that the env is configured with a base fee @@ -843,11 +833,9 @@ mod tests { let chain_id = chain_spec.chain.id(); // execute header - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); + let 
evm_config = TestEvmConfig::default(); + let mut executor = + EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); // Create a test transaction that gonna fail let transaction = TransactionSigned::from_transaction_and_signature( diff --git a/examples/exex/rollup/src/main.rs b/examples/exex/rollup/src/main.rs index cd2b0c94d3445..db33aaf726a24 100644 --- a/examples/exex/rollup/src/main.rs +++ b/examples/exex/rollup/src/main.rs @@ -298,7 +298,8 @@ fn execute_block( ) .with_bundle_update() .build(); - let mut evm = EthEvmConfig::default().evm(state); + let evm_config = EthEvmConfig::default(); + let mut evm = evm_config.evm(state); // Set state clear flag. evm.db_mut().set_state_clear_flag( From 0938504f4a265eec74e0b833ab5c41d66649bdee Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Wed, 1 May 2024 19:32:25 +0200 Subject: [PATCH 428/700] chore: reduce number of Evm monomorphizations (#8030) --- crates/payload/builder/src/database.rs | 12 ++- crates/payload/ethereum/src/lib.rs | 81 ++++++++++++++------- crates/payload/optimism/src/builder.rs | 67 +++++++++++------ crates/rpc/rpc/src/debug.rs | 36 +++------ crates/rpc/rpc/src/eth/api/call.rs | 4 +- crates/rpc/rpc/src/eth/api/pending_block.rs | 5 +- crates/rpc/rpc/src/eth/api/transactions.rs | 31 +++----- crates/rpc/rpc/src/eth/revm_utils.rs | 7 +- crates/rpc/rpc/src/trace.rs | 2 +- 9 files changed, 139 insertions(+), 106 deletions(-) diff --git a/crates/payload/builder/src/database.rs b/crates/payload/builder/src/database.rs index 5b5239fddcb1a..ac36de98c3ab4 100644 --- a/crates/payload/builder/src/database.rs +++ b/crates/payload/builder/src/database.rs @@ -61,10 +61,13 @@ impl CachedReads { } } +/// A [Database] that caches reads inside [CachedReads]. #[derive(Debug)] -struct CachedReadsDbMut<'a, DB> { - cached: &'a mut CachedReads, - db: DB, +pub struct CachedReadsDbMut<'a, DB> { + /// The cache of reads. 
+ pub cached: &'a mut CachedReads, + /// The underlying database. + pub db: DB, } impl<'a, DB: DatabaseRef> Database for CachedReadsDbMut<'a, DB> { @@ -126,7 +129,8 @@ impl<'a, DB: DatabaseRef> Database for CachedReadsDbMut<'a, DB> { /// `revm::db::State` for repeated payload build jobs. #[derive(Debug)] pub struct CachedReadsDBRef<'a, DB> { - inner: RefCell>, + /// The inner cache reads db mut. + pub inner: RefCell>, } impl<'a, DB: DatabaseRef> DatabaseRef for CachedReadsDBRef<'a, DB> { diff --git a/crates/payload/ethereum/src/lib.rs b/crates/payload/ethereum/src/lib.rs index f1c0a215b87f1..e34287f76510a 100644 --- a/crates/payload/ethereum/src/lib.rs +++ b/crates/payload/ethereum/src/lib.rs @@ -73,36 +73,54 @@ where debug!(target: "payload_builder", parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building empty payload"); let state = client.state_by_block_hash(parent_block.hash()).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to get state for empty payload"); - err - })?; + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to get state for empty payload" + ); + err + })?; let mut db = State::builder() - .with_database_boxed(Box::new(StateProviderDatabase::new(&state))) + .with_database(StateProviderDatabase::new(state)) .with_bundle_update() .build(); let base_fee = initialized_block_env.basefee.to::(); let block_number = initialized_block_env.number.to::(); - let block_gas_limit: u64 = initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX); + let block_gas_limit = initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX); // apply eip-4788 pre block contract call pre_block_beacon_root_contract_call( - &mut db, - &chain_spec, - block_number, - &initialized_cfg, - &initialized_block_env, - &attributes, - ).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to apply beacon root contract call 
for empty payload"); - err - })?; - - let WithdrawalsOutcome { withdrawals_root, withdrawals } = - commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, attributes.withdrawals.clone()).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to commit withdrawals for empty payload"); - err - })?; + &mut db, + &chain_spec, + block_number, + &initialized_cfg, + &initialized_block_env, + &attributes, + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to apply beacon root contract call for empty payload" + ); + err + })?; + + let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals( + &mut db, + &chain_spec, + attributes.timestamp, + attributes.withdrawals.clone(), + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to commit withdrawals for empty payload" + ); + err + })?; // merge all transitions into bundle state, this would apply the withdrawal balance // changes and 4788 contract call @@ -110,10 +128,14 @@ where // calculate the state root let bundle_state = db.take_bundle(); - let state_root = state.state_root(&bundle_state).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to calculate state root for empty payload"); - err - })?; + let state_root = db.database.state_root(&bundle_state).map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to calculate state root for empty payload" + ); + err + })?; let mut excess_blob_gas = None; let mut blob_gas_used = None; @@ -178,9 +200,9 @@ where let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; let state_provider = client.state_by_block_hash(config.parent_block.hash())?; - let state = StateProviderDatabase::new(&state_provider); + let state = StateProviderDatabase::new(state_provider); let mut 
db = - State::builder().with_database_ref(cached_reads.as_db(&state)).with_bundle_update().build(); + State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); let extra_data = config.extra_data(); let PayloadConfig { initialized_block_env, @@ -349,7 +371,10 @@ where let logs_bloom = bundle.block_logs_bloom(block_number).expect("Number is in range"); // calculate the state root - let state_root = state_provider.state_root(bundle.state())?; + let state_root = { + let state_provider = db.database.0.inner.borrow_mut(); + state_provider.db.state_root(bundle.state())? + }; // create the block header let transactions_root = proofs::calculate_transaction_root(&executed_txs); diff --git a/crates/payload/optimism/src/builder.rs b/crates/payload/optimism/src/builder.rs index 7d8efa6899e2b..8e8bfb8f0f732 100644 --- a/crates/payload/optimism/src/builder.rs +++ b/crates/payload/optimism/src/builder.rs @@ -123,7 +123,7 @@ where err })?; let mut db = State::builder() - .with_database_boxed(Box::new(StateProviderDatabase::new(&state))) + .with_database(StateProviderDatabase::new(state)) .with_bundle_update() .build(); @@ -133,22 +133,36 @@ where // apply eip-4788 pre block contract call pre_block_beacon_root_contract_call( - &mut db, - &chain_spec, - block_number, - &initialized_cfg, - &initialized_block_env, - &attributes, - ).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to apply beacon root contract call for empty payload"); - err - })?; + &mut db, + &chain_spec, + block_number, + &initialized_cfg, + &initialized_block_env, + &attributes, + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to apply beacon root contract call for empty payload" + ); + err + })?; - let WithdrawalsOutcome { withdrawals_root, withdrawals } = - commit_withdrawals(&mut db, &chain_spec, attributes.payload_attributes.timestamp, 
attributes.payload_attributes.withdrawals.clone()).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to commit withdrawals for empty payload"); - err - })?; + let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals( + &mut db, + &chain_spec, + attributes.payload_attributes.timestamp, + attributes.payload_attributes.withdrawals.clone(), + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to commit withdrawals for empty payload" + ); + err + })?; // merge all transitions into bundle state, this would apply the withdrawal balance // changes and 4788 contract call @@ -156,10 +170,14 @@ where // calculate the state root let bundle_state = db.take_bundle(); - let state_root = state.state_root(&bundle_state).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to calculate state root for empty payload"); - err - })?; + let state_root = db.database.state_root(&bundle_state).map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to calculate state root for empty payload" + ); + err + })?; let mut excess_blob_gas = None; let mut blob_gas_used = None; @@ -236,9 +254,9 @@ where let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; let state_provider = client.state_by_block_hash(config.parent_block.hash())?; - let state = StateProviderDatabase::new(&state_provider); + let state = StateProviderDatabase::new(state_provider); let mut db = - State::builder().with_database_ref(cached_reads.as_db(&state)).with_bundle_update().build(); + State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); let extra_data = config.extra_data(); let PayloadConfig { initialized_block_env, @@ -510,7 +528,10 @@ where let logs_bloom = bundle.block_logs_bloom(block_number).expect("Number is in range"); // 
calculate the state root - let state_root = state_provider.state_root(bundle.state())?; + let state_root = { + let state_provider = db.database.0.inner.borrow_mut(); + state_provider.db.state_root(bundle.state())? + }; // create the block header let transactions_root = proofs::calculate_transaction_root(&executed_txs); diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index e47ccc46612b0..b21adf5205c98 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -323,14 +323,11 @@ where self.inner .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { - let (res, _, db) = this.eth_api().inspect_and_return_db( - db, - env, - &mut inspector, - )?; + let (res, _) = + this.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector .into_geth_builder() - .geth_prestate_traces(&res, prestate_config, &db)?; + .geth_prestate_traces(&res, prestate_config, db)?; Ok(frame) }) .await?; @@ -348,12 +345,9 @@ where .inner .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { - let (res, _, db) = this.eth_api().inspect_and_return_db( - db, - env, - &mut inspector, - )?; - let frame = inspector.try_into_mux_frame(&res, &db)?; + let (res, _) = + this.eth_api().inspect(&mut *db, env, &mut inspector)?; + let frame = inspector.try_into_mux_frame(&res, db)?; Ok(frame.into()) }) .await?; @@ -370,12 +364,9 @@ where .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { let mut inspector = JsInspector::new(code, config)?; - let (res, _, db) = this.eth_api().inspect_and_return_db( - db, - env.clone(), - &mut inspector, - )?; - Ok(inspector.json_result(res, &env, &db)?) + let (res, _) = + this.eth_api().inspect(&mut *db, env.clone(), &mut inspector)?; + Ok(inspector.json_result(res, &env, db)?) 
}) .await?; @@ -564,8 +555,7 @@ where let mut inspector = TracingInspector::new( TracingInspectorConfig::from_geth_prestate_config(&prestate_config), ); - let (res, _, db) = - self.eth_api().inspect_and_return_db(db, env, &mut inspector)?; + let (res, _) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector.into_geth_builder().geth_prestate_traces( &res, @@ -585,8 +575,7 @@ where let mut inspector = MuxInspector::try_from_config(mux_config)?; - let (res, _, db) = - self.eth_api().inspect_and_return_db(db, env, &mut inspector)?; + let (res, _) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector.try_into_mux_frame(&res, db)?; return Ok((frame.into(), res.state)) } @@ -598,8 +587,7 @@ where config, transaction_context.unwrap_or_default(), )?; - let (res, env, db) = - self.eth_api().inspect_and_return_db(db, env, &mut inspector)?; + let (res, env) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; let state = res.state.clone(); let result = inspector.json_result(res, &env, db)?; diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index 191406f96a90d..8ef2af2f52acf 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ b/crates/rpc/rpc/src/eth/api/call.rs @@ -451,14 +451,14 @@ where &self, env_gas_limit: U256, mut env: EnvWithHandlerCfg, - mut db: &mut CacheDB>, + db: &mut CacheDB>, ) -> EthApiError where S: StateProvider, { let req_gas_limit = env.tx.gas_limit; env.tx.gas_limit = env_gas_limit.try_into().unwrap_or(u64::MAX); - let (res, _) = match self.transact(&mut db, env) { + let (res, _) = match self.transact(db, env) { Ok(res) => res, Err(err) => return err, }; diff --git a/crates/rpc/rpc/src/eth/api/pending_block.rs b/crates/rpc/rpc/src/eth/api/pending_block.rs index aa18bf7ecc06b..dbb148981bf7c 100644 --- a/crates/rpc/rpc/src/eth/api/pending_block.rs +++ b/crates/rpc/rpc/src/eth/api/pending_block.rs @@ -52,8 +52,8 @@ impl PendingBlockEnv { let parent_hash = 
origin.build_target_hash(); let state_provider = client.history_by_block_hash(parent_hash)?; - let state = StateProviderDatabase::new(&state_provider); - let mut db = State::builder().with_database(Box::new(state)).with_bundle_update().build(); + let state = StateProviderDatabase::new(state_provider); + let mut db = State::builder().with_database(state).with_bundle_update().build(); let mut cumulative_gas_used = 0; let mut sum_blob_gas_used = 0; @@ -230,6 +230,7 @@ impl PendingBlockEnv { let logs_bloom = bundle.block_logs_bloom(block_number).expect("Block is present"); // calculate the state root + let state_provider = &db.database; let state_root = state_provider.state_root(bundle.state())?; // create the block header diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 3e582821b5b55..15e2b6f5650be 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -287,7 +287,7 @@ pub trait EthTransactions: Send + Sync { f: F, ) -> EthResult where - F: FnOnce(StateCacheDB, EnvWithHandlerCfg) -> EthResult + Send + 'static, + F: FnOnce(&mut StateCacheDB, EnvWithHandlerCfg) -> EthResult + Send + 'static, R: Send + 'static; /// Executes the call request at the given [BlockId]. @@ -308,7 +308,7 @@ pub trait EthTransactions: Send + Sync { inspector: I, ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> where - I: Inspector + Send + 'static; + I: for<'a> Inspector<&'a mut StateCacheDB> + Send + 'static; /// Executes the transaction on top of the given [BlockId] with a tracer configured by the /// config. 
@@ -571,10 +571,7 @@ where ::Error: Into, I: GetInspector, { - let mut evm = self.inner.evm_config.evm_with_env_and_inspector(db, env, inspector); - let res = evm.transact()?; - let (_, env) = evm.into_db_and_env_with_handler_cfg(); - Ok((res, env)) + self.inspect_and_return_db(db, env, inspector).map(|(res, env, _)| (res, env)) } fn inspect_and_return_db( @@ -1066,7 +1063,7 @@ where f: F, ) -> EthResult where - F: FnOnce(StateCacheDB, EnvWithHandlerCfg) -> EthResult + Send + 'static, + F: FnOnce(&mut StateCacheDB, EnvWithHandlerCfg) -> EthResult + Send + 'static, R: Send + 'static, { let (cfg, block_env, at) = self.evm_env_at(at).await?; @@ -1085,7 +1082,7 @@ where &mut db, overrides, )?; - f(db, env) + f(&mut db, env) }) .await .map_err(|_| EthApiError::InternalBlockingTaskError)? @@ -1098,10 +1095,7 @@ where overrides: EvmOverrides, ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> { let this = self.clone(); - self.spawn_with_call_at(request, at, overrides, move |mut db, env| { - this.transact(&mut db, env) - }) - .await + self.spawn_with_call_at(request, at, overrides, move |db, env| this.transact(db, env)).await } async fn spawn_inspect_call_at( @@ -1112,7 +1106,7 @@ where inspector: I, ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> where - I: Inspector + Send + 'static, + I: for<'a> Inspector<&'a mut StateCacheDB> + Send + 'static, { let this = self.clone(); self.spawn_with_call_at(request, at, overrides, move |db, env| { @@ -1133,11 +1127,9 @@ where { let this = self.clone(); self.with_state_at_block(at, |state| { - let db = CacheDB::new(StateProviderDatabase::new(state)); - + let mut db = CacheDB::new(StateProviderDatabase::new(state)); let mut inspector = TracingInspector::new(config); - let (res, _) = this.inspect(db, env, &mut inspector)?; - + let (res, _) = this.inspect(&mut db, env, &mut inspector)?; f(inspector, res) }) } @@ -1155,10 +1147,9 @@ where { let this = self.clone(); self.spawn_with_state_at_block(at, move |state| { - let db = 
CacheDB::new(StateProviderDatabase::new(state)); + let mut db = CacheDB::new(StateProviderDatabase::new(state)); let mut inspector = TracingInspector::new(config); - let (res, _, db) = this.inspect_and_return_db(db, env, &mut inspector)?; - + let (res, _) = this.inspect(&mut db, env, &mut inspector)?; f(inspector, res, db) }) .await diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index c80aee99d5c4f..c2855163bad40 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -278,7 +278,10 @@ pub(crate) fn create_txn_env( } /// Caps the configured [TxEnv] `gas_limit` with the allowance of the caller. -pub(crate) fn cap_tx_gas_limit_with_caller_allowance(db: DB, env: &mut TxEnv) -> EthResult<()> +pub(crate) fn cap_tx_gas_limit_with_caller_allowance( + db: &mut DB, + env: &mut TxEnv, +) -> EthResult<()> where DB: Database, EthApiError: From<::Error>, @@ -296,7 +299,7 @@ where /// /// Returns an error if the caller has insufficient funds. /// Caution: This assumes non-zero `env.gas_price`. Otherwise, zero allowance will be returned. 
-pub(crate) fn caller_gas_allowance(mut db: DB, env: &TxEnv) -> EthResult +pub(crate) fn caller_gas_allowance(db: &mut DB, env: &TxEnv) -> EthResult where DB: Database, EthApiError: From<::Error>, diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 0479190367b9e..5ee089a91f589 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -86,7 +86,7 @@ where let this = self.clone(); self.eth_api() .spawn_with_call_at(trace_request.call, at, overrides, move |db, env| { - let (res, _, db) = this.eth_api().inspect_and_return_db(db, env, &mut inspector)?; + let (res, _) = this.eth_api().inspect(&mut *db, env, &mut inspector)?; let trace_res = inspector.into_parity_builder().into_trace_results_with_state( &res, &trace_request.trace_types, From 1c1cbe92317eaf89d3ff32bc6ccf896ef6e24de9 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 1 May 2024 18:57:01 +0100 Subject: [PATCH 429/700] feat(pool): add `chain_id` to transaction mocks (#8031) --- .../transaction-pool/src/test_utils/mock.rs | 267 ++++++++++-------- 1 file changed, 151 insertions(+), 116 deletions(-) diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 8e265e7ba36c2..108e6073b8cb9 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -13,8 +13,9 @@ use rand::{ }; use reth_primitives::{ constants::{eip4844::DATA_GAS_PER_BLOB, MIN_PROTOCOL_BASE_FEE}, + eip4844::kzg_to_versioned_hash, transaction::TryFromRecoveredTransactionError, - AccessList, Address, BlobTransactionSidecar, Bytes, FromRecoveredPooledTransaction, + AccessList, Address, BlobTransactionSidecar, Bytes, ChainId, FromRecoveredPooledTransaction, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, TryFromRecoveredTransaction, TxEip1559, TxEip2930, TxEip4844, TxHash, TxKind, TxLegacy, TxType, B256, 
EIP1559_TX_TYPE_ID, @@ -94,6 +95,8 @@ macro_rules! make_setters_getters { pub enum MockTransaction { /// Legacy transaction type. Legacy { + /// The chain id of the transaction. + chain_id: Option, /// The hash of the transaction. hash: B256, /// The sender's address. @@ -113,8 +116,35 @@ pub enum MockTransaction { /// The size of the transaction, returned in the implementation of [PoolTransaction]. size: usize, }, + /// EIP-2930 transaction type. + Eip2930 { + /// The chain id of the transaction. + chain_id: ChainId, + /// The hash of the transaction. + hash: B256, + /// The sender's address. + sender: Address, + /// The transaction nonce. + nonce: u64, + /// The transaction's destination. + to: TxKind, + /// The gas limit for the transaction. + gas_limit: u64, + /// The transaction input data. + input: Bytes, + /// The value of the transaction. + value: U256, + /// The gas price for the transaction. + gas_price: u128, + /// The access list associated with the transaction. + access_list: AccessList, + /// The size of the transaction, returned in the implementation of [PoolTransaction]. + size: usize, + }, /// EIP-1559 transaction type. Eip1559 { + /// The chain id of the transaction. + chain_id: ChainId, /// The hash of the transaction. hash: B256, /// The sender's address. @@ -132,7 +162,7 @@ pub enum MockTransaction { /// The value of the transaction. value: U256, /// The access list associated with the transaction. - accesslist: AccessList, + access_list: AccessList, /// The transaction input data. input: Bytes, /// The size of the transaction, returned in the implementation of [PoolTransaction]. @@ -140,6 +170,8 @@ pub enum MockTransaction { }, /// EIP-4844 transaction type. Eip4844 { + /// The chain id of the transaction. + chain_id: ChainId, /// The hash of the transaction. hash: B256, /// The sender's address. @@ -159,7 +191,7 @@ pub enum MockTransaction { /// The value of the transaction. value: U256, /// The access list associated with the transaction. 
- accesslist: AccessList, + access_list: AccessList, /// The transaction input data. input: Bytes, /// The sidecar information for the transaction. @@ -167,29 +199,6 @@ pub enum MockTransaction { /// The size of the transaction, returned in the implementation of [PoolTransaction]. size: usize, }, - /// EIP-2930 transaction type. - Eip2930 { - /// The hash of the transaction. - hash: B256, - /// The sender's address. - sender: Address, - /// The transaction nonce. - nonce: u64, - /// The transaction's destination. - to: TxKind, - /// The gas limit for the transaction. - gas_limit: u64, - /// The transaction input data. - input: Bytes, - /// The value of the transaction. - value: U256, - /// The gas price for the transaction. - gas_price: u128, - /// The access list associated with the transaction. - accesslist: AccessList, - /// The size of the transaction, returned in the implementation of [PoolTransaction]. - size: usize, - }, } // === impl MockTransaction === @@ -208,6 +217,7 @@ impl MockTransaction { /// Returns a new legacy transaction with random address and hash and empty values pub fn legacy() -> Self { MockTransaction::Legacy { + chain_id: Some(1), hash: B256::random(), sender: Address::random(), nonce: 0, @@ -220,9 +230,27 @@ impl MockTransaction { } } + /// Returns a new EIP2930 transaction with random address and hash and empty values + pub fn eip2930() -> Self { + MockTransaction::Eip2930 { + chain_id: 1, + hash: B256::random(), + sender: Address::random(), + nonce: 0, + to: Address::random().into(), + gas_limit: 0, + input: Bytes::new(), + value: Default::default(), + gas_price: 0, + access_list: Default::default(), + size: Default::default(), + } + } + /// Returns a new EIP1559 transaction with random address and hash and empty values pub fn eip1559() -> Self { MockTransaction::Eip1559 { + chain_id: 1, hash: B256::random(), sender: Address::random(), nonce: 0, @@ -232,7 +260,7 @@ impl MockTransaction { to: Address::random().into(), value: 
Default::default(), input: Bytes::new(), - accesslist: Default::default(), + access_list: Default::default(), size: Default::default(), } } @@ -240,6 +268,7 @@ impl MockTransaction { /// Returns a new EIP4844 transaction with random address and hash and empty values pub fn eip4844() -> Self { MockTransaction::Eip4844 { + chain_id: 1, hash: B256::random(), sender: Address::random(), nonce: 0, @@ -250,7 +279,7 @@ impl MockTransaction { to: Address::random().into(), value: Default::default(), input: Bytes::new(), - accesslist: Default::default(), + access_list: Default::default(), sidecar: Default::default(), size: Default::default(), } @@ -266,22 +295,6 @@ impl MockTransaction { transaction } - /// Returns a new EIP2930 transaction with random address and hash and empty values - pub fn eip2930() -> Self { - MockTransaction::Eip2930 { - hash: B256::random(), - sender: Address::random(), - nonce: 0, - to: Address::random().into(), - gas_limit: 0, - input: Bytes::new(), - value: Default::default(), - gas_price: 0, - accesslist: Default::default(), - size: Default::default(), - } - } - /// Creates a new transaction with the given [TxType]. /// /// See the default constructors for each of the transaction types: @@ -372,9 +385,9 @@ impl MockTransaction { pub fn set_accesslist(&mut self, list: AccessList) -> &mut Self { match self { MockTransaction::Legacy { .. } => {} - MockTransaction::Eip1559 { accesslist, .. } | - MockTransaction::Eip4844 { accesslist, .. } | - MockTransaction::Eip2930 { accesslist, .. } => { + MockTransaction::Eip1559 { access_list: accesslist, .. } | + MockTransaction::Eip4844 { access_list: accesslist, .. } | + MockTransaction::Eip2930 { access_list: accesslist, .. } => { *accesslist = list; } } @@ -611,9 +624,9 @@ impl PoolTransaction for MockTransaction { fn access_list(&self) -> Option<&AccessList> { match self { MockTransaction::Legacy { .. } => None, - MockTransaction::Eip1559 { accesslist, .. } | - MockTransaction::Eip4844 { accesslist, .. 
} | - MockTransaction::Eip2930 { accesslist, .. } => Some(accesslist), + MockTransaction::Eip1559 { access_list: accesslist, .. } | + MockTransaction::Eip4844 { access_list: accesslist, .. } | + MockTransaction::Eip2930 { access_list: accesslist, .. } => Some(accesslist), } } @@ -735,7 +748,7 @@ impl TryFromRecoveredTransaction for MockTransaction { #[allow(unreachable_patterns)] match transaction.transaction { Transaction::Legacy(TxLegacy { - chain_id: _, + chain_id, nonce, gas_price, gas_limit, @@ -743,6 +756,7 @@ impl TryFromRecoveredTransaction for MockTransaction { value, input, }) => Ok(MockTransaction::Legacy { + chain_id, hash, sender, nonce, @@ -753,31 +767,30 @@ impl TryFromRecoveredTransaction for MockTransaction { input, size, }), - Transaction::Eip1559(TxEip1559 { - chain_id: _, + Transaction::Eip2930(TxEip2930 { + chain_id, nonce, + gas_price, gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, to, value, input, access_list, - }) => Ok(MockTransaction::Eip1559 { + }) => Ok(MockTransaction::Eip2930 { + chain_id, hash, sender, nonce, - max_fee_per_gas, - max_priority_fee_per_gas, + gas_price, gas_limit, to, value, input, - accesslist: access_list, + access_list, size, }), - Transaction::Eip4844(TxEip4844 { - chain_id: _, + Transaction::Eip1559(TxEip1559 { + chain_id, nonce, gas_limit, max_fee_per_gas, @@ -786,42 +799,46 @@ impl TryFromRecoveredTransaction for MockTransaction { value, input, access_list, - blob_versioned_hashes: _, - max_fee_per_blob_gas, - }) => Ok(MockTransaction::Eip4844 { + }) => Ok(MockTransaction::Eip1559 { + chain_id, hash, sender, nonce, max_fee_per_gas, max_priority_fee_per_gas, - max_fee_per_blob_gas, gas_limit, to, value, input, - accesslist: access_list, - sidecar: BlobTransactionSidecar::default(), + access_list, size, }), - Transaction::Eip2930(TxEip2930 { - chain_id: _, + Transaction::Eip4844(TxEip4844 { + chain_id, nonce, - gas_price, gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, to, value, input, 
access_list, - }) => Ok(MockTransaction::Eip2930 { + blob_versioned_hashes: _, + max_fee_per_blob_gas, + }) => Ok(MockTransaction::Eip4844 { + chain_id, hash, sender, nonce, - gas_price, + max_fee_per_gas, + max_priority_fee_per_gas, + max_fee_per_blob_gas, gas_limit, to, value, input, - accesslist: access_list, + access_list, + sidecar: BlobTransactionSidecar::default(), size, }), _ => unreachable!("Invalid transaction type"), @@ -856,6 +873,7 @@ impl From for Transaction { fn from(mock: MockTransaction) -> Self { match mock { MockTransaction::Legacy { + chain_id, hash: _, sender: _, nonce, @@ -865,16 +883,31 @@ impl From for Transaction { value, input, size: _, - } => Self::Legacy(TxLegacy { - chain_id: Some(1), + } => Self::Legacy(TxLegacy { chain_id, nonce, gas_price, gas_limit, to, value, input }), + MockTransaction::Eip2930 { + chain_id, + hash: _, + sender: _, + nonce, + to, + gas_limit, + input, + value, + gas_price, + access_list, + size: _, + } => Self::Eip2930(TxEip2930 { + chain_id, nonce, gas_price, gas_limit, to, value, + access_list, input, }), MockTransaction::Eip1559 { + chain_id, hash: _, sender: _, nonce, @@ -883,22 +916,23 @@ impl From for Transaction { gas_limit, to, value, - accesslist, + access_list, input, size: _, } => Self::Eip1559(TxEip1559 { - chain_id: 1, + chain_id, nonce, gas_limit, max_fee_per_gas, max_priority_fee_per_gas, to, value, - access_list: accesslist, + access_list, input, }), MockTransaction::Eip4844 { - hash, + chain_id, + hash: _, sender: _, nonce, max_fee_per_gas, @@ -907,44 +941,27 @@ impl From for Transaction { gas_limit, to, value, - accesslist, + access_list, input, - sidecar: _, + sidecar, size: _, } => Self::Eip4844(TxEip4844 { - chain_id: 1, + chain_id, nonce, gas_limit, max_fee_per_gas, max_priority_fee_per_gas, to, value, - access_list: accesslist, - blob_versioned_hashes: vec![hash], + access_list, + blob_versioned_hashes: sidecar + .commitments + .into_iter() + .map(|commitment| 
kzg_to_versioned_hash((*commitment).into())) + .collect(), max_fee_per_blob_gas, input, }), - MockTransaction::Eip2930 { - hash: _, - sender: _, - nonce, - to, - gas_limit, - input, - value, - gas_price, - accesslist, - size: _, - } => Self::Eip2930(TxEip2930 { - chain_id: 1, - nonce, - gas_price, - gas_limit, - to, - value, - access_list: accesslist, - input, - }), } } } @@ -958,23 +975,37 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { any::<(Transaction, Address, B256)>() .prop_map(|(tx, sender, tx_hash)| match &tx { Transaction::Legacy(TxLegacy { + chain_id, nonce, gas_price, gas_limit, to, value, input, - .. - }) | + }) => MockTransaction::Legacy { + chain_id: *chain_id, + sender, + hash: tx_hash, + nonce: *nonce, + gas_price: *gas_price, + gas_limit: *gas_limit, + to: *to, + value: *value, + input: input.clone(), + size: tx.size(), + }, + Transaction::Eip2930(TxEip2930 { + chain_id, nonce, gas_price, gas_limit, to, value, + access_list, input, - .. - }) => MockTransaction::Legacy { + }) => MockTransaction::Eip2930 { + chain_id: *chain_id, sender, hash: tx_hash, nonce: *nonce, @@ -982,10 +1013,12 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { gas_limit: *gas_limit, to: *to, value: *value, - input: (*input).clone(), + input: input.clone(), + access_list: access_list.clone(), size: tx.size(), }, Transaction::Eip1559(TxEip1559 { + chain_id, nonce, gas_limit, max_fee_per_gas, @@ -994,8 +1027,8 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { value, input, access_list, - .. 
}) => MockTransaction::Eip1559 { + chain_id: *chain_id, sender, hash: tx_hash, nonce: *nonce, @@ -1004,11 +1037,12 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { gas_limit: *gas_limit, to: *to, value: *value, - input: (*input).clone(), - accesslist: (*access_list).clone(), + input: input.clone(), + access_list: access_list.clone(), size: tx.size(), }, Transaction::Eip4844(TxEip4844 { + chain_id, nonce, gas_limit, max_fee_per_gas, @@ -1018,8 +1052,9 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { input, max_fee_per_blob_gas, access_list, - .. + blob_versioned_hashes: _, }) => MockTransaction::Eip4844 { + chain_id: *chain_id, sender, hash: tx_hash, nonce: *nonce, @@ -1029,8 +1064,8 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { gas_limit: *gas_limit, to: *to, value: *value, - input: (*input).clone(), - accesslist: (*access_list).clone(), + input: input.clone(), + access_list: access_list.clone(), // only generate a sidecar if it is a 4844 tx - also for the sake of // performance just use a default sidecar sidecar: BlobTransactionSidecar::default(), From 9ae9af484d4aa3e5f8393654ec0ab110bd940afd Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 1 May 2024 19:44:55 +0100 Subject: [PATCH 430/700] feat(pool): make mock transaction validator eth-compatible (#8034) --- crates/transaction-pool/src/noop.rs | 17 +++---- .../transaction-pool/src/test_utils/mock.rs | 48 ++++++++++++++++--- crates/transaction-pool/src/traits.rs | 32 ++++++++----- crates/transaction-pool/src/validate/eth.rs | 22 +++------ 4 files changed, 77 insertions(+), 42 deletions(-) diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 5f2a1104808af..b550a2bc160c6 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -11,10 +11,10 @@ use crate::{ TransactionListenerKind, }, validate::ValidTransaction, - AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, 
EthPooledTransaction, - NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, PooledTransactionsElement, - PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, - TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, + AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, EthPoolTransaction, + EthPooledTransaction, NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, + PooledTransactionsElement, PropagatedTransactions, TransactionEvents, TransactionOrigin, + TransactionPool, TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; use reth_eth_wire::HandleMempoolData; use reth_primitives::{Address, BlobTransactionSidecar, TxHash, U256}; @@ -252,20 +252,21 @@ pub struct MockTransactionValidator { _marker: PhantomData, } -impl TransactionValidator for MockTransactionValidator { +impl TransactionValidator for MockTransactionValidator { type Transaction = T; async fn validate_transaction( &self, origin: TransactionOrigin, - transaction: Self::Transaction, + mut transaction: Self::Transaction, ) -> TransactionValidationOutcome { + let maybe_sidecar = transaction.take_blob().maybe_sidecar().cloned(); // we return `balance: U256::MAX` to simulate a valid transaction which will never go into // overdraft TransactionValidationOutcome::Valid { balance: U256::MAX, state_nonce: 0, - transaction: ValidTransaction::Valid(transaction), + transaction: ValidTransaction::new(transaction, maybe_sidecar), propagate: match origin { TransactionOrigin::External => true, TransactionOrigin::Local => self.propagate_local, @@ -285,7 +286,7 @@ impl MockTransactionValidator { impl Default for MockTransactionValidator { fn default() -> Self { - MockTransactionValidator { propagate_local: true, _marker: Default::default() } + Self { propagate_local: true, _marker: Default::default() } } } diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs 
index 108e6073b8cb9..bcacff2da1914 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -4,7 +4,8 @@ use crate::{ identifier::{SenderIdentifiers, TransactionId}, pool::txpool::TxPool, traits::TransactionOrigin, - CoinbaseTipOrdering, PoolTransaction, ValidPoolTransaction, + CoinbaseTipOrdering, EthBlobTransactionSidecar, EthPoolTransaction, PoolTransaction, + ValidPoolTransaction, }; use paste::paste; use rand::{ @@ -15,11 +16,11 @@ use reth_primitives::{ constants::{eip4844::DATA_GAS_PER_BLOB, MIN_PROTOCOL_BASE_FEE}, eip4844::kzg_to_versioned_hash, transaction::TryFromRecoveredTransactionError, - AccessList, Address, BlobTransactionSidecar, Bytes, ChainId, FromRecoveredPooledTransaction, - IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, Signature, Transaction, - TransactionSigned, TransactionSignedEcRecovered, TryFromRecoveredTransaction, TxEip1559, - TxEip2930, TxEip4844, TxHash, TxKind, TxLegacy, TxType, B256, EIP1559_TX_TYPE_ID, - EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, U256, + AccessList, Address, BlobTransactionSidecar, BlobTransactionValidationError, Bytes, ChainId, + FromRecoveredPooledTransaction, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, + Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, + TryFromRecoveredTransaction, TxEip1559, TxEip2930, TxEip4844, TxHash, TxKind, TxLegacy, TxType, + B256, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, U256, }; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; @@ -730,7 +731,40 @@ impl PoolTransaction for MockTransaction { /// Returns the chain ID associated with the transaction. fn chain_id(&self) -> Option { - Some(1) + match self { + MockTransaction::Legacy { chain_id, .. } => *chain_id, + + MockTransaction::Eip1559 { chain_id, .. } | + MockTransaction::Eip4844 { chain_id, .. } | + MockTransaction::Eip2930 { chain_id, .. 
} => Some(*chain_id), + } + } +} + +impl EthPoolTransaction for MockTransaction { + fn take_blob(&mut self) -> EthBlobTransactionSidecar { + match self { + Self::Eip4844 { sidecar, .. } => EthBlobTransactionSidecar::Present(sidecar.clone()), + _ => EthBlobTransactionSidecar::None, + } + } + + fn blob_count(&self) -> usize { + match self { + Self::Eip4844 { sidecar, .. } => sidecar.blobs.len(), + _ => 0, + } + } + + fn validate_blob( + &self, + _blob: &BlobTransactionSidecar, + _settings: &revm::primitives::KzgSettings, + ) -> Result<(), reth_primitives::BlobTransactionValidationError> { + match &self { + Self::Eip4844 { .. } => Ok(()), + _ => Err(BlobTransactionValidationError::NotBlobTransaction(self.tx_type())), + } } } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 79b9af6984d1e..ca91b00daf537 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -14,8 +14,8 @@ use reth_primitives::{ kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, AccessList, Address, BlobTransactionSidecar, BlobTransactionValidationError, FromRecoveredPooledTransaction, IntoRecoveredTransaction, PooledTransactionsElement, PooledTransactionsElementEcRecovered, - SealedBlock, Transaction, TransactionSignedEcRecovered, TryFromRecoveredTransaction, TxEip4844, - TxHash, TxKind, B256, EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, U256, + SealedBlock, Transaction, TransactionSignedEcRecovered, TryFromRecoveredTransaction, TxHash, + TxKind, B256, EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, U256, }; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -856,12 +856,7 @@ pub trait EthPoolTransaction: PoolTransaction { fn take_blob(&mut self) -> EthBlobTransactionSidecar; /// Returns the number of blobs this transaction has. 
- fn blob_count(&self) -> usize { - self.as_eip4844().map(|tx| tx.blob_versioned_hashes.len()).unwrap_or_default() - } - - /// Returns the transaction as EIP-4844 transaction if it is one. - fn as_eip4844(&self) -> Option<&TxEip4844>; + fn blob_count(&self) -> usize; /// Validates the blob sidecar of the transaction with the given settings. fn validate_blob( @@ -908,6 +903,16 @@ pub enum EthBlobTransactionSidecar { Present(BlobTransactionSidecar), } +impl EthBlobTransactionSidecar { + /// Returns the blob sidecar if it is present + pub const fn maybe_sidecar(&self) -> Option<&BlobTransactionSidecar> { + match self { + EthBlobTransactionSidecar::Present(sidecar) => Some(sidecar), + _ => None, + } + } +} + impl EthPooledTransaction { /// Create new instance of [Self]. /// @@ -1096,8 +1101,11 @@ impl EthPoolTransaction for EthPooledTransaction { } } - fn as_eip4844(&self) -> Option<&TxEip4844> { - self.transaction.as_eip4844() + fn blob_count(&self) -> usize { + match &self.transaction.transaction { + Transaction::Eip4844(tx) => tx.blob_versioned_hashes.len(), + _ => 0, + } } fn validate_blob( @@ -1125,13 +1133,13 @@ impl TryFromRecoveredTransaction for EthPooledTransaction { } EIP4844_TX_TYPE_ID => { // doesn't have a blob sidecar - return Err(TryFromRecoveredTransactionError::BlobSidecarMissing); + return Err(TryFromRecoveredTransactionError::BlobSidecarMissing) } unsupported => { // unsupported transaction type return Err(TryFromRecoveredTransactionError::UnsupportedTransactionType( unsupported, - )); + )) } }; diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index a07e6fc97cefb..b31a3af489e9f 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -361,25 +361,17 @@ where } } EthBlobTransactionSidecar::Present(blob) => { - if let Some(eip4844) = transaction.as_eip4844() { - // validate the blob - if let Err(err) = eip4844.validate_blob(&blob, 
&self.kzg_settings) { - return TransactionValidationOutcome::Invalid( - transaction, - InvalidPoolTransactionError::Eip4844( - Eip4844PoolTransactionError::InvalidEip4844Blob(err), - ), - ) - } - // store the extracted blob - maybe_blob_sidecar = Some(blob); - } else { - // this should not happen + // validate the blob + if let Err(err) = transaction.validate_blob(&blob, &self.kzg_settings) { return TransactionValidationOutcome::Invalid( transaction, - InvalidTransactionError::TxTypeNotSupported.into(), + InvalidPoolTransactionError::Eip4844( + Eip4844PoolTransactionError::InvalidEip4844Blob(err), + ), ) } + // store the extracted blob + maybe_blob_sidecar = Some(blob); } } } From 9d2ca45c30a1cf13c193abe1e1ae172d0381d9c6 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 1 May 2024 21:23:21 +0200 Subject: [PATCH 431/700] chore(engine): flatten fcu processing (#8027) --- crates/consensus/beacon/src/engine/mod.rs | 149 +++++++++------------- 1 file changed, 59 insertions(+), 90 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 428b95c0ba73c..338a37f021102 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -45,7 +45,6 @@ use std::{ use tokio::sync::{ mpsc, mpsc::{UnboundedReceiver, UnboundedSender}, - oneshot, }; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::*; @@ -381,40 +380,6 @@ where None } - /// Called to resolve chain forks and ensure that the Execution layer is working with the latest - /// valid chain. - /// - /// These responses should adhere to the [Engine API Spec for - /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). - /// - /// Returns an error if an internal error occurred like a database error. 
- fn forkchoice_updated( - &mut self, - state: ForkchoiceState, - attrs: Option, - ) -> Result { - trace!(target: "consensus::engine", ?state, "Received new forkchoice state update"); - - // Pre-validate forkchoice state update and return if it's invalid or - // cannot be processed at the moment. - if let Some(on_updated) = self.pre_validate_forkchoice_update(state) { - return Ok(on_updated) - } - - let start = Instant::now(); - let make_canonical_result = self.blockchain.make_canonical(state.head_block_hash); - let elapsed = self.record_make_canonical_latency(start, &make_canonical_result); - - let status = self.on_forkchoice_updated_make_canonical_result( - state, - attrs, - make_canonical_result, - elapsed, - )?; - trace!(target: "consensus::engine", ?status, ?state, "Returning forkchoice status"); - Ok(status) - } - /// Process the result of attempting to make forkchoice state head hash canonical. /// /// # Returns @@ -519,56 +484,54 @@ where false } - /// Invoked when we receive a new forkchoice update message. + /// Invoked when we receive a new forkchoice update message. Calls into the blockchain tree + /// to resolve chain forks and ensure that the Execution Layer is working with the latest valid + /// chain. + /// + /// These responses should adhere to the [Engine API Spec for + /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). /// - /// Returns `true` if the engine now reached its maximum block number, See - /// [EngineSyncController::has_reached_max_block]. + /// Returns an error if an internal error occurred like a database error. 
fn on_forkchoice_updated( &mut self, state: ForkchoiceState, attrs: Option, - tx: oneshot::Sender>, - ) -> Result { + ) -> Result { self.metrics.forkchoice_updated_messages.increment(1); self.blockchain.on_forkchoice_update_received(&state); + trace!(target: "consensus::engine", ?state, "Received new forkchoice state update"); - let on_updated = match self.forkchoice_updated(state, attrs) { - Ok(response) => response, - Err(error) => { - if error.is_fatal() { - // FCU resulted in a fatal error from which we can't recover - let err = error.clone(); - let _ = tx.send(Err(RethError::Canonical(error))); - return Err(err) - } - let _ = tx.send(Err(RethError::Canonical(error))); - return Ok(OnForkchoiceUpdateOutcome::Processed) - } - }; - - let fcu_status = on_updated.forkchoice_status(); + // Pre-validate forkchoice state update and return if it's invalid or + // cannot be processed at the moment. + if let Some(on_updated) = self.pre_validate_forkchoice_update(state) { + return Ok(on_updated) + } - // update the forkchoice state tracker - self.forkchoice_state_tracker.set_latest(state, fcu_status); + let start = Instant::now(); + let make_canonical_result = self.blockchain.make_canonical(state.head_block_hash); + let elapsed = self.record_make_canonical_latency(start, &make_canonical_result); - // send the response to the CL ASAP - let _ = tx.send(Ok(on_updated)); + let status = self.on_forkchoice_updated_make_canonical_result( + state, + attrs, + make_canonical_result, + elapsed, + )?; + trace!(target: "consensus::engine", ?status, ?state, "Returning forkchoice status"); + Ok(status) + } - match fcu_status { + /// Called after the forkchoice update status has been resolved. + /// Depending on the outcome, the method updates the sync state and notifies the listeners + /// about new processed FCU. 
+ fn on_forkchoice_updated_status(&mut self, state: ForkchoiceState, status: ForkchoiceStatus) { + match status { ForkchoiceStatus::Invalid => {} ForkchoiceStatus::Valid => { // FCU head is valid, we're no longer syncing self.sync_state_updater.update_sync_state(SyncState::Idle); // node's fully synced, clear active download requests self.sync.clear_block_download_requests(); - - // check if we reached the maximum configured block - let tip_number = self.blockchain.canonical_tip().number; - if self.sync.has_reached_max_block(tip_number) { - // Terminate the sync early if it's reached the maximum user - // configured block. - return Ok(OnForkchoiceUpdateOutcome::ReachedMaxBlock) - } } ForkchoiceStatus::Syncing => { // we're syncing @@ -577,9 +540,7 @@ where } // notify listeners about new processed FCU - self.listeners.notify(BeaconConsensusEngineEvent::ForkchoiceUpdated(state, fcu_status)); - - Ok(OnForkchoiceUpdateOutcome::Processed) + self.listeners.notify(BeaconConsensusEngineEvent::ForkchoiceUpdated(state, status)); } /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less @@ -966,7 +927,7 @@ where /// /// If the newest head is not invalid, then this will trigger a new pipeline run to sync the gap /// - /// See [Self::forkchoice_updated] and [BlockchainTreeEngine::make_canonical]. + /// See [Self::on_forkchoice_updated] and [BlockchainTreeEngine::make_canonical]. 
fn on_failed_canonical_forkchoice_update( &mut self, state: &ForkchoiceState, @@ -1758,17 +1719,34 @@ where if let Poll::Ready(Some(msg)) = this.engine_message_rx.poll_next_unpin(cx) { match msg { BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { - match this.on_forkchoice_updated(state, payload_attrs, tx) { - Ok(OnForkchoiceUpdateOutcome::Processed) => {} - Ok(OnForkchoiceUpdateOutcome::ReachedMaxBlock) => { - // reached the max block, we can terminate the future - return Poll::Ready(Ok(())) + match this.on_forkchoice_updated(state, payload_attrs) { + Ok(on_updated) => { + let fcu_status = on_updated.forkchoice_status(); + // update the forkchoice state tracker + this.forkchoice_state_tracker.set_latest(state, fcu_status); + // send the response to the CL ASAP + let _ = tx.send(Ok(on_updated)); + + if fcu_status.is_valid() { + let tip_number = this.blockchain.canonical_tip().number; + if this.sync.has_reached_max_block(tip_number) { + // Terminate the sync early if it's reached the + // maximum user configured block. + return Poll::Ready(Ok(())) + } + } + + this.on_forkchoice_updated_status(state, fcu_status); } - Err(err) => { - // fatal error, we can terminate the future - return Poll::Ready(Err(RethError::Canonical(err).into())) + Err(error) => { + if error.is_fatal() { + // fatal error, we can terminate the future + let _ = tx.send(Err(RethError::Canonical(error.clone()))); + return Poll::Ready(Err(RethError::Canonical(error).into())) + } + let _ = tx.send(Err(RethError::Canonical(error))); } - } + }; } BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { this.metrics.new_payload_messages.increment(1); @@ -1828,15 +1806,6 @@ where } } -/// Represents all outcomes of an applied fork choice update. -#[derive(Debug)] -enum OnForkchoiceUpdateOutcome { - /// FCU was processed successfully. - Processed, - /// FCU was processed successfully and reached max block. 
- ReachedMaxBlock, -} - /// Represents outcomes of processing a sync event #[derive(Debug)] enum SyncEventOutcome { From f94ce6e7800df33a296c1b4be2db9ab812474b08 Mon Sep 17 00:00:00 2001 From: Daniel Ramirez Date: Wed, 1 May 2024 15:37:49 -0400 Subject: [PATCH 432/700] chore: cfg ImportOp behind optimism feature (#8033) --- bin/reth/src/cli/mod.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index e7d2789646b44..40e1f24be9c48 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -1,12 +1,14 @@ //! CLI definition and entrypoint to executable +#[cfg(feature = "optimism")] +use crate::commands::import_op; use crate::{ args::{ utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, LogArgs, }, commands::{ - config_cmd, db, debug_cmd, dump_genesis, import, import_op, init_cmd, init_state, + config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, init_state, node::{self, NoArgs}, p2p, recover, stage, test_vectors, }, @@ -148,6 +150,7 @@ impl Cli { Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::InitState(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Import(command) => runner.run_blocking_until_ctrl_c(command.execute()), + #[cfg(feature = "optimism")] Commands::ImportOp(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()), @@ -186,6 +189,7 @@ pub enum Commands { #[command(name = "import")] Import(import::ImportCommand), /// This syncs RLP encoded OP blocks below Bedrock from a file, without executing. + #[cfg(feature = "optimism")] #[command(name = "import-op")] ImportOp(import_op::ImportOpCommand), /// Dumps genesis block JSON configuration to stdout. 
From 0aa7d4d05ed5565545002655ddc94083e057977e Mon Sep 17 00:00:00 2001 From: Seva Zhidkov Date: Wed, 1 May 2024 21:52:40 +0100 Subject: [PATCH 433/700] feat(rpc-builder): add tower layer for updating bearer token in auth client (#8010) --- crates/e2e-test-utils/src/engine_api.rs | 5 +- crates/rpc/rpc-builder/src/auth.rs | 41 ++++------ .../rpc/rpc/src/layers/auth_client_layer.rs | 79 +++++++++++++++++++ crates/rpc/rpc/src/layers/mod.rs | 3 + crates/rpc/rpc/src/lib.rs | 5 +- 5 files changed, 106 insertions(+), 27 deletions(-) create mode 100644 crates/rpc/rpc/src/layers/auth_client_layer.rs diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index 9ede69e674414..fecd9b8b7f4cb 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -1,5 +1,5 @@ use crate::traits::PayloadEnvelopeExt; -use jsonrpsee::http_client::HttpClient; +use jsonrpsee::http_client::{transport::HttpBackend, HttpClient}; use reth::{ api::{EngineTypes, PayloadBuilderAttributes}, providers::CanonStateNotificationStream, @@ -10,12 +10,13 @@ use reth::{ }; use reth_payload_builder::PayloadId; use reth_primitives::B256; +use reth_rpc::AuthClientService; use std::marker::PhantomData; /// Helper for engine api operations pub struct EngineApiTestContext { pub canonical_stream: CanonStateNotificationStream, - pub engine_api_client: HttpClient, + pub engine_api_client: HttpClient>, pub _marker: PhantomData, } diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index 72345aca6b6af..186d61332ab6b 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -15,6 +15,7 @@ use jsonrpsee::{ }; pub use reth_ipc::server::Builder as IpcServerBuilder; +use jsonrpsee::http_client::transport::HttpBackend; use reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; use reth_network_api::{NetworkInfo, Peers}; @@ -27,16 +28,13 @@ use reth_rpc::{ 
cache::EthStateCache, gas_oracle::GasPriceOracle, EthFilterConfig, FeeHistoryCache, FeeHistoryCacheConfig, }, - AuthLayer, Claims, EngineEthApi, EthApi, EthFilter, EthSubscriptionIdProvider, - JwtAuthValidator, JwtSecret, + secret_to_bearer_header, AuthClientLayer, AuthClientService, AuthLayer, EngineEthApi, EthApi, + EthFilter, EthSubscriptionIdProvider, JwtAuthValidator, JwtSecret, }; use reth_rpc_api::servers::*; use reth_tasks::{pool::BlockingTaskPool, TaskSpawner}; use reth_transaction_pool::TransactionPool; -use std::{ - net::{IpAddr, Ipv4Addr, SocketAddr}, - time::{Duration, SystemTime, UNIX_EPOCH}, -}; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use tower::layer::util::Identity; /// Configure and launch a _standalone_ auth server with `engine` and a _new_ `eth` namespace. @@ -397,32 +395,27 @@ impl AuthServerHandle { format!("ws://{}", self.local_addr) } - fn bearer(&self) -> String { - format!( - "Bearer {}", - self.secret - .encode(&Claims { - iat: (SystemTime::now().duration_since(UNIX_EPOCH).unwrap() + - Duration::from_secs(60)) - .as_secs(), - exp: None, - }) - .unwrap() - ) - } - /// Returns a http client connected to the server. - pub fn http_client(&self) -> jsonrpsee::http_client::HttpClient { + pub fn http_client( + &self, + ) -> jsonrpsee::http_client::HttpClient> { + // Create a middleware that adds a new JWT token to every request. + let secret_layer = AuthClientLayer::new(self.secret.clone()); + let middleware = tower::ServiceBuilder::default().layer(secret_layer); jsonrpsee::http_client::HttpClientBuilder::default() - .set_headers(HeaderMap::from_iter([(AUTHORIZATION, self.bearer().parse().unwrap())])) + .set_http_middleware(middleware) .build(self.http_url()) .expect("Failed to create http client") } - /// Returns a ws client connected to the server. + /// Returns a ws client connected to the server. Note that the connection can only be + /// be established within 1 minute due to the JWT token expiration. 
pub async fn ws_client(&self) -> jsonrpsee::ws_client::WsClient { jsonrpsee::ws_client::WsClientBuilder::default() - .set_headers(HeaderMap::from_iter([(AUTHORIZATION, self.bearer().parse().unwrap())])) + .set_headers(HeaderMap::from_iter([( + AUTHORIZATION, + secret_to_bearer_header(&self.secret), + )])) .build(self.ws_url()) .await .expect("Failed to create ws client") diff --git a/crates/rpc/rpc/src/layers/auth_client_layer.rs b/crates/rpc/rpc/src/layers/auth_client_layer.rs new file mode 100644 index 0000000000000..4c845796ede2d --- /dev/null +++ b/crates/rpc/rpc/src/layers/auth_client_layer.rs @@ -0,0 +1,79 @@ +use crate::{Claims, JwtSecret}; +use http::HeaderValue; +use hyper::{header::AUTHORIZATION, service::Service}; +use std::{ + task::{Context, Poll}, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; +use tower::Layer; + +/// A layer that adds a new JWT token to every request using AuthClientService. +#[derive(Debug)] +pub struct AuthClientLayer { + secret: JwtSecret, +} + +impl AuthClientLayer { + /// Create a new AuthClientLayer with the given `secret`. + pub fn new(secret: JwtSecret) -> Self { + Self { secret } + } +} + +impl Layer for AuthClientLayer { + type Service = AuthClientService; + + fn layer(&self, inner: S) -> Self::Service { + AuthClientService::new(self.secret.clone(), inner) + } +} + +/// Automatically authenticates every client request with the given `secret`. 
+#[derive(Debug, Clone)] +pub struct AuthClientService { + secret: JwtSecret, + inner: S, +} + +impl AuthClientService { + fn new(secret: JwtSecret, inner: S) -> Self { + Self { secret, inner } + } +} + +impl Service> for AuthClientService +where + S: Service>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut request: hyper::Request) -> Self::Future { + request.headers_mut().insert(AUTHORIZATION, secret_to_bearer_header(&self.secret)); + self.inner.call(request) + } +} + +/// Helper function to convert a secret into a Bearer auth header value with claims according to +/// . +/// The token is valid for 60 seconds. +pub fn secret_to_bearer_header(secret: &JwtSecret) -> HeaderValue { + format!( + "Bearer {}", + secret + .encode(&Claims { + iat: (SystemTime::now().duration_since(UNIX_EPOCH).unwrap() + + Duration::from_secs(60)) + .as_secs(), + exp: None, + }) + .unwrap() + ) + .parse() + .unwrap() +} diff --git a/crates/rpc/rpc/src/layers/mod.rs b/crates/rpc/rpc/src/layers/mod.rs index ff021a37250b1..83a336e5f9452 100644 --- a/crates/rpc/rpc/src/layers/mod.rs +++ b/crates/rpc/rpc/src/layers/mod.rs @@ -1,8 +1,11 @@ use http::{HeaderMap, Response}; +mod auth_client_layer; mod auth_layer; mod jwt_secret; mod jwt_validator; + +pub use auth_client_layer::{secret_to_bearer_header, AuthClientLayer, AuthClientService}; pub use auth_layer::AuthLayer; pub use jwt_secret::{Claims, JwtError, JwtSecret}; pub use jwt_validator::JwtAuthValidator; diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index c75fa9b6be128..d68f8a0184ea5 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -41,7 +41,10 @@ pub use admin::AdminApi; pub use debug::DebugApi; pub use engine::{EngineApi, EngineEthApi}; pub use eth::{EthApi, EthApiSpec, EthFilter, EthPubSub, EthSubscriptionIdProvider}; -pub use 
layers::{AuthLayer, AuthValidator, Claims, JwtAuthValidator, JwtError, JwtSecret}; +pub use layers::{ + secret_to_bearer_header, AuthClientLayer, AuthClientService, AuthLayer, AuthValidator, Claims, + JwtAuthValidator, JwtError, JwtSecret, +}; pub use net::NetApi; pub use otterscan::OtterscanApi; pub use reth::RethApi; From 2334317dc725d2153fcefb467b1de07e27740b10 Mon Sep 17 00:00:00 2001 From: 0xKitsune <77890308+0xKitsune@users.noreply.github.com> Date: Wed, 1 May 2024 14:00:57 -0700 Subject: [PATCH 434/700] chore: deduplicate fork timestamp configuration in `ChainSpec` (#8038) --- crates/node-core/src/init.rs | 5 +- crates/primitives/src/chain/mod.rs | 3 +- crates/primitives/src/chain/spec.rs | 167 +------------------------- crates/primitives/src/lib.rs | 4 +- examples/polygon-p2p/src/chain_cfg.rs | 4 +- 5 files changed, 9 insertions(+), 174 deletions(-) diff --git a/crates/node-core/src/init.rs b/crates/node-core/src/init.rs index 883bb437a5f0c..92b9f5696efe7 100644 --- a/crates/node-core/src/init.rs +++ b/crates/node-core/src/init.rs @@ -479,8 +479,8 @@ mod tests { DatabaseEnv, }; use reth_primitives::{ - Chain, ForkTimestamps, Genesis, IntegerList, GOERLI, GOERLI_GENESIS_HASH, MAINNET, - MAINNET_GENESIS_HASH, SEPOLIA, SEPOLIA_GENESIS_HASH, + Chain, Genesis, IntegerList, GOERLI, GOERLI_GENESIS_HASH, MAINNET, MAINNET_GENESIS_HASH, + SEPOLIA, SEPOLIA_GENESIS_HASH, }; use reth_provider::test_utils::create_test_provider_factory_with_chain_spec; @@ -570,7 +570,6 @@ mod tests { ..Default::default() }, hardforks: BTreeMap::default(), - fork_timestamps: ForkTimestamps::default(), genesis_hash: None, paris_block_and_final_difficulty: None, deposit_contract: None, diff --git a/crates/primitives/src/chain/mod.rs b/crates/primitives/src/chain/mod.rs index bf60392cd8c63..b04e88ee0be6a 100644 --- a/crates/primitives/src/chain/mod.rs +++ b/crates/primitives/src/chain/mod.rs @@ -2,8 +2,7 @@ pub use alloy_chains::{Chain, ChainKind, NamedChain}; pub use info::ChainInfo; pub use 
spec::{ AllGenesisFormats, BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, - DisplayHardforks, ForkBaseFeeParams, ForkCondition, ForkTimestamps, DEV, GOERLI, HOLESKY, - MAINNET, SEPOLIA, + DisplayHardforks, ForkBaseFeeParams, ForkCondition, DEV, GOERLI, HOLESKY, MAINNET, SEPOLIA, }; #[cfg(feature = "optimism")] pub use spec::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}; diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index d0a5b84334d6f..823548d272165 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -41,7 +41,6 @@ pub static MAINNET: Lazy> = Lazy::new(|| { 15537394, U256::from(58_750_003_716_598_352_816_469u128), )), - fork_timestamps: ForkTimestamps::default().shanghai(1681338455).cancun(1710338135), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), (Hardfork::Homestead, ForkCondition::Block(1150000)), @@ -90,7 +89,6 @@ pub static GOERLI: Lazy> = Lazy::new(|| { )), // paris_block_and_final_difficulty: Some((7382818, U256::from(10_790_000))), - fork_timestamps: ForkTimestamps::default().shanghai(1678832736).cancun(1705473120), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), (Hardfork::Homestead, ForkCondition::Block(0)), @@ -133,7 +131,6 @@ pub static SEPOLIA: Lazy> = Lazy::new(|| { )), // paris_block_and_final_difficulty: Some((1450409, U256::from(17_000_018_015_853_232u128))), - fork_timestamps: ForkTimestamps::default().shanghai(1677557088).cancun(1706655072), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), (Hardfork::Homestead, ForkCondition::Block(0)), @@ -179,7 +176,6 @@ pub static HOLESKY: Lazy> = Lazy::new(|| { "b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4" )), paris_block_and_final_difficulty: Some((0, U256::from(1))), - fork_timestamps: ForkTimestamps::default().shanghai(1696000704).cancun(1707305664), hardforks: BTreeMap::from([ (Hardfork::Frontier, 
ForkCondition::Block(0)), (Hardfork::Homestead, ForkCondition::Block(0)), @@ -224,7 +220,6 @@ pub static DEV: Lazy> = Lazy::new(|| { "2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - fork_timestamps: ForkTimestamps::default().shanghai(0).cancun(0), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), (Hardfork::Homestead, ForkCondition::Block(0)), @@ -270,11 +265,6 @@ pub static OP_MAINNET: Lazy> = Lazy::new(|| { genesis_hash: Some(b256!( "7ca38a1916c42007829c55e69d3e9a73265554b586a499015373241b8a3fa48b" )), - fork_timestamps: ForkTimestamps::default() - .shanghai(1704992401) - .canyon(1704992401) - .cancun(1710374401) - .ecotone(1710374401), paris_block_and_final_difficulty: Some((0, U256::from(0))), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), @@ -322,11 +312,6 @@ pub static OP_SEPOLIA: Lazy> = Lazy::new(|| { genesis_hash: Some(b256!( "102de6ffb001480cc9b8b548fd05c34cd4f46ae4aa91759393db90ea0409887d" )), - fork_timestamps: ForkTimestamps::default() - .shanghai(1699981200) - .canyon(1699981200) - .cancun(1708534800) - .ecotone(1708534800), paris_block_and_final_difficulty: Some((0, U256::from(0))), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), @@ -376,11 +361,6 @@ pub static BASE_SEPOLIA: Lazy> = Lazy::new(|| { genesis_hash: Some(b256!( "0dcc9e089e30b90ddfc55be9a37dd15bc551aeee999d2e2b51414c54eaf934e4" )), - fork_timestamps: ForkTimestamps::default() - .shanghai(1699981200) - .canyon(1699981200) - .cancun(1708534800) - .ecotone(1708534800), paris_block_and_final_difficulty: Some((0, U256::from(0))), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), @@ -430,11 +410,6 @@ pub static BASE_MAINNET: Lazy> = Lazy::new(|| { genesis_hash: Some(b256!( "f712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd" )), - fork_timestamps: ForkTimestamps::default() - .shanghai(1704992401) 
- .canyon(1704992401) - .cancun(1710374401) - .ecotone(1710374401), paris_block_and_final_difficulty: Some((0, U256::from(0))), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), @@ -535,12 +510,6 @@ pub struct ChainSpec { #[serde(skip, default)] pub paris_block_and_final_difficulty: Option<(u64, U256)>, - /// Timestamps of various hardforks - /// - /// This caches entries in `hardforks` map - #[serde(skip, default)] - pub fork_timestamps: ForkTimestamps, - /// The active hard forks and their activation conditions pub hardforks: BTreeMap, @@ -565,7 +534,6 @@ impl Default for ChainSpec { genesis_hash: Default::default(), genesis: Default::default(), paris_block_and_final_difficulty: Default::default(), - fork_timestamps: Default::default(), hardforks: Default::default(), deposit_contract: Default::default(), base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), @@ -819,28 +787,19 @@ impl ChainSpec { /// Convenience method to check if [Hardfork::Shanghai] is active at a given timestamp. #[inline] pub fn is_shanghai_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork_timestamps - .shanghai - .map(|shanghai| timestamp >= shanghai) - .unwrap_or_else(|| self.is_fork_active_at_timestamp(Hardfork::Shanghai, timestamp)) + self.is_fork_active_at_timestamp(Hardfork::Shanghai, timestamp) } /// Convenience method to check if [Hardfork::Cancun] is active at a given timestamp. #[inline] pub fn is_cancun_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork_timestamps - .cancun - .map(|cancun| timestamp >= cancun) - .unwrap_or_else(|| self.is_fork_active_at_timestamp(Hardfork::Cancun, timestamp)) + self.is_fork_active_at_timestamp(Hardfork::Cancun, timestamp) } /// Convenience method to check if [Hardfork::Prague] is active at a given timestamp. 
#[inline] pub fn is_prague_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork_timestamps - .prague - .map(|prague| timestamp >= prague) - .unwrap_or_else(|| self.is_fork_active_at_timestamp(Hardfork::Prague, timestamp)) + self.is_fork_active_at_timestamp(Hardfork::Prague, timestamp) } /// Convenience method to check if [Hardfork::Byzantium] is active at a given block number. @@ -1084,7 +1043,6 @@ impl From for ChainSpec { chain: genesis.config.chain_id.into(), genesis, genesis_hash: None, - fork_timestamps: ForkTimestamps::from_hardforks(&hardforks), hardforks, paris_block_and_final_difficulty, deposit_contract: None, @@ -1093,94 +1051,6 @@ impl From for ChainSpec { } } -/// Various timestamps of forks -#[derive(Debug, Clone, Default, Eq, PartialEq)] -pub struct ForkTimestamps { - /// The timestamp of the Shanghai fork - pub shanghai: Option, - /// The timestamp of the Cancun fork - pub cancun: Option, - /// The timestamp of the Prague fork - pub prague: Option, - /// The timestamp of the Regolith fork - #[cfg(feature = "optimism")] - pub regolith: Option, - /// The timestamp of the Canyon fork - #[cfg(feature = "optimism")] - pub canyon: Option, - /// The timestamp of the Ecotone fork - #[cfg(feature = "optimism")] - pub ecotone: Option, -} - -impl ForkTimestamps { - /// Creates a new [`ForkTimestamps`] from the given hardforks by extracting the timestamps - fn from_hardforks(forks: &BTreeMap) -> Self { - let mut timestamps = ForkTimestamps::default(); - if let Some(shanghai) = forks.get(&Hardfork::Shanghai).and_then(|f| f.as_timestamp()) { - timestamps = timestamps.shanghai(shanghai); - } - if let Some(cancun) = forks.get(&Hardfork::Cancun).and_then(|f| f.as_timestamp()) { - timestamps = timestamps.cancun(cancun); - } - if let Some(prague) = forks.get(&Hardfork::Prague).and_then(|f| f.as_timestamp()) { - timestamps = timestamps.prague(prague); - } - #[cfg(feature = "optimism")] - { - if let Some(regolith) = 
forks.get(&Hardfork::Regolith).and_then(|f| f.as_timestamp()) { - timestamps = timestamps.regolith(regolith); - } - if let Some(canyon) = forks.get(&Hardfork::Canyon).and_then(|f| f.as_timestamp()) { - timestamps = timestamps.canyon(canyon); - } - if let Some(ecotone) = forks.get(&Hardfork::Ecotone).and_then(|f| f.as_timestamp()) { - timestamps = timestamps.ecotone(ecotone); - } - } - timestamps - } - - /// Sets the given Shanghai timestamp - pub fn shanghai(mut self, shanghai: u64) -> Self { - self.shanghai = Some(shanghai); - self - } - - /// Sets the given Cancun timestamp - pub fn cancun(mut self, cancun: u64) -> Self { - self.cancun = Some(cancun); - self - } - - /// Sets the given Prague timestamp - pub fn prague(mut self, prague: u64) -> Self { - self.prague = Some(prague); - self - } - - /// Sets the given regolith timestamp - #[cfg(feature = "optimism")] - pub fn regolith(mut self, regolith: u64) -> Self { - self.regolith = Some(regolith); - self - } - - /// Sets the given canyon timestamp - #[cfg(feature = "optimism")] - pub fn canyon(mut self, canyon: u64) -> Self { - self.canyon = Some(canyon); - self - } - - /// Sets the given ecotone timestamp - #[cfg(feature = "optimism")] - pub fn ecotone(mut self, ecotone: u64) -> Self { - self.ecotone = Some(ecotone); - self - } -} - /// A helper type for compatibility with geth's config #[derive(Debug, Clone, Deserialize, Serialize)] #[serde(untagged)] @@ -1418,7 +1288,6 @@ impl ChainSpecBuilder { chain: self.chain.expect("The chain is required"), genesis: self.genesis.expect("The genesis is required"), genesis_hash: None, - fork_timestamps: ForkTimestamps::from_hardforks(&self.hardforks), hardforks: self.hardforks, paris_block_and_final_difficulty, deposit_contract: None, @@ -1839,36 +1708,6 @@ Post-merge hard forks (timestamp based): ); } - // Tests that the ForkTimestamps are correctly set up. 
- #[test] - fn test_fork_timestamps() { - let spec = ChainSpec::builder().chain(Chain::mainnet()).genesis(Genesis::default()).build(); - assert!(spec.fork_timestamps.shanghai.is_none()); - - let spec = ChainSpec::builder() - .chain(Chain::mainnet()) - .genesis(Genesis::default()) - .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(1337)) - .build(); - assert_eq!(spec.fork_timestamps.shanghai, Some(1337)); - assert!(spec.is_shanghai_active_at_timestamp(1337)); - assert!(!spec.is_shanghai_active_at_timestamp(1336)); - } - - // Tests that all predefined timestamps are correctly set up in the chainspecs - #[test] - fn test_predefined_chain_spec_fork_timestamps() { - let predefined = [&MAINNET, &SEPOLIA, &HOLESKY, &GOERLI]; - - for spec in predefined.iter() { - let expected_timestamp_forks = &spec.fork_timestamps; - let got_timestamp_forks = ForkTimestamps::from_hardforks(&spec.hardforks); - - // make sure they're the same - assert_eq!(expected_timestamp_forks, &got_timestamp_forks); - } - } - // Tests that we skip any fork blocks in block #0 (the genesis ruleset) #[test] fn ignores_genesis_fork_blocks() { diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index d20a35164ddb2..ae20cf6b2c8a3 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -56,8 +56,8 @@ pub use block::{ }; pub use chain::{ AllGenesisFormats, BaseFeeParams, BaseFeeParamsKind, Chain, ChainInfo, ChainKind, ChainSpec, - ChainSpecBuilder, DisplayHardforks, ForkBaseFeeParams, ForkCondition, ForkTimestamps, - NamedChain, DEV, GOERLI, HOLESKY, MAINNET, SEPOLIA, + ChainSpecBuilder, DisplayHardforks, ForkBaseFeeParams, ForkCondition, NamedChain, DEV, GOERLI, + HOLESKY, MAINNET, SEPOLIA, }; #[cfg(feature = "zstd-codec")] pub use compression::*; diff --git a/examples/polygon-p2p/src/chain_cfg.rs b/examples/polygon-p2p/src/chain_cfg.rs index 5a1fadb534236..5860cdb1d39da 100644 --- a/examples/polygon-p2p/src/chain_cfg.rs +++ 
b/examples/polygon-p2p/src/chain_cfg.rs @@ -1,6 +1,5 @@ use reth_primitives::{ - b256, BaseFeeParams, Chain, ChainSpec, ForkCondition, ForkTimestamps, Hardfork, Head, - NodeRecord, B256, + b256, BaseFeeParams, Chain, ChainSpec, ForkCondition, Hardfork, Head, NodeRecord, B256, }; use std::{collections::BTreeMap, sync::Arc}; @@ -15,7 +14,6 @@ pub(crate) fn polygon_chain_spec() -> Arc { // genesis: serde_json::from_str(include_str!("./genesis.json")).expect("deserialize genesis"), genesis_hash: Some(GENESIS), - fork_timestamps: ForkTimestamps::default().shanghai(1681338455), paris_block_and_final_difficulty: None, hardforks: BTreeMap::from([ (Hardfork::Petersburg, ForkCondition::Block(0)), From 4f002f6ef19a94d7e0adafabf1c6a1280b202459 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 2 May 2024 09:30:44 +0200 Subject: [PATCH 435/700] chore(engine): introduce blockchain tree action (#8029) Co-authored-by: Matthias Seitz --- crates/consensus/beacon/src/engine/mod.rs | 165 +++++++++++++--------- 1 file changed, 100 insertions(+), 65 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 338a37f021102..839bb0278c90b 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -43,8 +43,8 @@ use std::{ time::{Duration, Instant}, }; use tokio::sync::{ - mpsc, - mpsc::{UnboundedReceiver, UnboundedSender}, + mpsc::{self, UnboundedReceiver, UnboundedSender}, + oneshot, }; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::*; @@ -189,13 +189,11 @@ where payload_builder: PayloadBuilderHandle, /// Validator for execution payloads payload_validator: ExecutionPayloadValidator, - /// Listeners for engine events. - listeners: EventListeners, + /// Current blockchain tree action. + blockchain_tree_action: Option>, /// Tracks the header of invalid payloads that were rejected by the engine because they're /// invalid. 
invalid_headers: InvalidHeaderCache, - /// Consensus engine metrics. - metrics: EngineMetrics, /// After downloading a block corresponding to a recent forkchoice update, the engine will /// check whether or not we can connect the block to the current canonical chain. If we can't, /// we need to download and execute the missing parents of that block. @@ -209,6 +207,10 @@ where /// be used to download and execute the missing blocks. pipeline_run_threshold: u64, hooks: EngineHooksController, + /// Listeners for engine events. + listeners: EventListeners, + /// Consensus engine metrics. + metrics: EngineMetrics, } impl BeaconConsensusEngine @@ -305,11 +307,12 @@ where handle: handle.clone(), forkchoice_state_tracker: Default::default(), payload_builder, - listeners, invalid_headers: InvalidHeaderCache::new(MAX_INVALID_HEADERS), - metrics: EngineMetrics::default(), + blockchain_tree_action: None, pipeline_run_threshold, hooks: EngineHooksController::new(hooks), + listeners, + metrics: EngineMetrics::default(), }; let maybe_pipeline_target = match target { @@ -496,35 +499,38 @@ where &mut self, state: ForkchoiceState, attrs: Option, - ) -> Result { + tx: oneshot::Sender>, + ) { self.metrics.forkchoice_updated_messages.increment(1); self.blockchain.on_forkchoice_update_received(&state); trace!(target: "consensus::engine", ?state, "Received new forkchoice state update"); - // Pre-validate forkchoice state update and return if it's invalid or - // cannot be processed at the moment. if let Some(on_updated) = self.pre_validate_forkchoice_update(state) { - return Ok(on_updated) + // Pre-validate forkchoice state update and return if it's invalid + // or cannot be processed at the moment. 
+ self.on_forkchoice_updated_status(state, on_updated, tx); + } else { + self.blockchain_tree_action = + Some(BlockchainTreeAction::FcuMakeCanonical { state, attrs, tx }); } - - let start = Instant::now(); - let make_canonical_result = self.blockchain.make_canonical(state.head_block_hash); - let elapsed = self.record_make_canonical_latency(start, &make_canonical_result); - - let status = self.on_forkchoice_updated_make_canonical_result( - state, - attrs, - make_canonical_result, - elapsed, - )?; - trace!(target: "consensus::engine", ?status, ?state, "Returning forkchoice status"); - Ok(status) } /// Called after the forkchoice update status has been resolved. /// Depending on the outcome, the method updates the sync state and notifies the listeners /// about new processed FCU. - fn on_forkchoice_updated_status(&mut self, state: ForkchoiceState, status: ForkchoiceStatus) { + fn on_forkchoice_updated_status( + &mut self, + state: ForkchoiceState, + on_updated: OnForkChoiceUpdated, + tx: oneshot::Sender>, + ) { + // send the response to the CL ASAP + let status = on_updated.forkchoice_status(); + let _ = tx.send(Ok(on_updated)); + + // update the forkchoice state tracker + self.forkchoice_state_tracker.set_latest(state, status); + match status { ForkchoiceStatus::Invalid => {} ForkchoiceStatus::Valid => { @@ -1491,17 +1497,17 @@ where fn on_sync_event( &mut self, event: EngineSyncEvent, - ) -> Result { + ) -> Result { let outcome = match event { EngineSyncEvent::FetchedFullBlock(block) => { self.on_downloaded_block(block); - SyncEventOutcome::Processed + EngineEventOutcome::Processed } EngineSyncEvent::PipelineStarted(target) => { trace!(target: "consensus::engine", ?target, continuous = target.is_none(), "Started the pipeline"); self.metrics.pipeline_runs.increment(1); self.sync_state_updater.update_sync_state(SyncState::Syncing); - SyncEventOutcome::Processed + EngineEventOutcome::Processed } EngineSyncEvent::PipelineFinished { result, reached_max_block } => { 
trace!(target: "consensus::engine", ?result, ?reached_max_block, "Pipeline finished"); @@ -1509,10 +1515,10 @@ where let ctrl = result?; if reached_max_block { // Terminate the sync early if it's reached the maximum user-configured block. - SyncEventOutcome::ReachedMaxBlock + EngineEventOutcome::ReachedMaxBlock } else { self.on_pipeline_outcome(ctrl)?; - SyncEventOutcome::Processed + EngineEventOutcome::Processed } } EngineSyncEvent::PipelineTaskDropped => { @@ -1669,6 +1675,45 @@ where Ok(()) } + + /// Process the outcome of blockchain tree action. + fn on_blockchain_tree_action( + &mut self, + action: BlockchainTreeAction, + ) -> RethResult { + match action { + BlockchainTreeAction::FcuMakeCanonical { state, attrs, tx } => { + let start = Instant::now(); + let result = self.blockchain.make_canonical(state.head_block_hash); + let elapsed = self.record_make_canonical_latency(start, &result); + match self + .on_forkchoice_updated_make_canonical_result(state, attrs, result, elapsed) + { + Ok(on_updated) => { + trace!(target: "consensus::engine", status = ?on_updated, ?state, "Returning forkchoice status"); + let fcu_status = on_updated.forkchoice_status(); + self.on_forkchoice_updated_status(state, on_updated, tx); + + if fcu_status.is_valid() { + let tip_number = self.blockchain.canonical_tip().number; + if self.sync.has_reached_max_block(tip_number) { + // Terminate the sync early if it's reached + // the maximum user configured block. + return Ok(EngineEventOutcome::ReachedMaxBlock) + } + } + } + Err(error) => { + let _ = tx.send(Err(RethError::Canonical(error.clone()))); + if error.is_fatal() { + return Err(RethError::Canonical(error)) + } + } + }; + } + }; + Ok(EngineEventOutcome::Processed) + } } /// On initialization, the consensus engine will poll the message receiver and return @@ -1711,6 +1756,15 @@ where continue } + // Process any blockchain tree action result as set forth during engine message + // processing. 
+ if let Some(action) = this.blockchain_tree_action.take() { + match this.on_blockchain_tree_action(action)? { + EngineEventOutcome::Processed => {} + EngineEventOutcome::ReachedMaxBlock => return Poll::Ready(Ok(())), + }; + } + // Process one incoming message from the CL. We don't drain the messages right away, // because we want to sneak a polling of running hook in between them. // @@ -1719,34 +1773,7 @@ where if let Poll::Ready(Some(msg)) = this.engine_message_rx.poll_next_unpin(cx) { match msg { BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { - match this.on_forkchoice_updated(state, payload_attrs) { - Ok(on_updated) => { - let fcu_status = on_updated.forkchoice_status(); - // update the forkchoice state tracker - this.forkchoice_state_tracker.set_latest(state, fcu_status); - // send the response to the CL ASAP - let _ = tx.send(Ok(on_updated)); - - if fcu_status.is_valid() { - let tip_number = this.blockchain.canonical_tip().number; - if this.sync.has_reached_max_block(tip_number) { - // Terminate the sync early if it's reached the - // maximum user configured block. - return Poll::Ready(Ok(())) - } - } - - this.on_forkchoice_updated_status(state, fcu_status); - } - Err(error) => { - if error.is_fatal() { - // fatal error, we can terminate the future - let _ = tx.send(Err(RethError::Canonical(error.clone()))); - return Poll::Ready(Err(RethError::Canonical(error).into())) - } - let _ = tx.send(Err(RethError::Canonical(error))); - } - }; + this.on_forkchoice_updated(state, payload_attrs, tx); } BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { this.metrics.new_payload_messages.increment(1); @@ -1770,9 +1797,9 @@ where if let Poll::Ready(sync_event) = this.sync.poll(cx) { match this.on_sync_event(sync_event)? 
{ // Sync event was successfully processed - SyncEventOutcome::Processed => (), + EngineEventOutcome::Processed => (), // Max block has been reached, exit the engine loop - SyncEventOutcome::ReachedMaxBlock => return Poll::Ready(Ok(())), + EngineEventOutcome::ReachedMaxBlock => return Poll::Ready(Ok(())), } // this could have taken a while, so we start the next cycle to handle any new @@ -1806,12 +1833,20 @@ where } } -/// Represents outcomes of processing a sync event +enum BlockchainTreeAction { + FcuMakeCanonical { + state: ForkchoiceState, + attrs: Option, + tx: oneshot::Sender>, + }, +} + +/// Represents outcomes of processing an engine event #[derive(Debug)] -enum SyncEventOutcome { - /// Sync event was processed successfully, engine should continue. +enum EngineEventOutcome { + /// Engine event was processed successfully, engine should continue. Processed, - /// Sync event was processed successfully and reached max block. + /// Engine event was processed successfully and reached max block. 
ReachedMaxBlock, } From bb7f1135d030575ff2a315d091e7f10b682c0ae4 Mon Sep 17 00:00:00 2001 From: Darshan Kathiriya <8559992+lakshya-sky@users.noreply.github.com> Date: Thu, 2 May 2024 05:32:46 -0400 Subject: [PATCH 436/700] feat: write pruning config if --full is present (#7938) --- Cargo.lock | 1 + crates/config/Cargo.toml | 4 +- crates/config/src/config.rs | 31 ++++++++++-- crates/node/builder/Cargo.toml | 3 ++ crates/node/builder/src/launch/common.rs | 61 +++++++++++++++++++++++- 5 files changed, 95 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f1ee55cb34577..a6d7a78841eff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7192,6 +7192,7 @@ dependencies = [ "reth-tasks", "reth-tracing", "reth-transaction-pool", + "tempfile", "tokio", ] diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml index ece3fa0bb8cc4..d9147d7b7a7cb 100644 --- a/crates/config/Cargo.toml +++ b/crates/config/Cargo.toml @@ -24,7 +24,9 @@ humantime-serde.workspace = true # crypto secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } -[dev-dependencies] +# toml confy.workspace = true + +[dev-dependencies] tempfile.workspace = true toml.workspace = true diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 7ce947b508689..f6537a04c79f7 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -6,10 +6,13 @@ use reth_primitives::PruneModes; use secp256k1::SecretKey; use serde::{Deserialize, Deserializer, Serialize}; use std::{ + ffi::OsStr, path::{Path, PathBuf}, time::Duration, }; +const EXTENSION: &str = "toml"; + /// Configuration for the reth node. #[derive(Debug, Clone, Default, Deserialize, PartialEq, Eq, Serialize)] #[serde(default)] @@ -47,6 +50,22 @@ impl Config { .peer_config(peer_config) .discovery(discv4) } + + /// Save the configuration to toml file. 
+ pub fn save(&self, path: &Path) -> Result<(), std::io::Error> { + if path.extension() != Some(OsStr::new(EXTENSION)) { + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + format!("reth config file extension must be '{EXTENSION}'"), + )); + } + confy::store_path(path, self).map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) + } + + /// Sets the pruning configuration. + pub fn update_prune_confing(&mut self, prune_config: PruneConfig) { + self.prune = Some(prune_config); + } } /// Configuration for each stage in the pipeline. @@ -325,11 +344,9 @@ where #[cfg(test)] mod tests { - use super::Config; + use super::{Config, EXTENSION}; use std::time::Duration; - const EXTENSION: &str = "toml"; - fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) { let temp_dir = tempfile::tempdir().unwrap(); let config_path = temp_dir.path().join(filename).with_extension(EXTENSION); @@ -347,6 +364,14 @@ mod tests { }) } + #[test] + fn test_store_config_method() { + with_tempdir("config-store-test-method", |config_path| { + let config = Config::default(); + config.save(config_path).expect("Failed to store config"); + }) + } + #[test] fn test_load_config() { with_tempdir("config-load-test", |config_path| { diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 270b0dfe5b634..ef671f1276e00 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -54,3 +54,6 @@ eyre.workspace = true fdlimit.workspace = true confy.workspace = true rayon.workspace = true + +[dev-dependencies] +tempfile.workspace = true diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 63060f64759ff..043b587b891b7 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -22,7 +22,7 @@ use reth_prune::PrunerBuilder; use reth_rpc::JwtSecret; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; -use 
reth_tracing::tracing::{error, info}; +use reth_tracing::tracing::{error, info, warn}; /// Reusable setup for launching a node. /// @@ -66,6 +66,8 @@ impl LaunchContext { let mut toml_config = confy::load_path::(&config_path) .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?; + Self::save_pruning_config_if_full_node(&mut toml_config, config, &config_path)?; + info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); // Update the config with the command line arguments @@ -81,6 +83,24 @@ impl LaunchContext { Ok(toml_config) } + /// Save prune config to the toml file if node is a full node. + fn save_pruning_config_if_full_node( + reth_config: &mut reth_config::Config, + config: &NodeConfig, + config_path: impl AsRef, + ) -> eyre::Result<()> { + if reth_config.prune.is_none() { + if let Some(prune_config) = config.prune_config() { + reth_config.update_prune_confing(prune_config); + info!(target: "reth::cli", "Saving prune config to toml file"); + reth_config.save(config_path.as_ref())?; + } + } else if config.prune_config().is_none() { + warn!(target: "reth::cli", "Prune configs present in config file but --full not provided. Running as a Full node"); + } + Ok(()) + } + /// Convenience function to [Self::configure_globals] pub fn with_configured_globals(self) -> Self { self.configure_globals(); @@ -456,3 +476,42 @@ pub struct WithConfigs { /// The loaded reth.toml config. 
pub toml_config: reth_config::Config, } + +#[cfg(test)] +mod tests { + use super::{LaunchContext, NodeConfig}; + use reth_config::Config; + use reth_node_core::args::PruningArgs; + + const EXTENSION: &str = "toml"; + + fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) { + let temp_dir = tempfile::tempdir().unwrap(); + let config_path = temp_dir.path().join(filename).with_extension(EXTENSION); + proc(&config_path); + temp_dir.close().unwrap() + } + + #[test] + fn test_save_prune_config() { + with_tempdir("prune-store-test", |config_path| { + let mut reth_config = Config::default(); + let node_config = + NodeConfig { pruning: PruningArgs { full: true }, ..NodeConfig::test() }; + LaunchContext::save_pruning_config_if_full_node( + &mut reth_config, + &node_config, + config_path, + ) + .unwrap(); + + assert_eq!( + reth_config.prune.as_ref().map(|p| p.block_interval), + node_config.prune_config().map(|p| p.block_interval) + ); + + let loaded_config: Config = confy::load_path(config_path).unwrap(); + assert_eq!(reth_config, loaded_config); + }) + } +} From 978be33a9954537b48e5cff1ee53b24a47392341 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 2 May 2024 12:49:54 +0200 Subject: [PATCH 437/700] chore(deps): rm builder dep (#8043) --- Cargo.lock | 2 +- crates/blockchain-tree/Cargo.toml | 2 +- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a6d7a78841eff..2840074de7df6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6453,9 +6453,9 @@ dependencies = [ "parking_lot 0.12.2", "reth-consensus", "reth-db", + "reth-evm-ethereum", "reth-interfaces", "reth-metrics", - "reth-node-ethereum", "reth-primitives", "reth-provider", "reth-revm", diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 1757b29391568..ecb2e4ef3631e 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -41,7 +41,7 @@ reth-interfaces 
= { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true , features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-revm.workspace = true -reth-node-ethereum.workspace = true +reth-evm-ethereum.workspace = true parking_lot.workspace = true assert_matches.workspace = true diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 09f829c7e9917..fc9e7685a1c35 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1273,7 +1273,7 @@ mod tests { use linked_hash_set::LinkedHashSet; use reth_consensus::test_utils::TestConsensus; use reth_db::{tables, test_utils::TempDatabase, transaction::DbTxMut, DatabaseEnv}; - use reth_node_ethereum::EthEvmConfig; + use reth_evm_ethereum::EthEvmConfig; #[cfg(not(feature = "optimism"))] use reth_primitives::proofs::calculate_receipt_root; #[cfg(feature = "optimism")] From 10ef202d7cea0a46189520413819955abcdb66de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?raster=20=E2=96=A6?= <102927511+raster21@users.noreply.github.com> Date: Thu, 2 May 2024 14:56:21 +0400 Subject: [PATCH 438/700] chore: update 1.0 release ETA to May (#8040) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3f5e434eee88f..47d8337126d56 100644 --- a/README.md +++ b/README.md @@ -45,7 +45,7 @@ We actively recommend professional node operators to switch to Reth in productio While we are aware of parties running Reth staking nodes in production, we do *not* encourage usage in production staking environments by non-professionals until our audits are done, and the 1.0 version of Reth is released, but we are available to support without warranty or liability. More historical context below: -* We are releasing 1.0 "production-ready" stable Reth once our Reth & Revm audits are done. ETA ~April 2024. 
+* We are releasing 1.0 "production-ready" stable Reth once our Reth & Revm audits are done. ETA ~May 2024. * Reth is currently undergoing an audit with [Sigma Prime](https://sigmaprime.io/), the developers of [Lighthouse](https://github.com/sigp/lighthouse), the Rust Consensus Layer implementation. * Revm (the EVM used in Reth) is undergoing an audit with [Guido Vranken](https://twitter.com/guidovranken) (#1 [Ethereum Bug Bounty](https://ethereum.org/en/bug-bounty)). * We are releasing [beta](https://github.com/paradigmxyz/reth/releases/tag/v0.2.0-beta.1) on Monday March 4th 2024, our first breaking change to the database model, providing faster query speed, smaller database footprint, and allowing "history" to be mounted on separate drives. From 7428573d7c5e9a215ee6a20beeb33f1e8ba4e98e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 2 May 2024 13:16:22 +0200 Subject: [PATCH 439/700] feat(discv5): bootstrap cli (#8004) --- Cargo.lock | 1 + bin/reth/src/commands/p2p/mod.rs | 12 +++++- crates/net/discv5/src/config.rs | 55 +++++++++++++++++++++++++++- crates/net/discv5/src/lib.rs | 40 +++++++++++--------- crates/node-core/Cargo.toml | 1 + crates/node-core/src/args/network.rs | 24 ++++++++++++ crates/node-core/src/node_config.rs | 12 +++++- 7 files changed, 125 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2840074de7df6..3110f8ff0f655 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7224,6 +7224,7 @@ dependencies = [ "reth-consensus-common", "reth-db", "reth-discv4", + "reth-discv5", "reth-engine-primitives", "reth-evm", "reth-interfaces", diff --git a/bin/reth/src/commands/p2p/mod.rs b/bin/reth/src/commands/p2p/mod.rs index 35d111e57d2fe..1cc5d4f880c37 100644 --- a/bin/reth/src/commands/p2p/mod.rs +++ b/bin/reth/src/commands/p2p/mod.rs @@ -138,7 +138,14 @@ impl Command { if self.discovery.enable_discv5_discovery { network_config = network_config.discovery_v5_with_config_builder(|builder| { - let DiscoveryArgs { discv5_addr, discv5_port, 
.. } = self.discovery; + let DiscoveryArgs { + discv5_addr, + discv5_port, + discv5_lookup_interval, + discv5_bootstrap_lookup_interval, + discv5_bootstrap_lookup_countdown, + .. + } = self.discovery; builder .discv5_config( discv5::ConfigBuilder::new(ListenConfig::from(Into::::into(( @@ -147,6 +154,9 @@ impl Command { )))) .build(), ) + .lookup_interval(discv5_lookup_interval) + .bootstrap_lookup_interval(discv5_bootstrap_lookup_interval) + .bootstrap_lookup_countdown(discv5_bootstrap_lookup_countdown) .build() }); } diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 371d40953f5ba..3a506902ed8e4 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -16,7 +16,18 @@ use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys, NetworkS /// Default interval in seconds at which to run a lookup up query. /// /// Default is 60 seconds. -const DEFAULT_SECONDS_LOOKUP_INTERVAL: u64 = 60; +pub const DEFAULT_SECONDS_LOOKUP_INTERVAL: u64 = 60; + +/// Default number of times to do pulse lookup queries, at bootstrap (pulse intervals, defaulting +/// to 5 seconds). +/// +/// Default is 100 counts. +pub const DEFAULT_COUNT_BOOTSTRAP_LOOKUPS: u64 = 100; + +/// Default duration of look up interval, for pulse look ups at bootstrap. +/// +/// Default is 5 seconds. +pub const DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL: u64 = 5; /// Builds a [`Config`]. #[derive(Debug, Default)] @@ -39,6 +50,11 @@ pub struct ConfigBuilder { other_enr_kv_pairs: Vec<(&'static [u8], Bytes)>, /// Interval in seconds at which to run a lookup up query to populate kbuckets. lookup_interval: Option, + /// Interval in seconds at which to run pulse lookup queries at bootstrap to boost kbucket + /// population. + bootstrap_lookup_interval: Option, + /// Number of times to run boost lookup queries at start up. 
+ bootstrap_lookup_countdown: Option, /// Custom filter rules to apply to a discovered peer in order to determine if it should be /// passed up to rlpx or dropped. discovered_peer_filter: Option, @@ -54,6 +70,8 @@ impl ConfigBuilder { tcp_port, other_enr_kv_pairs, lookup_interval, + bootstrap_lookup_interval, + bootstrap_lookup_countdown, discovered_peer_filter, } = discv5_config; @@ -64,6 +82,8 @@ impl ConfigBuilder { tcp_port, other_enr_kv_pairs, lookup_interval: Some(lookup_interval), + bootstrap_lookup_interval: Some(bootstrap_lookup_interval), + bootstrap_lookup_countdown: Some(bootstrap_lookup_countdown), discovered_peer_filter: Some(discovered_peer_filter), } } @@ -135,6 +155,26 @@ impl ConfigBuilder { self } + /// Sets the interval at which to run lookup queries, in order to fill kbuckets. Lookup queries + /// are done periodically at the given interval for the whole run of the program. + pub fn lookup_interval(mut self, seconds: u64) -> Self { + self.lookup_interval = Some(seconds); + self + } + + /// Sets the interval at which to run boost lookup queries at start up. Queries will be started + /// at this interval for the configured number of times after start up. + pub fn bootstrap_lookup_interval(mut self, seconds: u64) -> Self { + self.bootstrap_lookup_interval = Some(seconds); + self + } + + /// Sets the the number of times at which to run boost lookup queries to bootstrap the node. + pub fn bootstrap_lookup_countdown(mut self, counts: u64) -> Self { + self.bootstrap_lookup_countdown = Some(counts); + self + } + /// Adds keys to disallow when filtering a discovered peer, to determine whether or not it /// should be passed to rlpx. The discovered node record is scanned for any kv-pairs where the /// key matches the disallowed keys. If not explicitly set, b"eth2" key will be disallowed. 
@@ -154,6 +194,8 @@ impl ConfigBuilder { tcp_port, other_enr_kv_pairs, lookup_interval, + bootstrap_lookup_interval, + bootstrap_lookup_countdown, discovered_peer_filter, } = self; @@ -163,6 +205,10 @@ impl ConfigBuilder { let fork = fork.map(|(key, fork_id)| (key, fork_id.into())); let lookup_interval = lookup_interval.unwrap_or(DEFAULT_SECONDS_LOOKUP_INTERVAL); + let bootstrap_lookup_interval = + bootstrap_lookup_interval.unwrap_or(DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL); + let bootstrap_lookup_countdown = + bootstrap_lookup_countdown.unwrap_or(DEFAULT_COUNT_BOOTSTRAP_LOOKUPS); let discovered_peer_filter = discovered_peer_filter .unwrap_or_else(|| MustNotIncludeKeys::new(&[NetworkStackId::ETH2])); @@ -174,6 +220,8 @@ impl ConfigBuilder { tcp_port, other_enr_kv_pairs, lookup_interval, + bootstrap_lookup_interval, + bootstrap_lookup_countdown, discovered_peer_filter, } } @@ -197,6 +245,11 @@ pub struct Config { pub(super) other_enr_kv_pairs: Vec<(&'static [u8], Bytes)>, /// Interval in seconds at which to run a lookup up query with to populate kbuckets. pub(super) lookup_interval: u64, + /// Interval in seconds at which to run pulse lookup queries at bootstrap to boost kbucket + /// population. + pub(super) bootstrap_lookup_interval: u64, + /// Number of times to run boost lookup queries at start up. + pub(super) bootstrap_lookup_countdown: u64, /// Custom filter rules to apply to a discovered peer in order to determine if it should be /// passed up to rlpx or dropped. 
pub(super) discovered_peer_filter: MustNotIncludeKeys, diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 14793fab056bc..b8b2eab242542 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -37,7 +37,10 @@ pub mod network_stack_id; pub use discv5::{self, IpMode}; -pub use config::{BootNode, Config, ConfigBuilder}; +pub use config::{ + BootNode, Config, ConfigBuilder, DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, + DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL, +}; pub use enr::enr_to_discv4_id; pub use error::Error; pub use filter::{FilterOutcome, MustNotIncludeKeys}; @@ -45,17 +48,6 @@ pub use network_stack_id::NetworkStackId; use metrics::{DiscoveredPeersMetrics, Discv5Metrics}; -/// Default number of times to do pulse lookup queries, at bootstrap (pulse intervals, defaulting -/// to 5 seconds). -/// -/// Default is 100 counts. -pub const DEFAULT_COUNT_PULSE_LOOKUPS_AT_BOOTSTRAP: u64 = 100; - -/// Default duration of look up interval, for pulse look ups at bootstrap. -/// -/// Default is 5 seconds. -pub const DEFAULT_SECONDS_PULSE_LOOKUP_INTERVAL: u64 = 5; - /// Max kbucket index is 255. /// /// This is the max log2distance for 32 byte [`NodeId`](discv5::enr::NodeId) - 1. See . @@ -180,7 +172,13 @@ impl Discv5 { // 2. start discv5 // let Config { - discv5_config, bootstrap_nodes, lookup_interval, discovered_peer_filter, .. + discv5_config, + bootstrap_nodes, + lookup_interval, + bootstrap_lookup_interval, + bootstrap_lookup_countdown, + discovered_peer_filter, + .. } = discv5_config; let EnrCombinedKeyWrapper(enr) = enr.into(); @@ -206,7 +204,13 @@ impl Discv5 { // // 4. 
start bg kbuckets maintenance // - Self::spawn_populate_kbuckets_bg(lookup_interval, metrics.clone(), discv5.clone()); + Self::spawn_populate_kbuckets_bg( + lookup_interval, + bootstrap_lookup_interval, + bootstrap_lookup_countdown, + metrics.clone(), + discv5.clone(), + ); Ok(( Self { discv5, ip_mode, fork_key, discovered_peer_filter, metrics }, @@ -319,6 +323,8 @@ impl Discv5 { /// Backgrounds regular look up queries, in order to keep kbuckets populated. fn spawn_populate_kbuckets_bg( lookup_interval: u64, + bootstrap_lookup_interval: u64, + bootstrap_lookup_countdown: u64, metrics: Discv5Metrics, discv5: Arc, ) { @@ -327,18 +333,18 @@ impl Discv5 { let lookup_interval = Duration::from_secs(lookup_interval); let metrics = metrics.discovered_peers; let mut kbucket_index = MAX_KBUCKET_INDEX; - let pulse_lookup_interval = Duration::from_secs(DEFAULT_SECONDS_PULSE_LOOKUP_INTERVAL); + let pulse_lookup_interval = Duration::from_secs(bootstrap_lookup_interval); // todo: graceful shutdown async move { // make many fast lookup queries at bootstrap, trying to fill kbuckets at furthest // log2distance from local node - for i in (0..DEFAULT_COUNT_PULSE_LOOKUPS_AT_BOOTSTRAP).rev() { + for i in (0..bootstrap_lookup_countdown).rev() { let target = discv5::enr::NodeId::random(); trace!(target: "net::discv5", %target, - bootstrap_boost_runs_count_down=i, + bootstrap_boost_runs_countdown=i, lookup_interval=format!("{:#?}", pulse_lookup_interval), "starting bootstrap boost lookup query" ); diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index 4bce2908da751..157b44970a8c7 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -27,6 +27,7 @@ reth-transaction-pool.workspace = true reth-tracing.workspace = true reth-config.workspace = true reth-discv4.workspace = true +reth-discv5.workspace = true reth-net-nat.workspace = true reth-network-api.workspace = true reth-evm.workspace = true diff --git a/crates/node-core/src/args/network.rs 
b/crates/node-core/src/args/network.rs index 59dc6ceba6232..df6f8ece8a54d 100644 --- a/crates/node-core/src/args/network.rs +++ b/crates/node-core/src/args/network.rs @@ -7,6 +7,10 @@ use reth_discv4::{ DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT, DEFAULT_DISCOVERY_V5_ADDR, DEFAULT_DISCOVERY_V5_PORT, }; +use reth_discv5::{ + DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, + DEFAULT_SECONDS_LOOKUP_INTERVAL, +}; use reth_net_nat::NatResolver; use reth_network::{ transactions::{ @@ -235,6 +239,23 @@ pub struct DiscoveryArgs { #[arg(id = "discovery.v5.port", long = "discovery.v5.port", value_name = "DISCOVERY_V5_PORT", default_value_t = DEFAULT_DISCOVERY_V5_PORT)] pub discv5_port: u16, + + /// The interval in seconds at which to carry out periodic lookup queries, for the whole + /// run of the program. + #[arg(id = "discovery.v5.lookup-interval", long = "discovery.v5.lookup-interval", value_name = "DISCOVERY_V5_LOOKUP_INTERVAL", + default_value_t = DEFAULT_SECONDS_LOOKUP_INTERVAL)] + pub discv5_lookup_interval: u64, + + /// The interval in seconds at which to carry out boost lookup queries, for a fixed number of + /// times, at bootstrap. + #[arg(id = "discovery.v5.bootstrap.lookup-interval", long = "discovery.v5.bootstrap.lookup-interval", value_name = "DISCOVERY_V5_bootstrap_lookup_interval", + default_value_t = DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL)] + pub discv5_bootstrap_lookup_interval: u64, + + /// The number of times to carry out boost lookup queries at bootstrap. 
+ #[arg(id = "discovery.v5.bootstrap.lookup-countdown", long = "discovery.v5.bootstrap.lookup-countdown", value_name = "DISCOVERY_V5_bootstrap_lookup_countdown", + default_value_t = DEFAULT_COUNT_BOOTSTRAP_LOOKUPS)] + pub discv5_bootstrap_lookup_countdown: u64, } impl DiscoveryArgs { @@ -278,6 +299,9 @@ impl Default for DiscoveryArgs { port: DEFAULT_DISCOVERY_PORT, discv5_addr: DEFAULT_DISCOVERY_V5_ADDR, discv5_port: DEFAULT_DISCOVERY_V5_PORT, + discv5_lookup_interval: DEFAULT_SECONDS_LOOKUP_INTERVAL, + discv5_bootstrap_lookup_interval: DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, + discv5_bootstrap_lookup_countdown: DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, } } } diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 411a8b447fb6f..3f149a824c5b3 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -476,7 +476,14 @@ impl NodeConfig { // work around since discv5 config builder can't be integrated into network config builder // due to unsatisfied trait bounds config.discovery_v5_with_config_builder(|builder| { - let DiscoveryArgs { discv5_addr, discv5_port, .. } = self.network.discovery; + let DiscoveryArgs { + discv5_addr, + discv5_port, + discv5_lookup_interval, + discv5_bootstrap_lookup_interval, + discv5_bootstrap_lookup_countdown, + .. 
+ } = self.network.discovery; builder .discv5_config( discv5::ConfigBuilder::new(ListenConfig::from(Into::::into(( @@ -485,6 +492,9 @@ impl NodeConfig { )))) .build(), ) + .lookup_interval(discv5_lookup_interval) + .bootstrap_lookup_interval(discv5_bootstrap_lookup_interval) + .bootstrap_lookup_countdown(discv5_bootstrap_lookup_countdown) .build() }) } From aba48a5505e027295c9ebf996df7cbb2a3bd81d7 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 2 May 2024 13:02:51 +0100 Subject: [PATCH 440/700] perf: add `ETL` to `init_from_state_dump` (#8022) --- Cargo.lock | 127 +++++++-------- bin/reth/src/commands/init_state.rs | 10 +- crates/node-core/Cargo.toml | 2 + crates/node-core/src/init.rs | 148 ++++++++++++------ crates/storage/codecs/Cargo.toml | 3 +- .../codecs/src/alloy/genesis_account.rs | 67 ++++++++ crates/storage/codecs/src/alloy/mod.rs | 1 + .../storage/db/src/tables/codecs/compact.rs | 4 +- .../src/bundle_state/state_reverts.rs | 24 +-- 9 files changed, 251 insertions(+), 135 deletions(-) create mode 100644 crates/storage/codecs/src/alloy/genesis_account.rs diff --git a/Cargo.lock b/Cargo.lock index 3110f8ff0f655..0a831ae808976 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -514,7 +514,7 @@ version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-json-rpc", - "base64 0.22.0", + "base64 0.22.1", "futures-util", "futures-utils-wasm", "serde", @@ -550,7 +550,7 @@ dependencies = [ "arbitrary", "derive_arbitrary", "derive_more", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "nybbles", "proptest", "proptest-derive", @@ -1006,9 +1006,9 @@ checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" +checksum = 
"72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" @@ -1220,7 +1220,7 @@ dependencies = [ "cfg-if", "dashmap", "fast-float", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "icu_normalizer", "indexmap 2.2.6", "intrusive-collections", @@ -1255,7 +1255,7 @@ checksum = "c055ef3cd87ea7db014779195bc90c6adfc35de4902e3b2fe587adecbd384578" dependencies = [ "boa_macros", "boa_profiler", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "thin-vec", ] @@ -1267,7 +1267,7 @@ checksum = "0cacc9caf022d92195c827a3e5bf83f96089d4bfaff834b359ac7b6be46e9187" dependencies = [ "boa_gc", "boa_macros", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "indexmap 2.2.6", "once_cell", "phf", @@ -1479,9 +1479,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.95" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" +checksum = "065a29261d53ba54260972629f9ca6bffa69bac13cd1fed61420f7fa68b9f8bd" dependencies = [ "jobserver", "libc", @@ -2248,7 +2248,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "lock_api", "once_cell", "parking_lot_core 0.9.10", @@ -2256,15 +2256,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] name = "data-encoding-macro" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20c01c06f5f429efdf2bae21eb67c28b3df3cf85b7dd2d8ef09c0838dac5d33e" +checksum = "f1559b6cba622276d6d63706db152618eeb15b89b3e4041446b05876e352e639" 
dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -2272,9 +2272,9 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0047d07f2c89b17dd631c80450d69841a6b5d7fb17278cbc43d7e4cfcf2576f3" +checksum = "332d754c0af53bc87c108fed664d121ecf59207ec4196041f04d6ab9002ad33f" dependencies = [ "data-encoding", "syn 1.0.109", @@ -3015,9 +3015,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.29" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4556222738635b7a3417ae6130d8f52201e45a0c4d1a907f0826383adb5f85e7" +checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" dependencies = [ "crc32fast", "miniz_oxide", @@ -3364,9 +3364,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", @@ -3379,7 +3379,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -3388,7 +3388,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -3626,7 +3626,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -3712,7 +3712,7 @@ dependencies = [ "http-body 1.0.0", "hyper 1.3.1", "pin-project-lite", - "socket2 
0.5.6", + "socket2 0.5.7", "tokio", "tower", "tower-service", @@ -4032,7 +4032,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "serde", ] @@ -4127,7 +4127,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.6", + "socket2 0.5.7", "widestring", "windows-sys 0.48.0", "winreg 0.50.0", @@ -4204,9 +4204,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4b0e68d9af1f066c06d6e2397583795b912d78537d7d907c561e82c13d69fa1" +checksum = "cfdb12a2381ea5b2e68c3469ec604a007b367778cdb14d09612c8069ebd616ad" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4222,9 +4222,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92f254f56af1ae84815b9b1325094743dcf05b92abb5e94da2e81a35cff0cada" +checksum = "4978087a58c3ab02efc5b07c5e5e2803024536106fd5506f558db172c889b3aa" dependencies = [ "futures-channel", "futures-util", @@ -4246,9 +4246,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "274d68152c24aa78977243bb56f28d7946e6aa309945b37d33174a3f92d89a3a" +checksum = "b4b257e1ec385e07b0255dde0b933f948b5c8b8c28d42afda9587c3a967b896d" dependencies = [ "anyhow", "async-trait", @@ -4272,9 +4272,9 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ac13bc1e44cd00448a5ff485824a128629c945f02077804cb659c07a0ba41395" +checksum = "1ccf93fc4a0bfe05d851d37d7c32b7f370fe94336b52a2f0efc5f1981895c2e5" dependencies = [ "async-trait", "hyper 0.14.28", @@ -4292,9 +4292,9 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c326f9e95aeff7d707b2ffde72c22a52acc975ba1c48587776c02b90c4747a6" +checksum = "7d0bb047e79a143b32ea03974a6bf59b62c2a4c5f5d42a381c907a8bbb3f75c0" dependencies = [ "heck 0.4.1", "proc-macro-crate 3.1.0", @@ -4305,9 +4305,9 @@ dependencies = [ [[package]] name = "jsonrpsee-server" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b5bfbda5f8fb63f997102fd18f73e35e34c84c6dcdbdbbe72c6e48f6d2c959b" +checksum = "12d8b6a9674422a8572e0b0abb12feeb3f2aeda86528c80d0350c2bd0923ab41" dependencies = [ "futures-util", "http 0.2.12", @@ -4329,9 +4329,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dc828e537868d6b12bbb07ec20324909a22ced6efca0057c825c3e1126b2c6d" +checksum = "150d6168405890a7a3231a3c74843f58b8959471f6df76078db2619ddee1d07d" dependencies = [ "anyhow", "beef", @@ -4342,9 +4342,9 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cf8dcee48f383e24957e238240f997ec317ba358b4e6d2e8be3f745bcdabdb5" +checksum = "f448d8eacd945cc17b6c0b42c361531ca36a962ee186342a97cdb8fca679cd77" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4353,9 +4353,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"32f00abe918bf34b785f87459b9205790e5361a3f7437adb50e928dc243f27eb" +checksum = "58b9db2dfd5bb1194b0ce921504df9ceae210a345bc2f6c5a61432089bbab070" dependencies = [ "http 0.2.12", "jsonrpsee-client-transport", @@ -4443,9 +4443,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.153" +version = "0.2.154" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" [[package]] name = "libffi" @@ -4727,7 +4727,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -5476,7 +5476,7 @@ version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "serde", ] @@ -6178,7 +6178,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eae2a1ebfecc58aff952ef8ccd364329abe627762f5bf09ff42eb9d98522479" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", "memchr", ] @@ -6226,7 +6226,7 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "bytes", "futures-core", "futures-util", @@ -6481,6 +6481,7 @@ name = "reth-codecs" version = "0.2.0-beta.6" dependencies = [ "alloy-eips", + "alloy-genesis", "alloy-primitives", "arbitrary", "bytes", @@ -7220,12 +7221,14 @@ dependencies = [ "proptest", "rand 0.8.5", "reth-beacon-consensus", + "reth-codecs", "reth-config", "reth-consensus-common", "reth-db", 
"reth-discv4", "reth-discv5", "reth-engine-primitives", + "reth-etl", "reth-evm", "reth-interfaces", "reth-metrics", @@ -8003,7 +8006,7 @@ dependencies = [ "derive_more", "dyn-clone", "enumn", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "hex", "once_cell", "serde", @@ -8088,9 +8091,9 @@ dependencies = [ [[package]] name = "roaring" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1c77081a55300e016cb86f2864415b7518741879db925b8d488a0ee0d2da6bf" +checksum = "b26f4c25a604fcb3a1bcd96dd6ba37c93840de95de8198d94c0d571a74a804d1" dependencies = [ "bytemuck", "byteorder", @@ -8285,7 +8288,7 @@ version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "rustls-pki-types", ] @@ -8582,11 +8585,11 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c85f8e96d1d6857f13768fcbd895fcb06225510022a2774ed8b5150581847b0" +checksum = "0ad483d2ab0149d5a5ebcd9972a3852711e0153d863bf5a5d0391d28883c4a20" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", @@ -8600,9 +8603,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8b3a576c4eb2924262d5951a3b737ccaf16c931e39a2810c36f9a7e25575557" +checksum = "65569b702f41443e8bc8bbb1c5779bd0450bbe723b56198980e80ec45780bce2" dependencies = [ "darling 0.20.8", "proc-macro2", @@ -8848,9 +8851,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = 
"ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", @@ -9359,7 +9362,7 @@ dependencies = [ "parking_lot 0.12.2", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.6", + "socket2 0.5.7", "tokio-macros", "windows-sys 0.48.0", ] diff --git a/bin/reth/src/commands/init_state.rs b/bin/reth/src/commands/init_state.rs index fa70264e55afb..e0558be321b2e 100644 --- a/bin/reth/src/commands/init_state.rs +++ b/bin/reth/src/commands/init_state.rs @@ -8,6 +8,7 @@ use crate::{ dirs::{DataDirPath, MaybePlatformPath}, }; use clap::Parser; +use reth_config::config::EtlConfig; use reth_db::{database::Database, init_db}; use reth_node_core::init::{init_from_state_dump, init_genesis}; use reth_primitives::{ChainSpec, B256}; @@ -78,11 +79,15 @@ impl InitStateCommand { info!(target: "reth::cli", "Database opened"); let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files())?; + let etl_config = EtlConfig::new( + Some(EtlConfig::from_datadir(data_dir.data_dir())), + EtlConfig::default_file_size(), + ); info!(target: "reth::cli", "Writing genesis block"); let hash = match self.state { - Some(path) => init_at_state(path, provider_factory)?, + Some(path) => init_at_state(path, provider_factory, etl_config)?, None => init_genesis(provider_factory)?, }; @@ -95,6 +100,7 @@ impl InitStateCommand { pub fn init_at_state( state_dump_path: PathBuf, factory: ProviderFactory, + etl_config: EtlConfig, ) -> eyre::Result { info!(target: "reth::cli", path=?state_dump_path, @@ -103,5 +109,5 @@ pub fn init_at_state( let file = File::open(state_dump_path)?; let reader = BufReader::new(file); - init_from_state_dump(reader, factory) + init_from_state_dump(reader, factory, etl_config) } diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index 157b44970a8c7..3caf5d9d1a60f 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -36,6 +36,8 @@ reth-tasks.workspace = true 
reth-trie.workspace = true reth-consensus-common.workspace = true reth-beacon-consensus.workspace = true +reth-etl.workspace = true +reth-codecs.workspace = true # ethereum discv5.workspace = true diff --git a/crates/node-core/src/init.rs b/crates/node-core/src/init.rs index 92b9f5696efe7..8a7751e4e9173 100644 --- a/crates/node-core/src/init.rs +++ b/crates/node-core/src/init.rs @@ -1,10 +1,13 @@ //! Reth genesis initialization utility functions. +use reth_codecs::Compact; +use reth_config::config::EtlConfig; use reth_db::{ database::Database, tables, transaction::{DbTx, DbTxMut}, }; +use reth_etl::Collector; use reth_interfaces::{db::DatabaseError, provider::ProviderResult}; use reth_primitives::{ stage::StageId, Account, Address, Bytecode, ChainSpec, GenesisAccount, Receipts, @@ -293,10 +296,16 @@ pub fn insert_genesis_header( Ok(()) } -/// Initialize chain with state at specific block, from reader of state dump. +/// Reads account state from a [`BufRead`] reader and initializes it at the highest block that can +/// be found on database. +/// +/// It's similar to [`init_genesis`] but supports importing state too big to fit in memory, and can +/// be set to the highest block present. One practical usecase is to import OP mainnet state at +/// bedrock transition block. 
pub fn init_from_state_dump( mut reader: impl BufRead, factory: ProviderFactory, + etl_config: EtlConfig, ) -> eyre::Result { let block = factory.last_block_number()?; let hash = factory.block_hash(block)?.unwrap(); @@ -307,47 +316,115 @@ pub fn init_from_state_dump( "Initializing state at block" ); - let mut total_inserted_accounts = 0; - let mut accounts = Vec::with_capacity(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP); - let mut chunk_total_byte_len = 0; - let mut line = String::new(); - // first line can be state root, then it can be used for verifying against computed state root + let expected_state_root = parse_state_root(&mut reader)?; + + // remaining lines are accounts + let collector = parse_accounts(&mut reader, etl_config)?; + + // write state to db + let mut provider_rw = factory.provider_rw()?; + dump_state(collector, &mut provider_rw, block)?; + + // compute and compare state root. this advances the stage checkpoints. + let computed_state_root = compute_state_root(&provider_rw)?; + if computed_state_root != expected_state_root { + error!(target: "reth::cli", + ?computed_state_root, + ?expected_state_root, + "Computed state root does not match state root in state dump" + ); + + Err(InitDatabaseError::SateRootMismatch { expected_state_root, computed_state_root })? + } else { + info!(target: "reth::cli", + ?computed_state_root, + "Computed state root matches state root in state dump" + ); + } + + provider_rw.commit()?; + + Ok(hash) +} + +/// Parses and returns expected state root. +fn parse_state_root(reader: &mut impl BufRead) -> eyre::Result { + let mut line = String::new(); reader.read_line(&mut line)?; - let expected_state_root = serde_json::from_str::(&line)?.root; + let expected_state_root = serde_json::from_str::(&line)?.root; trace!(target: "reth::cli", root=%expected_state_root, "Read state root from file" ); + Ok(expected_state_root) +} - line.clear(); +/// Parses accounts and pushes them to a [`Collector`]. 
+fn parse_accounts( + mut reader: impl BufRead, + etl_config: EtlConfig, +) -> Result, eyre::Error> { + let mut line = String::new(); + let mut collector = Collector::new(etl_config.file_size, etl_config.dir); - // remaining lines are accounts - let mut provider_rw = factory.provider_rw()?; while let Ok(n) = reader.read_line(&mut line) { - chunk_total_byte_len += n; - if DEFAULT_SOFT_LIMIT_BYTE_LEN_ACCOUNTS_CHUNK <= chunk_total_byte_len || n == 0 { - // acc + if n == 0 { + break; + } + + let GenesisAccountWithAddress { genesis_account, address } = serde_json::from_str(&line)?; + collector.insert(address, genesis_account)?; + + if !collector.is_empty() && collector.len() % AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP == 0 + { + info!(target: "reth::cli", + parsed_new_accounts=collector.len(), + ); + } + + line.clear(); + } + + Ok(collector) +} + +/// Takes a [`Collector`] and processes all accounts. +fn dump_state( + mut collector: Collector, + provider_rw: &mut DatabaseProviderRW, + block: u64, +) -> Result<(), eyre::Error> { + let accounts_len = collector.len(); + let mut accounts = Vec::with_capacity(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP); + let mut total_inserted_accounts = 0; + + for (index, entry) in collector.iter()?.enumerate() { + let (address, account) = entry?; + let (address, _) = Address::from_compact(address.as_slice(), address.len()); + let (account, _) = GenesisAccount::from_compact(account.as_slice(), account.len()); + + accounts.push((address, account)); + + if (index > 0 && index % AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP == 0) || + index == accounts_len - 1 + { total_inserted_accounts += accounts.len(); info!(target: "reth::cli", - chunk_total_byte_len, - parsed_new_accounts=accounts.len(), total_inserted_accounts, "Writing accounts to db" ); - // reset - chunk_total_byte_len = 0; - // use transaction to insert genesis header insert_genesis_hashes( - &provider_rw, + provider_rw, accounts.iter().map(|(address, account)| (address, account)), )?; + 
insert_history( - &provider_rw, + provider_rw, accounts.iter().map(|(address, account)| (address, account)), block, )?; @@ -363,37 +440,8 @@ pub fn init_from_state_dump( accounts.clear(); } - - if n == 0 { - break; - } - - let GenesisAccountWithAddress { genesis_account, address } = serde_json::from_str(&line)?; - accounts.push((address, genesis_account)); - - line.clear(); } - - // compute and compare state root. this advances the stage checkpoints. - let computed_state_root = compute_state_root(&provider_rw)?; - if computed_state_root != expected_state_root { - error!(target: "reth::cli", - ?computed_state_root, - ?expected_state_root, - "Computed state root does not match state root in state dump" - ); - - Err(InitDatabaseError::SateRootMismatch { expected_state_root, computed_state_root })? - } else { - info!(target: "reth::cli", - ?computed_state_root, - "Computed state root matches state root in state dump" - ); - } - - provider_rw.commit()?; - - Ok(hash) + Ok(()) } /// Computes the state root (from scratch) based on the accounts and storages present in the diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index ab8f1a3232866..958ccf9174eaa 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -16,6 +16,7 @@ reth-codecs-derive = { path = "./derive", default-features = false } # eth alloy-eips = { workspace = true, optional = true } +alloy-genesis = { workspace = true, optional = true } alloy-primitives.workspace = true # misc @@ -36,5 +37,5 @@ proptest-derive.workspace = true [features] default = ["std", "alloy"] std = ["alloy-primitives/std", "bytes/std"] -alloy = ["dep:alloy-eips", "dep:modular-bitfield"] +alloy = ["dep:alloy-eips", "dep:alloy-genesis", "dep:modular-bitfield"] optimism = ["reth-codecs-derive/optimism"] diff --git a/crates/storage/codecs/src/alloy/genesis_account.rs b/crates/storage/codecs/src/alloy/genesis_account.rs new file mode 100644 index 0000000000000..619d9db517cce --- 
/dev/null +++ b/crates/storage/codecs/src/alloy/genesis_account.rs @@ -0,0 +1,67 @@ +use crate::Compact; +use alloy_genesis::GenesisAccount as AlloyGenesisAccount; +use alloy_primitives::{Bytes, B256, U256}; +use reth_codecs_derive::main_codec; + +/// GenesisAccount acts as bridge which simplifies Compact implementation for AlloyGenesisAccount. +/// +/// Notice: Make sure this struct is 1:1 with `alloy_genesis::GenesisAccount` +#[main_codec] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +struct GenesisAccount { + /// The nonce of the account at genesis. + nonce: Option, + /// The balance of the account at genesis. + balance: U256, + /// The account's bytecode at genesis. + code: Option, + /// The account's storage at genesis. + storage: Option, + /// The account's private key. Should only be used for testing. + private_key: Option, +} + +#[main_codec] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +struct StorageEntries { + entries: Vec, +} + +#[main_codec] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +struct StorageEntry { + key: B256, + value: B256, +} + +impl Compact for AlloyGenesisAccount { + fn to_compact(self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let account = GenesisAccount { + nonce: self.nonce, + balance: self.balance, + code: self.code, + storage: self.storage.map(|s| StorageEntries { + entries: s.into_iter().map(|(key, value)| StorageEntry { key, value }).collect(), + }), + private_key: self.private_key, + }; + account.to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (account, _) = GenesisAccount::from_compact(buf, len); + let alloy_account = AlloyGenesisAccount { + nonce: account.nonce, + balance: account.balance, + code: account.code, + storage: account + .storage + .map(|s| s.entries.into_iter().map(|entry| (entry.key, entry.value)).collect()), + private_key: account.private_key, + }; + (alloy_account, buf) + } +} diff --git a/crates/storage/codecs/src/alloy/mod.rs 
b/crates/storage/codecs/src/alloy/mod.rs index aff164642586b..664ab26077cde 100644 --- a/crates/storage/codecs/src/alloy/mod.rs +++ b/crates/storage/codecs/src/alloy/mod.rs @@ -1,4 +1,5 @@ mod access_list; +mod genesis_account; mod log; mod txkind; mod withdrawal; diff --git a/crates/storage/db/src/tables/codecs/compact.rs b/crates/storage/db/src/tables/codecs/compact.rs index 452f5c6324474..aed8d97efee93 100644 --- a/crates/storage/db/src/tables/codecs/compact.rs +++ b/crates/storage/db/src/tables/codecs/compact.rs @@ -50,7 +50,9 @@ impl_compression_for_compact!( CompactU256, StageCheckpoint, PruneCheckpoint, - ClientVersion + ClientVersion, + // Non-DB + GenesisAccount ); macro_rules! impl_compression_fixed_compact { diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index 006f87b4053c2..cc16a50ccabd4 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -1,6 +1,6 @@ use rayon::slice::ParallelSliceMut; use reth_db::{ - cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, + cursor::{DbCursorRO, DbDupCursorRO, DbDupCursorRW}, models::{AccountBeforeTx, BlockNumberAddress}, tables, transaction::{DbTx, DbTxMut}, @@ -75,30 +75,16 @@ impl StateReverts { tracing::trace!(target: "provider::reverts", "Writing account changes"); let mut account_changeset_cursor = tx.cursor_dup_write::()?; - // append entries if key is new - let should_append_accounts = - account_changeset_cursor.last()?.map_or(true, |(block_number, _)| { - block_number < first_block || block_number == first_block && block_number == 0 - }); for (block_index, mut account_block_reverts) in self.0.accounts.into_iter().enumerate() { let block_number = first_block + block_index as BlockNumber; // Sort accounts by address. 
account_block_reverts.par_sort_by_key(|a| a.0); for (address, info) in account_block_reverts { - if should_append_accounts { - account_changeset_cursor.append_dup( - block_number, - AccountBeforeTx { address, info: info.map(into_reth_acc) }, - )?; - } else { - // upsert on dupsort tables will append to subkey. see implementation of - // DbCursorRW::upsert for reth_db::implementation::mdbx::cursor::Cursor - account_changeset_cursor.upsert( - block_number, - AccountBeforeTx { address, info: info.map(into_reth_acc) }, - )?; - } + account_changeset_cursor.append_dup( + block_number, + AccountBeforeTx { address, info: info.map(into_reth_acc) }, + )?; } } From f6649c31b2947a0be9807ff3e21a4827553df3fb Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 2 May 2024 14:43:37 +0100 Subject: [PATCH 441/700] docs(libmdbx): `static_files` -> `snapshots` (#8046) --- crates/storage/libmdbx-rs/src/environment.rs | 2 +- crates/storage/libmdbx-rs/src/flags.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 218196c49e680..03afb47843520 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -976,7 +976,7 @@ mod tests { } // Insert more data in the database, so we hit the DB size limit error, and MDBX tries to - // kick long-lived readers and delete their static_files + // kick long-lived readers and delete their snapshots { let tx = env.begin_rw_txn().unwrap(); let db = tx.open_db(None).unwrap(); diff --git a/crates/storage/libmdbx-rs/src/flags.rs b/crates/storage/libmdbx-rs/src/flags.rs index e6b2697a859af..843ae161c0652 100644 --- a/crates/storage/libmdbx-rs/src/flags.rs +++ b/crates/storage/libmdbx-rs/src/flags.rs @@ -25,7 +25,7 @@ pub enum SyncMode { /// /// [SyncMode::UtterlyNoSync] the [SyncMode::SafeNoSync] flag disable similarly flush system /// buffers to disk when committing a transaction. 
But there is a huge difference in how - /// are recycled the MVCC static_files corresponding to previous "steady" transactions (see + /// are recycled the MVCC snapshots corresponding to previous "steady" transactions (see /// below). /// /// With [crate::EnvironmentKind::WriteMap] the [SyncMode::SafeNoSync] instructs MDBX to use From 14d91c3ba0b043e6ab9e97ca9c76936bdb40f594 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Thu, 2 May 2024 15:58:17 +0200 Subject: [PATCH 442/700] fix: make discv4 packets adhere to eip-8 (#8039) --- Cargo.lock | 1 + crates/ethereum-forks/src/forkid.rs | 75 ++++++++++- crates/net/discv4/Cargo.toml | 8 +- crates/net/discv4/src/proto.rs | 199 +++++++++++++++++++++++++++- 4 files changed, 275 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0a831ae808976..2c587c8c77f3f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6583,6 +6583,7 @@ name = "reth-discv4" version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", + "assert_matches", "discv5", "enr", "generic-array", diff --git a/crates/ethereum-forks/src/forkid.rs b/crates/ethereum-forks/src/forkid.rs index 3be3e3ab84d99..ee4edb8bdf8f6 100644 --- a/crates/ethereum-forks/src/forkid.rs +++ b/crates/ethereum-forks/src/forkid.rs @@ -4,7 +4,7 @@ use crate::Head; use alloy_primitives::{hex, BlockNumber, B256}; -use alloy_rlp::*; +use alloy_rlp::{Error as RlpError, *}; #[cfg(any(test, feature = "arbitrary"))] use arbitrary::Arbitrary; use crc::*; @@ -116,19 +116,51 @@ pub struct ForkId { } /// Represents a forward-compatible ENR entry for including the forkid in a node record via -/// EIP-868. Forward compatibility is achieved by allowing trailing fields. +/// EIP-868. Forward compatibility is achieved via EIP-8. /// /// See: /// /// /// for how geth implements ForkId values and forward compatibility. 
-#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] -#[rlp(trailing)] +#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable)] pub struct EnrForkIdEntry { /// The inner forkid pub fork_id: ForkId, } +impl Decodable for EnrForkIdEntry { + // NOTE(onbjerg): Manual implementation to satisfy EIP-8. + // + // See https://eips.ethereum.org/EIPS/eip-8 + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let b = &mut &**buf; + let rlp_head = Header::decode(b)?; + if !rlp_head.list { + return Err(RlpError::UnexpectedString) + } + let started_len = b.len(); + + let this = Self { fork_id: Decodable::decode(b)? }; + + // NOTE(onbjerg): Because of EIP-8, we only check that we did not consume *more* than the + // payload length, i.e. it is ok if payload length is greater than what we consumed, as we + // just discard the remaining list items + let consumed = started_len - b.len(); + if consumed > rlp_head.payload_length { + return Err(RlpError::ListLengthMismatch { + expected: rlp_head.payload_length, + got: consumed, + }) + } + + let rem = rlp_head.payload_length - consumed; + b.advance(rem); + *buf = *b; + + Ok(this) + } +} + impl From for EnrForkIdEntry { fn from(fork_id: ForkId) -> Self { Self { fork_id } @@ -652,4 +684,39 @@ mod tests { assert!(fork_filter.set_head_priv(Head { number: b2, ..Default::default() }).is_some()); assert_eq!(fork_filter.current(), h2); } + + mod eip8 { + use super::*; + + fn junk_enr_fork_id_entry() -> Vec { + let mut buf = Vec::new(); + // enr request is just an expiration + let fork_id = ForkId { hash: ForkHash(hex!("deadbeef")), next: 0xBADDCAFE }; + + // add some junk + let junk: u64 = 112233; + + // rlp header encoding + let payload_length = fork_id.length() + junk.length(); + alloy_rlp::Header { list: true, payload_length }.encode(&mut buf); + + // fields + fork_id.encode(&mut buf); + junk.encode(&mut buf); + + buf + } + + #[test] + fn eip8_decode_enr_fork_id_entry() { + let enr_fork_id_entry_with_junk = 
junk_enr_fork_id_entry(); + + let mut buf = enr_fork_id_entry_with_junk.as_slice(); + let decoded = EnrForkIdEntry::decode(&mut buf).unwrap(); + assert_eq!( + decoded.fork_id, + ForkId { hash: ForkHash(hex!("deadbeef")), next: 0xBADDCAFE } + ); + } + } } diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index bd7e99ee6d062..49e9b4ecc2e0f 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -21,7 +21,12 @@ reth-network-types.workspace = true # ethereum alloy-rlp = { workspace = true, features = ["derive"] } discv5.workspace = true -secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] } +secp256k1 = { workspace = true, features = [ + "global-context", + "rand-std", + "recovery", + "serde", +] } enr.workspace = true # async/futures tokio = { workspace = true, features = ["io-util", "net", "time"] } @@ -36,6 +41,7 @@ generic-array = "0.14" serde = { workspace = true, optional = true } [dev-dependencies] +assert_matches.workspace = true rand.workspace = true tokio = { workspace = true, features = ["macros"] } reth-tracing.workspace = true diff --git a/crates/net/discv4/src/proto.rs b/crates/net/discv4/src/proto.rs index da84dc05aa243..62dd9235d0f44 100644 --- a/crates/net/discv4/src/proto.rs +++ b/crates/net/discv4/src/proto.rs @@ -215,7 +215,7 @@ impl NodeEndpoint { } /// A [FindNode packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#findnode-packet-0x03). -#[derive(Clone, Copy, Debug, Eq, PartialEq, RlpEncodable, RlpDecodable)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, RlpEncodable)] pub struct FindNode { /// The target node's ID, a 64-byte secp256k1 public key. pub id: PeerId, @@ -223,8 +223,41 @@ pub struct FindNode { pub expire: u64, } +impl Decodable for FindNode { + // NOTE(onbjerg): Manual implementation to satisfy EIP-8. 
+ // + // See https://eips.ethereum.org/EIPS/eip-8 + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let b = &mut &**buf; + let rlp_head = Header::decode(b)?; + if !rlp_head.list { + return Err(RlpError::UnexpectedString) + } + let started_len = b.len(); + + let this = Self { id: Decodable::decode(b)?, expire: Decodable::decode(b)? }; + + // NOTE(onbjerg): Because of EIP-8, we only check that we did not consume *more* than the + // payload length, i.e. it is ok if payload length is greater than what we consumed, as we + // just discard the remaining list items + let consumed = started_len - b.len(); + if consumed > rlp_head.payload_length { + return Err(RlpError::ListLengthMismatch { + expected: rlp_head.payload_length, + got: consumed, + }) + } + + let rem = rlp_head.payload_length - consumed; + b.advance(rem); + *buf = *b; + + Ok(this) + } +} + /// A [Neighbours packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#neighbors-packet-0x04). -#[derive(Clone, Debug, Eq, PartialEq, RlpEncodable, RlpDecodable)] +#[derive(Clone, Debug, Eq, PartialEq, RlpEncodable)] pub struct Neighbours { /// The list of nodes containing IP, UDP port, TCP port, and node ID. pub nodes: Vec, @@ -232,16 +265,82 @@ pub struct Neighbours { pub expire: u64, } +impl Decodable for Neighbours { + // NOTE(onbjerg): Manual implementation to satisfy EIP-8. + // + // See https://eips.ethereum.org/EIPS/eip-8 + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let b = &mut &**buf; + let rlp_head = Header::decode(b)?; + if !rlp_head.list { + return Err(RlpError::UnexpectedString) + } + let started_len = b.len(); + + let this = Self { nodes: Decodable::decode(b)?, expire: Decodable::decode(b)? }; + + // NOTE(onbjerg): Because of EIP-8, we only check that we did not consume *more* than the + // payload length, i.e. 
it is ok if payload length is greater than what we consumed, as we + // just discard the remaining list items + let consumed = started_len - b.len(); + if consumed > rlp_head.payload_length { + return Err(RlpError::ListLengthMismatch { + expected: rlp_head.payload_length, + got: consumed, + }) + } + + let rem = rlp_head.payload_length - consumed; + b.advance(rem); + *buf = *b; + + Ok(this) + } +} + /// A [ENRRequest packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#enrrequest-packet-0x05). /// /// This packet is used to request the current version of a node's Ethereum Node Record (ENR). -#[derive(Clone, Copy, Debug, Eq, PartialEq, RlpEncodable, RlpDecodable)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, RlpEncodable)] pub struct EnrRequest { /// The expiration timestamp for the request. No reply should be sent if it refers to a time in /// the past. pub expire: u64, } +impl Decodable for EnrRequest { + // NOTE(onbjerg): Manual implementation to satisfy EIP-8. + // + // See https://eips.ethereum.org/EIPS/eip-8 + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let b = &mut &**buf; + let rlp_head = Header::decode(b)?; + if !rlp_head.list { + return Err(RlpError::UnexpectedString) + } + let started_len = b.len(); + + let this = Self { expire: Decodable::decode(b)? }; + + // NOTE(onbjerg): Because of EIP-8, we only check that we did not consume *more* than the + // payload length, i.e. it is ok if payload length is greater than what we consumed, as we + // just discard the remaining list items + let consumed = started_len - b.len(); + if consumed > rlp_head.payload_length { + return Err(RlpError::ListLengthMismatch { + expected: rlp_head.payload_length, + got: consumed, + }) + } + + let rem = rlp_head.payload_length - consumed; + b.advance(rem); + *buf = *b; + + Ok(this) + } +} + /// A [ENRResponse packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#enrresponse-packet-0x06). 
/// /// This packet is used to respond to an ENRRequest packet and includes the requested ENR along with @@ -442,6 +541,7 @@ mod tests { test_utils::{rng_endpoint, rng_ipv4_record, rng_ipv6_record, rng_message}, DEFAULT_DISCOVERY_PORT, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS, }; + use assert_matches::assert_matches; use enr::EnrPublicKey; use rand::{thread_rng, Rng, RngCore}; use reth_primitives::{hex, ForkHash}; @@ -769,4 +869,97 @@ mod tests { ); assert!(decoded_enr.verify()); } + + mod eip8 { + use super::*; + + fn junk_enr_request() -> Vec { + let mut buf = Vec::new(); + // enr request is just an expiration + let expire: u64 = 123456; + + // add some junk + let junk: u64 = 112233; + + // rlp header encoding + let payload_length = expire.length() + junk.length(); + alloy_rlp::Header { list: true, payload_length }.encode(&mut buf); + + // fields + expire.encode(&mut buf); + junk.encode(&mut buf); + + buf + } + + // checks that junk data at the end of the packet is discarded according to eip-8 + #[test] + fn eip8_decode_enr_request() { + let enr_request_with_junk = junk_enr_request(); + + let mut buf = enr_request_with_junk.as_slice(); + let decoded = EnrRequest::decode(&mut buf).unwrap(); + assert_eq!(decoded.expire, 123456); + } + + // checks that junk data at the end of the packet is discarded according to eip-8 + // + // test vector from eip-8: https://eips.ethereum.org/EIPS/eip-8 + #[test] + fn eip8_decode_findnode() { + let findnode_with_junk = hex!("c7c44041b9f7c7e41934417ebac9a8e1a4c6298f74553f2fcfdcae6ed6fe53163eb3d2b52e39fe91831b8a927bf4fc222c3902202027e5e9eb812195f95d20061ef5cd31d502e47ecb61183f74a504fe04c51e73df81f25c4d506b26db4517490103f84eb840ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f8443b9a35582999983999999280dc62cc8255c73471e0a61da0c89acdc0e035e260add7fc0c04ad9ebf3919644c91cb247affc82b69bd2ca235c71eab8e49737c937a2c396"); + + let buf = findnode_with_junk.as_slice(); + 
let decoded = Message::decode(buf).unwrap(); + + let expected_id = hex!("ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f"); + assert_matches!(decoded.msg, Message::FindNode(FindNode { id, expire: 1136239445 }) if id == expected_id); + } + + // checks that junk data at the end of the packet is discarded according to eip-8 + // + // test vector from eip-8: https://eips.ethereum.org/EIPS/eip-8 + #[test] + fn eip8_decode_neighbours() { + let neighbours_with_junk = hex!("c679fc8fe0b8b12f06577f2e802d34f6fa257e6137a995f6f4cbfc9ee50ed3710faf6e66f932c4c8d81d64343f429651328758b47d3dbc02c4042f0fff6946a50f4a49037a72bb550f3a7872363a83e1b9ee6469856c24eb4ef80b7535bcf99c0004f9015bf90150f84d846321163782115c82115db8403155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32f84984010203040101b840312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069dbf8599020010db83c4d001500000000abcdef12820d05820d05b84038643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aacf8599020010db885a308d313198a2e037073488203e78203e8b8408dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df738443b9a355010203b525a138aa34383fec3d2719a0"); + + let buf = neighbours_with_junk.as_slice(); + let decoded = Message::decode(buf).unwrap(); + + let _ = NodeRecord { + address: "99.33.22.55".parse().unwrap(), + tcp_port: 4444, + udp_port: 4445, + id: hex!("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32").into(), + }.length(); + + let expected_nodes: Vec = vec![ + NodeRecord { + address: "99.33.22.55".parse().unwrap(), + tcp_port: 4444, + udp_port: 4445, + id: 
hex!("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32").into(), + }, + NodeRecord { + address: "1.2.3.4".parse().unwrap(), + tcp_port: 1, + udp_port: 1, + id: hex!("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db").into(), + }, + NodeRecord { + address: "2001:db8:3c4d:15::abcd:ef12".parse().unwrap(), + tcp_port: 3333, + udp_port: 3333, + id: hex!("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac").into(), + }, + NodeRecord { + address: "2001:db8:85a3:8d3:1319:8a2e:370:7348".parse().unwrap(), + tcp_port: 999, + udp_port: 1000, + id: hex!("8dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df73").into(), + }, + ]; + assert_matches!(decoded.msg, Message::Neighbours(Neighbours { nodes, expire: 1136239445 }) if nodes == expected_nodes); + } + } } From 2eee1920ea658ccd93f1ebe0961f6ca72ec1f7e7 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Thu, 2 May 2024 16:10:40 +0200 Subject: [PATCH 443/700] fix: check for oob offset access in nippy jar (#8037) --- crates/storage/nippy-jar/src/cursor.rs | 4 ++-- crates/storage/nippy-jar/src/error.rs | 5 +++++ crates/storage/nippy-jar/src/lib.rs | 19 ++++++++++++------- 3 files changed, 19 insertions(+), 9 deletions(-) diff --git a/crates/storage/nippy-jar/src/cursor.rs b/crates/storage/nippy-jar/src/cursor.rs index 984206c36b42f..541fcfa63f3ca 100644 --- a/crates/storage/nippy-jar/src/cursor.rs +++ b/crates/storage/nippy-jar/src/cursor.rs @@ -213,13 +213,13 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { ) -> Result<(), NippyJarError> { // Find out the offset of the column value let offset_pos = self.row as usize * self.jar.columns + column; - let value_offset = self.reader.offset(offset_pos) as usize; + let 
value_offset = self.reader.offset(offset_pos)? as usize; let column_offset_range = if self.jar.rows * self.jar.columns == offset_pos + 1 { // It's the last column of the last row value_offset..self.reader.size() } else { - let next_value_offset = self.reader.offset(offset_pos + 1) as usize; + let next_value_offset = self.reader.offset(offset_pos + 1)? as usize; value_offset..next_value_offset }; diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index 3763be3dcfe1f..d447770580803 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -42,6 +42,11 @@ pub enum NippyJarError { /// The read offset size in number of bytes. offset_size: u64, }, + #[error("attempted to read an out of bounds offset: {index}")] + OffsetOutOfBounds { + /// The index of the offset that was being read. + index: usize, + }, #[error("compression or decompression requires a bigger destination output")] OutputTooSmall, #[error("dictionary is not loaded.")] diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index 59fc586e4b395..1cecdba40b019 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -498,7 +498,7 @@ impl DataReader { } /// Returns the offset for the requested data index - pub fn offset(&self, index: usize) -> u64 { + pub fn offset(&self, index: usize) -> Result { // + 1 represents the offset_len u8 which is in the beginning of the file let from = index * self.offset_size as usize + 1; @@ -512,7 +512,7 @@ impl DataReader { if offsets_file_size > 1 { let from = offsets_file_size - self.offset_size as usize * (index + 1); - Ok(self.offset_at(from)) + self.offset_at(from) } else { Ok(0) } @@ -525,11 +525,16 @@ impl DataReader { } /// Reads one offset-sized (determined by the offset file) u64 at the provided index. 
- fn offset_at(&self, index: usize) -> u64 { + fn offset_at(&self, index: usize) -> Result { let mut buffer: [u8; 8] = [0; 8]; - buffer[..self.offset_size as usize] - .copy_from_slice(&self.offset_mmap[index..(index + self.offset_size as usize)]); - u64::from_le_bytes(buffer) + + let offset_end = index + self.offset_size as usize; + if offset_end > self.offset_mmap.len() { + return Err(NippyJarError::OffsetOutOfBounds { index }); + } + + buffer[..self.offset_size as usize].copy_from_slice(&self.offset_mmap[index..offset_end]); + Ok(u64::from_le_bytes(buffer)) } /// Returns number of bytes that represent one offset. @@ -1292,7 +1297,7 @@ mod tests { let data_reader = nippy.open_data_reader().unwrap(); // there are only two valid offsets. so index 2 actually represents the expected file // data size. - assert_eq!(data_reader.offset(2), expected_data_size as u64); + assert_eq!(data_reader.offset(2).unwrap(), expected_data_size as u64); } // This should prune from the ondisk offset list and clear the jar. From 78f62dd34c8c6c1b0a045990d4bf0d235a9d32b2 Mon Sep 17 00:00:00 2001 From: Daniel Ramirez Date: Thu, 2 May 2024 10:30:04 -0400 Subject: [PATCH 444/700] feat: add spawn_replay_transaction to EthTransactions (#8036) Co-authored-by: Oliver Nordbjerg --- crates/rpc/rpc/src/eth/api/transactions.rs | 55 ++++++++++++++++++++++ crates/rpc/rpc/src/otterscan.rs | 13 ++--- 2 files changed, 59 insertions(+), 9 deletions(-) diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 15e2b6f5650be..51bde5bfaaf3b 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -376,6 +376,20 @@ pub trait EthTransactions: Send + Sync { .await } + /// Retrieves the transaction if it exists and returns its trace. + /// + /// Before the transaction is traced, all previous transaction in the block are applied to the + /// state by executing them first. 
+ /// The callback `f` is invoked with the [ResultAndState] after the transaction was executed and + /// the database that points to the beginning of the transaction. + /// + /// Note: Implementers should use a threadpool where blocking is allowed, such as + /// [BlockingTaskPool](reth_tasks::pool::BlockingTaskPool). + async fn spawn_replay_transaction(&self, hash: B256, f: F) -> EthResult> + where + F: FnOnce(TransactionInfo, ResultAndState, StateCacheDB) -> EthResult + Send + 'static, + R: Send + 'static; + /// Retrieves the transaction if it exists and returns its trace. /// /// Before the transaction is traced, all previous transaction in the block are applied to the @@ -1173,6 +1187,47 @@ where Ok(block.map(|block| (transaction, block.seal(block_hash)))) } + async fn spawn_replay_transaction(&self, hash: B256, f: F) -> EthResult> + where + F: FnOnce(TransactionInfo, ResultAndState, StateCacheDB) -> EthResult + Send + 'static, + R: Send + 'static, + { + let (transaction, block) = match self.transaction_and_block(hash).await? 
{ + None => return Ok(None), + Some(res) => res, + }; + let (tx, tx_info) = transaction.split(); + + let (cfg, block_env, _) = self.evm_env_at(block.hash().into()).await?; + + // we need to get the state of the parent block because we're essentially replaying the + // block the transaction is included in + let parent_block = block.parent_hash; + let block_txs = block.into_transactions_ecrecovered(); + + let this = self.clone(); + self.spawn_with_state_at_block(parent_block.into(), move |state| { + let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + // replay all transactions prior to the targeted transaction + this.replay_transactions_until( + &mut db, + cfg.clone(), + block_env.clone(), + block_txs, + tx.hash, + )?; + + let env = + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, tx_env_with_recovered(&tx)); + + let (res, _) = this.transact(&mut db, env)?; + f(tx_info, res, db) + }) + .await + .map(Some) + } + async fn spawn_trace_transaction_in_block_with_inspector( &self, hash: B256, diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index bdfbc1293e3ab..2f62e66a31d59 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -1,7 +1,6 @@ use alloy_primitives::Bytes; use async_trait::async_trait; use jsonrpsee::core::RpcResult; -use revm::inspectors::NoOpInspector; use revm_inspectors::transfer::{TransferInspector, TransferKind}; use revm_primitives::ExecutionResult; @@ -81,14 +80,10 @@ where async fn get_transaction_error(&self, tx_hash: TxHash) -> RpcResult> { let maybe_revert = self .eth - .spawn_trace_transaction_in_block_with_inspector( - tx_hash, - NoOpInspector, - |_tx_info, _inspector, res, _| match res.result { - ExecutionResult::Revert { output, .. } => Ok(Some(output)), - _ => Ok(None), - }, - ) + .spawn_replay_transaction(tx_hash, |_tx_info, res, _| match res.result { + ExecutionResult::Revert { output, .. 
} => Ok(Some(output)), + _ => Ok(None), + }) .await .map(Option::flatten)?; Ok(maybe_revert) From 1603113ce5c0a99f9af6f5a42d0d11a2d64a0d4f Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 2 May 2024 15:54:34 +0100 Subject: [PATCH 445/700] docs(libmdbx): more `static_files` -> `snapshots` (#8047) --- crates/storage/libmdbx-rs/src/environment.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 03afb47843520..ba7385b949a89 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -950,8 +950,7 @@ mod tests { .open(tempdir.path()) .unwrap(); - // Insert some data in the database, so the read transaction can lock on the static file of - // it + // Insert some data in the database, so the read transaction can lock on the snapshot of it { let tx = env.begin_rw_txn().unwrap(); let db = tx.open_db(None).unwrap(); @@ -964,8 +963,7 @@ mod tests { // Create a read transaction let _tx_ro = env.begin_ro_txn().unwrap(); - // Change previously inserted data, so the read transaction would use the previous static - // file + // Change previously inserted data, so the read transaction would use the previous snapshot { let tx = env.begin_rw_txn().unwrap(); let db = tx.open_db(None).unwrap(); From e68ab2f58c93259f4e0c25411bf36d2b3db3cba0 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 2 May 2024 17:17:28 +0200 Subject: [PATCH 446/700] refactor: engine interceptors (#8048) --- Cargo.lock | 3 + .../src/commands/debug_cmd/replay_engine.rs | 11 ++- crates/consensus/beacon/src/engine/mod.rs | 32 ++++----- crates/node-core/Cargo.toml | 2 + .../engine_store.rs} | 62 +++++++++++----- crates/node-core/src/engine/mod.rs | 71 +++++++++++++++++++ crates/node-core/src/engine/skip_fcu.rs | 64 +++++++++++++++++ crates/node-core/src/engine_skip_fcu.rs | 55 -------------- crates/node-core/src/lib.rs | 3 +- 
crates/node/builder/Cargo.toml | 1 + crates/node/builder/src/launch/mod.rs | 43 ++++------- 11 files changed, 217 insertions(+), 130 deletions(-) rename crates/node-core/src/{engine_api_store.rs => engine/engine_store.rs} (78%) create mode 100644 crates/node-core/src/engine/mod.rs create mode 100644 crates/node-core/src/engine/skip_fcu.rs delete mode 100644 crates/node-core/src/engine_skip_fcu.rs diff --git a/Cargo.lock b/Cargo.lock index 2c587c8c77f3f..fcb0dfa5e023f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7196,6 +7196,7 @@ dependencies = [ "reth-transaction-pool", "tempfile", "tokio", + "tokio-stream", ] [[package]] @@ -7218,6 +7219,7 @@ dependencies = [ "metrics-process", "metrics-util", "once_cell", + "pin-project", "procfs", "proptest", "rand 0.8.5", @@ -7256,6 +7258,7 @@ dependencies = [ "thiserror", "tikv-jemalloc-ctl", "tokio", + "tokio-util", "tracing", "vergen", ] diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 1360c2f1ba11b..947c127452ed2 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -19,7 +19,7 @@ use reth_consensus::Consensus; use reth_db::{init_db, DatabaseEnv}; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; -use reth_node_core::engine_api_store::{EngineApiStore, StoredEngineApiMessage}; +use reth_node_core::engine::engine_store::{EngineMessageStore, StoredEngineApiMessage}; #[cfg(not(feature = "optimism"))] use reth_node_ethereum::{EthEngineTypes, EthEvmConfig}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; @@ -34,7 +34,7 @@ use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_transaction_pool::noop::NoopTransactionPool; use std::{net::SocketAddr, path::PathBuf, sync::Arc, time::Duration}; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::oneshot; use tracing::*; /// `reth debug replay-engine` command @@ -191,8 +191,7 @@ impl 
Command { // Configure the consensus engine let network_client = network.fetch_client().await?; - let (consensus_engine_tx, consensus_engine_rx) = mpsc::unbounded_channel(); - let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel( + let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::new( network_client, Pipeline::builder().build( provider_factory.clone(), @@ -210,8 +209,6 @@ impl Command { payload_builder, None, u64::MAX, - consensus_engine_tx, - consensus_engine_rx, EngineHooks::new(), )?; info!(target: "reth::cli", "Consensus engine initialized"); @@ -224,7 +221,7 @@ impl Command { let _ = tx.send(res); }); - let engine_api_store = EngineApiStore::new(self.engine_api_store.clone()); + let engine_api_store = EngineMessageStore::new(self.engine_api_store.clone()); for filepath in engine_api_store.engine_messages_iter()? { let contents = fs::read(&filepath).wrap_err(format!("failed to read: {}", filepath.display()))?; diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 839bb0278c90b..a7761615ce6d7 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1,18 +1,10 @@ -use crate::{ - engine::{ - forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker}, - metrics::EngineMetrics, - }, - hooks::{EngineHookContext, EngineHooksController}, - sync::{EngineSyncController, EngineSyncEvent}, -}; -use futures::{Future, StreamExt}; +use futures::{stream::BoxStream, Future, StreamExt}; use reth_db::database::Database; use reth_engine_primitives::{EngineTypes, PayloadAttributes, PayloadBuilderAttributes}; use reth_interfaces::{ blockchain_tree::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, - BlockStatus, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, + BlockStatus, BlockValidationKind, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, }, 
executor::BlockValidationError, p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}, @@ -21,6 +13,7 @@ use reth_interfaces::{ RethError, RethResult, }; use reth_payload_builder::PayloadBuilderHandle; +use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{ constants::EPOCH_SLOTS, stage::StageId, BlockNumHash, BlockNumber, Head, Header, SealedBlock, SealedHeader, B256, @@ -43,7 +36,7 @@ use std::{ time::{Duration, Instant}, }; use tokio::sync::{ - mpsc::{self, UnboundedReceiver, UnboundedSender}, + mpsc::{self, UnboundedSender}, oneshot, }; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -68,18 +61,19 @@ mod handle; pub use handle::BeaconConsensusEngineHandle; mod forkchoice; -use crate::hooks::{EngineHookEvent, EngineHooks, PolledHook}; pub use forkchoice::ForkchoiceStatus; -use reth_interfaces::blockchain_tree::BlockValidationKind; -use reth_payload_validator::ExecutionPayloadValidator; +use forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker}; mod metrics; +use metrics::EngineMetrics; pub(crate) mod sync; +use sync::{EngineSyncController, EngineSyncEvent}; /// Hooks for running during the main loop of /// [consensus engine][`crate::engine::BeaconConsensusEngine`]. pub mod hooks; +use hooks::{EngineHookContext, EngineHookEvent, EngineHooks, EngineHooksController, PolledHook}; #[cfg(test)] pub mod test_utils; @@ -180,7 +174,7 @@ where /// Used for emitting updates about whether the engine is syncing or not. sync_state_updater: Box, /// The Engine API message receiver. - engine_message_rx: UnboundedReceiverStream>, + engine_message_stream: BoxStream<'static, BeaconEngineMessage>, /// A clone of the handle handle: BeaconConsensusEngineHandle, /// Tracks the received forkchoice state updates received by the CL. 
@@ -254,7 +248,7 @@ where target, pipeline_run_threshold, to_engine, - rx, + Box::pin(UnboundedReceiverStream::from(rx)), hooks, ) } @@ -284,7 +278,7 @@ where target: Option, pipeline_run_threshold: u64, to_engine: UnboundedSender>, - rx: UnboundedReceiver>, + engine_message_stream: BoxStream<'static, BeaconEngineMessage>, hooks: EngineHooks, ) -> RethResult<(Self, BeaconConsensusEngineHandle)> { let handle = BeaconConsensusEngineHandle { to_engine }; @@ -303,7 +297,7 @@ where payload_validator: ExecutionPayloadValidator::new(blockchain.chain_spec()), blockchain, sync_state_updater, - engine_message_rx: UnboundedReceiverStream::new(rx), + engine_message_stream, handle: handle.clone(), forkchoice_state_tracker: Default::default(), payload_builder, @@ -1770,7 +1764,7 @@ where // // These messages can affect the state of the SyncController and they're also time // sensitive, hence they are polled first. - if let Poll::Ready(Some(msg)) = this.engine_message_rx.poll_next_unpin(cx) { + if let Poll::Ready(Some(msg)) = this.engine_message_stream.poll_next_unpin(cx) { match msg { BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { this.on_forkchoice_updated(state, payload_attrs, tx); diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index 3caf5d9d1a60f..e19b4d242fa80 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -44,6 +44,8 @@ discv5.workspace = true # async tokio.workspace = true +tokio-util.workspace = true +pin-project.workspace = true # metrics metrics-exporter-prometheus = "0.12.1" diff --git a/crates/node-core/src/engine_api_store.rs b/crates/node-core/src/engine/engine_store.rs similarity index 78% rename from crates/node-core/src/engine_api_store.rs rename to crates/node-core/src/engine/engine_store.rs index 5552137f621a4..524e2c89bc269 100644 --- a/crates/node-core/src/engine_api_store.rs +++ b/crates/node-core/src/engine/engine_store.rs @@ -1,5 +1,6 @@ //! 
Stores engine API messages to disk for later inspection and replay. +use futures::{Stream, StreamExt}; use reth_beacon_consensus::BeaconEngineMessage; use reth_engine_primitives::EngineTypes; use reth_primitives::fs; @@ -8,8 +9,13 @@ use reth_rpc_types::{ ExecutionPayload, }; use serde::{Deserialize, Serialize}; -use std::{collections::BTreeMap, path::PathBuf, time::SystemTime}; -use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; +use std::{ + collections::BTreeMap, + path::PathBuf, + pin::Pin, + task::{ready, Context, Poll}, + time::SystemTime, +}; use tracing::*; /// A message from the engine API that has been stored to disk. @@ -34,13 +40,13 @@ pub enum StoredEngineApiMessage { /// This can read and write engine API messages in a specific directory. #[derive(Debug)] -pub struct EngineApiStore { +pub struct EngineMessageStore { /// The path to the directory that stores the engine API messages. path: PathBuf, } -impl EngineApiStore { - /// Creates a new [EngineApiStore] at the given path. +impl EngineMessageStore { + /// Creates a new [EngineMessageStore] at the given path. /// /// The path is expected to be a directory, where individual message JSON files will be stored. pub fn new(path: PathBuf) -> Self { @@ -108,22 +114,42 @@ impl EngineApiStore { } Ok(filenames_by_ts.into_iter().flat_map(|(_, paths)| paths)) } +} - /// Intercepts an incoming engine API message, storing it to disk and forwarding it to the - /// engine channel. - pub async fn intercept( - self, - mut rx: UnboundedReceiver>, - to_engine: UnboundedSender>, - ) where - Engine: EngineTypes, - BeaconEngineMessage: std::fmt::Debug, - { - while let Some(msg) = rx.recv().await { - if let Err(error) = self.on_message(&msg, SystemTime::now()) { +/// A wrapper stream that stores Engine API messages in +/// the specified directory. +#[derive(Debug)] +#[pin_project::pin_project] +pub struct EngineStoreStream { + /// Inner message stream. + #[pin] + stream: S, + /// Engine message store. 
+ store: EngineMessageStore, +} + +impl EngineStoreStream { + /// Create new engine store stream wrapper. + pub fn new(stream: S, path: PathBuf) -> Self { + Self { stream, store: EngineMessageStore::new(path) } + } +} + +impl Stream for EngineStoreStream +where + Engine: EngineTypes, + S: Stream>, +{ + type Item = S::Item; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.project(); + let next = ready!(this.stream.poll_next_unpin(cx)); + if let Some(msg) = &next { + if let Err(error) = this.store.on_message(msg, SystemTime::now()) { error!(target: "engine::intercept", ?msg, %error, "Error handling Engine API message"); } - let _ = to_engine.send(msg); } + Poll::Ready(next) } } diff --git a/crates/node-core/src/engine/mod.rs b/crates/node-core/src/engine/mod.rs new file mode 100644 index 0000000000000..4ba8479e90e42 --- /dev/null +++ b/crates/node-core/src/engine/mod.rs @@ -0,0 +1,71 @@ +//! Collection of various stream utilities for consensus engine. + +use futures::Stream; +use reth_beacon_consensus::BeaconEngineMessage; +use reth_engine_primitives::EngineTypes; +use std::path::PathBuf; +use tokio_util::either::Either; + +pub mod engine_store; +use engine_store::EngineStoreStream; + +pub mod skip_fcu; +use skip_fcu::EngineSkipFcu; + +/// The collection of stream extensions for engine API message stream. +pub trait EngineMessageStreamExt: + Stream> +{ + /// Skips the specified number of [BeaconEngineMessage::ForkchoiceUpdated] messages from the + /// engine message stream. + fn skip_fcu(self, count: usize) -> EngineSkipFcu + where + Self: Sized, + { + EngineSkipFcu::new(self, count) + } + + /// If the count is [Some], returns the stream that skips the specified number of + /// [BeaconEngineMessage::ForkchoiceUpdated] messages. Otherwise, returns `Self`. 
+ fn maybe_skip_fcu(self, maybe_count: Option) -> Either, Self> + where + Self: Sized, + { + if let Some(count) = maybe_count { + Either::Left(self.skip_fcu(count)) + } else { + Either::Right(self) + } + } + + /// Stores engine messages at the specified location. + fn store_messages(self, path: PathBuf) -> EngineStoreStream + where + Self: Sized, + { + EngineStoreStream::new(self, path) + } + + /// If the path is [Some], returns the stream that stores engine messages at the specified + /// location. Otherwise, returns `Self`. + fn maybe_store_messages( + self, + maybe_path: Option, + ) -> Either, Self> + where + Self: Sized, + { + if let Some(path) = maybe_path { + Either::Left(self.store_messages(path)) + } else { + Either::Right(self) + } + } +} + +impl EngineMessageStreamExt for T +where + Engine: EngineTypes, + T: Stream>, +{ +} diff --git a/crates/node-core/src/engine/skip_fcu.rs b/crates/node-core/src/engine/skip_fcu.rs new file mode 100644 index 0000000000000..34004134f3937 --- /dev/null +++ b/crates/node-core/src/engine/skip_fcu.rs @@ -0,0 +1,64 @@ +//! Stores engine API messages to disk for later inspection and replay. + +use futures::{Stream, StreamExt}; +use reth_beacon_consensus::{BeaconEngineMessage, OnForkChoiceUpdated}; +use reth_engine_primitives::EngineTypes; +use std::{ + pin::Pin, + task::{ready, Context, Poll}, +}; + +/// Engine API stream wrapper that skips the specified number of forkchoice updated messages. +#[derive(Debug)] +#[pin_project::pin_project] +pub struct EngineSkipFcu { + #[pin] + stream: S, + /// The number of FCUs to skip. + threshold: usize, + /// Current count of skipped FCUs. + skipped: usize, +} + +impl EngineSkipFcu { + /// Creates new [EngineSkipFcu] stream wrapper. + pub fn new(stream: S, threshold: usize) -> Self { + Self { + stream, + threshold, + // Start with `threshold` so that the first FCU goes through. 
+ skipped: threshold, + } + } +} + +impl Stream for EngineSkipFcu +where + Engine: EngineTypes, + S: Stream>, +{ + type Item = S::Item; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.project(); + + loop { + let next = ready!(this.stream.poll_next_unpin(cx)); + let item = match next { + Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }) => { + if this.skipped < this.threshold { + *this.skipped += 1; + tracing::warn!(target: "engine::intercept", ?state, ?payload_attrs, threshold=this.threshold, skipped=this.skipped, "Skipping FCU"); + let _ = tx.send(Ok(OnForkChoiceUpdated::syncing())); + continue + } else { + *this.skipped = 0; + Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }) + } + } + next => next, + }; + return Poll::Ready(item) + } + } +} diff --git a/crates/node-core/src/engine_skip_fcu.rs b/crates/node-core/src/engine_skip_fcu.rs deleted file mode 100644 index a6e5e1b01d4fb..0000000000000 --- a/crates/node-core/src/engine_skip_fcu.rs +++ /dev/null @@ -1,55 +0,0 @@ -//! Stores engine API messages to disk for later inspection and replay. - -use reth_beacon_consensus::{BeaconEngineMessage, OnForkChoiceUpdated}; -use reth_engine_primitives::EngineTypes; -use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; - -/// Intercept Engine API message and skip FCUs. -#[derive(Debug)] -pub struct EngineApiSkipFcu { - /// The number of FCUs to skip. - threshold: usize, - /// Current count of skipped FCUs. - skipped: usize, -} - -impl EngineApiSkipFcu { - /// Creates new [EngineApiSkipFcu] interceptor. - pub fn new(threshold: usize) -> Self { - Self { - threshold, - // Start with `threshold` so that the first FCU goes through. - skipped: threshold, - } - } - - /// Intercepts an incoming engine API message, skips FCU or forwards it - /// to the engine depending on current number of skipped FCUs. 
- pub async fn intercept( - mut self, - mut rx: UnboundedReceiver>, - to_engine: UnboundedSender>, - ) where - Engine: EngineTypes, - BeaconEngineMessage: std::fmt::Debug, - { - while let Some(msg) = rx.recv().await { - if let BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } = msg { - if self.skipped < self.threshold { - self.skipped += 1; - tracing::warn!(target: "engine::intercept", ?state, ?payload_attrs, threshold=self.threshold, skipped=self.skipped, "Skipping FCU"); - let _ = tx.send(Ok(OnForkChoiceUpdated::syncing())); - } else { - self.skipped = 0; - let _ = to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { - state, - payload_attrs, - tx, - }); - } - } else { - let _ = to_engine.send(msg); - } - } - } -} diff --git a/crates/node-core/src/lib.rs b/crates/node-core/src/lib.rs index 3d73e0e610796..024467ab16c05 100644 --- a/crates/node-core/src/lib.rs +++ b/crates/node-core/src/lib.rs @@ -11,8 +11,7 @@ pub mod args; pub mod cli; pub mod dirs; -pub mod engine_api_store; -pub mod engine_skip_fcu; +pub mod engine; pub mod exit; pub mod init; pub mod metrics; diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index ef671f1276e00..136c27d7c4007 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -47,6 +47,7 @@ tokio = { workspace = true, features = [ "time", "rt-multi-thread", ] } +tokio-stream.workspace = true ## misc aquamarine.workspace = true diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index bd81f83864907..a372bedf0366c 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -14,7 +14,8 @@ use reth_beacon_consensus::{ BeaconConsensus, BeaconConsensusEngine, }; use reth_blockchain_tree::{ - BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, + noop::NoopBlockchainTree, BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, + TreeExternals, }; use 
reth_consensus::Consensus; use reth_exex::{ExExContext, ExExHandle, ExExManager, ExExManagerHandle}; @@ -23,8 +24,7 @@ use reth_network::NetworkEvents; use reth_node_api::{FullNodeComponents, FullNodeTypes}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, - engine_api_store::EngineApiStore, - engine_skip_fcu::EngineApiSkipFcu, + engine::EngineMessageStreamExt, exit::NodeExitFuture, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; @@ -37,10 +37,10 @@ use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::TransactionPool; use std::{future::Future, sync::Arc}; use tokio::sync::{mpsc::unbounded_channel, oneshot}; +use tokio_stream::wrappers::UnboundedReceiverStream; pub mod common; pub use common::LaunchContext; -use reth_blockchain_tree::noop::NoopBlockchainTree; /// A general purpose trait that launches a new node of any kind. /// @@ -261,29 +261,15 @@ where // create pipeline let network_client = node_adapter.network().fetch_client().await?; - let (consensus_engine_tx, mut consensus_engine_rx) = unbounded_channel(); + let (consensus_engine_tx, consensus_engine_rx) = unbounded_channel(); - if let Some(skip_fcu_threshold) = ctx.node_config().debug.skip_fcu { - debug!(target: "reth::cli", "spawning skip FCU task"); - let (skip_fcu_tx, skip_fcu_rx) = unbounded_channel(); - let engine_skip_fcu = EngineApiSkipFcu::new(skip_fcu_threshold); - ctx.task_executor().spawn_critical( - "skip FCU interceptor", - engine_skip_fcu.intercept(consensus_engine_rx, skip_fcu_tx), - ); - consensus_engine_rx = skip_fcu_rx; - } - - if let Some(store_path) = ctx.node_config().debug.engine_api_store.clone() { - debug!(target: "reth::cli", "spawning engine API store"); - let (engine_intercept_tx, engine_intercept_rx) = unbounded_channel(); - let engine_api_store = EngineApiStore::new(store_path); - ctx.task_executor().spawn_critical( - "engine api interceptor", - engine_api_store.intercept(consensus_engine_rx, engine_intercept_tx), - ); - consensus_engine_rx 
= engine_intercept_rx; - }; + let node_config = ctx.node_config(); + let consensus_engine_stream = UnboundedReceiverStream::from(consensus_engine_rx) + .maybe_skip_fcu(node_config.debug.skip_fcu) + // Store messages _after_ skipping messages so that `replay-engine` command + // would replay the exact same messages that were observed by the engine + // during this run. + .maybe_store_messages(node_config.debug.engine_api_store.clone()); let max_block = ctx.max_block(network_client.clone()).await?; let mut hooks = EngineHooks::new(); @@ -303,8 +289,7 @@ where info!(target: "reth::cli", "Starting Reth in dev mode"); for (idx, (address, alloc)) in ctx.chain_spec().genesis.alloc.iter().enumerate() { - info!(target: "reth::cli", "Allocated Genesis Account: {:02}. {} ({} ETH)", idx, -address.to_string(), format_ether(alloc.balance)); + info!(target: "reth::cli", "Allocated Genesis Account: {:02}. {} ({} ETH)", idx, address.to_string(), format_ether(alloc.balance)); } // install auto-seal @@ -395,7 +380,7 @@ address.to_string(), format_ether(alloc.balance)); initial_target, reth_beacon_consensus::MIN_BLOCKS_FOR_PIPELINE_RUN, consensus_engine_tx, - consensus_engine_rx, + Box::pin(consensus_engine_stream), hooks, )?; info!(target: "reth::cli", "Consensus engine initialized"); From 2af2f0ba46ff9f1324f755bb50f24a2604445e55 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 2 May 2024 17:22:23 +0200 Subject: [PATCH 447/700] chore(engine): new payload blockchain tree action (#8041) --- crates/consensus/beacon/src/engine/mod.rs | 193 +++++++++++++--------- 1 file changed, 114 insertions(+), 79 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index a7761615ce6d7..e8b27dc57ccff 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -504,8 +504,10 @@ where // or cannot be processed at the moment. 
self.on_forkchoice_updated_status(state, on_updated, tx); } else { - self.blockchain_tree_action = - Some(BlockchainTreeAction::FcuMakeCanonical { state, attrs, tx }); + let previous_action = self + .blockchain_tree_action + .replace(BlockchainTreeAction::FcuMakeCanonical { state, attrs, tx }); + debug_assert!(previous_action.is_none(), "Pre-existing action found"); } } @@ -1030,13 +1032,17 @@ where &mut self, payload: ExecutionPayload, cancun_fields: Option, - ) -> Result { + tx: oneshot::Sender>, + ) { + self.metrics.new_payload_messages.increment(1); + let block = match self.ensure_well_formed_payload(payload, cancun_fields) { Ok(block) => block, - Err(status) => return Ok(status), + Err(status) => { + let _ = tx.send(Ok(status)); + return + } }; - let block_hash = block.hash(); - let block_num_hash = block.num_hash(); let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block.hash()); if lowest_buffered_ancestor == block.hash() { @@ -1047,74 +1053,14 @@ where if let Some(status) = self.check_invalid_ancestor_with_head(lowest_buffered_ancestor, block.hash()) { - return Ok(status) + let _ = tx.send(Ok(status)); + return } - let res = if self.sync.is_pipeline_idle() { - // we can only insert new payloads if the pipeline is _not_ running, because it holds - // exclusive access to the database - self.try_insert_new_payload(block) - } else { - self.try_buffer_payload(block) - }; - - let status = match res { - Ok(status) => { - if status.is_valid() { - if let Some(target) = self.forkchoice_state_tracker.sync_target_state() { - // if we're currently syncing and the inserted block is the targeted FCU - // head block, we can try to make it canonical. 
- if block_hash == target.head_block_hash { - if let Err((_hash, error)) = - self.try_make_sync_target_canonical(block_num_hash) - { - return if error.is_fatal() { - error!(target: "consensus::engine", %error, "Encountered fatal error"); - Err(BeaconOnNewPayloadError::Internal(Box::new(error))) - } else { - // If we could not make the sync target block canonical, we - // should return the error as an invalid payload status. - Ok(PayloadStatus::new( - PayloadStatusEnum::Invalid { - validation_error: error.to_string(), - }, - // TODO: return a proper latest valid hash - // - // See: - self.forkchoice_state_tracker.last_valid_head(), - )) - } - } - } - } - // block was successfully inserted, so we can cancel the full block request, if - // any exists - self.sync.cancel_full_block_request(block_hash); - } - Ok(status) - } - Err(error) => { - warn!(target: "consensus::engine", %error, "Error while processing payload"); - - // If the error was due to an invalid payload, the payload is added to the invalid - // headers cache and `Ok` with [PayloadStatusEnum::Invalid] is returned. 
- let (block, error) = error.split(); - if error.is_invalid_block() { - warn!(target: "consensus::engine", invalid_hash=?block.hash(), invalid_number=?block.number, %error, "Invalid block error on new payload"); - let latest_valid_hash = - self.latest_valid_hash_for_invalid_payload(block.parent_hash, Some(&error)); - // keep track of the invalid header - self.invalid_headers.insert(block.header); - let status = PayloadStatusEnum::Invalid { validation_error: error.to_string() }; - Ok(PayloadStatus::new(status, latest_valid_hash)) - } else { - Err(BeaconOnNewPayloadError::Internal(Box::new(error))) - } - } - }; - - trace!(target: "consensus::engine", ?status, "Returning payload status"); - status + let previous_action = self + .blockchain_tree_action + .replace(BlockchainTreeAction::InsertNewPayload { block, tx }); + debug_assert!(previous_action.is_none(), "Pre-existing action found"); } /// Ensures that the given payload does not violate any consensus rules that concern the block's @@ -1670,7 +1616,9 @@ where Ok(()) } - /// Process the outcome of blockchain tree action. + /// Process the next set blockchain tree action. + /// The handler might set next blockchain tree action to perform, + /// so the state change should be handled accordingly. 
fn on_blockchain_tree_action( &mut self, action: BlockchainTreeAction, @@ -1705,6 +1653,84 @@ where } }; } + BlockchainTreeAction::InsertNewPayload { block, tx } => { + let block_hash = block.hash(); + let block_num_hash = block.num_hash(); + let result = if self.sync.is_pipeline_idle() { + // we can only insert new payloads if the pipeline is _not_ running, because it + // holds exclusive access to the database + self.try_insert_new_payload(block) + } else { + self.try_buffer_payload(block) + }; + + let status = match result { + Ok(status) => status, + Err(error) => { + warn!(target: "consensus::engine", %error, "Error while processing payload"); + + let (block, error) = error.split(); + if !error.is_invalid_block() { + // TODO: revise if any error should be considered fatal at this point. + let _ = + tx.send(Err(BeaconOnNewPayloadError::Internal(Box::new(error)))); + return Ok(EngineEventOutcome::Processed) + } + + // If the error was due to an invalid payload, the payload is added to the + // invalid headers cache and `Ok` with [PayloadStatusEnum::Invalid] is + // returned. + warn!(target: "consensus::engine", invalid_hash=?block.hash(), invalid_number=?block.number, %error, "Invalid block error on new payload"); + let latest_valid_hash = self + .latest_valid_hash_for_invalid_payload(block.parent_hash, Some(&error)); + // keep track of the invalid header + self.invalid_headers.insert(block.header); + PayloadStatus::new( + PayloadStatusEnum::Invalid { validation_error: error.to_string() }, + latest_valid_hash, + ) + } + }; + + if status.is_valid() { + if let Some(target) = self.forkchoice_state_tracker.sync_target_state() { + // if we're currently syncing and the inserted block is the targeted + // FCU head block, we can try to make it canonical. 
+ if block_hash == target.head_block_hash { + if let Err((_hash, error)) = + self.try_make_sync_target_canonical(block_num_hash) + { + if error.is_fatal() { + let response = Err(BeaconOnNewPayloadError::Internal( + Box::new(error.clone()), + )); + let _ = tx.send(response); + return Err(RethError::Canonical(error)) + } + + // If we could not make the sync target block canonical, + // we should return the error as an invalid payload status. + let status = Ok(PayloadStatus::new( + PayloadStatusEnum::Invalid { + validation_error: error.to_string(), + }, + // TODO: return a proper latest valid hash + // See: + self.forkchoice_state_tracker.last_valid_head(), + )); + let _ = tx.send(status); + return Ok(EngineEventOutcome::Processed) + } + } + } + // block was successfully inserted, so we can cancel the full block + // request, if any exists + self.sync.cancel_full_block_request(block_hash); + } + + trace!(target: "consensus::engine", ?status, "Returning payload status"); + let _ = tx.send(Ok(status)); + } }; Ok(EngineEventOutcome::Processed) } @@ -1753,10 +1779,17 @@ where // Process any blockchain tree action result as set forth during engine message // processing. if let Some(action) = this.blockchain_tree_action.take() { - match this.on_blockchain_tree_action(action)? { - EngineEventOutcome::Processed => {} - EngineEventOutcome::ReachedMaxBlock => return Poll::Ready(Ok(())), + match this.on_blockchain_tree_action(action) { + Ok(EngineEventOutcome::Processed) => {} + Ok(EngineEventOutcome::ReachedMaxBlock) => return Poll::Ready(Ok(())), + Err(error) => { + error!(target: "consensus::engine", %error, "Encountered fatal error"); + return Poll::Ready(Err(error.into())) + } }; + + // Blockchain tree action handler might set next action to take. + continue } // Process one incoming message from the CL. 
We don't drain the messages right away, @@ -1770,9 +1803,7 @@ where this.on_forkchoice_updated(state, payload_attrs, tx); } BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { - this.metrics.new_payload_messages.increment(1); - let res = this.on_new_payload(payload, cancun_fields); - let _ = tx.send(res); + this.on_new_payload(payload, cancun_fields, tx); } BeaconEngineMessage::TransitionConfigurationExchanged => { this.blockchain.on_transition_configuration_exchanged(); @@ -1833,6 +1864,10 @@ enum BlockchainTreeAction { attrs: Option, tx: oneshot::Sender>, }, + InsertNewPayload { + block: SealedBlock, + tx: oneshot::Sender>, + }, } /// Represents outcomes of processing an engine event From 7845c9c897ff4781c33067f646c1237fdece49aa Mon Sep 17 00:00:00 2001 From: Rupam Dey <117000803+rupam-04@users.noreply.github.com> Date: Thu, 2 May 2024 21:39:00 +0530 Subject: [PATCH 448/700] minor typo fix (#8052) Co-authored-by: Matthias Seitz --- crates/net/network/tests/it/multiplex.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/net/network/tests/it/multiplex.rs b/crates/net/network/tests/it/multiplex.rs index aac55a982813a..650b754238fd9 100644 --- a/crates/net/network/tests/it/multiplex.rs +++ b/crates/net/network/tests/it/multiplex.rs @@ -22,7 +22,7 @@ use std::{ use tokio::sync::{mpsc, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; -/// A simple Rplx subprotocol for +/// A simple Rlpx subprotocol that sends pings and pongs mod proto { use super::*; use reth_eth_wire::capability::Capability; From 5378dd79e226172cd96c60b5e438f051cdc2a469 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 2 May 2024 19:01:32 +0200 Subject: [PATCH 449/700] feat: new payload skipper (#8050) --- crates/node-core/src/args/debug.rs | 4 ++ crates/node-core/src/engine/mod.rs | 28 +++++++++ crates/node-core/src/engine/skip_fcu.rs | 2 +- .../node-core/src/engine/skip_new_payload.rs | 60 +++++++++++++++++++ 
crates/node/builder/src/launch/mod.rs | 5 +- 5 files changed, 96 insertions(+), 3 deletions(-) create mode 100644 crates/node-core/src/engine/skip_new_payload.rs diff --git a/crates/node-core/src/args/debug.rs b/crates/node-core/src/args/debug.rs index 916b4a1efa04d..3eda71ad0a7d2 100644 --- a/crates/node-core/src/args/debug.rs +++ b/crates/node-core/src/args/debug.rs @@ -63,6 +63,10 @@ pub struct DebugArgs { #[arg(long = "debug.skip-fcu", help_heading = "Debug")] pub skip_fcu: Option, + /// If provided, the engine will skip `n` consecutive new payloads. + #[arg(long = "debug.skip-new-payload", help_heading = "Debug")] + pub skip_new_payload: Option, + /// The path to store engine API messages at. /// If specified, all of the intercepted engine API messages /// will be written to specified location. diff --git a/crates/node-core/src/engine/mod.rs b/crates/node-core/src/engine/mod.rs index 4ba8479e90e42..2c4e12e68a5d6 100644 --- a/crates/node-core/src/engine/mod.rs +++ b/crates/node-core/src/engine/mod.rs @@ -12,6 +12,9 @@ use engine_store::EngineStoreStream; pub mod skip_fcu; use skip_fcu::EngineSkipFcu; +pub mod skip_new_payload; +use skip_new_payload::EngineSkipNewPayload; + /// The collection of stream extensions for engine API message stream. pub trait EngineMessageStreamExt: Stream> @@ -38,6 +41,31 @@ pub trait EngineMessageStreamExt: } } + /// Skips the specified number of [BeaconEngineMessage::NewPayload] messages from the + /// engine message stream. + fn skip_new_payload(self, count: usize) -> EngineSkipNewPayload + where + Self: Sized, + { + EngineSkipNewPayload::new(self, count) + } + + /// If the count is [Some], returns the stream that skips the specified number of + /// [BeaconEngineMessage::NewPayload] messages. Otherwise, returns `Self`. 
+ fn maybe_skip_new_payload( + self, + maybe_count: Option, + ) -> Either, Self> + where + Self: Sized, + { + if let Some(count) = maybe_count { + Either::Left(self.skip_new_payload(count)) + } else { + Either::Right(self) + } + } + /// Stores engine messages at the specified location. fn store_messages(self, path: PathBuf) -> EngineStoreStream where diff --git a/crates/node-core/src/engine/skip_fcu.rs b/crates/node-core/src/engine/skip_fcu.rs index 34004134f3937..6deb342638ccb 100644 --- a/crates/node-core/src/engine/skip_fcu.rs +++ b/crates/node-core/src/engine/skip_fcu.rs @@ -1,4 +1,4 @@ -//! Stores engine API messages to disk for later inspection and replay. +//! Stream wrapper that skips specified number of FCUs. use futures::{Stream, StreamExt}; use reth_beacon_consensus::{BeaconEngineMessage, OnForkChoiceUpdated}; diff --git a/crates/node-core/src/engine/skip_new_payload.rs b/crates/node-core/src/engine/skip_new_payload.rs new file mode 100644 index 0000000000000..ea5cf61e956a9 --- /dev/null +++ b/crates/node-core/src/engine/skip_new_payload.rs @@ -0,0 +1,60 @@ +//! Stream wrapper that skips specified number of new payload messages. + +use futures::{Stream, StreamExt}; +use reth_beacon_consensus::BeaconEngineMessage; +use reth_engine_primitives::EngineTypes; +use reth_rpc_types::engine::{PayloadStatus, PayloadStatusEnum}; +use std::{ + pin::Pin, + task::{ready, Context, Poll}, +}; + +/// Engine API stream wrapper that skips the specified number of new payload messages. +#[derive(Debug)] +#[pin_project::pin_project] +pub struct EngineSkipNewPayload { + #[pin] + stream: S, + /// The number of messages to skip. + threshold: usize, + /// Current count of skipped messages. + skipped: usize, +} + +impl EngineSkipNewPayload { + /// Creates new [EngineSkipNewPayload] stream wrapper. 
+ pub fn new(stream: S, threshold: usize) -> Self { + Self { stream, threshold, skipped: 0 } + } +} + +impl Stream for EngineSkipNewPayload +where + Engine: EngineTypes, + S: Stream>, +{ + type Item = S::Item; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.project(); + + loop { + let next = ready!(this.stream.poll_next_unpin(cx)); + let item = match next { + Some(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }) => { + if this.skipped < this.threshold { + *this.skipped += 1; + tracing::warn!(target: "engine::intercept", ?payload, ?cancun_fields, threshold=this.threshold, skipped=this.skipped, "Skipping new payload"); + let _ = tx.send(Ok(PayloadStatus::from_status(PayloadStatusEnum::Syncing))); + continue + } else { + *this.skipped = 0; + Some(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }) + } + } + next => next, + }; + return Poll::Ready(item) + } + } +} diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index a372bedf0366c..cd93dbe5ec54c 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -266,8 +266,9 @@ where let node_config = ctx.node_config(); let consensus_engine_stream = UnboundedReceiverStream::from(consensus_engine_rx) .maybe_skip_fcu(node_config.debug.skip_fcu) - // Store messages _after_ skipping messages so that `replay-engine` command - // would replay the exact same messages that were observed by the engine + .maybe_skip_new_payload(node_config.debug.skip_new_payload) + // Store messages _after_ skipping so that `replay-engine` command + // would replay only the messages that were observed by the engine // during this run. 
.maybe_store_messages(node_config.debug.engine_api_store.clone()); From 0e3f031ada389dd740ed944e455de6f6a8c7e87a Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 2 May 2024 19:01:45 +0200 Subject: [PATCH 450/700] feat(engine): make new payload canonical action (#8042) --- crates/consensus/beacon/src/engine/mod.rs | 69 ++++++++++++++--------- 1 file changed, 42 insertions(+), 27 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index e8b27dc57ccff..f3aa249fa1ba2 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -506,7 +506,7 @@ where } else { let previous_action = self .blockchain_tree_action - .replace(BlockchainTreeAction::FcuMakeCanonical { state, attrs, tx }); + .replace(BlockchainTreeAction::MakeForkchoiceHeadCanonical { state, attrs, tx }); debug_assert!(previous_action.is_none(), "Pre-existing action found"); } } @@ -1624,7 +1624,7 @@ where action: BlockchainTreeAction, ) -> RethResult { match action { - BlockchainTreeAction::FcuMakeCanonical { state, attrs, tx } => { + BlockchainTreeAction::MakeForkchoiceHeadCanonical { state, attrs, tx } => { let start = Instant::now(); let result = self.blockchain.make_canonical(state.head_block_hash); let elapsed = self.record_make_canonical_latency(start, &result); @@ -1697,30 +1697,15 @@ where // if we're currently syncing and the inserted block is the targeted // FCU head block, we can try to make it canonical. if block_hash == target.head_block_hash { - if let Err((_hash, error)) = - self.try_make_sync_target_canonical(block_num_hash) - { - if error.is_fatal() { - let response = Err(BeaconOnNewPayloadError::Internal( - Box::new(error.clone()), - )); - let _ = tx.send(response); - return Err(RethError::Canonical(error)) - } - - // If we could not make the sync target block canonical, - // we should return the error as an invalid payload status. 
- let status = Ok(PayloadStatus::new( - PayloadStatusEnum::Invalid { - validation_error: error.to_string(), - }, - // TODO: return a proper latest valid hash - // See: - self.forkchoice_state_tracker.last_valid_head(), - )); - let _ = tx.send(status); - return Ok(EngineEventOutcome::Processed) - } + let previous_action = self.blockchain_tree_action.replace( + BlockchainTreeAction::MakeNewPayloadCanonical { + payload_num_hash: block_num_hash, + status, + tx, + }, + ); + debug_assert!(previous_action.is_none(), "Pre-existing action found"); + return Ok(EngineEventOutcome::Processed) } } // block was successfully inserted, so we can cancel the full block @@ -1728,6 +1713,31 @@ where self.sync.cancel_full_block_request(block_hash); } + trace!(target: "consensus::engine", ?status, "Returning payload status"); + let _ = tx.send(Ok(status)); + } + BlockchainTreeAction::MakeNewPayloadCanonical { payload_num_hash, status, tx } => { + let status = match self.try_make_sync_target_canonical(payload_num_hash) { + Ok(()) => status, + Err((_hash, error)) => { + if error.is_fatal() { + let response = + Err(BeaconOnNewPayloadError::Internal(Box::new(error.clone()))); + let _ = tx.send(response); + return Err(RethError::Canonical(error)) + } + + // If we could not make the sync target block canonical, + // we should return the error as an invalid payload status. 
+ PayloadStatus::new( + PayloadStatusEnum::Invalid { validation_error: error.to_string() }, + // TODO: return a proper latest valid hash + // See: + self.forkchoice_state_tracker.last_valid_head(), + ) + } + }; + trace!(target: "consensus::engine", ?status, "Returning payload status"); let _ = tx.send(Ok(status)); } @@ -1859,7 +1869,7 @@ where } enum BlockchainTreeAction { - FcuMakeCanonical { + MakeForkchoiceHeadCanonical { state: ForkchoiceState, attrs: Option, tx: oneshot::Sender>, @@ -1868,6 +1878,11 @@ enum BlockchainTreeAction { block: SealedBlock, tx: oneshot::Sender>, }, + MakeNewPayloadCanonical { + payload_num_hash: BlockNumHash, + status: PayloadStatus, + tx: oneshot::Sender>, + }, } /// Represents outcomes of processing an engine event From 29be4072cb9a0e8d8a3999ca6d5b9e3efa96983e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 2 May 2024 19:24:19 +0200 Subject: [PATCH 451/700] fix(op): bootnodes (#7990) --- crates/primitives/src/lib.rs | 4 ++-- crates/primitives/src/net.rs | 36 +++++++++++++++--------------------- 2 files changed, 17 insertions(+), 23 deletions(-) diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index ae20cf6b2c8a3..3473ef82e0e3f 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -144,8 +144,8 @@ mod optimism { pub use crate::{ chain::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}, net::{ - base_nodes, base_testnet_nodes, op_nodes, op_testnet_nodes, BASE_BOOTNODES, - BASE_TESTNET_BOOTNODES, OP_BOOTNODES, OP_TESTNET_BOOTNODES, + base_nodes, base_testnet_nodes, op_nodes, op_testnet_nodes, OP_BOOTNODES, + OP_TESTNET_BOOTNODES, }, transaction::{TxDeposit, DEPOSIT_TX_TYPE_ID}, }; diff --git a/crates/primitives/src/net.rs b/crates/primitives/src/net.rs index c8ff2a3ccf4ae..068e47e5b25ec 100644 --- a/crates/primitives/src/net.rs +++ b/crates/primitives/src/net.rs @@ -43,24 +43,13 @@ pub static HOLESKY_BOOTNODES : [&str; 2] = [ ]; #[cfg(feature = "optimism")] -/// OP Mainnet 
Bootnodes -pub static OP_BOOTNODES: [&str; 3] = [ +/// OP stack mainnet boot nodes. +pub static OP_BOOTNODES: &[&str] = &[ + // OP Labs "enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305", "enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305", "enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305", -]; - -#[cfg(feature = "optimism")] -/// OP Testnet Bootnodes -pub static OP_TESTNET_BOOTNODES: [&str; 3] = [ - "enode://2bd2e657bb3c8efffb8ff6db9071d9eb7be70d7c6d7d980ff80fc93b2629675c5f750bc0a5ef27cd788c2e491b8795a7e9a4a6e72178c14acc6753c0e5d77ae4@34.65.205.244:30305", - "enode://db8e1cab24624cc62fc35dbb9e481b88a9ef0116114cd6e41034c55b5b4f18755983819252333509bd8e25f6b12aadd6465710cd2e956558faf17672cce7551f@34.65.173.88:30305", - "enode://bfda2e0110cfd0f4c9f7aa5bf5ec66e6bd18f71a2db028d36b8bf8b0d6fdb03125c1606a6017b31311d96a36f5ef7e1ad11604d7a166745e6075a715dfa67f8a@34.65.229.245:30305", -]; - -#[cfg(feature = "optimism")] -/// Base Mainnet Bootnodes -pub static BASE_BOOTNODES: [&str; 5] = [ + // Base "enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301", "enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301", "enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301", @@ -69,8 +58,13 @@ pub static BASE_BOOTNODES: [&str; 5] = [ ]; #[cfg(feature = "optimism")] -/// Base Testnet Bootnodes -pub static BASE_TESTNET_BOOTNODES: [&str; 2] = [ +/// OP stack testnet 
boot nodes. +pub static OP_TESTNET_BOOTNODES: &[&str] = &[ + // OP Labs + "enode://2bd2e657bb3c8efffb8ff6db9071d9eb7be70d7c6d7d980ff80fc93b2629675c5f750bc0a5ef27cd788c2e491b8795a7e9a4a6e72178c14acc6753c0e5d77ae4@34.65.205.244:30305", + "enode://db8e1cab24624cc62fc35dbb9e481b88a9ef0116114cd6e41034c55b5b4f18755983819252333509bd8e25f6b12aadd6465710cd2e956558faf17672cce7551f@34.65.173.88:30305", + "enode://bfda2e0110cfd0f4c9f7aa5bf5ec66e6bd18f71a2db028d36b8bf8b0d6fdb03125c1606a6017b31311d96a36f5ef7e1ad11604d7a166745e6075a715dfa67f8a@34.65.229.245:30305", + // Base "enode://548f715f3fc388a7c917ba644a2f16270f1ede48a5d88a4d14ea287cc916068363f3092e39936f1a3e7885198bef0e5af951f1d7b1041ce8ba4010917777e71f@18.210.176.114:30301", "enode://6f10052847a966a725c9f4adf6716f9141155b99a0fb487fea3f51498f4c2a2cb8d534e680ee678f9447db85b93ff7c74562762c3714783a7233ac448603b25f@107.21.251.55:30301", ]; @@ -98,25 +92,25 @@ pub fn holesky_nodes() -> Vec { #[cfg(feature = "optimism")] /// Returns parsed op-stack mainnet nodes pub fn op_nodes() -> Vec { - parse_nodes(&OP_BOOTNODES[..]) + parse_nodes(OP_BOOTNODES) } #[cfg(feature = "optimism")] /// Returns parsed op-stack testnet nodes pub fn op_testnet_nodes() -> Vec { - parse_nodes(&OP_TESTNET_BOOTNODES[..]) + parse_nodes(OP_TESTNET_BOOTNODES) } #[cfg(feature = "optimism")] /// Returns parsed op-stack base mainnet nodes pub fn base_nodes() -> Vec { - parse_nodes(&BASE_BOOTNODES[..]) + parse_nodes(OP_BOOTNODES) } #[cfg(feature = "optimism")] /// Returns parsed op-stack base testnet nodes pub fn base_testnet_nodes() -> Vec { - parse_nodes(&BASE_TESTNET_BOOTNODES[..]) + parse_nodes(OP_TESTNET_BOOTNODES) } /// Parses all the nodes From 9eb7d961d71757bad3efa6270c4d8c3b1d9c208d Mon Sep 17 00:00:00 2001 From: Darshan Kathiriya <8559992+lakshya-sky@users.noreply.github.com> Date: Thu, 2 May 2024 13:45:34 -0400 Subject: [PATCH 452/700] integrate FullNodeComponents in exexcomponents (#8051) --- Cargo.lock | 2 ++ crates/exex/Cargo.toml | 2 ++ 
crates/exex/src/context.rs | 50 +++++++++++++++++++++++---- crates/node/builder/src/launch/mod.rs | 4 +-- 4 files changed, 48 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fcb0dfa5e023f..ffb669d5bd112 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6910,8 +6910,10 @@ dependencies = [ "metrics", "reth-config", "reth-metrics", + "reth-network", "reth-node-api", "reth-node-core", + "reth-payload-builder", "reth-primitives", "reth-provider", "reth-tasks", diff --git a/crates/exex/Cargo.toml b/crates/exex/Cargo.toml index 71f9c8bdef0df..d16cb53f791f8 100644 --- a/crates/exex/Cargo.toml +++ b/crates/exex/Cargo.toml @@ -21,6 +21,8 @@ reth-primitives.workspace = true reth-provider.workspace = true reth-tasks.workspace = true reth-tracing.workspace = true +reth-network.workspace = true +reth-payload-builder.workspace = true ## async tokio.workspace = true diff --git a/crates/exex/src/context.rs b/crates/exex/src/context.rs index df2b5137797d4..733047400ac15 100644 --- a/crates/exex/src/context.rs +++ b/crates/exex/src/context.rs @@ -1,4 +1,4 @@ -use reth_node_api::FullNodeComponents; +use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeTypes}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, @@ -14,18 +14,12 @@ use crate::{ExExEvent, ExExNotification}; pub struct ExExContext { /// The current head of the blockchain at launch. pub head: Head, - /// The configured provider to interact with the blockchain. - pub provider: Node::Provider, - /// The task executor of the node. - pub task_executor: TaskExecutor, /// The data dir of the node. pub data_dir: ChainPath, /// The config of the node pub config: NodeConfig, /// The loaded node config pub reth_config: reth_config::Config, - /// The transaction pool of the node. - pub pool: Node::Pool, /// Channel used to send [`ExExEvent`]s to the rest of the node. 
/// /// # Important @@ -41,4 +35,46 @@ pub struct ExExContext { /// Once a an [`ExExNotification`] is sent over the channel, it is considered delivered by the /// node. pub notifications: Receiver, + + /// node components + pub components: Node, +} + +impl NodeTypes for ExExContext { + type Primitives = Node::Primitives; + type Engine = Node::Engine; +} + +impl FullNodeTypes for ExExContext { + type DB = Node::DB; + type Provider = Node::Provider; +} + +impl FullNodeComponents for ExExContext { + type Pool = Node::Pool; + type Evm = Node::Evm; + + fn pool(&self) -> &Self::Pool { + self.components.pool() + } + + fn provider(&self) -> &Self::Provider { + self.components.provider() + } + + fn network(&self) -> &reth_network::NetworkHandle { + self.components.network() + } + + fn payload_builder(&self) -> &reth_payload_builder::PayloadBuilderHandle { + self.components.payload_builder() + } + + fn task_executor(&self) -> &TaskExecutor { + self.components.task_executor() + } + + fn evm_config(&self) -> &Self::Evm { + self.components.evm_config() + } } diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index cd93dbe5ec54c..201965fa9b33e 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -196,12 +196,10 @@ where // create the launch context for the exex let context = ExExContext { head, - provider: blockchain_db.clone(), - task_executor: ctx.task_executor().clone(), data_dir: ctx.data_dir().clone(), config: ctx.node_config().clone(), reth_config: ctx.toml_config().clone(), - pool: node_adapter.components.pool().clone(), + components: node_adapter.clone(), events, notifications, }; From e90dc44be9bb3e9aaacc742b3cfa9d22a8252b5c Mon Sep 17 00:00:00 2001 From: Rupam Dey <117000803+rupam-04@users.noreply.github.com> Date: Fri, 3 May 2024 00:02:36 +0530 Subject: [PATCH 453/700] feat: remove field ```max_gas_limit``` from ```BasicPayloadJobGeneratorConfig``` (#7949) --- 
crates/node-ethereum/src/node.rs | 3 +-- crates/optimism/node/src/node.rs | 3 +-- crates/payload/basic/src/lib.rs | 13 +------------ examples/custom-engine-types/src/main.rs | 3 +-- examples/custom-payload-builder/src/main.rs | 3 +-- 5 files changed, 5 insertions(+), 20 deletions(-) diff --git a/crates/node-ethereum/src/node.rs b/crates/node-ethereum/src/node.rs index 9de0cbe6ee5ef..815b949de4aba 100644 --- a/crates/node-ethereum/src/node.rs +++ b/crates/node-ethereum/src/node.rs @@ -175,8 +175,7 @@ where .interval(conf.interval()) .deadline(conf.deadline()) .max_payload_tasks(conf.max_payload_tasks()) - .extradata(conf.extradata_bytes()) - .max_gas_limit(conf.max_gas_limit()); + .extradata(conf.extradata_bytes()); let payload_generator = BasicPayloadJobGenerator::with_builder( ctx.provider().clone(), diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index c95f3dd9587c2..7e7d5470305a9 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -224,8 +224,7 @@ where .deadline(conf.deadline()) .max_payload_tasks(conf.max_payload_tasks()) // no extradata for OP - .extradata(Default::default()) - .max_gas_limit(conf.max_gas_limit()); + .extradata(Default::default()); let payload_generator = BasicPayloadJobGenerator::with_builder( ctx.provider().clone(), diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 8da9163d0cd8b..6529710ca4f07 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -18,7 +18,7 @@ use reth_payload_builder::{ PayloadJobGenerator, }; use reth_primitives::{ - constants::{EMPTY_WITHDRAWALS, ETHEREUM_BLOCK_GAS_LIMIT, RETH_CLIENT_VERSION, SLOT_DURATION}, + constants::{EMPTY_WITHDRAWALS, RETH_CLIENT_VERSION, SLOT_DURATION}, proofs, BlockNumberOrTag, Bytes, ChainSpec, SealedBlock, Withdrawals, B256, U256, }; use reth_provider::{ @@ -251,8 +251,6 @@ impl PayloadTaskGuard { pub struct BasicPayloadJobGeneratorConfig { /// Data to include 
in the block's extra data field. extradata: Bytes, - /// Target gas ceiling for built blocks, defaults to [ETHEREUM_BLOCK_GAS_LIMIT] gas. - max_gas_limit: u64, /// The interval at which the job should build a new payload after the last. interval: Duration, /// The deadline for when the payload builder job should resolve. @@ -296,21 +294,12 @@ impl BasicPayloadJobGeneratorConfig { self.extradata = extradata; self } - - /// Sets the target gas ceiling for mined blocks. - /// - /// Defaults to [ETHEREUM_BLOCK_GAS_LIMIT] gas. - pub fn max_gas_limit(mut self, max_gas_limit: u64) -> Self { - self.max_gas_limit = max_gas_limit; - self - } } impl Default for BasicPayloadJobGeneratorConfig { fn default() -> Self { Self { extradata: alloy_rlp::encode(RETH_CLIENT_VERSION.as_bytes()).into(), - max_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, interval: Duration::from_secs(1), // 12s slot time deadline: SLOT_DURATION, diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index ada28c0f3e7c9..d16146420d0d9 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -235,8 +235,7 @@ where .interval(conf.interval()) .deadline(conf.deadline()) .max_payload_tasks(conf.max_payload_tasks()) - .extradata(conf.extradata_bytes()) - .max_gas_limit(conf.max_gas_limit()); + .extradata(conf.extradata_bytes()); let payload_generator = BasicPayloadJobGenerator::with_builder( ctx.provider().clone(), diff --git a/examples/custom-payload-builder/src/main.rs b/examples/custom-payload-builder/src/main.rs index 2c468c34af183..b2bc6af3607cc 100644 --- a/examples/custom-payload-builder/src/main.rs +++ b/examples/custom-payload-builder/src/main.rs @@ -47,8 +47,7 @@ where .interval(conf.interval()) .deadline(conf.deadline()) .max_payload_tasks(conf.max_payload_tasks()) - .extradata(conf.extradata_bytes()) - .max_gas_limit(conf.max_gas_limit()); + .extradata(conf.extradata_bytes()); let payload_generator = 
EmptyBlockPayloadJobGenerator::with_builder( ctx.provider().clone(), From 1a1c24ba24be968ec40c01c10b1c3c3d638d7bfc Mon Sep 17 00:00:00 2001 From: Rupam Dey <117000803+rupam-04@users.noreply.github.com> Date: Fri, 3 May 2024 00:40:45 +0530 Subject: [PATCH 454/700] dropped ```RUST_LOG=info``` from recommended commands (#8054) --- book/jsonrpc/intro.md | 2 +- book/run/mainnet.md | 8 ++++---- book/run/observability.md | 2 +- book/run/pruning.md | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/book/jsonrpc/intro.md b/book/jsonrpc/intro.md index 1c602f6d2b5b3..21ded5bcc5431 100644 --- a/book/jsonrpc/intro.md +++ b/book/jsonrpc/intro.md @@ -114,7 +114,7 @@ You can use `curl`, a programming language with a low-level library, or a tool l As a reminder, you need to run the command below to enable all of these APIs using an HTTP transport: ```bash -RUST_LOG=info reth node --http --http.api "admin,debug,eth,net,trace,txpool,web3,rpc" +reth node --http --http.api "admin,debug,eth,net,trace,txpool,web3,rpc" ``` This allows you to then call: diff --git a/book/run/mainnet.md b/book/run/mainnet.md index 67e70b9dbf209..4412f51c7bf0f 100644 --- a/book/run/mainnet.md +++ b/book/run/mainnet.md @@ -20,12 +20,12 @@ First, ensure that you have Reth installed by following the [installation instru Now, to start the archive node, run: ```bash -RUST_LOG=info reth node +reth node ``` And to start the full node, run: ```bash -RUST_LOG=info reth node --full +reth node --full ``` On differences between archive and full nodes, see [Pruning & Full Node](./pruning.md#basic-concepts) section. @@ -39,7 +39,7 @@ You can override this path using the `--authrpc.jwtsecret` option. You MUST use So one might do: ```bash -RUST_LOG=info reth node \ +reth node \ --authrpc.jwtsecret /path/to/secret \ --authrpc.addr 127.0.0.1 \ --authrpc.port 8551 @@ -54,7 +54,7 @@ First, make sure you have Lighthouse installed. 
Sigma Prime provides excellent [ Assuming you have done that, run: ```bash -RUST_LOG=info lighthouse bn \ +lighthouse bn \ --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \ --execution-endpoint http://localhost:8551 \ --execution-jwt /path/to/secret diff --git a/book/run/observability.md b/book/run/observability.md index 39d485e1f3628..9f0f1b852b721 100644 --- a/book/run/observability.md +++ b/book/run/observability.md @@ -3,7 +3,7 @@ Reth exposes a number of metrics, which are listed [here][metrics]. We can serve them from an HTTP endpoint by adding the `--metrics` flag: ```bash -RUST_LOG=info reth node --metrics 127.0.0.1:9001 +reth node --metrics 127.0.0.1:9001 ``` Now, as the node is running, you can `curl` the endpoint you provided to the `--metrics` flag to get a text dump of the metrics at that time: diff --git a/book/run/pruning.md b/book/run/pruning.md index 4e69665510346..b6f23f54459f8 100644 --- a/book/run/pruning.md +++ b/book/run/pruning.md @@ -39,7 +39,7 @@ To run Reth as a full node, follow the steps from the previous chapter on [how to run on mainnet or official testnets](./mainnet.md), and add a `--full` flag. 
For example: ```bash -RUST_LOG=info reth node \ +reth node \ --full \ --authrpc.jwtsecret /path/to/secret \ --authrpc.addr 127.0.0.1 \ From 232e7bf19b8948718325e3eaaf1629d69efae4db Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 2 May 2024 20:31:48 +0100 Subject: [PATCH 455/700] feat(cli): make `db stats` non-detailed by default (#8056) --- bin/reth/src/commands/db/stats.rs | 40 ++--- book/SUMMARY.md | 2 + book/cli/SUMMARY.md | 2 + book/cli/reth.md | 1 + book/cli/reth/db.md | 1 + book/cli/reth/db/stats.md | 13 +- book/cli/reth/import.md | 6 + book/cli/reth/node.md | 203 ++++++++++++----------- book/cli/reth/p2p.md | 44 ++++- book/cli/reth/stage/drop.md | 4 +- book/cli/reth/stage/run.md | 38 ++++- book/cli/reth/stage/unwind.md | 115 ++++++++++++- book/cli/reth/stage/unwind/num-blocks.md | 2 +- book/cli/reth/stage/unwind/to-block.md | 2 +- 14 files changed, 337 insertions(+), 136 deletions(-) diff --git a/bin/reth/src/commands/db/stats.rs b/bin/reth/src/commands/db/stats.rs index 474603c746468..b47e7980b02e2 100644 --- a/bin/reth/src/commands/db/stats.rs +++ b/bin/reth/src/commands/db/stats.rs @@ -25,11 +25,11 @@ use tracing::info; pub struct Command { /// Show only the total size for static files. #[arg(long, default_value_t = false)] - only_total_size: bool, + detailed_sizes: bool, - /// Show only the summary per static file segment. + /// Show detailed information per static file segment. #[arg(long, default_value_t = false)] - summary: bool, + detailed_segments: bool, /// Show a checksum of each table in the database. 
/// @@ -152,7 +152,7 @@ impl Command { let mut table = ComfyTable::new(); table.load_preset(comfy_table::presets::ASCII_MARKDOWN); - if !self.only_total_size { + if self.detailed_sizes { table.set_header([ "Segment", "Block Range", @@ -216,18 +216,7 @@ impl Command { .map(|metadata| metadata.len()) .unwrap_or_default(); - if self.summary { - if segment_columns > 0 { - assert_eq!(segment_columns, columns); - } else { - segment_columns = columns; - } - segment_rows += rows; - segment_data_size += data_size; - segment_index_size += index_size; - segment_offsets_size += offsets_size; - segment_config_size += config_size; - } else { + if self.detailed_segments { let mut row = Row::new(); row.add_cell(Cell::new(segment)) .add_cell(Cell::new(format!("{block_range}"))) @@ -235,7 +224,7 @@ impl Command { tx_range.map_or("N/A".to_string(), |tx_range| format!("{tx_range}")), )) .add_cell(Cell::new(format!("{columns} x {rows}"))); - if !self.only_total_size { + if self.detailed_sizes { row.add_cell(Cell::new(human_bytes(data_size as f64))) .add_cell(Cell::new(human_bytes(index_size as f64))) .add_cell(Cell::new(human_bytes(offsets_size as f64))) @@ -245,6 +234,17 @@ impl Command { (data_size + index_size + offsets_size + config_size) as f64, ))); table.add_row(row); + } else { + if segment_columns > 0 { + assert_eq!(segment_columns, columns); + } else { + segment_columns = columns; + } + segment_rows += rows; + segment_data_size += data_size; + segment_index_size += index_size; + segment_offsets_size += offsets_size; + segment_config_size += config_size; } total_data_size += data_size; @@ -253,7 +253,7 @@ impl Command { total_config_size += config_size; } - if self.summary { + if !self.detailed_segments { let first_ranges = ranges.first().expect("not empty list of ranges"); let last_ranges = ranges.last().expect("not empty list of ranges"); @@ -271,7 +271,7 @@ impl Command { tx_range.map_or("N/A".to_string(), |tx_range| format!("{tx_range}")), )) 
.add_cell(Cell::new(format!("{segment_columns} x {segment_rows}"))); - if !self.only_total_size { + if self.detailed_sizes { row.add_cell(Cell::new(human_bytes(segment_data_size as f64))) .add_cell(Cell::new(human_bytes(segment_index_size as f64))) .add_cell(Cell::new(human_bytes(segment_offsets_size as f64))) @@ -299,7 +299,7 @@ impl Command { .add_cell(Cell::new("")) .add_cell(Cell::new("")) .add_cell(Cell::new("")); - if !self.only_total_size { + if self.detailed_sizes { row.add_cell(Cell::new(human_bytes(total_data_size as f64))) .add_cell(Cell::new(human_bytes(total_index_size as f64))) .add_cell(Cell::new(human_bytes(total_offsets_size as f64))) diff --git a/book/SUMMARY.md b/book/SUMMARY.md index ffd5f67e0bdae..fc6deb28295aa 100644 --- a/book/SUMMARY.md +++ b/book/SUMMARY.md @@ -30,11 +30,13 @@ - [`reth`](./cli/reth.md) - [`reth node`](./cli/reth/node.md) - [`reth init`](./cli/reth/init.md) + - [`reth init-state`](./cli/reth/init-state.md) - [`reth import`](./cli/reth/import.md) - [`reth dump-genesis`](./cli/reth/dump-genesis.md) - [`reth db`](./cli/reth/db.md) - [`reth db stats`](./cli/reth/db/stats.md) - [`reth db list`](./cli/reth/db/list.md) + - [`reth db checksum`](./cli/reth/db/checksum.md) - [`reth db diff`](./cli/reth/db/diff.md) - [`reth db get`](./cli/reth/db/get.md) - [`reth db get mdbx`](./cli/reth/db/get/mdbx.md) diff --git a/book/cli/SUMMARY.md b/book/cli/SUMMARY.md index 07711434e31f9..ee3d714b2bb5f 100644 --- a/book/cli/SUMMARY.md +++ b/book/cli/SUMMARY.md @@ -1,11 +1,13 @@ - [`reth`](./reth.md) - [`reth node`](./reth/node.md) - [`reth init`](./reth/init.md) + - [`reth init-state`](./reth/init-state.md) - [`reth import`](./reth/import.md) - [`reth dump-genesis`](./reth/dump-genesis.md) - [`reth db`](./reth/db.md) - [`reth db stats`](./reth/db/stats.md) - [`reth db list`](./reth/db/list.md) + - [`reth db checksum`](./reth/db/checksum.md) - [`reth db diff`](./reth/db/diff.md) - [`reth db get`](./reth/db/get.md) - [`reth db get 
mdbx`](./reth/db/get/mdbx.md) diff --git a/book/cli/reth.md b/book/cli/reth.md index f213a30f2c56c..8b6f757c936b2 100644 --- a/book/cli/reth.md +++ b/book/cli/reth.md @@ -9,6 +9,7 @@ Usage: reth [OPTIONS] Commands: node Start the node init Initialize the database from a genesis file + init-state Initialize the database from a state dump file import This syncs RLP encoded blocks from a file dump-genesis Dumps genesis block JSON configuration to stdout db Database debugging utilities diff --git a/book/cli/reth/db.md b/book/cli/reth/db.md index 77137dadb38e0..bd5989d7f34c8 100644 --- a/book/cli/reth/db.md +++ b/book/cli/reth/db.md @@ -9,6 +9,7 @@ Usage: reth db [OPTIONS] Commands: stats Lists all the tables, their entry count and their size list Lists the contents of a table + checksum Calculates the content checksum of a table diff Create a diff between two database tables or two entire databases get Gets the content of a table for the given key drop Deletes all database entries diff --git a/book/cli/reth/db/stats.md b/book/cli/reth/db/stats.md index dea5e3d058c01..437c10bd0a1c3 100644 --- a/book/cli/reth/db/stats.md +++ b/book/cli/reth/db/stats.md @@ -18,7 +18,7 @@ Options: [default: default] - --only-total-size + --detailed-sizes Show only the total size for static files --chain @@ -30,8 +30,15 @@ Options: [default: mainnet] - --summary - Show only the summary per static file segment + --detailed-segments + Show detailed information per static file segment + + --checksum + Show a checksum of each table in the database. + + WARNING: this option will take a long time to run, as it needs to traverse and hash the entire database. + + For individual table checksums, use the `reth db checksum` command. --instance Add a new instance of a node. 
diff --git a/book/cli/reth/import.md b/book/cli/reth/import.md index 382efb8ef9ea9..411527f9e84ba 100644 --- a/book/cli/reth/import.md +++ b/book/cli/reth/import.md @@ -30,6 +30,12 @@ Options: [default: mainnet] + --no-state + Disables stages that require state. + + --chunk-len + Chunk byte length. + --instance Add a new instance of a node. diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index 450180c84e4a3..d1972a6085228 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -4,21 +4,18 @@ Start the node ```bash $ reth node --help - -Start the node - Usage: reth node [OPTIONS] Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --config @@ -27,26 +24,26 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] --with-unused-ports Sets all ports to unused, allowing the OS to choose random unused ports when sockets are bound. - + Mutually exclusive with `--instance`. -h, --help @@ -55,7 +52,7 @@ Options: Metrics: --metrics Enable Prometheus metrics. 
- + The metrics will be served at the given interface and port. Networking: @@ -73,27 +70,42 @@ Networking: --discovery.addr The UDP address to use for devp2p peer discovery version 4 - + [default: 0.0.0.0] --discovery.port The UDP port to use for devp2p peer discovery version 4 - + [default: 30303] --discovery.v5.addr The UDP address to use for devp2p peer discovery version 5 - + [default: 0.0.0.0] --discovery.v5.port The UDP port to use for devp2p peer discovery version 5 - + [default: 9000] + --discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 60] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap + + [default: 5] + + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 100] + --trusted-peers Comma separated enode URLs of trusted peers for P2P connections. - + --trusted-peers enode://abcd@192.168.0.1:30303 --trusted-only @@ -101,7 +113,7 @@ Networking: --bootnodes Comma separated enode URLs for P2P discovery bootstrap. - + Will fall back to a network-specific default if not specified. --peers-file @@ -110,12 +122,12 @@ Networking: --identity Custom node identity - - [default: reth/-/-gnu] + + [default: reth/-/] --p2p-secret-key Secret key to use for this node. - + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. 
--no-persist-peers @@ -123,17 +135,17 @@ Networking: --nat NAT resolution method (any|none|upnp|publicip|extip:\) - + [default: any] --addr Network listening address - + [default: 0.0.0.0] --port Network listening port - + [default: 30303] --max-outbound-peers @@ -144,14 +156,14 @@ Networking: --pooled-tx-response-soft-limit Soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. Spec'd at 2 MiB. - + . - + [default: 2097152] --pooled-tx-pack-soft-limit Default soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. This defaults to less than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when assembling a `PooledTransactions` response. Default is 128 KiB - + [default: 131072] RPC: @@ -160,17 +172,17 @@ RPC: --http.addr Http server address to listen on - + [default: 127.0.0.1] --http.port Http server port to listen on - + [default: 8545] --http.api Rpc Modules to be configured for the HTTP server - + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, eth-call-bundle] --http.corsdomain @@ -181,12 +193,12 @@ RPC: --ws.addr Ws server address to listen on - + [default: 127.0.0.1] --ws.port Ws server port to listen on - + [default: 8546] --ws.origins @@ -194,7 +206,7 @@ RPC: --ws.api Rpc Modules to be configured for the WS server - + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, eth-call-bundle] --ipcdisable @@ -202,176 +214,176 @@ RPC: --ipcpath Filename for IPC socket/pipe within the datadir - - [default: /tmp/reth.ipc] + + [default: .ipc] --authrpc.addr Auth server address to listen on - + [default: 127.0.0.1] --authrpc.port Auth server port to listen on - + [default: 8551] --authrpc.jwtsecret Path to a JWT secret to use for the authenticated engine-API RPC server. - + This will enforce JWT authentication for all requests coming from the consensus layer. 
- + If no path is provided, a secret will be generated and stored in the datadir under `//jwt.hex`. For mainnet this would be `~/.reth/mainnet/jwt.hex` by default. --auth-ipc - Enable auth engine api over IPC + Enable auth engine API over IPC --auth-ipc.path Filename for auth IPC socket/pipe within the datadir - - [default: /tmp/reth_engine_api.ipc] + + [default: _engine_api.ipc] --rpc.jwtsecret Hex encoded JWT secret to authenticate the regular RPC server(s), see `--http.api` and `--ws.api`. - + This is __not__ used for the authenticated engine-API RPC server, see `--authrpc.jwtsecret`. --rpc.max-request-size Set the maximum RPC request payload size for both HTTP and WS in megabytes - + [default: 15] --rpc.max-response-size Set the maximum RPC response payload size for both HTTP and WS in megabytes - + [default: 160] [aliases: rpc.returndata.limit] --rpc.max-subscriptions-per-connection Set the maximum concurrent subscriptions per connection - + [default: 1024] --rpc.max-connections Maximum number of RPC server connections - + [default: 500] --rpc.max-tracing-requests Maximum number of concurrent tracing requests - - [default: 14] + + [default: 8] --rpc.max-blocks-per-filter Maximum number of blocks that could be scanned per filter request. (0 = entire chain) - + [default: 100000] --rpc.max-logs-per-response Maximum number of logs that can be returned in a single response. 
(0 = no limit) - + [default: 20000] --rpc.gascap Maximum gas limit for `eth_call` and call tracing RPC methods - + [default: 50000000] RPC State Cache: --rpc-cache.max-blocks Max number of blocks in cache - + [default: 5000] --rpc-cache.max-receipts Max number receipts in cache - + [default: 2000] --rpc-cache.max-envs Max number of bytes for cached env data - + [default: 1000] --rpc-cache.max-concurrent-db-requests Max number of concurrent database requests - + [default: 512] Gas Price Oracle: --gpo.blocks Number of recent blocks to check for gas price - + [default: 20] --gpo.ignoreprice Gas Price below which gpo will ignore transactions - + [default: 2] --gpo.maxprice Maximum transaction priority fee(or gasprice before London Fork) to be recommended by gpo - + [default: 500000000000] --gpo.percentile The percentile of gas prices to use for the estimate - + [default: 60] TxPool: --txpool.pending-max-count Max number of transaction in the pending sub-pool - + [default: 10000] --txpool.pending-max-size Max size of the pending sub-pool in megabytes - + [default: 20] --txpool.basefee-max-count Max number of transaction in the basefee sub-pool - + [default: 10000] --txpool.basefee-max-size Max size of the basefee sub-pool in megabytes - + [default: 20] --txpool.queued-max-count Max number of transaction in the queued sub-pool - + [default: 10000] --txpool.queued-max-size Max size of the queued sub-pool in megabytes - + [default: 20] --txpool.max-account-slots Max number of executable transaction slots guaranteed per account - + [default: 16] --txpool.pricebump Price bump (in %) for the transaction pool underpriced check - + [default: 10] --blobpool.pricebump Price bump percentage to replace an already existing blob transaction - + [default: 100] --txpool.max-tx-input-bytes Max size in bytes of a single transaction allowed to enter the pool - + [default: 131072] --txpool.max-cached-entries The maximum number of blobs to keep in the in memory blob cache - + [default: 100] 
--txpool.nolocals @@ -386,33 +398,33 @@ TxPool: Builder: --builder.extradata Block extra data set by the payload builder - + [default: reth//] --builder.gaslimit Target gas ceiling for built blocks - + [default: 30000000] --builder.interval The interval at which the job should build a new payload after the last (in seconds) - + [default: 1] --builder.deadline The deadline for when the payload builder job should resolve - + [default: 12] --builder.max-tasks Maximum number of tasks to spawn for building a payload - + [default: 3] Debug: --debug.continuous Prompt the downloader to download blocks one at a time. - + NOTE: This is for testing purposes only. --debug.terminate @@ -420,7 +432,7 @@ Debug: --debug.tip Set the chain tip manually for testing purposes. - + NOTE: This is a temporary flag --debug.max-block @@ -438,6 +450,9 @@ Debug: --debug.hook-all Hook on every transaction in a block + --debug.skip-fcu + If provided, the engine will skip `n` consecutive FCUs + --debug.engine-api-store The path to store engine API messages at. If specified, all of the intercepted engine API messages will be written to specified location @@ -457,13 +472,13 @@ Database: --db.exclusive Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - + [possible values: true, false] Dev testnet: --dev Start the node in dev mode - + This mode uses a local proof-of-authority consensus engine with either fixed block times or automatically mined blocks. Disables network discovery and enables local http server. @@ -475,7 +490,7 @@ Dev testnet: --dev.block-time Interval between blocks. 
- + Parses strings using [humantime::parse_duration] --dev.block-time 12s @@ -486,7 +501,7 @@ Pruning: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -496,12 +511,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -511,22 +526,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -534,12 +549,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -550,7 +565,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. 
- + -v Errors -vv Warnings -vvv Info @@ -559,4 +574,4 @@ Display: -q, --quiet Silence all log output -``` +``` \ No newline at end of file diff --git a/book/cli/reth/p2p.md b/book/cli/reth/p2p.md index 17cd396cf548b..6f1c1d3e60b40 100644 --- a/book/cli/reth/p2p.md +++ b/book/cli/reth/p2p.md @@ -49,24 +49,36 @@ Options: --disable-discv4-discovery Disable Discv4 discovery + --enable-discv5-discovery + Enable Discv5 discovery + --discovery.addr - The UDP address to use for P2P discovery/networking + The UDP address to use for devp2p peer discovery version 4 [default: 0.0.0.0] --discovery.port - The UDP port to use for P2P discovery/networking + The UDP port to use for devp2p peer discovery version 4 [default: 30303] - --trusted-peer - Target trusted peer + --discovery.v5.addr + The UDP address to use for devp2p peer discovery version 5 + + [default: 0.0.0.0] - --trusted-only - Connect only to trusted peers + --discovery.v5.port + The UDP port to use for devp2p peer discovery version 5 + + [default: 9000] - --retries - The number of retries per request + --discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 60] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap [default: 5] @@ -81,6 +93,22 @@ Options: [default: 1] + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 100] + + --trusted-peer + Target trusted peer + + --trusted-only + Connect only to trusted peers + + --retries + The number of retries per request + + [default: 5] + --nat [default: any] diff --git a/book/cli/reth/stage/drop.md b/book/cli/reth/stage/drop.md index 2efe9ed78fd63..2b647574cde12 100644 --- a/book/cli/reth/stage/drop.md +++ b/book/cli/reth/stage/drop.md @@ -68,8 +68,8 @@ Database: - execution: The execution stage within the 
pipeline - account-hashing: The account hashing stage within the pipeline - storage-hashing: The storage hashing stage within the pipeline - - hashing: The hashing stage within the pipeline - - merkle: The Merkle stage within the pipeline + - hashing: The account and storage hashing stages within the pipeline + - merkle: The merkle stage within the pipeline - tx-lookup: The transaction lookup stage within the pipeline - account-history: The account history stage within the pipeline - storage-history: The storage history stage within the pipeline diff --git a/book/cli/reth/stage/run.md b/book/cli/reth/stage/run.md index f20eb3f68a8af..348f082c4fad4 100644 --- a/book/cli/reth/stage/run.md +++ b/book/cli/reth/stage/run.md @@ -17,8 +17,8 @@ Arguments: - execution: The execution stage within the pipeline - account-hashing: The account hashing stage within the pipeline - storage-hashing: The storage hashing stage within the pipeline - - hashing: The hashing stage within the pipeline - - merkle: The Merkle stage within the pipeline + - hashing: The account and storage hashing stages within the pipeline + - merkle: The merkle stage within the pipeline - tx-lookup: The transaction lookup stage within the pipeline - account-history: The account history stage within the pipeline - storage-history: The storage history stage within the pipeline @@ -96,16 +96,44 @@ Networking: --disable-discv4-discovery Disable Discv4 discovery + --enable-discv5-discovery + Enable Discv5 discovery + --discovery.addr - The UDP address to use for P2P discovery/networking + The UDP address to use for devp2p peer discovery version 4 [default: 0.0.0.0] --discovery.port - The UDP port to use for P2P discovery/networking + The UDP port to use for devp2p peer discovery version 4 [default: 30303] + --discovery.v5.addr + The UDP address to use for devp2p peer discovery version 5 + + [default: 0.0.0.0] + + --discovery.v5.port + The UDP port to use for devp2p peer discovery version 5 + + [default: 9000] + + 
--discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 60] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap + + [default: 5] + + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 100] + --trusted-peers Comma separated enode URLs of trusted peers for P2P connections. @@ -126,7 +154,7 @@ Networking: --identity Custom node identity - [default: reth/-/-gnu] + [default: reth/-/] --p2p-secret-key Secret key to use for this node. diff --git a/book/cli/reth/stage/unwind.md b/book/cli/reth/stage/unwind.md index 8479bca514eba..44968aeded6b1 100644 --- a/book/cli/reth/stage/unwind.md +++ b/book/cli/reth/stage/unwind.md @@ -7,8 +7,8 @@ $ reth stage unwind --help Usage: reth stage unwind [OPTIONS] Commands: - to-block Unwinds the database until the given block number (range is inclusive) - num-blocks Unwinds the given number of blocks from the database + to-block Unwinds the database from the latest block, until the given block number or hash has been reached, that block is not included + num-blocks Unwinds the database from the latest block, until the given number of blocks have been reached help Print this message or the help of the given subcommand(s) Options: @@ -65,6 +65,117 @@ Database: [possible values: true, false] +Networking: + -d, --disable-discovery + Disable the discovery service + + --disable-dns-discovery + Disable the DNS discovery + + --disable-discv4-discovery + Disable Discv4 discovery + + --enable-discv5-discovery + Enable Discv5 discovery + + --discovery.addr + The UDP address to use for devp2p peer discovery version 4 + + [default: 0.0.0.0] + + --discovery.port + The UDP port to use for devp2p peer discovery version 4 + + [default: 30303] + + --discovery.v5.addr + The UDP address to 
use for devp2p peer discovery version 5 + + [default: 0.0.0.0] + + --discovery.v5.port + The UDP port to use for devp2p peer discovery version 5 + + [default: 9000] + + --discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 60] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap + + [default: 5] + + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 100] + + --trusted-peers + Comma separated enode URLs of trusted peers for P2P connections. + + --trusted-peers enode://abcd@192.168.0.1:30303 + + --trusted-only + Connect only to trusted peers + + --bootnodes + Comma separated enode URLs for P2P discovery bootstrap. + + Will fall back to a network-specific default if not specified. + + --peers-file + The path to the known peers file. Connected peers are dumped to this file on nodes + shutdown, and read on startup. Cannot be used with `--no-persist-peers`. + + --identity + Custom node identity + + [default: reth/-/] + + --p2p-secret-key + Secret key to use for this node. + + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. + + --no-persist-peers + Do not persist peers. + + --nat + NAT resolution method (any|none|upnp|publicip|extip:\) + + [default: any] + + --addr + Network listening address + + [default: 0.0.0.0] + + --port + Network listening port + + [default: 30303] + + --max-outbound-peers + Maximum number of outbound requests. default: 100 + + --max-inbound-peers + Maximum number of inbound requests. default: 30 + + --pooled-tx-response-soft-limit + Soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. Spec'd at 2 MiB. + + . 
+ + [default: 2097152] + + --pooled-tx-pack-soft-limit + Default soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. This defaults to less than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when assembling a `PooledTransactions` response. Default is 128 KiB + + [default: 131072] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/stage/unwind/num-blocks.md b/book/cli/reth/stage/unwind/num-blocks.md index 9737bd4fa5fd1..24d2bc5169b74 100644 --- a/book/cli/reth/stage/unwind/num-blocks.md +++ b/book/cli/reth/stage/unwind/num-blocks.md @@ -1,6 +1,6 @@ # reth stage unwind num-blocks -Unwinds the given number of blocks from the database +Unwinds the database from the latest block, until the given number of blocks have been reached ```bash $ reth stage unwind num-blocks --help diff --git a/book/cli/reth/stage/unwind/to-block.md b/book/cli/reth/stage/unwind/to-block.md index 74f8ec4b74a88..f8aa3bd6ef5e6 100644 --- a/book/cli/reth/stage/unwind/to-block.md +++ b/book/cli/reth/stage/unwind/to-block.md @@ -1,6 +1,6 @@ # reth stage unwind to-block -Unwinds the database until the given block number (range is inclusive) +Unwinds the database from the latest block, until the given block number or hash has been reached, that block is not included ```bash $ reth stage unwind to-block --help From 2ac2433a96cf27895565d4e138bb79961bf3154e Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 2 May 2024 21:19:44 +0100 Subject: [PATCH 456/700] feat(examples): add blob support to the rollup ExEx (#8028) --- Cargo.lock | 150 ++++- .../transaction-pool/src/test_utils/mock.rs | 1 - deny.toml | 1 + examples/exex/rollup/Cargo.toml | 8 +- examples/exex/rollup/rollup_abi.json | 627 +----------------- examples/exex/rollup/src/execution.rs | 488 ++++++++++++++ examples/exex/rollup/src/main.rs | 400 ++--------- 7 files changed, 655 insertions(+), 1020 
deletions(-) create mode 100644 examples/exex/rollup/src/execution.rs diff --git a/Cargo.lock b/Cargo.lock index ffb669d5bd112..24b07b8ab3161 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -135,16 +135,30 @@ name = "alloy-consensus" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ - "alloy-eips", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-primitives", "alloy-rlp", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "c-kzg", "serde", "sha2 0.10.8", "thiserror", ] +[[package]] +name = "alloy-consensus" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +dependencies = [ + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-primitives", + "alloy-rlp", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "c-kzg", + "serde", + "sha2 0.10.8", +] + [[package]] name = "alloy-dyn-abi" version = "0.7.1" @@ -170,7 +184,7 @@ source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f67 dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "arbitrary", "c-kzg", "derive_more", @@ -182,13 +196,37 @@ dependencies = [ "serde", ] +[[package]] +name = "alloy-eips" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "c-kzg", + "once_cell", + "serde", +] + [[package]] name = "alloy-genesis" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "serde", + 
"serde_json", +] + +[[package]] +name = "alloy-genesis" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +dependencies = [ + "alloy-primitives", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", "serde", "serde_json", ] @@ -222,11 +260,11 @@ name = "alloy-network" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ - "alloy-consensus", - "alloy-eips", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-signer", "alloy-sol-types", "async-trait", @@ -239,7 +277,7 @@ name = "alloy-node-bindings" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ - "alloy-genesis", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-primitives", "k256", "serde_json", @@ -281,12 +319,12 @@ name = "alloy-provider" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ - "alloy-eips", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-rpc-types-trace", "alloy-transport", "alloy-transport-http", @@ -351,12 +389,12 @@ name = "alloy-rpc-types" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-genesis", + "alloy-consensus 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-primitives", "alloy-rlp", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-sol-types", "arbitrary", "itertools 0.12.1", @@ -368,13 +406,31 @@ dependencies = [ "thiserror", ] +[[package]] +name = "alloy-rpc-types" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +dependencies = [ + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-primitives", + "alloy-rlp", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-sol-types", + "itertools 0.12.1", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "serde", ] @@ -383,12 +439,12 @@ name = "alloy-rpc-types-engine" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ - "alloy-consensus", - "alloy-eips", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types", - "alloy-serde", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", @@ -404,8 +460,8 @@ version = "0.1.0" source 
= "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", - "alloy-rpc-types", - "alloy-serde", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "serde", "serde_json", ] @@ -420,6 +476,16 @@ dependencies = [ "serde_json", ] +[[package]] +name = "alloy-serde" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", +] + [[package]] name = "alloy-signer" version = "0.1.0" @@ -438,7 +504,7 @@ name = "alloy-signer-wallet" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ - "alloy-consensus", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-network", "alloy-primitives", "alloy-signer", @@ -2885,9 +2951,11 @@ dependencies = [ name = "exex-rollup" version = "0.0.0" dependencies = [ + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-rlp", "alloy-sol-types", "eyre", + "foundry-blob-explorers", "futures", "once_cell", "reth", @@ -3038,6 +3106,21 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "foundry-blob-explorers" +version = "0.1.0" +source = "git+https://github.com/foundry-rs/block-explorers#cd824d3fc53feca59ca6a2fc76f191fbb3ac2011" +dependencies = [ + "alloy-chains", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-primitives", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "chrono", + "reqwest 0.12.4", + "serde", +] + [[package]] name = "fragile" version = "2.0.0" @@ -6258,6 +6341,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", + "webpki-roots", "winreg 0.52.0", ] @@ -6480,8 
+6564,8 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.6" dependencies = [ - "alloy-eips", - "alloy-genesis", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-primitives", "arbitrary", "bytes", @@ -6686,9 +6770,9 @@ dependencies = [ name = "reth-e2e-test-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-consensus", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-network", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-signer", "alloy-signer-wallet", "eyre", @@ -7422,8 +7506,8 @@ name = "reth-primitives" version = "0.2.0-beta.6" dependencies = [ "alloy-chains", - "alloy-eips", - "alloy-genesis", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-primitives", "alloy-rlp", "alloy-trie", @@ -7686,10 +7770,10 @@ dependencies = [ name = "reth-rpc-types" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-rpc-types-anvil", "alloy-rpc-types-engine", "alloy-rpc-types-trace", @@ -7716,7 +7800,7 @@ name = "reth-rpc-types-compat" version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "reth-primitives", "reth-rpc-types", "serde_json", @@ -7821,7 +7905,7 @@ dependencies = [ name = "reth-testing-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "reth-primitives", "secp256k1", ] @@ -7958,7 +8042,7 @@ version = "0.1.0" source = 
"git+https://github.com/paradigmxyz/evm-inspectors?rev=3d2077e#3d2077ee665046c256448a8bd90d8e93ea85de56" dependencies = [ "alloy-primitives", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index bcacff2da1914..17ad1f7c340ca 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -733,7 +733,6 @@ impl PoolTransaction for MockTransaction { fn chain_id(&self) -> Option { match self { MockTransaction::Legacy { chain_id, .. } => *chain_id, - MockTransaction::Eip1559 { chain_id, .. } | MockTransaction::Eip4844 { chain_id, .. } | MockTransaction::Eip2930 { chain_id, .. } => Some(*chain_id), diff --git a/deny.toml b/deny.toml index 61cced4fbed3a..38994d1974557 100644 --- a/deny.toml +++ b/deny.toml @@ -90,6 +90,7 @@ unknown-git = "deny" allow-git = [ # TODO: remove, see ./Cargo.toml "https://github.com/alloy-rs/alloy", + "https://github.com/foundry-rs/block-explorers", "https://github.com/paradigmxyz/evm-inspectors", "https://github.com/sigp/discv5", ] diff --git a/examples/exex/rollup/Cargo.toml b/examples/exex/rollup/Cargo.toml index 8d338c241e720..f32a7762926d8 100644 --- a/examples/exex/rollup/Cargo.toml +++ b/examples/exex/rollup/Cargo.toml @@ -21,16 +21,18 @@ reth-tracing.workspace = true reth-trie.workspace = true # async -tokio.workspace = true futures.workspace = true +tokio.workspace = true # misc -alloy-sol-types = { workspace = true, features = ["json"] } +alloy-consensus = { workspace = true, features = ["kzg"] } alloy-rlp.workspace = true +alloy-sol-types = { workspace = true, features = ["json"] } eyre.workspace = true +foundry-blob-explorers = { git = "https://github.com/foundry-rs/block-explorers" } +once_cell.workspace = true rusqlite = { version = "0.31.0", features = ["bundled"] } 
serde_json.workspace = true -once_cell.workspace = true [dev-dependencies] reth-interfaces = { workspace = true, features = ["test-utils"] } diff --git a/examples/exex/rollup/rollup_abi.json b/examples/exex/rollup/rollup_abi.json index 08bc23f0e7829..d7278e9f66201 100644 --- a/examples/exex/rollup/rollup_abi.json +++ b/examples/exex/rollup/rollup_abi.json @@ -1,626 +1 @@ -[ - { - "inputs": [ - { "internalType": "address", "name": "admin", "type": "address" } - ], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { "inputs": [], "name": "AccessControlBadConfirmation", "type": "error" }, - { - "inputs": [ - { "internalType": "uint48", "name": "schedule", "type": "uint48" } - ], - "name": "AccessControlEnforcedDefaultAdminDelay", - "type": "error" - }, - { - "inputs": [], - "name": "AccessControlEnforcedDefaultAdminRules", - "type": "error" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "defaultAdmin", - "type": "address" - } - ], - "name": "AccessControlInvalidDefaultAdmin", - "type": "error" - }, - { - "inputs": [ - { "internalType": "address", "name": "account", "type": "address" }, - { - "internalType": "bytes32", - "name": "neededRole", - "type": "bytes32" - } - ], - "name": "AccessControlUnauthorizedAccount", - "type": "error" - }, - { - "inputs": [ - { "internalType": "uint256", "name": "expected", "type": "uint256" } - ], - "name": "BadSequence", - "type": "error" - }, - { "inputs": [], "name": "BadSignature", "type": "error" }, - { "inputs": [], "name": "BlockExpired", "type": "error" }, - { - "inputs": [ - { - "internalType": "address", - "name": "sequencer", - "type": "address" - } - ], - "name": "NotSequencer", - "type": "error" - }, - { "inputs": [], "name": "OrderExpired", "type": "error" }, - { - "inputs": [ - { "internalType": "uint8", "name": "bits", "type": "uint8" }, - { "internalType": "uint256", "name": "value", "type": "uint256" } - ], - "name": "SafeCastOverflowedUintDowncast", - "type": "error" - }, - { - 
"anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "sequencer", - "type": "address" - }, - { - "components": [ - { - "internalType": "uint256", - "name": "rollupChainId", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "sequence", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "confirmBy", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "gasLimit", - "type": "uint256" - }, - { - "internalType": "address", - "name": "rewardAddress", - "type": "address" - } - ], - "indexed": true, - "internalType": "struct CalldataZenith.BlockHeader", - "name": "header", - "type": "tuple" - }, - { - "indexed": false, - "internalType": "bytes", - "name": "blockData", - "type": "bytes" - } - ], - "name": "BlockSubmitted", - "type": "event" - }, - { - "anonymous": false, - "inputs": [], - "name": "DefaultAdminDelayChangeCanceled", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint48", - "name": "newDelay", - "type": "uint48" - }, - { - "indexed": false, - "internalType": "uint48", - "name": "effectSchedule", - "type": "uint48" - } - ], - "name": "DefaultAdminDelayChangeScheduled", - "type": "event" - }, - { - "anonymous": false, - "inputs": [], - "name": "DefaultAdminTransferCanceled", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "newAdmin", - "type": "address" - }, - { - "indexed": false, - "internalType": "uint48", - "name": "acceptSchedule", - "type": "uint48" - } - ], - "name": "DefaultAdminTransferScheduled", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "token", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "rollupRecipient", - "type": "address" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "amount", 
- "type": "uint256" - } - ], - "name": "Enter", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "token", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "hostRecipient", - "type": "address" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "amount", - "type": "uint256" - } - ], - "name": "ExitFilled", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "bytes32", - "name": "role", - "type": "bytes32" - }, - { - "indexed": true, - "internalType": "bytes32", - "name": "previousAdminRole", - "type": "bytes32" - }, - { - "indexed": true, - "internalType": "bytes32", - "name": "newAdminRole", - "type": "bytes32" - } - ], - "name": "RoleAdminChanged", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "bytes32", - "name": "role", - "type": "bytes32" - }, - { - "indexed": true, - "internalType": "address", - "name": "account", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "sender", - "type": "address" - } - ], - "name": "RoleGranted", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "bytes32", - "name": "role", - "type": "bytes32" - }, - { - "indexed": true, - "internalType": "address", - "name": "account", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "sender", - "type": "address" - } - ], - "name": "RoleRevoked", - "type": "event" - }, - { "stateMutability": "payable", "type": "fallback" }, - { - "inputs": [], - "name": "DEFAULT_ADMIN_ROLE", - "outputs": [ - { "internalType": "bytes32", "name": "", "type": "bytes32" } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "SEQUENCER_ROLE", - "outputs": [ - { "internalType": "bytes32", "name": "", "type": "bytes32" } 
- ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "acceptDefaultAdminTransfer", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { "internalType": "address", "name": "newAdmin", "type": "address" } - ], - "name": "beginDefaultAdminTransfer", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "components": [ - { - "internalType": "uint256", - "name": "rollupChainId", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "sequence", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "confirmBy", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "gasLimit", - "type": "uint256" - }, - { - "internalType": "address", - "name": "rewardAddress", - "type": "address" - } - ], - "internalType": "struct CalldataZenith.BlockHeader", - "name": "header", - "type": "tuple" - }, - { "internalType": "bytes", "name": "blockData", "type": "bytes" } - ], - "name": "blockCommitment", - "outputs": [ - { "internalType": "bytes32", "name": "commit", "type": "bytes32" } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "cancelDefaultAdminTransfer", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { "internalType": "uint48", "name": "newDelay", "type": "uint48" } - ], - "name": "changeDefaultAdminDelay", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "defaultAdmin", - "outputs": [ - { "internalType": "address", "name": "", "type": "address" } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "defaultAdminDelay", - "outputs": [{ "internalType": "uint48", "name": "", "type": "uint48" }], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "defaultAdminDelayIncreaseWait", - "outputs": [{ 
"internalType": "uint48", "name": "", "type": "uint48" }], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "rollupRecipient", - "type": "address" - } - ], - "name": "enter", - "outputs": [], - "stateMutability": "payable", - "type": "function" - }, - { - "inputs": [ - { - "components": [ - { - "internalType": "address", - "name": "token", - "type": "address" - }, - { - "internalType": "address", - "name": "recipient", - "type": "address" - }, - { - "internalType": "uint256", - "name": "amount", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "deadline", - "type": "uint256" - } - ], - "internalType": "struct HostPassage.ExitOrder[]", - "name": "orders", - "type": "tuple[]" - } - ], - "name": "fulfillExits", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { "internalType": "bytes32", "name": "role", "type": "bytes32" } - ], - "name": "getRoleAdmin", - "outputs": [ - { "internalType": "bytes32", "name": "", "type": "bytes32" } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { "internalType": "bytes32", "name": "role", "type": "bytes32" }, - { "internalType": "address", "name": "account", "type": "address" } - ], - "name": "grantRole", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { "internalType": "bytes32", "name": "role", "type": "bytes32" }, - { "internalType": "address", "name": "account", "type": "address" } - ], - "name": "hasRole", - "outputs": [{ "internalType": "bool", "name": "", "type": "bool" }], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { "internalType": "uint256", "name": "", "type": "uint256" } - ], - "name": "nextSequence", - "outputs": [ - { "internalType": "uint256", "name": "", "type": "uint256" } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "owner", - 
"outputs": [ - { "internalType": "address", "name": "", "type": "address" } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "pendingDefaultAdmin", - "outputs": [ - { - "internalType": "address", - "name": "newAdmin", - "type": "address" - }, - { "internalType": "uint48", "name": "schedule", "type": "uint48" } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "pendingDefaultAdminDelay", - "outputs": [ - { "internalType": "uint48", "name": "newDelay", "type": "uint48" }, - { "internalType": "uint48", "name": "schedule", "type": "uint48" } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { "internalType": "bytes32", "name": "role", "type": "bytes32" }, - { "internalType": "address", "name": "account", "type": "address" } - ], - "name": "renounceRole", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { "internalType": "bytes32", "name": "role", "type": "bytes32" }, - { "internalType": "address", "name": "account", "type": "address" } - ], - "name": "revokeRole", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "rollbackDefaultAdminDelay", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "components": [ - { - "internalType": "uint256", - "name": "rollupChainId", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "sequence", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "confirmBy", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "gasLimit", - "type": "uint256" - }, - { - "internalType": "address", - "name": "rewardAddress", - "type": "address" - } - ], - "internalType": "struct CalldataZenith.BlockHeader", - "name": "header", - "type": "tuple" - }, - { "internalType": "bytes", "name": "blockData", "type": "bytes" }, - { "internalType": 
"uint8", "name": "v", "type": "uint8" }, - { "internalType": "bytes32", "name": "r", "type": "bytes32" }, - { "internalType": "bytes32", "name": "s", "type": "bytes32" } - ], - "name": "submitBlock", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes4", - "name": "interfaceId", - "type": "bytes4" - } - ], - "name": "supportsInterface", - "outputs": [{ "internalType": "bool", "name": "", "type": "bool" }], - "stateMutability": "view", - "type": "function" - }, - { "stateMutability": "payable", "type": "receive" } -] +{"abi":[{"type":"constructor","inputs":[{"name":"defaultRollupChainId","type":"uint256","internalType":"uint256"},{"name":"admin","type":"address","internalType":"address"}],"stateMutability":"nonpayable"},{"type":"fallback","stateMutability":"payable"},{"type":"receive","stateMutability":"payable"},{"type":"function","name":"DEFAULT_ADMIN_ROLE","inputs":[],"outputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"SEQUENCER_ROLE","inputs":[],"outputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"acceptDefaultAdminTransfer","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"beginDefaultAdminTransfer","inputs":[{"name":"newAdmin","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"blockCommitment","inputs":[{"name":"header","type":"tuple","internalType":"struct 
Zenith.BlockHeader","components":[{"name":"rollupChainId","type":"uint256","internalType":"uint256"},{"name":"sequence","type":"uint256","internalType":"uint256"},{"name":"confirmBy","type":"uint256","internalType":"uint256"},{"name":"gasLimit","type":"uint256","internalType":"uint256"},{"name":"rewardAddress","type":"address","internalType":"address"}]},{"name":"blockDataHash","type":"bytes32","internalType":"bytes32"}],"outputs":[{"name":"commit","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"cancelDefaultAdminTransfer","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"changeDefaultAdminDelay","inputs":[{"name":"newDelay","type":"uint48","internalType":"uint48"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"defaultAdmin","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"defaultAdminDelay","inputs":[],"outputs":[{"name":"","type":"uint48","internalType":"uint48"}],"stateMutability":"view"},{"type":"function","name":"defaultAdminDelayIncreaseWait","inputs":[],"outputs":[{"name":"","type":"uint48","internalType":"uint48"}],"stateMutability":"view"},{"type":"function","name":"enter","inputs":[{"name":"rollupChainId","type":"uint256","internalType":"uint256"},{"name":"rollupRecipient","type":"address","internalType":"address"},{"name":"token","type":"address","internalType":"address"},{"name":"amount","type":"uint256","internalType":"uint256"}],"outputs":[],"stateMutability":"payable"},{"type":"function","name":"enter","inputs":[{"name":"rollupChainId","type":"uint256","internalType":"uint256"},{"name":"rollupRecipient","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"payable"},{"type":"function","name":"fulfillExits","inputs":[{"name":"orders","type":"tuple[]","internalType":"struct 
Passage.ExitOrder[]","components":[{"name":"rollupChainId","type":"uint256","internalType":"uint256"},{"name":"token","type":"address","internalType":"address"},{"name":"recipient","type":"address","internalType":"address"},{"name":"amount","type":"uint256","internalType":"uint256"}]}],"outputs":[],"stateMutability":"payable"},{"type":"function","name":"getRoleAdmin","inputs":[{"name":"role","type":"bytes32","internalType":"bytes32"}],"outputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"grantRole","inputs":[{"name":"role","type":"bytes32","internalType":"bytes32"},{"name":"account","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"hasRole","inputs":[{"name":"role","type":"bytes32","internalType":"bytes32"},{"name":"account","type":"address","internalType":"address"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"lastSubmittedAtBlock","inputs":[{"name":"","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"nextSequence","inputs":[{"name":"","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"owner","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"pendingDefaultAdmin","inputs":[],"outputs":[{"name":"newAdmin","type":"address","internalType":"address"},{"name":"schedule","type":"uint48","internalType":"uint48"}],"stateMutability":"view"},{"type":"function","name":"pendingDefaultAdminDelay","inputs":[],"outputs":[{"name":"newDelay","type":"uint48","internalType":"uint48"},{"name":"schedule","type":"uint48","internalType":"uint48"}],"stateMutability":"view"},{"type":"function",
"name":"renounceRole","inputs":[{"name":"role","type":"bytes32","internalType":"bytes32"},{"name":"account","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"revokeRole","inputs":[{"name":"role","type":"bytes32","internalType":"bytes32"},{"name":"account","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"rollbackDefaultAdminDelay","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"submitBlock","inputs":[{"name":"header","type":"tuple","internalType":"struct Zenith.BlockHeader","components":[{"name":"rollupChainId","type":"uint256","internalType":"uint256"},{"name":"sequence","type":"uint256","internalType":"uint256"},{"name":"confirmBy","type":"uint256","internalType":"uint256"},{"name":"gasLimit","type":"uint256","internalType":"uint256"},{"name":"rewardAddress","type":"address","internalType":"address"}]},{"name":"blockDataHash","type":"bytes32","internalType":"bytes32"},{"name":"v","type":"uint8","internalType":"uint8"},{"name":"r","type":"bytes32","internalType":"bytes32"},{"name":"s","type":"bytes32","internalType":"bytes32"},{"name":"blockData","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"supportsInterface","inputs":[{"name":"interfaceId","type":"bytes4","internalType":"bytes4"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"event","name":"BlockData","inputs":[{"name":"blockData","type":"bytes","indexed":false,"internalType":"bytes"}],"anonymous":false},{"type":"event","name":"BlockSubmitted","inputs":[{"name":"sequencer","type":"address","indexed":true,"internalType":"address"},{"name":"header","type":"tuple","indexed":true,"internalType":"struct 
Zenith.BlockHeader","components":[{"name":"rollupChainId","type":"uint256","internalType":"uint256"},{"name":"sequence","type":"uint256","internalType":"uint256"},{"name":"confirmBy","type":"uint256","internalType":"uint256"},{"name":"gasLimit","type":"uint256","internalType":"uint256"},{"name":"rewardAddress","type":"address","internalType":"address"}]},{"name":"blockDataHash","type":"bytes32","indexed":false,"internalType":"bytes32"}],"anonymous":false},{"type":"event","name":"DefaultAdminDelayChangeCanceled","inputs":[],"anonymous":false},{"type":"event","name":"DefaultAdminDelayChangeScheduled","inputs":[{"name":"newDelay","type":"uint48","indexed":false,"internalType":"uint48"},{"name":"effectSchedule","type":"uint48","indexed":false,"internalType":"uint48"}],"anonymous":false},{"type":"event","name":"DefaultAdminTransferCanceled","inputs":[],"anonymous":false},{"type":"event","name":"DefaultAdminTransferScheduled","inputs":[{"name":"newAdmin","type":"address","indexed":true,"internalType":"address"},{"name":"acceptSchedule","type":"uint48","indexed":false,"internalType":"uint48"}],"anonymous":false},{"type":"event","name":"Enter","inputs":[{"name":"rollupChainId","type":"uint256","indexed":false,"internalType":"uint256"},{"name":"token","type":"address","indexed":true,"internalType":"address"},{"name":"rollupRecipient","type":"address","indexed":true,"internalType":"address"},{"name":"amount","type":"uint256","indexed":false,"internalType":"uint256"}],"anonymous":false},{"type":"event","name":"ExitFilled","inputs":[{"name":"rollupChainId","type":"uint256","indexed":false,"internalType":"uint256"},{"name":"token","type":"address","indexed":true,"internalType":"address"},{"name":"hostRecipient","type":"address","indexed":true,"internalType":"address"},{"name":"amount","type":"uint256","indexed":false,"internalType":"uint256"}],"anonymous":false},{"type":"event","name":"RoleAdminChanged","inputs":[{"name":"role","type":"bytes32","indexed":true,"internalType":"byt
es32"},{"name":"previousAdminRole","type":"bytes32","indexed":true,"internalType":"bytes32"},{"name":"newAdminRole","type":"bytes32","indexed":true,"internalType":"bytes32"}],"anonymous":false},{"type":"event","name":"RoleGranted","inputs":[{"name":"role","type":"bytes32","indexed":true,"internalType":"bytes32"},{"name":"account","type":"address","indexed":true,"internalType":"address"},{"name":"sender","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"event","name":"RoleRevoked","inputs":[{"name":"role","type":"bytes32","indexed":true,"internalType":"bytes32"},{"name":"account","type":"address","indexed":true,"internalType":"address"},{"name":"sender","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"error","name":"AccessControlBadConfirmation","inputs":[]},{"type":"error","name":"AccessControlEnforcedDefaultAdminDelay","inputs":[{"name":"schedule","type":"uint48","internalType":"uint48"}]},{"type":"error","name":"AccessControlEnforcedDefaultAdminRules","inputs":[]},{"type":"error","name":"AccessControlInvalidDefaultAdmin","inputs":[{"name":"defaultAdmin","type":"address","internalType":"address"}]},{"type":"error","name":"AccessControlUnauthorizedAccount","inputs":[{"name":"account","type":"address","internalType":"address"},{"name":"neededRole","type":"bytes32","internalType":"bytes32"}]},{"type":"error","name":"BadSequence","inputs":[{"name":"expected","type":"uint256","internalType":"uint256"}]},{"type":"error","name":"BadSignature","inputs":[{"name":"derivedSequencer","type":"address","internalType":"address"}]},{"type":"error","name":"BlockExpired","inputs":[]},{"type":"error","name":"OneRollupBlockPerHostBlock","inputs":[]},{"type":"error","name":"OrderExpired","inputs":[]},{"type":"error","name":"SafeCastOverflowedUintDowncast","inputs":[{"name":"bits","type":"uint8","internalType":"uint8"},{"name":"value","type":"uint256","internalType":"uint256"}]}],"bytecode":{"object":"0x60a060405
234801561000f575f80fd5b50604051611a98380380611a9883398101604081905261002e916101ae565b608082905262015180816001600160a01b03811661006557604051636116401160e11b81525f600482015260240160405180910390fd5b600180546001600160d01b0316600160d01b65ffffffffffff85160217905561008e5f82610098565b50505050506101e8565b5f826100f4575f6100b16002546001600160a01b031690565b6001600160a01b0316146100d857604051631fe1e13d60e11b815260040160405180910390fd5b600280546001600160a01b0319166001600160a01b0384161790555b6100fe8383610107565b90505b92915050565b5f828152602081815260408083206001600160a01b038516845290915281205460ff166101a7575f838152602081815260408083206001600160a01b03861684529091529020805460ff1916600117905561015f3390565b6001600160a01b0316826001600160a01b0316847f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d60405160405180910390a4506001610101565b505f610101565b5f80604083850312156101bf575f80fd5b825160208401519092506001600160a01b03811681146101dd575f80fd5b809150509250929050565b6080516118916102075f395f818161018e01526101ba01526118915ff3fe608060405260043610610184575f3560e01c80637e82bb01116100d0578063c7bc4a6211610089578063cf6eefb711610063578063cf6eefb7146104b7578063d547741f146104f1578063d602b9fd14610510578063ea3b9ba114610524576101b5565b8063c7bc4a6214610470578063cc8463c81461048f578063cefc1429146104a3576101b5565b80637e82bb011461039b57806384ef8ffc146103c65780638da5cb5b146103f757806391d148541461040b578063a1eda53c1461042a578063a217fddf1461045d576101b5565b806336568abe1161013d5780634842855c116101175780634842855c1461031a578063634e93da1461033e578063649a5ec71461035d5780637e5692741461037c576101b5565b806336568abe146102d557806336702119146102f45780633805c6bd14610307576101b5565b806301ffc9a7146101df578063022d63fb146102135780630aa6220b1461023b5780631e6637201461024f578063248a9ca3146102885780632f2ff15d146102b6576101b5565b366101b5576101b37f000000000000000000000000000000000000000000000000000000000000000033610532565b005b6101b37f000000000000000000000000000000000000000000000000000000000000000033610532565b3480156101e
a575f80fd5b506101fe6101f93660046114a8565b61057b565b60405190151581526020015b60405180910390f35b34801561021e575f80fd5b50620697805b60405165ffffffffffff909116815260200161020a565b348015610246575f80fd5b506101b36105a5565b34801561025a575f80fd5b5061027a6102693660046114cf565b60036020525f908152604090205481565b60405190815260200161020a565b348015610293575f80fd5b5061027a6102a23660046114cf565b5f9081526020819052604090206001015490565b3480156102c1575f80fd5b506101b36102d0366004611501565b6105ba565b3480156102e0575f80fd5b506101b36102ef366004611501565b6105e6565b6101b361030236600461152b565b610691565b6101b361031536600461159a565b610960565b348015610325575f80fd5b5061027a6d53455155454e4345525f524f4c4560901b81565b348015610349575f80fd5b506101b36103583660046115db565b610a31565b348015610368575f80fd5b506101b36103773660046115f4565b610a44565b348015610387575f80fd5b506101b361039636600461169a565b610a57565b3480156103a6575f80fd5b5061027a6103b53660046114cf565b60046020525f908152604090205481565b3480156103d1575f80fd5b506002546001600160a01b03165b6040516001600160a01b03909116815260200161020a565b348015610402575f80fd5b506103df610aa6565b348015610416575f80fd5b506101fe610425366004611501565b610abe565b348015610435575f80fd5b5061043e610ae6565b6040805165ffffffffffff93841681529290911660208301520161020a565b348015610468575f80fd5b5061027a5f81565b34801561047b575f80fd5b5061027a61048a36600461174d565b610b38565b34801561049a575f80fd5b50610224610bcd565b3480156104ae575f80fd5b506101b3610c2b565b3480156104c2575f80fd5b506104cb610c6a565b604080516001600160a01b03909316835265ffffffffffff90911660208301520161020a565b3480156104fc575f80fd5b506101b361050b366004611501565b610c8b565b34801561051b575f80fd5b506101b3610cb3565b6101b3610532366004611501565b604080518381523460208201526001600160a01b038316915f917fe8a7ca8155e14d9cc8faeecec58a97268da95a2327cc892593748ce37cc6953f910160405180910390a35050565b5f6001600160e01b031982166318a4c3c360e11b148061059f575061059f82610cc5565b92915050565b5f6105af81610cf9565b6105b7610d03565b50565b816105d857604051631fe1e13d60e11b81526
0040160405180910390fd5b6105e28282610d0f565b5050565b8115801561060157506002546001600160a01b038281169116145b15610687575f80610610610c6a565b90925090506001600160a01b038216151580610632575065ffffffffffff8116155b8061064557504265ffffffffffff821610155b15610672576040516319ca5ebb60e01b815265ffffffffffff821660048201526024015b60405180910390fd5b50506001805465ffffffffffff60a01b191690555b6105e28282610d33565b345f5b8281101561095a575f8484838181106106af576106af611776565b90506080020160200160208101906106c791906115db565b6001600160a01b03160361077e578383828181106106e7576106e7611776565b90506080020160400160208101906106ff91906115db565b6001600160a01b03166108fc85858481811061071d5761071d611776565b9050608002016060013590811502906040515f60405180830381858888f1935050505015801561074f573d5f803e3d5ffd5b5083838281811061076257610762611776565b9050608002016060013582610777919061179e565b9150610875565b83838281811061079057610790611776565b90506080020160200160208101906107a891906115db565b6001600160a01b03166323b872dd338686858181106107c9576107c9611776565b90506080020160400160208101906107e191906115db565b8787868181106107f3576107f3611776565b6040516001600160e01b031960e088901b1681526001600160a01b039586166004820152949093166024850152506060608090920201013560448201526064016020604051808303815f875af115801561084f573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061087391906117b1565b505b83838281811061088757610887611776565b905060800201604001602081019061089f91906115db565b6001600160a01b03168484838181106108ba576108ba611776565b90506080020160200160208101906108d291906115db565b6001600160a01b03167fe93d7a771f81dc20f1d474f6868677269fdfa09830508e48edb0aa4d6569983386868581811061090e5761090e611776565b9050608002015f013587878681811061092957610929611776565b9050608002016060013560405161094a929190918252602082015260400190565b60405180910390a3600101610694565b50505050565b6040516323b872dd60e01b8152336004820152306024820152604481018290526001600160a01b038316906323b872dd906064016020604051808303815f875af11580156109b0573d5f803e3d5ffd5b505050506
040513d601f19601f820116820180604052508101906109d491906117b1565b50826001600160a01b0316826001600160a01b03167fe8a7ca8155e14d9cc8faeecec58a97268da95a2327cc892593748ce37cc6953f8684604051610a23929190918252602082015260400190565b60405180910390a350505050565b5f610a3b81610cf9565b6105e282610d6b565b5f610a4e81610cf9565b6105e282610ddd565b610a648787878787610e4c565b7fc030727dea5440ebb1789967645e2595e4e67cf55821175a3f9f8b33aff41fa58282604051610a959291906117d0565b60405180910390a150505050505050565b5f610ab96002546001600160a01b031690565b905090565b5f918252602082815260408084206001600160a01b0393909316845291905290205460ff1690565b6002545f90600160d01b900465ffffffffffff168015158015610b1157504265ffffffffffff821610155b610b1c575f80610b30565b600254600160a01b900465ffffffffffff16815b915091509091565b81516020808401516060808601516040808801516080909801518151710696e6974342e73657175656e6365722e76360741b8188015246603282015260528101979097526072870194909452609286019190915260b28501969096521b6bffffffffffffffffffffffff191660d283015260e68083019390935283518083039093018352610106909101909252805191012090565b6002545f90600160d01b900465ffffffffffff168015158015610bf757504265ffffffffffff8216105b610c1257600154600160d01b900465ffffffffffff16610c25565b600254600160a01b900465ffffffffffff165b91505090565b5f610c34610c6a565b509050336001600160a01b03821614610c6257604051636116401160e11b8152336004820152602401610669565b6105b7611047565b6001546001600160a01b03811691600160a01b90910465ffffffffffff1690565b81610ca957604051631fe1e13d60e11b815260040160405180910390fd5b6105e282826110dd565b5f610cbd81610cf9565b6105b7611101565b5f6001600160e01b03198216637965db0b60e01b148061059f57506301ffc9a760e01b6001600160e01b031983161461059f565b6105b7813361110b565b610d0d5f80611144565b565b5f82815260208190526040902060010154610d2981610cf9565b61095a8383611203565b6001600160a01b0381163314610d5c5760405163334bd91960e11b815260040160405180910390fd5b610d668282611270565b505050565b5f610d74610bcd565b610d7d426112ac565b610d8791906117fe565b9050610d9382826112e2565b60405165fffffffffff
f821681526001600160a01b038316907f3377dc44241e779dd06afab5b788a35ca5f3b778836e2990bdb26a2a4b2e5ed69060200160405180910390a25050565b5f610de78261135f565b610df0426112ac565b610dfa91906117fe565b9050610e068282611144565b6040805165ffffffffffff8085168252831660208201527ff1038c18cf84a56e432fdbfaf746924b7ea511dfe03a6506a0ceba4888788d9b910160405180910390a15050565b84515f90815260036020526040812080549082610e6883611824565b91905055905085602001518114610e9557604051635f64988d60e11b815260048101829052602401610669565b8560400151421115610eba576040516378fd448d60e01b815260040160405180910390fd5b5f610ec58787610b38565b604080515f8082526020820180845284905260ff89169282019290925260608101879052608081018690529192509060019060a0016020604051602081039080840390855afa158015610f1a573d5f803e3d5ffd5b505050602060405103519050610f416d53455155454e4345525f524f4c4560901b82610abe565b610f6957604051639a7d38d960e01b81526001600160a01b0382166004820152602401610669565b87515f90815260046020526040902054439003610f9957604051632ce0494b60e01b815260040160405180910390fd5b87515f908152600460205260409081902043905551610ff2908990815181526020808301519082015260408083015190820152606080830151908201526080918201516001600160a01b03169181019190915260a00190565b6040518091039020816001600160a01b03167f9c5702b5639f451bda4f9dba7fdf9d125a675ccddd315b81ce962d3ddd986a238960405161103591815260200190565b60405180910390a35050505050505050565b5f80611051610c6a565b915091506110668165ffffffffffff16151590565b158061107a57504265ffffffffffff821610155b156110a2576040516319ca5ebb60e01b815265ffffffffffff82166004820152602401610669565b6110bd5f6110b86002546001600160a01b031690565b611270565b506110c85f83611203565b5050600180546001600160d01b031916905550565b5f828152602081905260409020600101546110f781610cf9565b61095a8383611270565b610d0d5f806112e2565b6111158282610abe565b6105e25760405163e2517d3f60e01b81526001600160a01b038216600482015260248101839052604401610669565b600254600160d01b900465ffffffffffff1680156111c6574265ffffffffffff8216101561119d57600254600180546001600160d01b0316600160a01b9092046
5ffffffffffff16600160d01b029190911790556111c6565b6040517f2b1fa2edafe6f7b9e97c1a9e0c3660e645beb2dcaa2d45bdbf9beaf5472e1ec5905f90a15b50600280546001600160a01b0316600160a01b65ffffffffffff948516026001600160d01b031617600160d01b9290931691909102919091179055565b5f8261125f575f61121c6002546001600160a01b031690565b6001600160a01b03161461124357604051631fe1e13d60e11b815260040160405180910390fd5b600280546001600160a01b0319166001600160a01b0384161790555b61126983836113b0565b9392505050565b5f8215801561128c57506002546001600160a01b038381169116145b156112a257600280546001600160a01b03191690555b611269838361143f565b5f65ffffffffffff8211156112de576040516306dfcc6560e41b81526030600482015260248101839052604401610669565b5090565b5f6112eb610c6a565b6001805465ffffffffffff8616600160a01b026001600160d01b03199091166001600160a01b03881617179055915061132d90508165ffffffffffff16151590565b15610d66576040517f8886ebfc4259abdbc16601dd8fb5678e54878f47b3c34836cfc51154a9605109905f90a1505050565b5f80611369610bcd565b90508065ffffffffffff168365ffffffffffff16116113915761138c838261183c565b611269565b61126965ffffffffffff8416620697805f828218828410028218611269565b5f6113bb8383610abe565b611438575f838152602081815260408083206001600160a01b03861684529091529020805460ff191660011790556113f03390565b6001600160a01b0316826001600160a01b0316847f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d60405160405180910390a450600161059f565b505f61059f565b5f61144a8383610abe565b15611438575f838152602081815260408083206001600160a01b0386168085529252808320805460ff1916905551339286917ff6391f5c32d9c69d2a47ea670b442974b53935d1edc7fd64eb21e047a839171b9190a450600161059f565b5f602082840312156114b8575f80fd5b81356001600160e01b031981168114611269575f80fd5b5f602082840312156114df575f80fd5b5035919050565b80356001600160a01b03811681146114fc575f80fd5b919050565b5f8060408385031215611512575f80fd5b82359150611522602084016114e6565b90509250929050565b5f806020838503121561153c575f80fd5b823567ffffffffffffffff80821115611553575f80fd5b818501915085601f830112611566575f80fd5b8135818111156
11574575f80fd5b8660208260071b8501011115611588575f80fd5b60209290920196919550909350505050565b5f805f80608085870312156115ad575f80fd5b843593506115bd602086016114e6565b92506115cb604086016114e6565b9396929550929360600135925050565b5f602082840312156115eb575f80fd5b611269826114e6565b5f60208284031215611604575f80fd5b813565ffffffffffff81168114611269575f80fd5b5f60a08284031215611629575f80fd5b60405160a0810181811067ffffffffffffffff8211171561165857634e487b7160e01b5f52604160045260245ffd5b80604052508091508235815260208301356020820152604083013560408201526060830135606082015261168e608084016114e6565b60808201525092915050565b5f805f805f805f610140888a0312156116b1575f80fd5b6116bb8989611619565b965060a0880135955060c088013560ff811681146116d7575f80fd5b945060e08801359350610100880135925061012088013567ffffffffffffffff80821115611703575f80fd5b818a0191508a601f830112611716575f80fd5b813581811115611724575f80fd5b8b6020828501011115611735575f80fd5b60208301945080935050505092959891949750929550565b5f8060c0838503121561175e575f80fd5b6117688484611619565b9460a0939093013593505050565b634e487b7160e01b5f52603260045260245ffd5b634e487b7160e01b5f52601160045260245ffd5b8181038181111561059f5761059f61178a565b5f602082840312156117c1575f80fd5b81518015158114611269575f80fd5b60208152816020820152818360408301375f818301604090810191909152601f909201601f19160101919050565b65ffffffffffff81811683821601908082111561181d5761181d61178a565b5092915050565b5f600182016118355761183561178a565b5060010190565b65ffffffffffff82811682821603908082111561181d5761181d61178a56fea2646970667358221220111de8e40c8e2761ed9ab04f385dfef1dffcd646c5a270f4fc3dc0858a0d605764736f6c63430008190033","sourceMap":"281:7248:35:-:0;;;3619:155;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;2256:44:34;;;;3753:6:35;3761:5;-1:-1:-1;;;;;2384:33:23;;2380:115;;2440:44;;-1:-1:-1;;;2440:44:23;;2481:1;2440:44;;;516:51:38;489:18;;2440:44:23;;;;;;;2380:115;2504:13;:28;;-1:-1:-1;;;;;2504:28:23;-1:-1:-1;;;2504:28:23;;;;;;;2542:51;-1:-1:-1;2573:19:23;2542:10;:51::i;:::-;;2308:292;;3619:155:35;;281:7248;;5509:370
:23;5595:4;5615;5611:214;;5687:1;5661:14;6786:20;;-1:-1:-1;;;;;6786:20:23;;6707:106;5661:14;-1:-1:-1;;;;;5661:28:23;;5657:114;;5716:40;;-1:-1:-1;;;5716:40:23;;;;;;;;;;;5657:114;5784:20;:30;;-1:-1:-1;;;;;;5784:30:23;-1:-1:-1;;;;;5784:30:23;;;;;5611:214;5841:31;5858:4;5864:7;5841:16;:31::i;:::-;5834:38;;5509:370;;;;;:::o;6179:316:21:-;6256:4;2954:12;;;;;;;;;;;-1:-1:-1;;;;;2954:29:21;;;;;;;;;;;;6272:217;;6315:6;:12;;;;;;;;;;;-1:-1:-1;;;;;6315:29:21;;;;;;;;;:36;;-1:-1:-1;;6315:36:21;6347:4;6315:36;;;6397:12;735:10:27;;656:96;6397:12:21;-1:-1:-1;;;;;6370:40:21;6388:7;-1:-1:-1;;;;;6370:40:21;6382:4;6370:40;;;;;;;;;;-1:-1:-1;6431:4:21;6424:11;;6272:217;-1:-1:-1;6473:5:21;6466:12;;14:351:38;93:6;101;154:2;142:9;133:7;129:23;125:32;122:52;;;170:1;167;160:12;122:52;193:16;;252:2;237:18;;231:25;193:16;;-1:-1:-1;;;;;;285:31:38;;275:42;;265:70;;331:1;328;321:12;265:70;354:5;344:15;;;14:351;;;;;:::o;370:203::-;281:7248:35;;;;;;;;;;;;;;;;;","linkReferences":{}},"deployedBytecode":{"object":"0x608060405260043610610184575f3560e01c80637e82bb01116100d0578063c7bc4a6211610089578063cf6eefb711610063578063cf6eefb7146104b7578063d547741f146104f1578063d602b9fd14610510578063ea3b9ba114610524576101b5565b8063c7bc4a6214610470578063cc8463c81461048f578063cefc1429146104a3576101b5565b80637e82bb011461039b57806384ef8ffc146103c65780638da5cb5b146103f757806391d148541461040b578063a1eda53c1461042a578063a217fddf1461045d576101b5565b806336568abe1161013d5780634842855c116101175780634842855c1461031a578063634e93da1461033e578063649a5ec71461035d5780637e5692741461037c576101b5565b806336568abe146102d557806336702119146102f45780633805c6bd14610307576101b5565b806301ffc9a7146101df578063022d63fb146102135780630aa6220b1461023b5780631e6637201461024f578063248a9ca3146102885780632f2ff15d146102b6576101b5565b366101b5576101b37f000000000000000000000000000000000000000000000000000000000000000033610532565b005b6101b37f000000000000000000000000000000000000000000000000000000000000000033610532565b3480156101ea575f80fd5b506101fe6101f93660046114a
8565b61057b565b60405190151581526020015b60405180910390f35b34801561021e575f80fd5b50620697805b60405165ffffffffffff909116815260200161020a565b348015610246575f80fd5b506101b36105a5565b34801561025a575f80fd5b5061027a6102693660046114cf565b60036020525f908152604090205481565b60405190815260200161020a565b348015610293575f80fd5b5061027a6102a23660046114cf565b5f9081526020819052604090206001015490565b3480156102c1575f80fd5b506101b36102d0366004611501565b6105ba565b3480156102e0575f80fd5b506101b36102ef366004611501565b6105e6565b6101b361030236600461152b565b610691565b6101b361031536600461159a565b610960565b348015610325575f80fd5b5061027a6d53455155454e4345525f524f4c4560901b81565b348015610349575f80fd5b506101b36103583660046115db565b610a31565b348015610368575f80fd5b506101b36103773660046115f4565b610a44565b348015610387575f80fd5b506101b361039636600461169a565b610a57565b3480156103a6575f80fd5b5061027a6103b53660046114cf565b60046020525f908152604090205481565b3480156103d1575f80fd5b506002546001600160a01b03165b6040516001600160a01b03909116815260200161020a565b348015610402575f80fd5b506103df610aa6565b348015610416575f80fd5b506101fe610425366004611501565b610abe565b348015610435575f80fd5b5061043e610ae6565b6040805165ffffffffffff93841681529290911660208301520161020a565b348015610468575f80fd5b5061027a5f81565b34801561047b575f80fd5b5061027a61048a36600461174d565b610b38565b34801561049a575f80fd5b50610224610bcd565b3480156104ae575f80fd5b506101b3610c2b565b3480156104c2575f80fd5b506104cb610c6a565b604080516001600160a01b03909316835265ffffffffffff90911660208301520161020a565b3480156104fc575f80fd5b506101b361050b366004611501565b610c8b565b34801561051b575f80fd5b506101b3610cb3565b6101b3610532366004611501565b604080518381523460208201526001600160a01b038316915f917fe8a7ca8155e14d9cc8faeecec58a97268da95a2327cc892593748ce37cc6953f910160405180910390a35050565b5f6001600160e01b031982166318a4c3c360e11b148061059f575061059f82610cc5565b92915050565b5f6105af81610cf9565b6105b7610d03565b50565b816105d857604051631fe1e13d60e11b815260040160405180910390fd5b6105e28282610
d0f565b5050565b8115801561060157506002546001600160a01b038281169116145b15610687575f80610610610c6a565b90925090506001600160a01b038216151580610632575065ffffffffffff8116155b8061064557504265ffffffffffff821610155b15610672576040516319ca5ebb60e01b815265ffffffffffff821660048201526024015b60405180910390fd5b50506001805465ffffffffffff60a01b191690555b6105e28282610d33565b345f5b8281101561095a575f8484838181106106af576106af611776565b90506080020160200160208101906106c791906115db565b6001600160a01b03160361077e578383828181106106e7576106e7611776565b90506080020160400160208101906106ff91906115db565b6001600160a01b03166108fc85858481811061071d5761071d611776565b9050608002016060013590811502906040515f60405180830381858888f1935050505015801561074f573d5f803e3d5ffd5b5083838281811061076257610762611776565b9050608002016060013582610777919061179e565b9150610875565b83838281811061079057610790611776565b90506080020160200160208101906107a891906115db565b6001600160a01b03166323b872dd338686858181106107c9576107c9611776565b90506080020160400160208101906107e191906115db565b8787868181106107f3576107f3611776565b6040516001600160e01b031960e088901b1681526001600160a01b039586166004820152949093166024850152506060608090920201013560448201526064016020604051808303815f875af115801561084f573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061087391906117b1565b505b83838281811061088757610887611776565b905060800201604001602081019061089f91906115db565b6001600160a01b03168484838181106108ba576108ba611776565b90506080020160200160208101906108d291906115db565b6001600160a01b03167fe93d7a771f81dc20f1d474f6868677269fdfa09830508e48edb0aa4d6569983386868581811061090e5761090e611776565b9050608002015f013587878681811061092957610929611776565b9050608002016060013560405161094a929190918252602082015260400190565b60405180910390a3600101610694565b50505050565b6040516323b872dd60e01b8152336004820152306024820152604481018290526001600160a01b038316906323b872dd906064016020604051808303815f875af11580156109b0573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525
08101906109d491906117b1565b50826001600160a01b0316826001600160a01b03167fe8a7ca8155e14d9cc8faeecec58a97268da95a2327cc892593748ce37cc6953f8684604051610a23929190918252602082015260400190565b60405180910390a350505050565b5f610a3b81610cf9565b6105e282610d6b565b5f610a4e81610cf9565b6105e282610ddd565b610a648787878787610e4c565b7fc030727dea5440ebb1789967645e2595e4e67cf55821175a3f9f8b33aff41fa58282604051610a959291906117d0565b60405180910390a150505050505050565b5f610ab96002546001600160a01b031690565b905090565b5f918252602082815260408084206001600160a01b0393909316845291905290205460ff1690565b6002545f90600160d01b900465ffffffffffff168015158015610b1157504265ffffffffffff821610155b610b1c575f80610b30565b600254600160a01b900465ffffffffffff16815b915091509091565b81516020808401516060808601516040808801516080909801518151710696e6974342e73657175656e6365722e76360741b8188015246603282015260528101979097526072870194909452609286019190915260b28501969096521b6bffffffffffffffffffffffff191660d283015260e68083019390935283518083039093018352610106909101909252805191012090565b6002545f90600160d01b900465ffffffffffff168015158015610bf757504265ffffffffffff8216105b610c1257600154600160d01b900465ffffffffffff16610c25565b600254600160a01b900465ffffffffffff165b91505090565b5f610c34610c6a565b509050336001600160a01b03821614610c6257604051636116401160e11b8152336004820152602401610669565b6105b7611047565b6001546001600160a01b03811691600160a01b90910465ffffffffffff1690565b81610ca957604051631fe1e13d60e11b815260040160405180910390fd5b6105e282826110dd565b5f610cbd81610cf9565b6105b7611101565b5f6001600160e01b03198216637965db0b60e01b148061059f57506301ffc9a760e01b6001600160e01b031983161461059f565b6105b7813361110b565b610d0d5f80611144565b565b5f82815260208190526040902060010154610d2981610cf9565b61095a8383611203565b6001600160a01b0381163314610d5c5760405163334bd91960e11b815260040160405180910390fd5b610d668282611270565b505050565b5f610d74610bcd565b610d7d426112ac565b610d8791906117fe565b9050610d9382826112e2565b60405165ffffffffffff821681526001600160a01b038316907f337
7dc44241e779dd06afab5b788a35ca5f3b778836e2990bdb26a2a4b2e5ed69060200160405180910390a25050565b5f610de78261135f565b610df0426112ac565b610dfa91906117fe565b9050610e068282611144565b6040805165ffffffffffff8085168252831660208201527ff1038c18cf84a56e432fdbfaf746924b7ea511dfe03a6506a0ceba4888788d9b910160405180910390a15050565b84515f90815260036020526040812080549082610e6883611824565b91905055905085602001518114610e9557604051635f64988d60e11b815260048101829052602401610669565b8560400151421115610eba576040516378fd448d60e01b815260040160405180910390fd5b5f610ec58787610b38565b604080515f8082526020820180845284905260ff89169282019290925260608101879052608081018690529192509060019060a0016020604051602081039080840390855afa158015610f1a573d5f803e3d5ffd5b505050602060405103519050610f416d53455155454e4345525f524f4c4560901b82610abe565b610f6957604051639a7d38d960e01b81526001600160a01b0382166004820152602401610669565b87515f90815260046020526040902054439003610f9957604051632ce0494b60e01b815260040160405180910390fd5b87515f908152600460205260409081902043905551610ff2908990815181526020808301519082015260408083015190820152606080830151908201526080918201516001600160a01b03169181019190915260a00190565b6040518091039020816001600160a01b03167f9c5702b5639f451bda4f9dba7fdf9d125a675ccddd315b81ce962d3ddd986a238960405161103591815260200190565b60405180910390a35050505050505050565b5f80611051610c6a565b915091506110668165ffffffffffff16151590565b158061107a57504265ffffffffffff821610155b156110a2576040516319ca5ebb60e01b815265ffffffffffff82166004820152602401610669565b6110bd5f6110b86002546001600160a01b031690565b611270565b506110c85f83611203565b5050600180546001600160d01b031916905550565b5f828152602081905260409020600101546110f781610cf9565b61095a8383611270565b610d0d5f806112e2565b6111158282610abe565b6105e25760405163e2517d3f60e01b81526001600160a01b038216600482015260248101839052604401610669565b600254600160d01b900465ffffffffffff1680156111c6574265ffffffffffff8216101561119d57600254600180546001600160d01b0316600160a01b90920465ffffffffffff16600160d01b02919091179
0556111c6565b6040517f2b1fa2edafe6f7b9e97c1a9e0c3660e645beb2dcaa2d45bdbf9beaf5472e1ec5905f90a15b50600280546001600160a01b0316600160a01b65ffffffffffff948516026001600160d01b031617600160d01b9290931691909102919091179055565b5f8261125f575f61121c6002546001600160a01b031690565b6001600160a01b03161461124357604051631fe1e13d60e11b815260040160405180910390fd5b600280546001600160a01b0319166001600160a01b0384161790555b61126983836113b0565b9392505050565b5f8215801561128c57506002546001600160a01b038381169116145b156112a257600280546001600160a01b03191690555b611269838361143f565b5f65ffffffffffff8211156112de576040516306dfcc6560e41b81526030600482015260248101839052604401610669565b5090565b5f6112eb610c6a565b6001805465ffffffffffff8616600160a01b026001600160d01b03199091166001600160a01b03881617179055915061132d90508165ffffffffffff16151590565b15610d66576040517f8886ebfc4259abdbc16601dd8fb5678e54878f47b3c34836cfc51154a9605109905f90a1505050565b5f80611369610bcd565b90508065ffffffffffff168365ffffffffffff16116113915761138c838261183c565b611269565b61126965ffffffffffff8416620697805f828218828410028218611269565b5f6113bb8383610abe565b611438575f838152602081815260408083206001600160a01b03861684529091529020805460ff191660011790556113f03390565b6001600160a01b0316826001600160a01b0316847f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d60405160405180910390a450600161059f565b505f61059f565b5f61144a8383610abe565b15611438575f838152602081815260408083206001600160a01b0386168085529252808320805460ff1916905551339286917ff6391f5c32d9c69d2a47ea670b442974b53935d1edc7fd64eb21e047a839171b9190a450600161059f565b5f602082840312156114b8575f80fd5b81356001600160e01b031981168114611269575f80fd5b5f602082840312156114df575f80fd5b5035919050565b80356001600160a01b03811681146114fc575f80fd5b919050565b5f8060408385031215611512575f80fd5b82359150611522602084016114e6565b90509250929050565b5f806020838503121561153c575f80fd5b823567ffffffffffffffff80821115611553575f80fd5b818501915085601f830112611566575f80fd5b813581811115611574575f80fd5b8660208260071b8501011
115611588575f80fd5b60209290920196919550909350505050565b5f805f80608085870312156115ad575f80fd5b843593506115bd602086016114e6565b92506115cb604086016114e6565b9396929550929360600135925050565b5f602082840312156115eb575f80fd5b611269826114e6565b5f60208284031215611604575f80fd5b813565ffffffffffff81168114611269575f80fd5b5f60a08284031215611629575f80fd5b60405160a0810181811067ffffffffffffffff8211171561165857634e487b7160e01b5f52604160045260245ffd5b80604052508091508235815260208301356020820152604083013560408201526060830135606082015261168e608084016114e6565b60808201525092915050565b5f805f805f805f610140888a0312156116b1575f80fd5b6116bb8989611619565b965060a0880135955060c088013560ff811681146116d7575f80fd5b945060e08801359350610100880135925061012088013567ffffffffffffffff80821115611703575f80fd5b818a0191508a601f830112611716575f80fd5b813581811115611724575f80fd5b8b6020828501011115611735575f80fd5b60208301945080935050505092959891949750929550565b5f8060c0838503121561175e575f80fd5b6117688484611619565b9460a0939093013593505050565b634e487b7160e01b5f52603260045260245ffd5b634e487b7160e01b5f52601160045260245ffd5b8181038181111561059f5761059f61178a565b5f602082840312156117c1575f80fd5b81518015158114611269575f80fd5b60208152816020820152818360408301375f818301604090810191909152601f909201601f19160101919050565b65ffffffffffff81811683821601908082111561181d5761181d61178a565b5092915050565b5f600182016118355761183561178a565b5060010190565b65ffffffffffff82811682821603908082111561181d5761181d61178a56fea2646970667358221220111de8e40c8e2761ed9ab04f385dfef1dffcd646c5a270f4fc3dc0858a0d605764736f6c63430008190033","sourceMap":"281:7248:35:-:0;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;2632:39:34;2638:20;2660:10;2632:5;:39::i;:::-;281:7248:35;;2447:39:34;2453:20;2475:10;2447:5;:39::i;2667:219:23:-;;;;;;;;;;-1:-1:-1;2667:219:23;;;;;:::i;:::-;;:::i;:::-;;;470:14:38;;463:22;445:41;;433:2;418:18;2
667:219:23;;;;;;;;7766:108;;;;;;;;;;-1:-1:-1;7861:6:23;7766:108;;;671:14:38;659:27;;;641:46;;629:2;614:18;7766:108:23;497:196:38;10927:126:23;;;;;;;;;;;;;:::i;1478:47:35:-;;;;;;;;;;-1:-1:-1;1478:47:35;;;;;:::i;:::-;;;;;;;;;;;;;;;;;1029:25:38;;;1017:2;1002:18;1478:47:35;883:177:38;3810:120:21;;;;;;;;;;-1:-1:-1;3810:120:21;;;;;:::i;:::-;3875:7;3901:12;;;;;;;;;;:22;;;;3810:120;3198:265:23;;;;;;;;;;-1:-1:-1;3198:265:23;;;;;:::i;:::-;;:::i;4515:566::-;;;;;;;;;;-1:-1:-1;4515:566:23;;;;;:::i;:::-;;:::i;5794:881:34:-;;;;;;:::i;:::-;;:::i;3733:254::-;;;;;;:::i;:::-;;:::i;1256:66:35:-;;;;;;;;;;;;-1:-1:-1;;;1256:66:35;;8068:150:23;;;;;;;;;;-1:-1:-1;8068:150:23;;;;;:::i;:::-;;:::i;10296:145::-;;;;;;;;;;-1:-1:-1;10296:145:23;;;;;:::i;:::-;;:::i;5410:287:35:-;;;;;;;;;;-1:-1:-1;5410:287:35;;;;;:::i;:::-;;:::i;1708:55::-;;;;;;;;;;-1:-1:-1;1708:55:35;;;;;:::i;:::-;;;;;;;;;;;;;;6707:106:23;;;;;;;;;;-1:-1:-1;6786:20:23;;-1:-1:-1;;;;;6786:20:23;6707:106;;;-1:-1:-1;;;;;5436:32:38;;;5418:51;;5406:2;5391:18;6707:106:23;5272:203:38;2942:93:23;;;;;;;;;;;;;:::i;2854:136:21:-;;;;;;;;;;-1:-1:-1;2854:136:21;;;;;:::i;:::-;;:::i;7432:261:23:-;;;;;;;;;;;;;:::i;:::-;;;;5660:14:38;5701:15;;;5683:34;;5753:15;;;;5748:2;5733:18;;5726:43;5623:18;7432:261:23;5480:295:38;2187:49:21;;;;;;;;;;-1:-1:-1;2187:49:21;2232:4;2187:49;;7068:459:35;;;;;;;;;;-1:-1:-1;7068:459:35;;;;;:::i;:::-;;:::i;7130:229:23:-;;;;;;;;;;;;;:::i;9146:344::-;;;;;;;;;;;;;:::i;6886:171::-;;;;;;;;;;;;;:::i;:::-;;;;-1:-1:-1;;;;;6281:32:38;;;6263:51;;6362:14;6350:27;;;6345:2;6330:18;;6323:55;6236:18;6886:171:23;6091:293:38;3563:267:23;;;;;;;;;;-1:-1:-1;3563:267:23;;;;;:::i;:::-;;:::i;8706:128::-;;;;;;;;;;;;;:::i;3056:160:34:-;;;;;;:::i;:::-;3149:60;;;6822:25:38;;;3199:9:34;6878:2:38;6863:18;;6856:34;-1:-1:-1;;;;;3149:60:34;;;3178:1;;3149:60;;6795:18:38;3149:60:34;;;;;;;3056:160;;:::o;2667:219:23:-;2752:4;-1:-1:-1;;;;;;2775:64:23;;-1:-1:-1;;;2775:64:23;;:104;;;2843:36;2867:11;2843:23;:36::i;:::-;2768:111;2667:219;-1:-1:-1;;2667:219:23:o;109
27:126::-;2232:4:21;2464:16;2232:4;2464:10;:16::i;:::-;11018:28:23::1;:26;:28::i;:::-;10927:126:::0;:::o;3198:265::-;3317:4;3313:104;;3366:40;;-1:-1:-1;;;3366:40:23;;;;;;;;;;;3313:104;3426:30;3442:4;3448:7;3426:15;:30::i;:::-;3198:265;;:::o;4515:566::-;4637:26;;:55;;;;-1:-1:-1;6786:20:23;;-1:-1:-1;;;;;4667:25:23;;;6786:20;;4667:25;4637:55;4633:399;;;4709:23;4734:15;4753:21;:19;:21::i;:::-;4708:66;;-1:-1:-1;4708:66:23;-1:-1:-1;;;;;;4792:29:23;;;;;:58;;-1:-1:-1;14557:13:23;;;;4792:58;:91;;;-1:-1:-1;14785:15:23;14774:26;;;;4854:29;4792:91;4788:185;;;4910:48;;-1:-1:-1;;;4910:48:23;;671:14:38;659:27;;4910:48:23;;;641:46:38;614:18;;4910:48:23;;;;;;;;4788:185;-1:-1:-1;;4993:28:23;4986:35;;-1:-1:-1;;;;4986:35:23;;;4633:399;5041:33;5060:4;5066:7;5041:18;:33::i;5794:881:34:-;5895:9;5872:20;5914:755;5934:17;;;5914:755;;;6033:1;6006:6;;6013:1;6006:9;;;;;;;:::i;:::-;;;;;;:15;;;;;;;;;;:::i;:::-;-1:-1:-1;;;;;6006:29:34;;6002:527;;6121:6;;6128:1;6121:9;;;;;;;:::i;:::-;;;;;;:19;;;;;;;;;;:::i;:::-;-1:-1:-1;;;;;6113:37:34;:55;6151:6;;6158:1;6151:9;;;;;;;:::i;:::-;;;;;;:16;;;6113:55;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;6320:6;;6327:1;6320:9;;;;;;;:::i;:::-;;;;;;:16;;;6304:32;;;;;:::i;:::-;;;6002:527;;;6434:6;;6441:1;6434:9;;;;;;;:::i;:::-;;;;;;:15;;;;;;;;;;:::i;:::-;-1:-1:-1;;;;;6427:36:34;;6464:10;6476:6;;6483:1;6476:9;;;;;;;:::i;:::-;;;;;;:19;;;;;;;;;;:::i;:::-;6497:6;;6504:1;6497:9;;;;;;;:::i;:::-;6427:87;;-1:-1:-1;;;;;;6427:87:34;;;;;;;-1:-1:-1;;;;;7556:15:38;;;6427:87:34;;;7538:34:38;7608:15;;;;7588:18;;;7581:43;-1:-1:-1;6497:16:34;:9;;;;;:16;;7640:18:38;;;7633:34;7473:18;;6427:87:34;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;6002:527;6620:6;;6627:1;6620:9;;;;;;;:::i;:::-;;;;;;:19;;;;;;;;;;:::i;:::-;-1:-1:-1;;;;;6567:91:34;6603:6;;6610:1;6603:9;;;;;;;:::i;:::-;;;;;;:15;;;;;;;;;;:::i;:::-;-1:-1:-1;;;;;6567:91:34;;6578:6;;6585:1;6578:9;;;;;;;:::i;:::-;;;;;;:23;;;6641:6;;6648:1;6641:9;;;;;;;:::i;:::-;;;;;;:16;;;6567:91;;;;;;6822:25:38;;;6878:2;6863:18;;6856:34;6810:2;
6795:18;;6648:248;6567:91:34;;;;;;;;5953:3;;5914:755;;;;5862:813;5794:881;;:::o;3733:254::-;3852:61;;-1:-1:-1;;;3852:61:34;;3879:10;3852:61;;;7538:34:38;3899:4:34;7588:18:38;;;7581:43;7640:18;;;7633:34;;;-1:-1:-1;;;;;3852:26:34;;;;;7473:18:38;;3852:61:34;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;3956:15;-1:-1:-1;;;;;3928:52:34;3949:5;-1:-1:-1;;;;;3928:52:34;;3934:13;3973:6;3928:52;;;;;;6822:25:38;;;6878:2;6863:18;;6856:34;6810:2;6795:18;;6648:248;3928:52:34;;;;;;;;3733:254;;;;:::o;8068:150:23:-;2232:4:21;2464:16;2232:4;2464:10;:16::i;:::-;8175:36:23::1;8202:8;8175:26;:36::i;10296:145::-:0;2232:4:21;2464:16;2232:4;2464:10;:16::i;:::-;10400:34:23::1;10425:8;10400:24;:34::i;5410:287:35:-:0;5611:44;5624:6;5632:13;5647:1;5650;5653;5611:12;:44::i;:::-;5670:20;5680:9;;5670:20;;;;;;;:::i;:::-;;;;;;;;5410:287;;;;;;;:::o;2942:93:23:-;2988:7;3014:14;6786:20;;-1:-1:-1;;;;;6786:20:23;;6707:106;3014:14;3007:21;;2942:93;:::o;2854:136:21:-;2931:4;2954:12;;;;;;;;;;;-1:-1:-1;;;;;2954:29:21;;;;;;;;;;;;;;;2854:136::o;7432:261:23:-;7552:21;;7497:15;;-1:-1:-1;;;7552:21:23;;;;14557:13;;;7591:57;;;;-1:-1:-1;14785:15:23;14774:26;;;;7619:29;7591:57;7590:96;;7681:1;7684;7590:96;;;7653:13;;-1:-1:-1;;;7653:13:23;;;;7668:8;7590:96;7583:103;;;;7432:261;;:::o;7068:459:35:-;7304:20;;7338:15;;;;;7367;;;;;7396:16;;;;;7426:20;;;;;7213:270;;-1:-1:-1;;;7213:270:35;;;8751:33:38;7277:13:35;8800:12:38;;;8793:28;8837:12;;;8830:28;;;;8874:12;;;8867:28;;;;8911:13;;;8904:29;;;;8949:13;;;8942:29;;;;9006:15;-1:-1:-1;;9002:53:38;8987:13;;;8980:76;9072:13;;;;9065:29;;;;7213:270:35;;;;;;;;;;9110:13:38;;;;7213:270:35;;;7502:18;;;;;;7068:459::o;7130:229:23:-;7224:21;;7188:6;;-1:-1:-1;;;7224:21:23;;;;14557:13;;;7263:56;;;;-1:-1:-1;14785:15:23;14774:26;;;;7291:28;7262:90;;7339:13;;-1:-1:-1;;;7339:13:23;;;;7262:90;;;7323:13;;-1:-1:-1;;;7323:13:23;;;;7262:90;7255:97;;;7130:229;:::o;9146:344::-;9210:23;9239:21;:19;:21::i;:::-;-1:-1:-1;9209:51:23;-1:-1:-1;735:10:27;-1:-1:-1;;;;;9274:31:23;;;9270:175;;9388
:46;;-1:-1:-1;;;9388:46:23;;735:10:27;9388:46:23;;;5418:51:38;5391:18;;9388:46:23;5272:203:38;9270:175:23;9454:29;:27;:29::i;6886:171::-;6999:20;;-1:-1:-1;;;;;6999:20:23;;;-1:-1:-1;;;7021:28:23;;;;;;6886:171::o;3563:267::-;3683:4;3679:104;;3732:40;;-1:-1:-1;;;3732:40:23;;;;;;;;;;;3679:104;3792:31;3809:4;3815:7;3792:16;:31::i;8706:128::-;2232:4:21;2464:16;2232:4;2464:10;:16::i;:::-;8798:29:23::1;:27;:29::i;2565:202:21:-:0;2650:4;-1:-1:-1;;;;;;2673:47:21;;-1:-1:-1;;;2673:47:21;;:87;;-1:-1:-1;;;;;;;;;;862:40:29;;;2724:36:21;763:146:29;3199:103:21;3265:30;3276:4;735:10:27;3265::21;:30::i;11180:94:23:-;11245:22;11262:1;11265;11245:16;:22::i;:::-;11180:94::o;4226:136:21:-;3875:7;3901:12;;;;;;;;;;:22;;;2464:16;2475:4;2464:10;:16::i;:::-;4330:25:::1;4341:4;4347:7;4330:10;:25::i;5328:245::-:0;-1:-1:-1;;;;;5421:34:21;;735:10:27;5421:34:21;5417:102;;5478:30;;-1:-1:-1;;;5478:30:21;;;;;;;;;;;5417:102;5529:37;5541:4;5547:18;5529:11;:37::i;:::-;;5328:245;;:::o;8345:288:23:-;8426:18;8484:19;:17;:19::i;:::-;8447:34;8465:15;8447:17;:34::i;:::-;:56;;;;:::i;:::-;8426:77;;8513:46;8537:8;8547:11;8513:23;:46::i;:::-;8574:52;;671:14:38;659:27;;641:46;;-1:-1:-1;;;;;8574:52:23;;;;;629:2:38;614:18;8574:52:23;;;;;;;8416:217;8345:288;:::o;10566:::-;10644:18;10702:26;10719:8;10702:16;:26::i;:::-;10665:34;10683:15;10665:17;:34::i;:::-;:63;;;;:::i;:::-;10644:84;;10738:39;10755:8;10765:11;10738:16;:39::i;:::-;10792:55;;;5660:14:38;5701:15;;;5683:34;;5753:15;;5748:2;5733:18;;5726:43;10792:55:23;;5623:18:38;10792:55:23;;;;;;;10634:220;10566:288;:::o;5703:1152:35:-;5931:20;;5894:21;5918:34;;;:12;:34;;;;;:36;;;5894:21;5918:36;;;:::i;:::-;;;;;5894:60;;5985:6;:15;;;5968:13;:32;5964:71;;6009:26;;-1:-1:-1;;;6009:26:35;;;;;1029:25:38;;;1002:18;;6009:26:35;883:177:38;5964:71:35;6121:6;:16;;;6103:15;:34;6099:61;;;6146:14;;-1:-1:-1;;;6146:14:35;;;;;;;;;;;6099:61;6232:19;6254:38;6270:6;6278:13;6254:15;:38::i;:::-;6322:31;;;6302:17;6322:31;;;;;;;;;9682:25:38;;;9755:4;9743:17;;9723:18;;;9716:45;;;;9777:18;;;9770:
34;;;9820:18;;;9813:34;;;6232:60:35;;-1:-1:-1;6302:17:35;6322:31;;9654:19:38;;6322:31:35;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;6302:51;;6440:34;-1:-1:-1;;;6464:9:35;6440:7;:34::i;:::-;6435:71;;6483:23;;-1:-1:-1;;;6483:23:35;;-1:-1:-1;;;;;5436:32:38;;6483:23:35;;;5418:51:38;5391:18;;6483:23:35;5272:203:38;6435:71:35;6621:20;;6600:42;;;;:20;:42;;;;;;6646:12;6600:58;;6596:99;;6667:28;;-1:-1:-1;;;6667:28:35;;;;;;;;;;;6596:99;6726:20;;6705:42;;;;:20;:42;;;;;;;6750:12;6705:57;;6800:48;;;6726:6;;10059:13:38;;10047:26;;10123:4;10111:17;;;10105:24;10089:14;;;10082:48;10178:2;10166:15;;;10160:22;10146:12;;;10139:44;10231:2;10219:15;;;10213:22;10199:12;;;10192:44;10289:3;10277:16;;;10271:23;-1:-1:-1;;;;;10267:49:38;10252:13;;;10245:72;;;;10304:3;10333:13;;9858:494;6800:48:35;;;;;;;;6815:9;-1:-1:-1;;;;;6800:48:35;;6834:13;6800:48;;;;1029:25:38;;1017:2;1002:18;;883:177;6800:48:35;;;;;;;;5815:1040;;;5703:1152;;;;;:::o;9618:474:23:-;9685:16;9703:15;9722:21;:19;:21::i;:::-;9684:59;;;;9758:24;9773:8;14557:13;;;;;14471:106;9758:24;9757:25;:58;;;-1:-1:-1;14785:15:23;14774:26;;;;9786:29;9757:58;9753:144;;;9838:48;;-1:-1:-1;;;9838:48:23;;671:14:38;659:27;;9838:48:23;;;641:46:38;614:18;;9838:48:23;497:196:38;9753:144:23;9906:47;2232:4:21;9938:14:23;6786:20;;-1:-1:-1;;;;;6786:20:23;;6707:106;9938:14;9906:11;:47::i;:::-;-1:-1:-1;9963:40:23;2232:4:21;9994:8:23;9963:10;:40::i;:::-;-1:-1:-1;;10020:20:23;10013:27;;-1:-1:-1;;;;;;10050:35:23;;;-1:-1:-1;9618:474:23:o;4642:138:21:-;3875:7;3901:12;;;;;;;;;;:22;;;2464:16;2475:4;2464:10;:16::i;:::-;4747:26:::1;4759:4;4765:7;4747:11;:26::i;8962:111:23:-:0;9028:38;9060:1;9064;9028:23;:38::i;3432:197:21:-;3520:22;3528:4;3534:7;3520;:22::i;:::-;3515:108;;3565:47;;-1:-1:-1;;;3565:47:21;;-1:-1:-1;;;;;10549:32:38;;3565:47:21;;;10531:51:38;10598:18;;;10591:34;;;10504:18;;3565:47:21;10357:274:38;13741:585:23;13843:21;;-1:-1:-1;;;13843:21:23;;;;14557:13;;13875:365;;14785:15;14774:26;;;;13922:308;;;14040:13;;14024;:29;;-1:-1:-1;;;;;14024:29:23;-1:-1:-1;;;14040:13:2
3;;;;;-1:-1:-1;;;14024:29:23;;;;;;;13922:308;;;14182:33;;;;;;;13922:308;-1:-1:-1;14250:13:23;:24;;-1:-1:-1;;;;;14284:35:23;-1:-1:-1;;;14250:24:23;;;;;-1:-1:-1;;;;;14284:35:23;;-1:-1:-1;;;14284:35:23;;;;;;;;;;;;;;13741:585::o;5509:370::-;5595:4;5615;5611:214;;5687:1;5661:14;6786:20;;-1:-1:-1;;;;;6786:20:23;;6707:106;5661:14;-1:-1:-1;;;;;5661:28:23;;5657:114;;5716:40;;-1:-1:-1;;;5716:40:23;;;;;;;;;;;5657:114;5784:20;:30;;-1:-1:-1;;;;;;5784:30:23;-1:-1:-1;;;;;5784:30:23;;;;;5611:214;5841:31;5858:4;5864:7;5841:16;:31::i;:::-;5834:38;5509:370;-1:-1:-1;;;5509:370:23:o;5946:271::-;6033:4;6053:26;;:55;;;;-1:-1:-1;6786:20:23;;-1:-1:-1;;;;;6083:25:23;;;6786:20;;6083:25;6053:55;6049:113;;;6131:20;6124:27;;-1:-1:-1;;;;;;6124:27:23;;;6049:113;6178:32;6196:4;6202:7;6178:17;:32::i;14296:213:32:-;14352:6;14382:16;14374:24;;14370:103;;;14421:41;;-1:-1:-1;;;14421:41:32;;14452:2;14421:41;;;10817:36:38;10869:18;;;10862:34;;;10790:18;;14421:41:32;10636:266:38;14370:103:32;-1:-1:-1;14496:5:32;14296:213::o;13062:525:23:-;13154:18;13176:21;:19;:21::i;:::-;13208:20;:31;;13249:42;;;-1:-1:-1;;;13249:42:23;-1:-1:-1;;;;;;13249:42:23;;;-1:-1:-1;;;;;13208:31:23;;13249:42;;;;13151:46;-1:-1:-1;13403:27:23;;-1:-1:-1;13151:46:23;14557:13;;;;;14471:106;13403:27;13399:182;;;13540:30;;;;;;;13141:446;13062:525;;:::o;11621:1249::-;11695:6;11713:19;11735;:17;:19::i;:::-;11713:41;;12684:12;12673:23;;:8;:23;;;:190;;12840:23;12855:8;12840:12;:23;:::i;:::-;12673:190;;;12722:51;;;;7861:6;3429:7:31;3066:5;;;3463;;;3065:36;3060:42;;3455:20;2825:294;6179:316:21;6256:4;6277:22;6285:4;6291:7;6277;:22::i;:::-;6272:217;;6315:6;:12;;;;;;;;;;;-1:-1:-1;;;;;6315:29:21;;;;;;;;;:36;;-1:-1:-1;;6315:36:21;6347:4;6315:36;;;6397:12;735:10:27;;656:96;6397:12:21;-1:-1:-1;;;;;6370:40:21;6388:7;-1:-1:-1;;;;;6370:40:21;6382:4;6370:40;;;;;;;;;;-1:-1:-1;6431:4:21;6424:11;;6272:217;-1:-1:-1;6473:5:21;6466:12;;6730:317;6808:4;6828:22;6836:4;6842:7;6828;:22::i;:::-;6824:217;;;6898:5;6866:12;;;;;;;;;;;-1:-1:-1;;;;;6866:29:21;;;;;;;;;;:37;
;-1:-1:-1;;6866:37:21;;;6922:40;735:10:27;;6866:12:21;;6922:40;;6898:5;6922:40;-1:-1:-1;6983:4:21;6976:11;;14:286:38;72:6;125:2;113:9;104:7;100:23;96:32;93:52;;;141:1;138;131:12;93:52;167:23;;-1:-1:-1;;;;;;219:32:38;;209:43;;199:71;;266:1;263;256:12;698:180;757:6;810:2;798:9;789:7;785:23;781:32;778:52;;;826:1;823;816:12;778:52;-1:-1:-1;849:23:38;;698:180;-1:-1:-1;698:180:38:o;1432:173::-;1500:20;;-1:-1:-1;;;;;1549:31:38;;1539:42;;1529:70;;1595:1;1592;1585:12;1529:70;1432:173;;;:::o;1610:254::-;1678:6;1686;1739:2;1727:9;1718:7;1714:23;1710:32;1707:52;;;1755:1;1752;1745:12;1707:52;1791:9;1778:23;1768:33;;1820:38;1854:2;1843:9;1839:18;1820:38;:::i;:::-;1810:48;;1610:254;;;;;:::o;1869:645::-;1985:6;1993;2046:2;2034:9;2025:7;2021:23;2017:32;2014:52;;;2062:1;2059;2052:12;2014:52;2102:9;2089:23;2131:18;2172:2;2164:6;2161:14;2158:34;;;2188:1;2185;2178:12;2158:34;2226:6;2215:9;2211:22;2201:32;;2271:7;2264:4;2260:2;2256:13;2252:27;2242:55;;2293:1;2290;2283:12;2242:55;2333:2;2320:16;2359:2;2351:6;2348:14;2345:34;;;2375:1;2372;2365:12;2345:34;2428:7;2423:2;2413:6;2410:1;2406:14;2402:2;2398:23;2394:32;2391:45;2388:65;;;2449:1;2446;2439:12;2388:65;2480:2;2472:11;;;;;2502:6;;-1:-1:-1;1869:645:38;;-1:-1:-1;;;;1869:645:38:o;2519:397::-;2605:6;2613;2621;2629;2682:3;2670:9;2661:7;2657:23;2653:33;2650:53;;;2699:1;2696;2689:12;2650:53;2735:9;2722:23;2712:33;;2764:38;2798:2;2787:9;2783:18;2764:38;:::i;:::-;2754:48;;2821:38;2855:2;2844:9;2840:18;2821:38;:::i;:::-;2519:397;;;;-1:-1:-1;2811:48:38;;2906:2;2891:18;2878:32;;-1:-1:-1;;2519:397:38:o;2921:186::-;2980:6;3033:2;3021:9;3012:7;3008:23;3004:32;3001:52;;;3049:1;3046;3039:12;3001:52;3072:29;3091:9;3072:29;:::i;3112:280::-;3170:6;3223:2;3211:9;3202:7;3198:23;3194:32;3191:52;;;3239:1;3236;3229:12;3191:52;3278:9;3265:23;3328:14;3321:5;3317:26;3310:5;3307:37;3297:65;;3358:1;3355;3348:12;3397:779;3455:5;3503:4;3491:9;3486:3;3482:19;3478:30;3475:50;;;3521:1;3518;3511:12;3475:50;3554:2;3548:9;3596:4;3588:6;3584:17;3667:6;3655:10;3652:22;3631:1
8;3619:10;3616:34;3613:62;3610:185;;;3717:10;3712:3;3708:20;3705:1;3698:31;3752:4;3749:1;3742:15;3780:4;3777:1;3770:15;3610:185;3815:10;3811:2;3804:22;;3844:6;3835:15;;3887:9;3874:23;3866:6;3859:39;3959:2;3948:9;3944:18;3931:32;3926:2;3918:6;3914:15;3907:57;4025:2;4014:9;4010:18;3997:32;3992:2;3984:6;3980:15;3973:57;4091:2;4080:9;4076:18;4063:32;4058:2;4050:6;4046:15;4039:57;4130:39;4164:3;4153:9;4149:19;4130:39;:::i;:::-;4124:3;4116:6;4112:16;4105:65;;3397:779;;;;:::o;4181:1086::-;4324:6;4332;4340;4348;4356;4364;4372;4425:3;4413:9;4404:7;4400:23;4396:33;4393:53;;;4442:1;4439;4432:12;4393:53;4465:49;4506:7;4495:9;4465:49;:::i;:::-;4455:59;;4561:3;4550:9;4546:19;4533:33;4523:43;;4616:3;4605:9;4601:19;4588:33;4661:4;4654:5;4650:16;4643:5;4640:27;4630:55;;4681:1;4678;4671:12;4630:55;4704:5;-1:-1:-1;4756:3:38;4741:19;;4728:33;;-1:-1:-1;4808:3:38;4793:19;;4780:33;;-1:-1:-1;4864:3:38;4849:19;;4836:33;4888:18;4918:14;;;4915:34;;;4945:1;4942;4935:12;4915:34;4983:6;4972:9;4968:22;4958:32;;5028:7;5021:4;5017:2;5013:13;5009:27;4999:55;;5050:1;5047;5040:12;4999:55;5090:2;5077:16;5116:2;5108:6;5105:14;5102:34;;;5132:1;5129;5122:12;5102:34;5179:7;5172:4;5163:6;5159:2;5155:15;5151:26;5148:39;5145:59;;;5200:1;5197;5190:12;5145:59;5231:4;5227:2;5223:13;5213:23;;5255:6;5245:16;;;;;4181:1086;;;;;;;;;;:::o;5780:306::-;5878:6;5886;5939:3;5927:9;5918:7;5914:23;5910:33;5907:53;;;5956:1;5953;5946:12;5907:53;5979:49;6020:7;6009:9;5979:49;:::i;:::-;5969:59;6075:3;6060:19;;;;6047:33;;-1:-1:-1;;;5780:306:38:o;6901:127::-;6962:10;6957:3;6953:20;6950:1;6943:31;6993:4;6990:1;6983:15;7017:4;7014:1;7007:15;7033:127;7094:10;7089:3;7085:20;7082:1;7075:31;7125:4;7122:1;7115:15;7149:4;7146:1;7139:15;7165:128;7232:9;;;7253:11;;;7250:37;;;7267:18;;:::i;7678:277::-;7745:6;7798:2;7786:9;7777:7;7773:23;7769:32;7766:52;;;7814:1;7811;7804:12;7766:52;7846:9;7840:16;7899:5;7892:13;7885:21;7878:5;7875:32;7865:60;;7921:1;7918;7911:12;7960:388;8117:2;8106:9;8099:21;8156:6;8151:2;8140:9;8136:18;8129:34;8213:6;8205;
8200:2;8189:9;8185:18;8172:48;8269:1;8240:22;;;8264:2;8236:31;;;8229:42;;;;8332:2;8311:15;;;-1:-1:-1;;8307:29:38;8292:45;8288:54;;7960:388;-1:-1:-1;7960:388:38:o;9134:176::-;9201:14;9235:10;;;9247;;;9231:27;;9270:11;;;9267:37;;;9284:18;;:::i;:::-;9267:37;9134:176;;;;:::o;9315:135::-;9354:3;9375:17;;;9372:43;;9395:18;;:::i;:::-;-1:-1:-1;9442:1:38;9431:13;;9315:135::o;10907:179::-;10975:14;11022:10;;;11010;;;11006:27;;11045:12;;;11042:38;;;11060:18;;:::i","linkReferences":{},"immutableReferences":{"49726":[{"start":398,"length":32},{"start":442,"length":32}]}},"methodIdentifiers":{"DEFAULT_ADMIN_ROLE()":"a217fddf","SEQUENCER_ROLE()":"4842855c","acceptDefaultAdminTransfer()":"cefc1429","beginDefaultAdminTransfer(address)":"634e93da","blockCommitment((uint256,uint256,uint256,uint256,address),bytes32)":"c7bc4a62","cancelDefaultAdminTransfer()":"d602b9fd","changeDefaultAdminDelay(uint48)":"649a5ec7","defaultAdmin()":"84ef8ffc","defaultAdminDelay()":"cc8463c8","defaultAdminDelayIncreaseWait()":"022d63fb","enter(uint256,address)":"ea3b9ba1","enter(uint256,address,address,uint256)":"3805c6bd","fulfillExits((uint256,address,address,uint256)[])":"36702119","getRoleAdmin(bytes32)":"248a9ca3","grantRole(bytes32,address)":"2f2ff15d","hasRole(bytes32,address)":"91d14854","lastSubmittedAtBlock(uint256)":"7e82bb01","nextSequence(uint256)":"1e663720","owner()":"8da5cb5b","pendingDefaultAdmin()":"cf6eefb7","pendingDefaultAdminDelay()":"a1eda53c","renounceRole(bytes32,address)":"36568abe","revokeRole(bytes32,address)":"d547741f","rollbackDefaultAdminDelay()":"0aa6220b","submitBlock((uint256,uint256,uint256,uint256,address),bytes32,uint8,bytes32,bytes32,bytes)":"7e569274","supportsInterface(bytes4)":"01ffc9a7"},"rawMetadata":"{\"compiler\":{\"version\":\"0.8.25+commit.b61c2a91\"},\"language\":\"Solidity\",\"output\":{\"abi\":[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"defaultRollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"addr
ess\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"AccessControlBadConfirmation\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint48\",\"name\":\"schedule\",\"type\":\"uint48\"}],\"name\":\"AccessControlEnforcedDefaultAdminDelay\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"AccessControlEnforcedDefaultAdminRules\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"defaultAdmin\",\"type\":\"address\"}],\"name\":\"AccessControlInvalidDefaultAdmin\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"neededRole\",\"type\":\"bytes32\"}],\"name\":\"AccessControlUnauthorizedAccount\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"expected\",\"type\":\"uint256\"}],\"name\":\"BadSequence\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"derivedSequencer\",\"type\":\"address\"}],\"name\":\"BadSignature\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"BlockExpired\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OneRollupBlockPerHostBlock\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OrderExpired\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"bits\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"SafeCastOverflowedUintDowncast\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"blockData\",\"type\":\"bytes\"}],\"name\":\"BlockData\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sequencer\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"sequence\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"confirmBy\",\"type\":\"uint256\"}
,{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"rewardAddress\",\"type\":\"address\"}],\"indexed\":true,\"internalType\":\"struct Zenith.BlockHeader\",\"name\":\"header\",\"type\":\"tuple\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"blockDataHash\",\"type\":\"bytes32\"}],\"name\":\"BlockSubmitted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[],\"name\":\"DefaultAdminDelayChangeCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint48\",\"name\":\"newDelay\",\"type\":\"uint48\"},{\"indexed\":false,\"internalType\":\"uint48\",\"name\":\"effectSchedule\",\"type\":\"uint48\"}],\"name\":\"DefaultAdminDelayChangeScheduled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[],\"name\":\"DefaultAdminTransferCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newAdmin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint48\",\"name\":\"acceptSchedule\",\"type\":\"uint48\"}],\"name\":\"DefaultAdminTransferScheduled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"token\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"rollupRecipient\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"Enter\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"token\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"hostRecipient\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",
\"type\":\"uint256\"}],\"name\":\"ExitFilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"previousAdminRole\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"newAdminRole\",\"type\":\"bytes32\"}],\"name\":\"RoleAdminChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"RoleGranted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"RoleRevoked\",\"type\":\"event\"},{\"stateMutability\":\"payable\",\"type\":\"fallback\"},{\"inputs\":[],\"name\":\"DEFAULT_ADMIN_ROLE\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"SEQUENCER_ROLE\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptDefaultAdminTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newAdmin\",\"type\":\"address\"}],\"name\":\"beginDefaultAdminTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",
\"name\":\"sequence\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"confirmBy\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"rewardAddress\",\"type\":\"address\"}],\"internalType\":\"struct Zenith.BlockHeader\",\"name\":\"header\",\"type\":\"tuple\"},{\"internalType\":\"bytes32\",\"name\":\"blockDataHash\",\"type\":\"bytes32\"}],\"name\":\"blockCommitment\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"commit\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"cancelDefaultAdminTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint48\",\"name\":\"newDelay\",\"type\":\"uint48\"}],\"name\":\"changeDefaultAdminDelay\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"defaultAdmin\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"defaultAdminDelay\",\"outputs\":[{\"internalType\":\"uint48\",\"name\":\"\",\"type\":\"uint48\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"defaultAdminDelayIncreaseWait\",\"outputs\":[{\"internalType\":\"uint48\",\"name\":\"\",\"type\":\"uint48\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"rollupRecipient\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"token\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"enter\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"na
me\":\"rollupRecipient\",\"type\":\"address\"}],\"name\":\"enter\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"token\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"internalType\":\"struct Passage.ExitOrder[]\",\"name\":\"orders\",\"type\":\"tuple[]\"}],\"name\":\"fulfillExits\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"}],\"name\":\"getRoleAdmin\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"grantRole\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"hasRole\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"lastSubmittedAtBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"nextSequence\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":
\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pendingDefaultAdmin\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"newAdmin\",\"type\":\"address\"},{\"internalType\":\"uint48\",\"name\":\"schedule\",\"type\":\"uint48\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pendingDefaultAdminDelay\",\"outputs\":[{\"internalType\":\"uint48\",\"name\":\"newDelay\",\"type\":\"uint48\"},{\"internalType\":\"uint48\",\"name\":\"schedule\",\"type\":\"uint48\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"renounceRole\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"revokeRole\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"rollbackDefaultAdminDelay\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"sequence\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"confirmBy\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"rewardAddress\",\"type\":\"address\"}],\"internalType\":\"struct 
Zenith.BlockHeader\",\"name\":\"header\",\"type\":\"tuple\"},{\"internalType\":\"bytes32\",\"name\":\"blockDataHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint8\",\"name\":\"v\",\"type\":\"uint8\"},{\"internalType\":\"bytes32\",\"name\":\"r\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"s\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"blockData\",\"type\":\"bytes\"}],\"name\":\"submitBlock\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes4\",\"name\":\"interfaceId\",\"type\":\"bytes4\"}],\"name\":\"supportsInterface\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}],\"devdoc\":{\"errors\":{\"AccessControlBadConfirmation()\":[{\"details\":\"The caller of a function is not the expected one. NOTE: Don't confuse with {AccessControlUnauthorizedAccount}.\"}],\"AccessControlEnforcedDefaultAdminDelay(uint48)\":[{\"details\":\"The delay for transferring the default admin delay is enforced and the operation must wait until `schedule`. NOTE: `schedule` can be 0 indicating there's no transfer scheduled.\"}],\"AccessControlEnforcedDefaultAdminRules()\":[{\"details\":\"At least one of the following rules was violated: - The `DEFAULT_ADMIN_ROLE` must only be managed by itself. - The `DEFAULT_ADMIN_ROLE` must only be held by one account at the time. 
- Any `DEFAULT_ADMIN_ROLE` transfer must be in two delayed steps.\"}],\"AccessControlInvalidDefaultAdmin(address)\":[{\"details\":\"The new default admin is not a valid default admin.\"}],\"AccessControlUnauthorizedAccount(address,bytes32)\":[{\"details\":\"The `account` is missing a role.\"}],\"BadSequence(uint256)\":[{\"details\":\"Blocks must be submitted in strict monotonic increasing order.\",\"params\":{\"expected\":\"- the correct next sequence number for the given rollup chainId.\"}}],\"BadSignature(address)\":[{\"params\":{\"derivedSequencer\":\"- the derived signer of the block data that is not a permissioned sequencer.\"}}],\"SafeCastOverflowedUintDowncast(uint8,uint256)\":[{\"details\":\"Value doesn't fit in an uint of `bits` size.\"}]},\"events\":{\"BlockSubmitted(address,(uint256,uint256,uint256,uint256,address),bytes32)\":{\"params\":{\"blockDataHash\":\"- keccak256(blockData). the Node will discard the block if the hash doens't match.\",\"header\":\"- the block header information for the block.\",\"sequencer\":\"- the address of the sequencer that signed the block.\"}},\"DefaultAdminDelayChangeCanceled()\":{\"details\":\"Emitted when a {pendingDefaultAdminDelay} is reset if its schedule didn't pass.\"},\"DefaultAdminDelayChangeScheduled(uint48,uint48)\":{\"details\":\"Emitted when a {defaultAdminDelay} change is started, setting `newDelay` as the next delay to be applied between default admin transfer after `effectSchedule` has passed.\"},\"DefaultAdminTransferCanceled()\":{\"details\":\"Emitted when a {pendingDefaultAdmin} is reset if it was never accepted, regardless of its schedule.\"},\"DefaultAdminTransferScheduled(address,uint48)\":{\"details\":\"Emitted when a {defaultAdmin} transfer is started, setting `newAdmin` as the next address to become the {defaultAdmin} by calling {acceptDefaultAdminTransfer} only after `acceptSchedule` passes.\"},\"Enter(uint256,address,address,uint256)\":{\"params\":{\"amount\":\"- The amount of the token entering 
the rollup.\",\"rollupRecipient\":\"- The recipient of the token on the rollup.\",\"token\":\"- The address of the token entering the rollup.\"}},\"ExitFilled(uint256,address,address,uint256)\":{\"params\":{\"amount\":\"- The amount of the token transferred to the recipient.\",\"hostRecipient\":\"- The recipient of the token on host.\",\"token\":\"- The address of the token transferred to the recipient.\"}},\"RoleAdminChanged(bytes32,bytes32,bytes32)\":{\"details\":\"Emitted when `newAdminRole` is set as ``role``'s admin role, replacing `previousAdminRole` `DEFAULT_ADMIN_ROLE` is the starting admin for all roles, despite {RoleAdminChanged} not being emitted signaling this.\"},\"RoleGranted(bytes32,address,address)\":{\"details\":\"Emitted when `account` is granted `role`. `sender` is the account that originated the contract call. This account bears the admin role (for the granted role). Expected in cases where the role was granted using the internal {AccessControl-_grantRole}.\"},\"RoleRevoked(bytes32,address,address)\":{\"details\":\"Emitted when `account` is revoked `role`. `sender` is the account that originated the contract call: - if using `revokeRole`, it is the admin role bearer - if using `renounceRole`, it is the role bearer (i.e. `account`)\"}},\"kind\":\"dev\",\"methods\":{\"acceptDefaultAdminTransfer()\":{\"details\":\"Completes a {defaultAdmin} transfer previously started with {beginDefaultAdminTransfer}. After calling the function: - `DEFAULT_ADMIN_ROLE` should be granted to the caller. - `DEFAULT_ADMIN_ROLE` should be revoked from the previous holder. - {pendingDefaultAdmin} should be reset to zero values. Requirements: - Only can be called by the {pendingDefaultAdmin}'s `newAdmin`. - The {pendingDefaultAdmin}'s `acceptSchedule` should've passed.\"},\"beginDefaultAdminTransfer(address)\":{\"details\":\"Starts a {defaultAdmin} transfer by setting a {pendingDefaultAdmin} scheduled for acceptance after the current timestamp plus a {defaultAdminDelay}. 
Requirements: - Only can be called by the current {defaultAdmin}. Emits a DefaultAdminRoleChangeStarted event.\"},\"blockCommitment((uint256,uint256,uint256,uint256,address),bytes32)\":{\"params\":{\"header\":\"- the header information for the rollup block.\"},\"returns\":{\"commit\":\"- the hash of the encoded block details.\"}},\"cancelDefaultAdminTransfer()\":{\"details\":\"Cancels a {defaultAdmin} transfer previously started with {beginDefaultAdminTransfer}. A {pendingDefaultAdmin} not yet accepted can also be cancelled with this function. Requirements: - Only can be called by the current {defaultAdmin}. May emit a DefaultAdminTransferCanceled event.\"},\"changeDefaultAdminDelay(uint48)\":{\"details\":\"Initiates a {defaultAdminDelay} update by setting a {pendingDefaultAdminDelay} scheduled for getting into effect after the current timestamp plus a {defaultAdminDelay}. This function guarantees that any call to {beginDefaultAdminTransfer} done between the timestamp this method is called and the {pendingDefaultAdminDelay} effect schedule will use the current {defaultAdminDelay} set before calling. The {pendingDefaultAdminDelay}'s effect schedule is defined in a way that waiting until the schedule and then calling {beginDefaultAdminTransfer} with the new delay will take at least the same as another {defaultAdmin} complete transfer (including acceptance). The schedule is designed for two scenarios: - When the delay is changed for a larger one the schedule is `block.timestamp + newDelay` capped by {defaultAdminDelayIncreaseWait}. - When the delay is changed for a shorter one, the schedule is `block.timestamp + (current delay - new delay)`. A {pendingDefaultAdminDelay} that never got into effect will be canceled in favor of a new scheduled change. Requirements: - Only can be called by the current {defaultAdmin}. 
Emits a DefaultAdminDelayChangeScheduled event and may emit a DefaultAdminDelayChangeCanceled event.\"},\"constructor\":{\"details\":\"See `AccessControlDefaultAdminRules` for information on contract administration. - Admin role can grant and revoke Sequencer roles. - Admin role can be transferred via two-step process with a 1 day timelock.\",\"params\":{\"admin\":\"- the address that will be the initial admin.\"}},\"defaultAdmin()\":{\"details\":\"Returns the address of the current `DEFAULT_ADMIN_ROLE` holder.\"},\"defaultAdminDelay()\":{\"details\":\"Returns the delay required to schedule the acceptance of a {defaultAdmin} transfer started. This delay will be added to the current timestamp when calling {beginDefaultAdminTransfer} to set the acceptance schedule. NOTE: If a delay change has been scheduled, it will take effect as soon as the schedule passes, making this function returns the new delay. See {changeDefaultAdminDelay}.\"},\"defaultAdminDelayIncreaseWait()\":{\"details\":\"Maximum time in seconds for an increase to {defaultAdminDelay} (that is scheduled using {changeDefaultAdminDelay}) to take effect. Default to 5 days. When the {defaultAdminDelay} is scheduled to be increased, it goes into effect after the new delay has passed with the purpose of giving enough time for reverting any accidental change (i.e. using milliseconds instead of seconds) that may lock the contract. However, to avoid excessive schedules, the wait is capped by this function and it can be overrode for a custom {defaultAdminDelay} increase scheduling. IMPORTANT: Make sure to add a reasonable amount of time while overriding this value, otherwise, there's a risk of setting a high new delay that goes into effect almost immediately without the possibility of human intervention in the case of an input error (eg. 
set milliseconds instead of seconds).\"},\"enter(uint256,address)\":{\"custom:emits\":\"Enter indicating the amount of Ether to mint on the rollup & its recipient.\",\"details\":\"Permanently burns the entire msg.value by locking it in this contract.\",\"params\":{\"rollupChainId\":\"- The rollup chain to enter.\",\"rollupRecipient\":\"- The recipient of the Ether on the rollup.\"}},\"enter(uint256,address,address,uint256)\":{\"custom:emits\":\"Enter indicating the amount of tokens to mint on the rollup & its recipient.\",\"details\":\"Permanently burns the token amount by locking it in this contract.\",\"params\":{\"amount\":\"- The amount of the ERC20 token to transfer to the rollup.\",\"rollupChainId\":\"- The rollup chain to enter.\",\"rollupRecipient\":\"- The recipient of the Ether on the rollup.\",\"token\":\"- The address of the ERC20 token on the Host.\"}},\"fulfillExits((uint256,address,address,uint256)[])\":{\"custom:emits\":\"ExitFilled for each exit order fulfilled.\",\"details\":\"Builder SHOULD call `filfillExits` atomically with `submitBlock`. Builder SHOULD set a block expiration time that is AT MOST the minimum of all exit order deadlines; this way, `fulfillExits` + `submitBlock` will revert atomically on mainnet if any exit orders have expired. Otherwise, `filfillExits` may mine on mainnet, while `submitExit` reverts on the rollup, and the Builder can't collect the corresponding value on the rollup.Called by the Builder atomically with a transaction calling `submitBlock`. 
The user-submitted transactions initiating the ExitOrders on the rollup must be included by the Builder in the rollup block submitted via `submitBlock`.The user transfers tokenIn on the rollup, and receives tokenOut on host.The Builder receives tokenIn on the rollup, and transfers tokenOut to the user on host.The rollup STF MUST NOT apply `submitExit` transactions to the rollup state UNLESS a corresponding ExitFilled event is emitted on host in the same block.If the user submits multiple exit transactions for the same token in the same rollup block, the Builder may transfer the cumulative tokenOut to the user in a single ExitFilled event. The rollup STF will apply the user's exit transactions on the rollup up to the point that sum(tokenOut) is lte the ExitFilled amount. TODO: add option to fulfill ExitOrders with native ETH? or is it sufficient to only allow users to exit via WETH?\",\"params\":{\"orders\":\"The exit orders to fulfill\"}},\"getRoleAdmin(bytes32)\":{\"details\":\"Returns the admin role that controls `role`. See {grantRole} and {revokeRole}. To change a role's admin, use {_setRoleAdmin}.\"},\"grantRole(bytes32,address)\":{\"details\":\"See {AccessControl-grantRole}. Reverts for `DEFAULT_ADMIN_ROLE`.\"},\"hasRole(bytes32,address)\":{\"details\":\"Returns `true` if `account` has been granted `role`.\"},\"owner()\":{\"details\":\"See {IERC5313-owner}.\"},\"pendingDefaultAdmin()\":{\"details\":\"Returns a tuple of a `newAdmin` and an accept schedule. After the `schedule` passes, the `newAdmin` will be able to accept the {defaultAdmin} role by calling {acceptDefaultAdminTransfer}, completing the role transfer. A zero value only in `acceptSchedule` indicates no pending admin transfer. NOTE: A zero address `newAdmin` means that {defaultAdmin} is being renounced.\"},\"pendingDefaultAdminDelay()\":{\"details\":\"Returns a tuple of `newDelay` and an effect schedule. 
After the `schedule` passes, the `newDelay` will get into effect immediately for every new {defaultAdmin} transfer started with {beginDefaultAdminTransfer}. A zero value only in `effectSchedule` indicates no pending delay change. NOTE: A zero value only for `newDelay` means that the next {defaultAdminDelay} will be zero after the effect schedule.\"},\"renounceRole(bytes32,address)\":{\"details\":\"See {AccessControl-renounceRole}. For the `DEFAULT_ADMIN_ROLE`, it only allows renouncing in two steps by first calling {beginDefaultAdminTransfer} to the `address(0)`, so it's required that the {pendingDefaultAdmin} schedule has also passed when calling this function. After its execution, it will not be possible to call `onlyRole(DEFAULT_ADMIN_ROLE)` functions. NOTE: Renouncing `DEFAULT_ADMIN_ROLE` will leave the contract without a {defaultAdmin}, thereby disabling any functionality that is only available for it, and the possibility of reassigning a non-administrated role.\"},\"revokeRole(bytes32,address)\":{\"details\":\"See {AccessControl-revokeRole}. Reverts for `DEFAULT_ADMIN_ROLE`.\"},\"rollbackDefaultAdminDelay()\":{\"details\":\"Cancels a scheduled {defaultAdminDelay} change. Requirements: - Only can be called by the current {defaultAdmin}. 
May emit a DefaultAdminDelayChangeCanceled event.\"},\"submitBlock((uint256,uint256,uint256,uint256,address),bytes32,uint8,bytes32,bytes32,bytes)\":{\"custom:emits\":\"BlockSubmitted if the block is successfully submitted.BlockData to expose the block calldata; as a convenience until calldata tracing is implemented in the Node.\",\"custom:reverts\":\"BadSequence if the sequence number is not the next block for the given rollup chainId.BlockExpired if the confirmBy time has passed.BadSignature if the signer is not a permissioned sequencer, OR if the signature provided commits to a different header.OneRollupBlockPerHostBlock if attempting to submit a second rollup block within one host block.\",\"details\":\"Blocks are submitted by Builders, with an attestation to the block data signed by a Sequencer.including blockDataHash allows the sequencer to sign over finalized block data, without needing to calldatacopy the `blockData` param.\",\"params\":{\"blockData\":\"- block data information. could be packed blob hashes, or direct rlp-encoded transctions. blockData is ignored by the contract logic.\",\"blockDataHash\":\"- keccak256(blockData). 
the Node will discard the block if the hash doens't match.\",\"header\":\"- the header information for the rollup block.\",\"r\":\"- the r component of the Sequencer's ECSDA signature over the block header.\",\"s\":\"- the s component of the Sequencer's ECSDA signature over the block header.\",\"v\":\"- the v component of the Sequencer's ECSDA signature over the block header.\"}},\"supportsInterface(bytes4)\":{\"details\":\"See {IERC165-supportsInterface}.\"}},\"version\":1},\"userdoc\":{\"errors\":{\"BadSequence(uint256)\":[{\"notice\":\"Thrown when a block submission is attempted with a sequence number that is not the next block for the rollup chainId.\"}],\"BadSignature(address)\":[{\"notice\":\"Thrown when a block submission is attempted with a signature by a non-permissioned sequencer, OR when signature is produced over different data than is provided.\"}],\"BlockExpired()\":[{\"notice\":\"Thrown when a block submission is attempted when the confirmBy time has passed.\"}],\"OneRollupBlockPerHostBlock()\":[{\"notice\":\"Thrown when attempting to submit more than one rollup block per host block\"}],\"OrderExpired()\":[{\"notice\":\"Thrown when attempting to fulfill an exit order with a deadline that has passed.\"}]},\"events\":{\"BlockData(bytes)\":{\"notice\":\"Emit the entire block data for easy visibility\"},\"BlockSubmitted(address,(uint256,uint256,uint256,uint256,address),bytes32)\":{\"notice\":\"Emitted when a new rollup block is successfully submitted.\"},\"Enter(uint256,address,address,uint256)\":{\"notice\":\"Emitted when tokens enter the rollup.\"},\"ExitFilled(uint256,address,address,uint256)\":{\"notice\":\"Emitted when an exit order is fulfilled by the Builder.\"}},\"kind\":\"user\",\"methods\":{\"SEQUENCER_ROLE()\":{\"notice\":\"Role that allows a key to sign commitments to rollup blocks.\"},\"blockCommitment((uint256,uint256,uint256,uint256,address),bytes32)\":{\"notice\":\"Construct hash of block details that the sequencer 
signs.\"},\"constructor\":{\"notice\":\"Initializes the Admin role.\"},\"enter(uint256,address)\":{\"notice\":\"Allows native Ether to enter the rollup.\"},\"enter(uint256,address,address,uint256)\":{\"notice\":\"Allows ERC20s to enter the rollup.\"},\"fulfillExits((uint256,address,address,uint256)[])\":{\"notice\":\"Fulfills exit orders by transferring tokenOut to the recipient\"},\"lastSubmittedAtBlock(uint256)\":{\"notice\":\"The host block number that a block was last submitted at for a given rollup chainId. rollupChainId => host blockNumber that block was last submitted at\"},\"nextSequence(uint256)\":{\"notice\":\"The sequence number of the next block that can be submitted for a given rollup chainId. rollupChainId => nextSequence number\"},\"submitBlock((uint256,uint256,uint256,uint256,address),bytes32,uint8,bytes32,bytes32,bytes)\":{\"notice\":\"Submit a rollup block with block data submitted via calldata.\"}},\"version\":1}},\"settings\":{\"compilationTarget\":{\"src/Zenith.sol\":\"Zenith\"},\"evmVersion\":\"cancun\",\"libraries\":{},\"metadata\":{\"bytecodeHash\":\"ipfs\"},\"optimizer\":{\"enabled\":true,\"runs\":200},\"remappings\":[\":@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts/\",\":ds-test/=lib/forge-std/lib/ds-test/src/\",\":erc4626-tests/=lib/openzeppelin-contracts/lib/erc4626-tests/\",\":forge-std/=lib/forge-std/src/\",\":openzeppelin-contracts/=lib/openzeppelin-contracts/\"]},\"sources\":{\"lib/openzeppelin-contracts/contracts/access/AccessControl.sol\":{\"keccak256\":\"0xa0e92d42942f4f57c5be50568dac11e9d00c93efcb458026e18d2d9b9b2e7308\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://46326c0bb1e296b67185e81c918e0b40501b8b6386165855df0a3f3c634b6a80\",\"dweb:/ipfs/QmTwyrDYtsxsk6pymJTK94PnEpzsmkpUxFuzEiakDopy4Z\"]},\"lib/openzeppelin-contracts/contracts/access/IAccessControl.sol\":{\"keccak256\":\"0xc503b1464e90b1cf79d81239f719f81c35ff646b17b638c87fe87a1d7bc5d94d\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://381076837654e98f1d5dfc3909a3ebb80
e2c86a97d662b507320701e09cb7a60\",\"dweb:/ipfs/QmWGwdWe9JWx2ae3n8EhWuY6ipWo6shVg9bct6y5og7v9Y\"]},\"lib/openzeppelin-contracts/contracts/access/extensions/AccessControlDefaultAdminRules.sol\":{\"keccak256\":\"0xd5e43578dce2678fbd458e1221dc37b20e983ecce4a314b422704f07d6015c5b\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://9ea4d9ae3392dc9db1ef4d7ebef84ce7fa243dc14abb46e68eb2eb60d2cd0e93\",\"dweb:/ipfs/QmRfjyDoLWF74EgmpcGkWZM7Kx1LgHN8dZHBxAnU9vPH46\"]},\"lib/openzeppelin-contracts/contracts/access/extensions/IAccessControlDefaultAdminRules.sol\":{\"keccak256\":\"0xc2dbeddf97707bf012827013b4a072bacbe56ad3219c405e30fd2a959e8a5413\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://281289e424c30c2ea92fc25598315117410404cf76a756663ad39ba18fd38b48\",\"dweb:/ipfs/Qma3wmq2cjxpfkKKM7JrvyJzzohsNWNNWsnaf3jVNBD65v\"]},\"lib/openzeppelin-contracts/contracts/interfaces/IERC5313.sol\":{\"keccak256\":\"0x22412c268e74cc3cbf550aecc2f7456f6ac40783058e219cfe09f26f4d396621\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://0b841021f25480424d2359de4869e60e77f790f52e8e85f07aa389543024b559\",\"dweb:/ipfs/QmV7U5ehV5xe3QrbE8ErxfWSSzK1T1dGeizXvYPjWpNDGq\"]},\"lib/openzeppelin-contracts/contracts/token/ERC20/IERC20.sol\":{\"keccak256\":\"0xee2337af2dc162a973b4be6d3f7c16f06298259e0af48c5470d2839bfa8a22f4\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://30c476b4b2f405c1bb3f0bae15b006d129c80f1bfd9d0f2038160a3bb9745009\",\"dweb:/ipfs/Qmb3VcuDufv6xbHeVgksC4tHpc5gKYVqBEwjEXW72XzSvN\"]},\"lib/openzeppelin-contracts/contracts/utils/Context.sol\":{\"keccak256\":\"0x493033a8d1b176a037b2cc6a04dad01a5c157722049bbecf632ca876224dd4b2\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://6a708e8a5bdb1011c2c381c9a5cfd8a9a956d7d0a9dc1bd8bcdaf52f76ef2f12\",\"dweb:/ipfs/Qmax9WHBnVsZP46ZxEMNRQpLQnrdE4dK8LehML1Py8FowF\"]},\"lib/openzeppelin-contracts/contracts/utils/Panic.sol\":{\"keccak256\":\"0x29074fe5a74bb024c57b3570abf6c74d8bceed3438694d470fd0166a3ecd196a\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://f4f8435ccbc56e384f4cc9ac9ff491cf30a82f2beac00
e33ccc2cf8af3f77cc3\",\"dweb:/ipfs/QmUKJXxTe6nn1qfgnX8xbnboNNAPUuEmJyGqMZCKNiFBgn\"]},\"lib/openzeppelin-contracts/contracts/utils/introspection/ERC165.sol\":{\"keccak256\":\"0x6fac27fb1885a1d9fd2ce3f8fac4e44a6596ca4d44207c9ef2541ba8c941291e\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://2079378abdb36baec15c23bc2353b73a3d28d1d0610b436b0c1c4e6fa61d65c9\",\"dweb:/ipfs/QmVZkRFMzKW7sLaugKSTbMNnUBKWF3QDsoMi5uoQFyVMjf\"]},\"lib/openzeppelin-contracts/contracts/utils/introspection/IERC165.sol\":{\"keccak256\":\"0xc859863e3bda7ec3cddf6dafe2ffe91bcbe648d1395b856b839c32ee9617c44c\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://a9d5417888b873cf2225ed5d50b2a67be97c1504134a2a580512168d587ad82e\",\"dweb:/ipfs/QmNr5fTb2heFW658NZn7dDnofZgFvQTnNxKRJ3wdnR1skX\"]},\"lib/openzeppelin-contracts/contracts/utils/math/Math.sol\":{\"keccak256\":\"0x3233b02fcf2b20a41cce60a62e43c7e5a67a55b738ec1db842a82452e6aa170d\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://231c75d18bc6973533dfa7d58d2b97e504ca4e21d703a5c8b0ec31475e97db67\",\"dweb:/ipfs/QmPJ29HDuFceD1FDr4CnjYYtvaQ234wGAfojZpL3RXFG26\"]},\"lib/openzeppelin-contracts/contracts/utils/math/SafeCast.sol\":{\"keccak256\":\"0x8cd59334ed58b8884cd1f775afc9400db702e674e5d6a7a438c655b9de788d7e\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://99e62c7de7318f413b6352e3f2704ca23e7725ff144e43c8bd574d12dbf29047\",\"dweb:/ipfs/QmSEXG2rBx1VxU2uFTWdiChjDvA4osEY2mesjmoVeVhHko\"]},\"src/Passage.sol\":{\"keccak256\":\"0x81016c92006558f93c028e3d4f61ddad8ff870b956edaa19ad2ccd68ec5d292a\",\"license\":\"UNLICENSED\",\"urls\":[\"bzz-raw://dc70a7d97b18e988ce9560f4fabbf9caea3c6178f64fab056b1cf63d27bef6c5\",\"dweb:/ipfs/QmeJDLqvLdhkbWfyLHdYUPoGz7XHWw3zpe8YTCMQE9MacX\"]},\"src/Zenith.sol\":{\"keccak256\":\"0x0febef21c15ebf62421e25337341a8a11a6dd5b5dc2e9ea967a2d4769469ecd6\",\"license\":\"UNLICENSED\",\"urls\":[\"bzz-raw://405a8eb90f834ab216e96d40b8c1cfd98c4bc4e71399b09c04ef4123eb3bb1ab\",\"dweb:/ipfs/QmVakr7Upoe2tgU1jQSZUgXE1UASAuHh9kReZ2mfgCsdha\"]}},\"version\":1}","metadata":{"compil
er":{"version":"0.8.25+commit.b61c2a91"},"language":"Solidity","output":{"abi":[{"inputs":[{"internalType":"uint256","name":"defaultRollupChainId","type":"uint256"},{"internalType":"address","name":"admin","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[],"type":"error","name":"AccessControlBadConfirmation"},{"inputs":[{"internalType":"uint48","name":"schedule","type":"uint48"}],"type":"error","name":"AccessControlEnforcedDefaultAdminDelay"},{"inputs":[],"type":"error","name":"AccessControlEnforcedDefaultAdminRules"},{"inputs":[{"internalType":"address","name":"defaultAdmin","type":"address"}],"type":"error","name":"AccessControlInvalidDefaultAdmin"},{"inputs":[{"internalType":"address","name":"account","type":"address"},{"internalType":"bytes32","name":"neededRole","type":"bytes32"}],"type":"error","name":"AccessControlUnauthorizedAccount"},{"inputs":[{"internalType":"uint256","name":"expected","type":"uint256"}],"type":"error","name":"BadSequence"},{"inputs":[{"internalType":"address","name":"derivedSequencer","type":"address"}],"type":"error","name":"BadSignature"},{"inputs":[],"type":"error","name":"BlockExpired"},{"inputs":[],"type":"error","name":"OneRollupBlockPerHostBlock"},{"inputs":[],"type":"error","name":"OrderExpired"},{"inputs":[{"internalType":"uint8","name":"bits","type":"uint8"},{"internalType":"uint256","name":"value","type":"uint256"}],"type":"error","name":"SafeCastOverflowedUintDowncast"},{"inputs":[{"internalType":"bytes","name":"blockData","type":"bytes","indexed":false}],"type":"event","name":"BlockData","anonymous":false},{"inputs":[{"internalType":"address","name":"sequencer","type":"address","indexed":true},{"internalType":"struct 
Zenith.BlockHeader","name":"header","type":"tuple","components":[{"internalType":"uint256","name":"rollupChainId","type":"uint256"},{"internalType":"uint256","name":"sequence","type":"uint256"},{"internalType":"uint256","name":"confirmBy","type":"uint256"},{"internalType":"uint256","name":"gasLimit","type":"uint256"},{"internalType":"address","name":"rewardAddress","type":"address"}],"indexed":true},{"internalType":"bytes32","name":"blockDataHash","type":"bytes32","indexed":false}],"type":"event","name":"BlockSubmitted","anonymous":false},{"inputs":[],"type":"event","name":"DefaultAdminDelayChangeCanceled","anonymous":false},{"inputs":[{"internalType":"uint48","name":"newDelay","type":"uint48","indexed":false},{"internalType":"uint48","name":"effectSchedule","type":"uint48","indexed":false}],"type":"event","name":"DefaultAdminDelayChangeScheduled","anonymous":false},{"inputs":[],"type":"event","name":"DefaultAdminTransferCanceled","anonymous":false},{"inputs":[{"internalType":"address","name":"newAdmin","type":"address","indexed":true},{"internalType":"uint48","name":"acceptSchedule","type":"uint48","indexed":false}],"type":"event","name":"DefaultAdminTransferScheduled","anonymous":false},{"inputs":[{"internalType":"uint256","name":"rollupChainId","type":"uint256","indexed":false},{"internalType":"address","name":"token","type":"address","indexed":true},{"internalType":"address","name":"rollupRecipient","type":"address","indexed":true},{"internalType":"uint256","name":"amount","type":"uint256","indexed":false}],"type":"event","name":"Enter","anonymous":false},{"inputs":[{"internalType":"uint256","name":"rollupChainId","type":"uint256","indexed":false},{"internalType":"address","name":"token","type":"address","indexed":true},{"internalType":"address","name":"hostRecipient","type":"address","indexed":true},{"internalType":"uint256","name":"amount","type":"uint256","indexed":false}],"type":"event","name":"ExitFilled","anonymous":false},{"inputs":[{"internalType":"bytes
32","name":"role","type":"bytes32","indexed":true},{"internalType":"bytes32","name":"previousAdminRole","type":"bytes32","indexed":true},{"internalType":"bytes32","name":"newAdminRole","type":"bytes32","indexed":true}],"type":"event","name":"RoleAdminChanged","anonymous":false},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32","indexed":true},{"internalType":"address","name":"account","type":"address","indexed":true},{"internalType":"address","name":"sender","type":"address","indexed":true}],"type":"event","name":"RoleGranted","anonymous":false},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32","indexed":true},{"internalType":"address","name":"account","type":"address","indexed":true},{"internalType":"address","name":"sender","type":"address","indexed":true}],"type":"event","name":"RoleRevoked","anonymous":false},{"inputs":[],"stateMutability":"payable","type":"fallback"},{"inputs":[],"stateMutability":"view","type":"function","name":"DEFAULT_ADMIN_ROLE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"SEQUENCER_ROLE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}]},{"inputs":[],"stateMutability":"nonpayable","type":"function","name":"acceptDefaultAdminTransfer"},{"inputs":[{"internalType":"address","name":"newAdmin","type":"address"}],"stateMutability":"nonpayable","type":"function","name":"beginDefaultAdminTransfer"},{"inputs":[{"internalType":"struct 
Zenith.BlockHeader","name":"header","type":"tuple","components":[{"internalType":"uint256","name":"rollupChainId","type":"uint256"},{"internalType":"uint256","name":"sequence","type":"uint256"},{"internalType":"uint256","name":"confirmBy","type":"uint256"},{"internalType":"uint256","name":"gasLimit","type":"uint256"},{"internalType":"address","name":"rewardAddress","type":"address"}]},{"internalType":"bytes32","name":"blockDataHash","type":"bytes32"}],"stateMutability":"view","type":"function","name":"blockCommitment","outputs":[{"internalType":"bytes32","name":"commit","type":"bytes32"}]},{"inputs":[],"stateMutability":"nonpayable","type":"function","name":"cancelDefaultAdminTransfer"},{"inputs":[{"internalType":"uint48","name":"newDelay","type":"uint48"}],"stateMutability":"nonpayable","type":"function","name":"changeDefaultAdminDelay"},{"inputs":[],"stateMutability":"view","type":"function","name":"defaultAdmin","outputs":[{"internalType":"address","name":"","type":"address"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"defaultAdminDelay","outputs":[{"internalType":"uint48","name":"","type":"uint48"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"defaultAdminDelayIncreaseWait","outputs":[{"internalType":"uint48","name":"","type":"uint48"}]},{"inputs":[{"internalType":"uint256","name":"rollupChainId","type":"uint256"},{"internalType":"address","name":"rollupRecipient","type":"address"},{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"stateMutability":"payable","type":"function","name":"enter"},{"inputs":[{"internalType":"uint256","name":"rollupChainId","type":"uint256"},{"internalType":"address","name":"rollupRecipient","type":"address"}],"stateMutability":"payable","type":"function","name":"enter"},{"inputs":[{"internalType":"struct 
Passage.ExitOrder[]","name":"orders","type":"tuple[]","components":[{"internalType":"uint256","name":"rollupChainId","type":"uint256"},{"internalType":"address","name":"token","type":"address"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}]}],"stateMutability":"payable","type":"function","name":"fulfillExits"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"}],"stateMutability":"view","type":"function","name":"getRoleAdmin","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}]},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"stateMutability":"nonpayable","type":"function","name":"grantRole"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"stateMutability":"view","type":"function","name":"hasRole","outputs":[{"internalType":"bool","name":"","type":"bool"}]},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function","name":"lastSubmittedAtBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function","name":"nextSequence","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"owner","outputs":[{"internalType":"address","name":"","type":"address"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"pendingDefaultAdmin","outputs":[{"internalType":"address","name":"newAdmin","type":"address"},{"internalType":"uint48","name":"schedule","type":"uint48"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"pendingDefaultAdminDelay","outputs":[{"internalType":"uint48","name":"newDelay","type":"uint48"},{"internalType":"uint48","name":"s
chedule","type":"uint48"}]},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"stateMutability":"nonpayable","type":"function","name":"renounceRole"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"stateMutability":"nonpayable","type":"function","name":"revokeRole"},{"inputs":[],"stateMutability":"nonpayable","type":"function","name":"rollbackDefaultAdminDelay"},{"inputs":[{"internalType":"struct Zenith.BlockHeader","name":"header","type":"tuple","components":[{"internalType":"uint256","name":"rollupChainId","type":"uint256"},{"internalType":"uint256","name":"sequence","type":"uint256"},{"internalType":"uint256","name":"confirmBy","type":"uint256"},{"internalType":"uint256","name":"gasLimit","type":"uint256"},{"internalType":"address","name":"rewardAddress","type":"address"}]},{"internalType":"bytes32","name":"blockDataHash","type":"bytes32"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"},{"internalType":"bytes","name":"blockData","type":"bytes"}],"stateMutability":"nonpayable","type":"function","name":"submitBlock"},{"inputs":[{"internalType":"bytes4","name":"interfaceId","type":"bytes4"}],"stateMutability":"view","type":"function","name":"supportsInterface","outputs":[{"internalType":"bool","name":"","type":"bool"}]},{"inputs":[],"stateMutability":"payable","type":"receive"}],"devdoc":{"kind":"dev","methods":{"acceptDefaultAdminTransfer()":{"details":"Completes a {defaultAdmin} transfer previously started with {beginDefaultAdminTransfer}. After calling the function: - `DEFAULT_ADMIN_ROLE` should be granted to the caller. - `DEFAULT_ADMIN_ROLE` should be revoked from the previous holder. - {pendingDefaultAdmin} should be reset to zero values. 
Requirements: - Only can be called by the {pendingDefaultAdmin}'s `newAdmin`. - The {pendingDefaultAdmin}'s `acceptSchedule` should've passed."},"beginDefaultAdminTransfer(address)":{"details":"Starts a {defaultAdmin} transfer by setting a {pendingDefaultAdmin} scheduled for acceptance after the current timestamp plus a {defaultAdminDelay}. Requirements: - Only can be called by the current {defaultAdmin}. Emits a DefaultAdminRoleChangeStarted event."},"blockCommitment((uint256,uint256,uint256,uint256,address),bytes32)":{"params":{"header":"- the header information for the rollup block."},"returns":{"commit":"- the hash of the encoded block details."}},"cancelDefaultAdminTransfer()":{"details":"Cancels a {defaultAdmin} transfer previously started with {beginDefaultAdminTransfer}. A {pendingDefaultAdmin} not yet accepted can also be cancelled with this function. Requirements: - Only can be called by the current {defaultAdmin}. May emit a DefaultAdminTransferCanceled event."},"changeDefaultAdminDelay(uint48)":{"details":"Initiates a {defaultAdminDelay} update by setting a {pendingDefaultAdminDelay} scheduled for getting into effect after the current timestamp plus a {defaultAdminDelay}. This function guarantees that any call to {beginDefaultAdminTransfer} done between the timestamp this method is called and the {pendingDefaultAdminDelay} effect schedule will use the current {defaultAdminDelay} set before calling. The {pendingDefaultAdminDelay}'s effect schedule is defined in a way that waiting until the schedule and then calling {beginDefaultAdminTransfer} with the new delay will take at least the same as another {defaultAdmin} complete transfer (including acceptance). The schedule is designed for two scenarios: - When the delay is changed for a larger one the schedule is `block.timestamp + newDelay` capped by {defaultAdminDelayIncreaseWait}. - When the delay is changed for a shorter one, the schedule is `block.timestamp + (current delay - new delay)`. 
A {pendingDefaultAdminDelay} that never got into effect will be canceled in favor of a new scheduled change. Requirements: - Only can be called by the current {defaultAdmin}. Emits a DefaultAdminDelayChangeScheduled event and may emit a DefaultAdminDelayChangeCanceled event."},"constructor":{"details":"See `AccessControlDefaultAdminRules` for information on contract administration. - Admin role can grant and revoke Sequencer roles. - Admin role can be transferred via two-step process with a 1 day timelock.","params":{"admin":"- the address that will be the initial admin."}},"defaultAdmin()":{"details":"Returns the address of the current `DEFAULT_ADMIN_ROLE` holder."},"defaultAdminDelay()":{"details":"Returns the delay required to schedule the acceptance of a {defaultAdmin} transfer started. This delay will be added to the current timestamp when calling {beginDefaultAdminTransfer} to set the acceptance schedule. NOTE: If a delay change has been scheduled, it will take effect as soon as the schedule passes, making this function returns the new delay. See {changeDefaultAdminDelay}."},"defaultAdminDelayIncreaseWait()":{"details":"Maximum time in seconds for an increase to {defaultAdminDelay} (that is scheduled using {changeDefaultAdminDelay}) to take effect. Default to 5 days. When the {defaultAdminDelay} is scheduled to be increased, it goes into effect after the new delay has passed with the purpose of giving enough time for reverting any accidental change (i.e. using milliseconds instead of seconds) that may lock the contract. However, to avoid excessive schedules, the wait is capped by this function and it can be overrode for a custom {defaultAdminDelay} increase scheduling. IMPORTANT: Make sure to add a reasonable amount of time while overriding this value, otherwise, there's a risk of setting a high new delay that goes into effect almost immediately without the possibility of human intervention in the case of an input error (eg. 
set milliseconds instead of seconds)."},"enter(uint256,address)":{"custom:emits":"Enter indicating the amount of Ether to mint on the rollup & its recipient.","details":"Permanently burns the entire msg.value by locking it in this contract.","params":{"rollupChainId":"- The rollup chain to enter.","rollupRecipient":"- The recipient of the Ether on the rollup."}},"enter(uint256,address,address,uint256)":{"custom:emits":"Enter indicating the amount of tokens to mint on the rollup & its recipient.","details":"Permanently burns the token amount by locking it in this contract.","params":{"amount":"- The amount of the ERC20 token to transfer to the rollup.","rollupChainId":"- The rollup chain to enter.","rollupRecipient":"- The recipient of the Ether on the rollup.","token":"- The address of the ERC20 token on the Host."}},"fulfillExits((uint256,address,address,uint256)[])":{"custom:emits":"ExitFilled for each exit order fulfilled.","details":"Builder SHOULD call `filfillExits` atomically with `submitBlock`. Builder SHOULD set a block expiration time that is AT MOST the minimum of all exit order deadlines; this way, `fulfillExits` + `submitBlock` will revert atomically on mainnet if any exit orders have expired. Otherwise, `filfillExits` may mine on mainnet, while `submitExit` reverts on the rollup, and the Builder can't collect the corresponding value on the rollup.Called by the Builder atomically with a transaction calling `submitBlock`. 
The user-submitted transactions initiating the ExitOrders on the rollup must be included by the Builder in the rollup block submitted via `submitBlock`.The user transfers tokenIn on the rollup, and receives tokenOut on host.The Builder receives tokenIn on the rollup, and transfers tokenOut to the user on host.The rollup STF MUST NOT apply `submitExit` transactions to the rollup state UNLESS a corresponding ExitFilled event is emitted on host in the same block.If the user submits multiple exit transactions for the same token in the same rollup block, the Builder may transfer the cumulative tokenOut to the user in a single ExitFilled event. The rollup STF will apply the user's exit transactions on the rollup up to the point that sum(tokenOut) is lte the ExitFilled amount. TODO: add option to fulfill ExitOrders with native ETH? or is it sufficient to only allow users to exit via WETH?","params":{"orders":"The exit orders to fulfill"}},"getRoleAdmin(bytes32)":{"details":"Returns the admin role that controls `role`. See {grantRole} and {revokeRole}. To change a role's admin, use {_setRoleAdmin}."},"grantRole(bytes32,address)":{"details":"See {AccessControl-grantRole}. Reverts for `DEFAULT_ADMIN_ROLE`."},"hasRole(bytes32,address)":{"details":"Returns `true` if `account` has been granted `role`."},"owner()":{"details":"See {IERC5313-owner}."},"pendingDefaultAdmin()":{"details":"Returns a tuple of a `newAdmin` and an accept schedule. After the `schedule` passes, the `newAdmin` will be able to accept the {defaultAdmin} role by calling {acceptDefaultAdminTransfer}, completing the role transfer. A zero value only in `acceptSchedule` indicates no pending admin transfer. NOTE: A zero address `newAdmin` means that {defaultAdmin} is being renounced."},"pendingDefaultAdminDelay()":{"details":"Returns a tuple of `newDelay` and an effect schedule. 
After the `schedule` passes, the `newDelay` will get into effect immediately for every new {defaultAdmin} transfer started with {beginDefaultAdminTransfer}. A zero value only in `effectSchedule` indicates no pending delay change. NOTE: A zero value only for `newDelay` means that the next {defaultAdminDelay} will be zero after the effect schedule."},"renounceRole(bytes32,address)":{"details":"See {AccessControl-renounceRole}. For the `DEFAULT_ADMIN_ROLE`, it only allows renouncing in two steps by first calling {beginDefaultAdminTransfer} to the `address(0)`, so it's required that the {pendingDefaultAdmin} schedule has also passed when calling this function. After its execution, it will not be possible to call `onlyRole(DEFAULT_ADMIN_ROLE)` functions. NOTE: Renouncing `DEFAULT_ADMIN_ROLE` will leave the contract without a {defaultAdmin}, thereby disabling any functionality that is only available for it, and the possibility of reassigning a non-administrated role."},"revokeRole(bytes32,address)":{"details":"See {AccessControl-revokeRole}. Reverts for `DEFAULT_ADMIN_ROLE`."},"rollbackDefaultAdminDelay()":{"details":"Cancels a scheduled {defaultAdminDelay} change. Requirements: - Only can be called by the current {defaultAdmin}. 
May emit a DefaultAdminDelayChangeCanceled event."},"submitBlock((uint256,uint256,uint256,uint256,address),bytes32,uint8,bytes32,bytes32,bytes)":{"custom:emits":"BlockSubmitted if the block is successfully submitted.BlockData to expose the block calldata; as a convenience until calldata tracing is implemented in the Node.","custom:reverts":"BadSequence if the sequence number is not the next block for the given rollup chainId.BlockExpired if the confirmBy time has passed.BadSignature if the signer is not a permissioned sequencer, OR if the signature provided commits to a different header.OneRollupBlockPerHostBlock if attempting to submit a second rollup block within one host block.","details":"Blocks are submitted by Builders, with an attestation to the block data signed by a Sequencer.including blockDataHash allows the sequencer to sign over finalized block data, without needing to calldatacopy the `blockData` param.","params":{"blockData":"- block data information. could be packed blob hashes, or direct rlp-encoded transctions. blockData is ignored by the contract logic.","blockDataHash":"- keccak256(blockData). 
the Node will discard the block if the hash doens't match.","header":"- the header information for the rollup block.","r":"- the r component of the Sequencer's ECSDA signature over the block header.","s":"- the s component of the Sequencer's ECSDA signature over the block header.","v":"- the v component of the Sequencer's ECSDA signature over the block header."}},"supportsInterface(bytes4)":{"details":"See {IERC165-supportsInterface}."}},"version":1},"userdoc":{"kind":"user","methods":{"SEQUENCER_ROLE()":{"notice":"Role that allows a key to sign commitments to rollup blocks."},"blockCommitment((uint256,uint256,uint256,uint256,address),bytes32)":{"notice":"Construct hash of block details that the sequencer signs."},"constructor":{"notice":"Initializes the Admin role."},"enter(uint256,address)":{"notice":"Allows native Ether to enter the rollup."},"enter(uint256,address,address,uint256)":{"notice":"Allows ERC20s to enter the rollup."},"fulfillExits((uint256,address,address,uint256)[])":{"notice":"Fulfills exit orders by transferring tokenOut to the recipient"},"lastSubmittedAtBlock(uint256)":{"notice":"The host block number that a block was last submitted at for a given rollup chainId. rollupChainId => host blockNumber that block was last submitted at"},"nextSequence(uint256)":{"notice":"The sequence number of the next block that can be submitted for a given rollup chainId. 
rollupChainId => nextSequence number"},"submitBlock((uint256,uint256,uint256,uint256,address),bytes32,uint8,bytes32,bytes32,bytes)":{"notice":"Submit a rollup block with block data submitted via calldata."}},"version":1}},"settings":{"remappings":["@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts/","ds-test/=lib/forge-std/lib/ds-test/src/","erc4626-tests/=lib/openzeppelin-contracts/lib/erc4626-tests/","forge-std/=lib/forge-std/src/","openzeppelin-contracts/=lib/openzeppelin-contracts/"],"optimizer":{"enabled":true,"runs":200},"metadata":{"bytecodeHash":"ipfs"},"compilationTarget":{"src/Zenith.sol":"Zenith"},"evmVersion":"cancun","libraries":{}},"sources":{"lib/openzeppelin-contracts/contracts/access/AccessControl.sol":{"keccak256":"0xa0e92d42942f4f57c5be50568dac11e9d00c93efcb458026e18d2d9b9b2e7308","urls":["bzz-raw://46326c0bb1e296b67185e81c918e0b40501b8b6386165855df0a3f3c634b6a80","dweb:/ipfs/QmTwyrDYtsxsk6pymJTK94PnEpzsmkpUxFuzEiakDopy4Z"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/access/IAccessControl.sol":{"keccak256":"0xc503b1464e90b1cf79d81239f719f81c35ff646b17b638c87fe87a1d7bc5d94d","urls":["bzz-raw://381076837654e98f1d5dfc3909a3ebb80e2c86a97d662b507320701e09cb7a60","dweb:/ipfs/QmWGwdWe9JWx2ae3n8EhWuY6ipWo6shVg9bct6y5og7v9Y"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/access/extensions/AccessControlDefaultAdminRules.sol":{"keccak256":"0xd5e43578dce2678fbd458e1221dc37b20e983ecce4a314b422704f07d6015c5b","urls":["bzz-raw://9ea4d9ae3392dc9db1ef4d7ebef84ce7fa243dc14abb46e68eb2eb60d2cd0e93","dweb:/ipfs/QmRfjyDoLWF74EgmpcGkWZM7Kx1LgHN8dZHBxAnU9vPH46"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/access/extensions/IAccessControlDefaultAdminRules.sol":{"keccak256":"0xc2dbeddf97707bf012827013b4a072bacbe56ad3219c405e30fd2a959e8a5413","urls":["bzz-raw://281289e424c30c2ea92fc25598315117410404cf76a756663ad39ba18fd38b48","dweb:/ipfs/Qma3wmq2cjxpfkKKM7JrvyJzzohsNWNNWsnaf3jVNBD65v"],"license":"MIT"},"lib/openzeppelin-contracts/c
ontracts/interfaces/IERC5313.sol":{"keccak256":"0x22412c268e74cc3cbf550aecc2f7456f6ac40783058e219cfe09f26f4d396621","urls":["bzz-raw://0b841021f25480424d2359de4869e60e77f790f52e8e85f07aa389543024b559","dweb:/ipfs/QmV7U5ehV5xe3QrbE8ErxfWSSzK1T1dGeizXvYPjWpNDGq"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/token/ERC20/IERC20.sol":{"keccak256":"0xee2337af2dc162a973b4be6d3f7c16f06298259e0af48c5470d2839bfa8a22f4","urls":["bzz-raw://30c476b4b2f405c1bb3f0bae15b006d129c80f1bfd9d0f2038160a3bb9745009","dweb:/ipfs/Qmb3VcuDufv6xbHeVgksC4tHpc5gKYVqBEwjEXW72XzSvN"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/Context.sol":{"keccak256":"0x493033a8d1b176a037b2cc6a04dad01a5c157722049bbecf632ca876224dd4b2","urls":["bzz-raw://6a708e8a5bdb1011c2c381c9a5cfd8a9a956d7d0a9dc1bd8bcdaf52f76ef2f12","dweb:/ipfs/Qmax9WHBnVsZP46ZxEMNRQpLQnrdE4dK8LehML1Py8FowF"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/Panic.sol":{"keccak256":"0x29074fe5a74bb024c57b3570abf6c74d8bceed3438694d470fd0166a3ecd196a","urls":["bzz-raw://f4f8435ccbc56e384f4cc9ac9ff491cf30a82f2beac00e33ccc2cf8af3f77cc3","dweb:/ipfs/QmUKJXxTe6nn1qfgnX8xbnboNNAPUuEmJyGqMZCKNiFBgn"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/introspection/ERC165.sol":{"keccak256":"0x6fac27fb1885a1d9fd2ce3f8fac4e44a6596ca4d44207c9ef2541ba8c941291e","urls":["bzz-raw://2079378abdb36baec15c23bc2353b73a3d28d1d0610b436b0c1c4e6fa61d65c9","dweb:/ipfs/QmVZkRFMzKW7sLaugKSTbMNnUBKWF3QDsoMi5uoQFyVMjf"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/introspection/IERC165.sol":{"keccak256":"0xc859863e3bda7ec3cddf6dafe2ffe91bcbe648d1395b856b839c32ee9617c44c","urls":["bzz-raw://a9d5417888b873cf2225ed5d50b2a67be97c1504134a2a580512168d587ad82e","dweb:/ipfs/QmNr5fTb2heFW658NZn7dDnofZgFvQTnNxKRJ3wdnR1skX"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/math/Math.sol":{"keccak256":"0x3233b02fcf2b20a41cce60a62e43c7e5a67a55b738ec1db842a82452e6aa170d","urls":["bzz-raw://231c75d18bc6973533dfa
7d58d2b97e504ca4e21d703a5c8b0ec31475e97db67","dweb:/ipfs/QmPJ29HDuFceD1FDr4CnjYYtvaQ234wGAfojZpL3RXFG26"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/math/SafeCast.sol":{"keccak256":"0x8cd59334ed58b8884cd1f775afc9400db702e674e5d6a7a438c655b9de788d7e","urls":["bzz-raw://99e62c7de7318f413b6352e3f2704ca23e7725ff144e43c8bd574d12dbf29047","dweb:/ipfs/QmSEXG2rBx1VxU2uFTWdiChjDvA4osEY2mesjmoVeVhHko"],"license":"MIT"},"src/Passage.sol":{"keccak256":"0x81016c92006558f93c028e3d4f61ddad8ff870b956edaa19ad2ccd68ec5d292a","urls":["bzz-raw://dc70a7d97b18e988ce9560f4fabbf9caea3c6178f64fab056b1cf63d27bef6c5","dweb:/ipfs/QmeJDLqvLdhkbWfyLHdYUPoGz7XHWw3zpe8YTCMQE9MacX"],"license":"UNLICENSED"},"src/Zenith.sol":{"keccak256":"0x0febef21c15ebf62421e25337341a8a11a6dd5b5dc2e9ea967a2d4769469ecd6","urls":["bzz-raw://405a8eb90f834ab216e96d40b8c1cfd98c4bc4e71399b09c04ef4123eb3bb1ab","dweb:/ipfs/QmVakr7Upoe2tgU1jQSZUgXE1UASAuHh9kReZ2mfgCsdha"],"license":"UNLICENSED"}},"version":1},"id":35} \ No newline at end of file diff --git a/examples/exex/rollup/src/execution.rs b/examples/exex/rollup/src/execution.rs new file mode 100644 index 0000000000000..98a8e378c7bd1 --- /dev/null +++ b/examples/exex/rollup/src/execution.rs @@ -0,0 +1,488 @@ +use alloy_consensus::{SidecarCoder, SimpleCoder}; +use alloy_rlp::Decodable as _; +use eyre::OptionExt; +use reth::transaction_pool::TransactionPool; +use reth_interfaces::executor::BlockValidationError; +use reth_node_api::{ConfigureEvm, ConfigureEvmEnv}; +use reth_node_ethereum::EthEvmConfig; +use reth_primitives::{ + constants, + eip4844::kzg_to_versioned_hash, + keccak256, + revm::env::fill_tx_env, + revm_primitives::{CfgEnvWithHandlerCfg, EVMError, ExecutionResult, ResultAndState}, + Address, Block, BlockWithSenders, Bytes, Hardfork, Header, Receipt, TransactionSigned, TxType, + B256, U256, +}; +use reth_revm::{ + db::{states::bundle_state::BundleRetention, BundleState}, + DBBox, DatabaseCommit, Evm, StateBuilder, StateDBBox, +}; +use 
reth_tracing::tracing::debug; + +use crate::{db::Database, RollupContract, CHAIN_ID, CHAIN_SPEC}; + +/// Execute a rollup block and return (block with recovered senders)[BlockWithSenders], (bundle +/// state)[BundleState] and list of (receipts)[Receipt]. +pub async fn execute_block( + db: &mut Database, + pool: &Pool, + tx: &TransactionSigned, + header: &RollupContract::BlockHeader, + block_data: Bytes, + block_data_hash: B256, +) -> eyre::Result<(BlockWithSenders, BundleState, Vec, Vec)> { + if header.rollupChainId != U256::from(CHAIN_ID) { + eyre::bail!("Invalid rollup chain ID") + } + + // Construct header + let header = construct_header(db, header)?; + + // Decode transactions + let transactions = decode_transactions(pool, tx, block_data, block_data_hash).await?; + + // Configure EVM + let evm_config = EthEvmConfig::default(); + let mut evm = configure_evm(&evm_config, db, &header); + + // Execute transactions + let (executed_txs, receipts, results) = execute_transactions(&mut evm, &header, transactions)?; + + // Construct block and recover senders + let block = Block { header, body: executed_txs, ..Default::default() } + .with_recovered_senders() + .ok_or_eyre("failed to recover senders")?; + + let bundle = evm.db_mut().take_bundle(); + + Ok((block, bundle, receipts, results)) +} + +/// Construct header from the given rollup header. +fn construct_header(db: &Database, header: &RollupContract::BlockHeader) -> eyre::Result
{ + let parent_block = if !header.sequence.is_zero() { + db.get_block(header.sequence - U256::from(1))? + } else { + None + }; + + let block_number = u64::try_from(header.sequence)?; + + // Calculate base fee per gas for EIP-1559 transactions + let base_fee_per_gas = if CHAIN_SPEC.fork(Hardfork::London).transitions_at_block(block_number) { + constants::EIP1559_INITIAL_BASE_FEE + } else { + parent_block + .as_ref() + .ok_or(eyre::eyre!("parent block not found"))? + .header + .next_block_base_fee(CHAIN_SPEC.base_fee_params_at_block(block_number)) + .ok_or(eyre::eyre!("failed to calculate base fee"))? + }; + + // Construct header + Ok(Header { + parent_hash: parent_block.map(|block| block.header.hash()).unwrap_or_default(), + number: block_number, + gas_limit: u64::try_from(header.gasLimit)?, + timestamp: u64::try_from(header.confirmBy)?, + base_fee_per_gas: Some(base_fee_per_gas), + ..Default::default() + }) +} + +/// Configure EVM with the given database and header. +fn configure_evm<'a>( + config: &'a EthEvmConfig, + db: &'a mut Database, + header: &Header, +) -> Evm<'a, (), StateDBBox<'a, eyre::Report>> { + let mut evm = config.evm( + StateBuilder::new_with_database(Box::new(db) as DBBox<'_, eyre::Report>) + .with_bundle_update() + .build(), + ); + evm.db_mut().set_state_clear_flag( + CHAIN_SPEC.fork(Hardfork::SpuriousDragon).active_at_block(header.number), + ); + + let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(evm.cfg().clone(), evm.spec_id()); + EthEvmConfig::fill_cfg_and_block_env( + &mut cfg, + evm.block_mut(), + &CHAIN_SPEC, + header, + U256::ZERO, + ); + *evm.cfg_mut() = cfg.cfg_env; + + evm +} + +/// Decode transactions from the block data and recover senders. +/// - If the transaction is a blob-carrying one, decode the blobs either using the local transaction +/// pool, or querying Blobscan. +/// - If the transaction is a regular one, decode the block data directly. 
+async fn decode_transactions( + pool: &Pool, + tx: &TransactionSigned, + block_data: Bytes, + block_data_hash: B256, +) -> eyre::Result> { + // Get raw transactions either from the blobs, or directly from the block data + let raw_transactions = if matches!(tx.tx_type(), TxType::Eip4844) { + let blobs: Vec<_> = if let Some(sidecar) = pool.get_blob(tx.hash)? { + // Try to get blobs from the transaction pool + sidecar.blobs.into_iter().zip(sidecar.commitments).collect() + } else { + // If transaction is not found in the pool, try to get blobs from Blobscan + let blobscan_client = foundry_blob_explorers::Client::holesky(); + let sidecar = blobscan_client.transaction(tx.hash).await?.blob_sidecar(); + sidecar + .blobs + .into_iter() + .map(|blob| (*blob).into()) + .zip(sidecar.commitments.into_iter().map(|commitment| (*commitment).into())) + .collect() + }; + + // Decode blob hashes from block data + let blob_hashes = Vec::::decode(&mut block_data.as_ref())?; + + // Filter blobs that are present in the block data + let blobs = blobs + .into_iter() + // Convert blob KZG commitments to versioned hashes + .map(|(blob, commitment)| (blob, kzg_to_versioned_hash((*commitment).into()))) + // Filter only blobs that are present in the block data + .filter(|(_, hash)| blob_hashes.contains(hash)) + .map(|(blob, _)| blob) + .collect::>(); + if blobs.len() != blob_hashes.len() { + eyre::bail!("some blobs not found") + } + + // Decode blobs and concatenate them to get the raw transactions + let data = SimpleCoder::default() + .decode_all(&blobs) + .ok_or(eyre::eyre!("failed to decode blobs"))? + .concat(); + + data.into() + } else { + block_data + }; + + let raw_transaction_hash = keccak256(&raw_transactions); + if raw_transaction_hash != block_data_hash { + eyre::bail!("block data hash mismatch") + } + + // Decode block data, filter only transactions with the correct chain ID and recover senders + let transactions = Vec::::decode(&mut raw_transactions.as_ref())? 
+ .into_iter() + .filter(|tx| tx.chain_id() == Some(CHAIN_ID)) + .map(|tx| { + let sender = tx.recover_signer().ok_or(eyre::eyre!("failed to recover signer"))?; + Ok((tx, sender)) + }) + .collect::>()?; + + Ok(transactions) +} + +/// Execute transactions and return the list of executed transactions, receipts and +/// execution results. +fn execute_transactions( + evm: &mut Evm<'_, (), StateDBBox<'_, eyre::Report>>, + header: &Header, + transactions: Vec<(TransactionSigned, Address)>, +) -> eyre::Result<(Vec, Vec, Vec)> { + let mut receipts = Vec::with_capacity(transactions.len()); + let mut executed_txs = Vec::with_capacity(transactions.len()); + let mut results = Vec::with_capacity(transactions.len()); + if !transactions.is_empty() { + let mut cumulative_gas_used = 0; + for (transaction, sender) in transactions { + // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, + // must be no greater than the block’s gasLimit. + let block_available_gas = header.gas_limit - cumulative_gas_used; + if transaction.gas_limit() > block_available_gas { + return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { + transaction_gas_limit: transaction.gas_limit(), + block_available_gas, + } + .into()) + } + // Execute transaction. + // Fill revm structure. 
+ fill_tx_env(evm.tx_mut(), &transaction, sender); + + let ResultAndState { result, state } = match evm.transact() { + Ok(result) => result, + Err(err) => { + match err { + EVMError::Transaction(err) => { + // if the transaction is invalid, we can skip it + debug!(%err, ?transaction, "Skipping invalid transaction"); + continue + } + err => { + // this is an error that we should treat as fatal for this attempt + eyre::bail!(err) + } + } + } + }; + + debug!(?transaction, ?result, ?state, "Executed transaction"); + + evm.db_mut().commit(state); + + // append gas used + cumulative_gas_used += result.gas_used(); + + // Push transaction changeset and calculate header bloom filter for receipt. + #[allow(clippy::needless_update)] // side-effect of optimism fields + receipts.push(Receipt { + tx_type: transaction.tx_type(), + success: result.is_success(), + cumulative_gas_used, + logs: result.logs().iter().cloned().map(Into::into).collect(), + ..Default::default() + }); + + // append transaction to the list of executed transactions + executed_txs.push(transaction); + results.push(result); + } + + evm.db_mut().merge_transitions(BundleRetention::Reverts); + } + + Ok((executed_txs, receipts, results)) +} + +#[cfg(test)] +mod tests { + use std::time::{SystemTime, UNIX_EPOCH}; + + use alloy_consensus::{SidecarBuilder, SimpleCoder}; + use alloy_sol_types::{sol, SolCall}; + use reth::transaction_pool::{ + test_utils::{testing_pool, MockTransaction}, + TransactionOrigin, TransactionPool, + }; + use reth_interfaces::test_utils::generators::{self, sign_tx_with_key_pair}; + use reth_primitives::{ + bytes, + constants::ETH_TO_WEI, + keccak256, public_key_to_address, + revm_primitives::{AccountInfo, ExecutionResult, Output, TransactTo, TxEnv}, + BlockNumber, Receipt, SealedBlockWithSenders, Transaction, TxEip2930, TxKind, U256, + }; + use reth_revm::Evm; + use rusqlite::Connection; + use secp256k1::{Keypair, Secp256k1}; + + use crate::{ + db::Database, execute_block, 
RollupContract::BlockHeader, CHAIN_ID, + ROLLUP_SUBMITTER_ADDRESS, + }; + + sol!( + WETH, + r#" +[ + { + "constant":true, + "inputs":[ + { + "name":"", + "type":"address" + } + ], + "name":"balanceOf", + "outputs":[ + { + "name":"", + "type":"uint256" + } + ], + "payable":false, + "stateMutability":"view", + "type":"function" + } +] + "# + ); + + #[tokio::test] + async fn test_execute_block() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let mut database = Database::new(Connection::open_in_memory()?)?; + + // Create key pair + let secp = Secp256k1::new(); + let key_pair = Keypair::new(&secp, &mut generators::rng()); + let sender_address = public_key_to_address(key_pair.public_key()); + + // Deposit some ETH to the sender and insert it into database + database.upsert_account(sender_address, |_| { + Ok(AccountInfo { balance: U256::from(ETH_TO_WEI), nonce: 1, ..Default::default() }) + })?; + + // WETH deployment transaction sent using calldata + let (_, _, results) = execute_transaction( + &mut database, + key_pair, + 0, + Transaction::Eip2930(TxEip2930 { + chain_id: CHAIN_ID, + nonce: 1, + gas_limit: 1_500_000, + gas_price: 1_500_000_000, + to: TxKind::Create, + // WETH9 bytecode + input: 
bytes!("60606040526040805190810160405280600d81526020017f57726170706564204574686572000000000000000000000000000000000000008152506000908051906020019061004f9291906100c8565b506040805190810160405280600481526020017f57455448000000000000000000000000000000000000000000000000000000008152506001908051906020019061009b9291906100c8565b506012600260006101000a81548160ff021916908360ff16021790555034156100c357600080fd5b61016d565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061010957805160ff1916838001178555610137565b82800160010185558215610137579182015b8281111561013657825182559160200191906001019061011b565b5b5090506101449190610148565b5090565b61016a91905b8082111561016657600081600090555060010161014e565b5090565b90565b610c348061017c6000396000f3006060604052600436106100af576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde03146100b9578063095ea7b31461014757806318160ddd146101a157806323b872dd146101ca5780632e1a7d4d14610243578063313ce5671461026657806370a082311461029557806395d89b41146102e2578063a9059cbb14610370578063d0e30db0146103ca578063dd62ed3e146103d4575b6100b7610440565b005b34156100c457600080fd5b6100cc6104dd565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561010c5780820151818401526020810190506100f1565b50505050905090810190601f1680156101395780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015257600080fd5b610187600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061057b565b604051808215151515815260200191505060405180910390f35b34156101ac57600080fd5b6101b461066d565b6040518082815260200191505060405180910390f35b34156101d557600080fd5b610229600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061068c565b604051808215151515815260200191505060405180910390f35b341561024e57600080fd5b61026460048080359060200190919050506109d9565b005b34
1561027157600080fd5b610279610b05565b604051808260ff1660ff16815260200191505060405180910390f35b34156102a057600080fd5b6102cc600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610b18565b6040518082815260200191505060405180910390f35b34156102ed57600080fd5b6102f5610b30565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561033557808201518184015260208101905061031a565b50505050905090810190601f1680156103625780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561037b57600080fd5b6103b0600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610bce565b604051808215151515815260200191505060405180910390f35b6103d2610440565b005b34156103df57600080fd5b61042a600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610be3565b6040518082815260200191505060405180910390f35b34600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055503373ffffffffffffffffffffffffffffffffffffffff167fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c346040518082815260200191505060405180910390a2565b60008054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156105735780601f1061054857610100808354040283529160200191610573565b820191906000526020600020905b81548152906001019060200180831161055657829003601f168201915b505050505081565b600081600460003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b9258460
40518082815260200191505060405180910390a36001905092915050565b60003073ffffffffffffffffffffffffffffffffffffffff1631905090565b600081600360008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054101515156106dc57600080fd5b3373ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16141580156107b457507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205414155b156108cf5781600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561084457600080fd5b81600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055505b81600360008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555081600360008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055508273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a3600190509392505050565b80600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515610a2757600080fd5b80600360003373ffffffffffffffffff
ffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055503373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f193505050501515610ab457600080fd5b3373ffffffffffffffffffffffffffffffffffffffff167f7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65826040518082815260200191505060405180910390a250565b600260009054906101000a900460ff1681565b60036020528060005260406000206000915090505481565b60018054600181600116156101000203166002900480601f016020809104026020016040519081016040528092919081815260200182805460018160011615610100020316600290048015610bc65780601f10610b9b57610100808354040283529160200191610bc6565b820191906000526020600020905b815481529060010190602001808311610ba957829003601f168201915b505050505081565b6000610bdb33848461068c565b905092915050565b60046020528160005260406000206020528060005260406000206000915091505054815600a165627a7a72305820deb4c2ccab3c2fdca32ab3f46728389c2fe2c165d5fafa07661e4e004f6c344a0029"), + ..Default::default() + }), + BlockDataSource::Calldata + ).await?; + + let weth_address = match results.first() { + Some(ExecutionResult::Success { output: Output::Create(_, Some(address)), .. 
}) => { + *address + } + _ => eyre::bail!("WETH contract address not found"), + }; + + // WETH deposit transaction sent using blobs + execute_transaction( + &mut database, + key_pair, + 1, + Transaction::Eip2930(TxEip2930 { + chain_id: CHAIN_ID, + nonce: 2, + gas_limit: 50000, + gas_price: 1_500_000_000, + to: TxKind::Call(weth_address), + value: U256::from(0.5 * ETH_TO_WEI as f64), + input: bytes!("d0e30db0"), + ..Default::default() + }), + BlockDataSource::Blobs, + ) + .await?; + + // Verify WETH balance + let mut evm = Evm::builder() + .with_db(&mut database) + .with_tx_env(TxEnv { + caller: sender_address, + gas_limit: 50_000_000, + transact_to: TransactTo::Call(weth_address), + data: WETH::balanceOfCall::new((sender_address,)).abi_encode().into(), + ..Default::default() + }) + .build(); + let result = evm.transact().map_err(|err| eyre::eyre!(err))?.result; + assert_eq!( + result.output(), + Some(&U256::from(0.5 * ETH_TO_WEI as f64).to_be_bytes_vec().into()) + ); + drop(evm); + + // Verify nonce + let account = database.get_account(sender_address)?.unwrap(); + assert_eq!(account.nonce, 3); + + // Revert block with WETH deposit transaction + database.revert_tip_block(U256::from(1))?; + + // Verify WETH balance after revert + let mut evm = Evm::builder() + .with_db(&mut database) + .with_tx_env(TxEnv { + caller: sender_address, + gas_limit: 50_000_000, + transact_to: TransactTo::Call(weth_address), + data: WETH::balanceOfCall::new((sender_address,)).abi_encode().into(), + ..Default::default() + }) + .build(); + let result = evm.transact().map_err(|err| eyre::eyre!(err))?.result; + assert_eq!(result.output(), Some(&U256::ZERO.to_be_bytes_vec().into())); + drop(evm); + + // Verify nonce after revert + let account = database.get_account(sender_address)?.unwrap(); + assert_eq!(account.nonce, 2); + + Ok(()) + } + + enum BlockDataSource { + Calldata, + Blobs, + } + + async fn execute_transaction( + database: &mut Database, + key_pair: Keypair, + sequence: BlockNumber, 
+ tx: Transaction, + block_data_source: BlockDataSource, + ) -> eyre::Result<(SealedBlockWithSenders, Vec, Vec)> { + // Construct block header + let block_header = BlockHeader { + rollupChainId: U256::from(CHAIN_ID), + sequence: U256::from(sequence), + confirmBy: U256::from(SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs()), + gasLimit: U256::from(30_000_000), + rewardAddress: ROLLUP_SUBMITTER_ADDRESS, + }; + let encoded_transactions = + alloy_rlp::encode(vec![sign_tx_with_key_pair(key_pair, tx).envelope_encoded()]); + let block_data_hash = keccak256(&encoded_transactions); + + let pool = testing_pool(); + + let (block_data, l1_transaction) = match block_data_source { + BlockDataSource::Calldata => ( + encoded_transactions, + sign_tx_with_key_pair(key_pair, Transaction::Eip2930(TxEip2930::default())), + ), + BlockDataSource::Blobs => { + let sidecar = + SidecarBuilder::::from_slice(&encoded_transactions).build()?; + let blob_hashes = alloy_rlp::encode(sidecar.versioned_hashes().collect::>()); + + let mut mock_transaction = MockTransaction::eip4844_with_sidecar(sidecar.into()); + let transaction = + sign_tx_with_key_pair(key_pair, Transaction::from(mock_transaction.clone())); + mock_transaction.set_hash(transaction.hash); + pool.add_transaction(TransactionOrigin::Local, mock_transaction).await?; + (blob_hashes, transaction) + } + }; + + // Execute block and insert into database + let (block, bundle, receipts, results) = execute_block( + database, + &pool, + &l1_transaction, + &block_header, + block_data.into(), + block_data_hash, + ) + .await?; + let block = block.seal_slow(); + database.insert_block_with_bundle(&block, bundle)?; + + Ok((block, receipts, results)) + } +} diff --git a/examples/exex/rollup/src/main.rs b/examples/exex/rollup/src/main.rs index db33aaf726a24..f3e7f00bc67d6 100644 --- a/examples/exex/rollup/src/main.rs +++ b/examples/exex/rollup/src/main.rs @@ -2,41 +2,33 @@ //! processing deposits and storing all related data in an SQLite database. 
//! //! The rollup contract accepts blocks of transactions and deposits of ETH and is deployed on -//! Holesky at [ROLLUP_CONTRACT_ADDRESS], see . +//! Holesky at [ROLLUP_CONTRACT_ADDRESS], see . -use alloy_rlp::Decodable; use alloy_sol_types::{sol, SolEventInterface, SolInterface}; use db::Database; -use eyre::OptionExt; +use execution::execute_block; use once_cell::sync::Lazy; use reth_exex::{ExExContext, ExExEvent}; -use reth_interfaces::executor::BlockValidationError; -use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeComponents}; -use reth_node_ethereum::{EthEvmConfig, EthereumNode}; +use reth_node_api::FullNodeComponents; +use reth_node_ethereum::EthereumNode; use reth_primitives::{ - address, constants, - revm::env::fill_tx_env, - revm_primitives::{CfgEnvWithHandlerCfg, EVMError, ExecutionResult, ResultAndState}, - Address, Block, BlockWithSenders, Bytes, ChainSpec, ChainSpecBuilder, Genesis, Hardfork, - Header, Receipt, SealedBlockWithSenders, TransactionSigned, U256, + address, Address, ChainSpec, ChainSpecBuilder, Genesis, SealedBlockWithSenders, + TransactionSigned, U256, }; use reth_provider::Chain; -use reth_revm::{ - db::{states::bundle_state::BundleRetention, BundleState}, - DatabaseCommit, StateBuilder, -}; -use reth_tracing::tracing::{debug, error, info}; +use reth_tracing::tracing::{error, info}; use rusqlite::Connection; use std::sync::Arc; mod db; +mod execution; sol!(RollupContract, "rollup_abi.json"); use RollupContract::{RollupContractCalls, RollupContractEvents}; const DATABASE_PATH: &str = "rollup.db"; -const ROLLUP_CONTRACT_ADDRESS: Address = address!("74ae65DF20cB0e3BF8c022051d0Cdd79cc60890C"); -const ROLLUP_SUBMITTER_ADDRESS: Address = address!("B01042Db06b04d3677564222010DF5Bd09C5A947"); +const ROLLUP_CONTRACT_ADDRESS: Address = address!("97C0E40c6B5bb5d4fa3e2AA1C6b8bC7EA5ECAe31"); +const ROLLUP_SUBMITTER_ADDRESS: Address = address!("5b0517Dc94c413a5871536872605522E54C85a03"); const CHAIN_ID: u64 = 17001; static CHAIN_SPEC: 
Lazy> = Lazy::new(|| { Arc::new( @@ -67,7 +59,7 @@ impl Rollup { } if let Some(committed_chain) = notification.committed_chain() { - self.commit(&committed_chain)?; + self.commit(&committed_chain).await?; self.ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; } } @@ -79,7 +71,7 @@ impl Rollup { /// /// This function decodes all transactions to the rollup contract into events, executes the /// corresponding actions and inserts the results into the database. - fn commit(&mut self, chain: &Chain) -> eyre::Result<()> { + async fn commit(&mut self, chain: &Chain) -> eyre::Result<()> { let events = decode_chain_into_rollup_events(chain); for (_, tx, event) in events { @@ -87,7 +79,10 @@ impl Rollup { // A new block is submitted to the rollup contract. // The block is executed on top of existing rollup state and committed into the // database. - RollupContractEvents::BlockSubmitted(_) => { + RollupContractEvents::BlockSubmitted(RollupContract::BlockSubmitted { + blockDataHash, + .. + }) => { let call = RollupContractCalls::abi_decode(tx.input(), true)?; if let RollupContractCalls::submitBlock(RollupContract::submitBlockCall { @@ -96,12 +91,21 @@ impl Rollup { .. }) = call { - match execute_block(&mut self.db, &header, blockData) { + match execute_block( + &mut self.db, + self.ctx.pool(), + tx, + &header, + blockData, + blockDataHash, + ) + .await + { Ok((block, bundle, _, _)) => { let block = block.seal_slow(); self.db.insert_block_with_bundle(&block, bundle)?; info!( - tx_hash = %tx.hash, + tx_hash = %tx.recalculate_hash(), chain_id = %header.rollupChainId, sequence = %header.sequence, transactions = block.body.len(), @@ -111,7 +115,7 @@ impl Rollup { Err(err) => { error!( %err, - tx_hash = %tx.hash, + tx_hash = %tx.recalculate_hash(), chain_id = %header.rollupChainId, sequence = %header.sequence, "Failed to execute block" @@ -123,12 +127,17 @@ impl Rollup { // A deposit of ETH to the rollup contract. 
The deposit is added to the recipient's // balance and committed into the database. RollupContractEvents::Enter(RollupContract::Enter { + rollupChainId, token, rollupRecipient, amount, }) => { + if rollupChainId != U256::from(CHAIN_ID) { + error!(tx_hash = %tx.recalculate_hash(), "Invalid rollup chain ID"); + continue + } if token != Address::ZERO { - error!(tx_hash = %tx.hash, "Only ETH deposits are supported"); + error!(tx_hash = %tx.recalculate_hash(), "Only ETH deposits are supported"); continue } @@ -139,7 +148,7 @@ impl Rollup { })?; info!( - tx_hash = %tx.hash, + tx_hash = %tx.recalculate_hash(), %amount, recipient = %rollupRecipient, "Deposit", @@ -174,7 +183,7 @@ impl Rollup { { self.db.revert_tip_block(header.sequence)?; info!( - tx_hash = %tx.hash, + tx_hash = %tx.recalculate_hash(), chain_id = %header.rollupChainId, sequence = %header.sequence, "Block reverted" @@ -183,12 +192,17 @@ impl Rollup { } // The deposit is subtracted from the recipient's balance. RollupContractEvents::Enter(RollupContract::Enter { + rollupChainId, token, rollupRecipient, amount, }) => { + if rollupChainId != U256::from(CHAIN_ID) { + error!(tx_hash = %tx.recalculate_hash(), "Invalid rollup chain ID"); + continue + } if token != Address::ZERO { - error!(tx_hash = %tx.hash, "Only ETH deposits are supported"); + error!(tx_hash = %tx.recalculate_hash(), "Only ETH deposits are supported"); continue } @@ -199,7 +213,7 @@ impl Rollup { })?; info!( - tx_hash = %tx.hash, + tx_hash = %tx.recalculate_hash(), %amount, recipient = %rollupRecipient, "Deposit reverted", @@ -242,153 +256,6 @@ fn decode_chain_into_rollup_events( .collect() } -/// Execute a rollup block and return (block with recovered senders)[BlockWithSenders], (bundle -/// state)[BundleState] and list of (receipts)[Receipt]. 
-fn execute_block( - db: &mut Database, - header: &RollupContract::BlockHeader, - block_data: Bytes, -) -> eyre::Result<(BlockWithSenders, BundleState, Vec, Vec)> { - if header.rollupChainId != U256::from(CHAIN_ID) { - eyre::bail!("Invalid rollup chain ID") - } - - let block_number = u64::try_from(header.sequence)?; - let parent_block = if !header.sequence.is_zero() { - db.get_block(header.sequence - U256::from(1))? - } else { - None - }; - - // Calculate base fee per gas for EIP-1559 transactions - let base_fee_per_gas = if CHAIN_SPEC.fork(Hardfork::London).transitions_at_block(block_number) { - constants::EIP1559_INITIAL_BASE_FEE - } else { - parent_block - .as_ref() - .ok_or(eyre::eyre!("parent block not found"))? - .header - .next_block_base_fee(CHAIN_SPEC.base_fee_params_at_block(block_number)) - .ok_or(eyre::eyre!("failed to calculate base fee"))? - }; - - // Construct header - let header = Header { - parent_hash: parent_block.map(|block| block.header.hash()).unwrap_or_default(), - number: block_number, - gas_limit: u64::try_from(header.gasLimit)?, - timestamp: u64::try_from(header.confirmBy)?, - base_fee_per_gas: Some(base_fee_per_gas), - ..Default::default() - }; - - // Decode block data, filter only transactions with the correct chain ID and recover senders - let transactions = Vec::::decode(&mut block_data.as_ref())? - .into_iter() - .filter(|tx| tx.chain_id() == Some(CHAIN_ID)) - .map(|tx| { - let sender = tx.recover_signer().ok_or(eyre::eyre!("failed to recover signer"))?; - Ok((tx, sender)) - }) - .collect::>>()?; - - // Execute block - let state = StateBuilder::new_with_database( - Box::new(db) as Box + Send> - ) - .with_bundle_update() - .build(); - let evm_config = EthEvmConfig::default(); - let mut evm = evm_config.evm(state); - - // Set state clear flag. 
- evm.db_mut().set_state_clear_flag( - CHAIN_SPEC.fork(Hardfork::SpuriousDragon).active_at_block(header.number), - ); - - let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(evm.cfg().clone(), evm.spec_id()); - EthEvmConfig::fill_cfg_and_block_env( - &mut cfg, - evm.block_mut(), - &CHAIN_SPEC, - &header, - U256::ZERO, - ); - *evm.cfg_mut() = cfg.cfg_env; - - let mut receipts = Vec::with_capacity(transactions.len()); - let mut executed_txs = Vec::with_capacity(transactions.len()); - let mut results = Vec::with_capacity(transactions.len()); - if !transactions.is_empty() { - let mut cumulative_gas_used = 0; - for (transaction, sender) in transactions { - // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, - // must be no greater than the block’s gasLimit. - let block_available_gas = header.gas_limit - cumulative_gas_used; - if transaction.gas_limit() > block_available_gas { - // TODO(alexey): what to do here? - return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: transaction.gas_limit(), - block_available_gas, - } - .into()) - } - // Execute transaction. - // Fill revm structure. - fill_tx_env(evm.tx_mut(), &transaction, sender); - - let ResultAndState { result, state } = match evm.transact() { - Ok(result) => result, - Err(err) => { - match err { - EVMError::Transaction(err) => { - // if the transaction is invalid, we can skip it - debug!(%err, ?transaction, "Skipping invalid transaction"); - continue - } - err => { - // this is an error that we should treat as fatal for this attempt - eyre::bail!(err) - } - } - } - }; - - debug!(?transaction, ?result, ?state, "Executed transaction"); - - evm.db_mut().commit(state); - - // append gas used - cumulative_gas_used += result.gas_used(); - - // Push transaction changeset and calculate header bloom filter for receipt. 
- #[allow(clippy::needless_update)] // side-effect of optimism fields - receipts.push(Receipt { - tx_type: transaction.tx_type(), - success: result.is_success(), - cumulative_gas_used, - logs: result.logs().iter().cloned().map(Into::into).collect(), - ..Default::default() - }); - - // append transaction to the list of executed transactions - executed_txs.push(transaction); - results.push(result); - } - - evm.db_mut().merge_transitions(BundleRetention::Reverts); - } - - // Construct block and recover senders - let block = Block { header, body: executed_txs, ..Default::default() } - .with_recovered_senders() - .ok_or_eyre("failed to recover senders")?; - - let bundle = evm.db_mut().take_bundle(); - - Ok((block, bundle, receipts, results)) -} - fn main() -> eyre::Result<()> { reth::cli::Cli::parse_args().run(|builder, _| async move { let handle = builder @@ -404,184 +271,3 @@ fn main() -> eyre::Result<()> { handle.wait_for_node_exit().await }) } - -#[cfg(test)] -mod tests { - use std::time::{SystemTime, UNIX_EPOCH}; - - use alloy_sol_types::{sol, SolCall}; - use reth_interfaces::test_utils::generators::{self, sign_tx_with_key_pair}; - use reth_primitives::{ - bytes, - constants::ETH_TO_WEI, - public_key_to_address, - revm_primitives::{AccountInfo, ExecutionResult, Output, TransactTo, TxEnv}, - BlockNumber, Receipt, SealedBlockWithSenders, Transaction, TxEip2930, TxKind, U256, - }; - use reth_revm::Evm; - use rusqlite::Connection; - use secp256k1::{Keypair, Secp256k1}; - - use crate::{ - db::Database, execute_block, RollupContract::BlockHeader, CHAIN_ID, - ROLLUP_SUBMITTER_ADDRESS, - }; - - sol!( - WETH, - r#" -[ - { - "constant":true, - "inputs":[ - { - "name":"", - "type":"address" - } - ], - "name":"balanceOf", - "outputs":[ - { - "name":"", - "type":"uint256" - } - ], - "payable":false, - "stateMutability":"view", - "type":"function" - } -] - "# - ); - - #[test] - fn test_execute_block() -> eyre::Result<()> { - reth_tracing::init_test_tracing(); - - let mut 
database = Database::new(Connection::open_in_memory()?)?; - - // Create key pair - let secp = Secp256k1::new(); - let key_pair = Keypair::new(&secp, &mut generators::rng()); - let sender_address = public_key_to_address(key_pair.public_key()); - - // Deposit some ETH to the sender and insert it into database - database.upsert_account(sender_address, |_| { - Ok(AccountInfo { balance: U256::from(ETH_TO_WEI), nonce: 1, ..Default::default() }) - })?; - - // WETH deployment transaction - let (_, _, results) = execute_transaction( - &mut database, - key_pair, - 0, - Transaction::Eip2930(TxEip2930 { - chain_id: CHAIN_ID, - nonce: 1, - gas_limit: 1_500_000, - gas_price: 1_500_000_000, - to: TxKind::Create, - // WETH9 bytecode - input: bytes!("60606040526040805190810160405280600d81526020017f57726170706564204574686572000000000000000000000000000000000000008152506000908051906020019061004f9291906100c8565b506040805190810160405280600481526020017f57455448000000000000000000000000000000000000000000000000000000008152506001908051906020019061009b9291906100c8565b506012600260006101000a81548160ff021916908360ff16021790555034156100c357600080fd5b61016d565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061010957805160ff1916838001178555610137565b82800160010185558215610137579182015b8281111561013657825182559160200191906001019061011b565b5b5090506101449190610148565b5090565b61016a91905b8082111561016657600081600090555060010161014e565b5090565b90565b610c348061017c6000396000f3006060604052600436106100af576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde03146100b9578063095ea7b31461014757806318160ddd146101a157806323b872dd146101ca5780632e1a7d4d14610243578063313ce5671461026657806370a082311461029557806395d89b41146102e2578063a9059cbb14610370578063d0e30db0146103ca578063dd62ed3e146103d4575b6100b7610440565b005b34156100c457600080fd5b6100cc6104dd565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101
0c5780820151818401526020810190506100f1565b50505050905090810190601f1680156101395780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015257600080fd5b610187600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061057b565b604051808215151515815260200191505060405180910390f35b34156101ac57600080fd5b6101b461066d565b6040518082815260200191505060405180910390f35b34156101d557600080fd5b610229600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061068c565b604051808215151515815260200191505060405180910390f35b341561024e57600080fd5b61026460048080359060200190919050506109d9565b005b341561027157600080fd5b610279610b05565b604051808260ff1660ff16815260200191505060405180910390f35b34156102a057600080fd5b6102cc600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610b18565b6040518082815260200191505060405180910390f35b34156102ed57600080fd5b6102f5610b30565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561033557808201518184015260208101905061031a565b50505050905090810190601f1680156103625780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561037b57600080fd5b6103b0600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610bce565b604051808215151515815260200191505060405180910390f35b6103d2610440565b005b34156103df57600080fd5b61042a600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610be3565b6040518082815260200191505060405180910390f35b34600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055503373ffffffffffffffffffffffffffffffffffffffff167fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c346040518082815260200191505060405180910390a2565b6000
8054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156105735780601f1061054857610100808354040283529160200191610573565b820191906000526020600020905b81548152906001019060200180831161055657829003601f168201915b505050505081565b600081600460003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925846040518082815260200191505060405180910390a36001905092915050565b60003073ffffffffffffffffffffffffffffffffffffffff1631905090565b600081600360008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054101515156106dc57600080fd5b3373ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16141580156107b457507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205414155b156108cf5781600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561084457600080fd5b81600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403
925050819055505b81600360008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555081600360008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055508273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a3600190509392505050565b80600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515610a2757600080fd5b80600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055503373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f193505050501515610ab457600080fd5b3373ffffffffffffffffffffffffffffffffffffffff167f7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65826040518082815260200191505060405180910390a250565b600260009054906101000a900460ff1681565b60036020528060005260406000206000915090505481565b60018054600181600116156101000203166002900480601f016020809104026020016040519081016040528092919081815260200182805460018160011615610100020316600290048015610bc65780601f10610b9b57610100808354040283529160200191610bc6565b820191906000526020600020905b815481529060010190602001808311610ba957829003601f168201915b505050505081565b6000610bdb33848461068c565b905092915050565b60046020528160005260406000206020528060005260406000206000915091505054815600a165627a7a72305820deb4c2ccab3c2fdca32ab3f46728389c2fe2c165d5fafa07661e4e004f6c344a0029"), - ..Default::default() - }) - )?; - - let weth_address = match results.first() { - Some(ExecutionResult::Success { output: Output::Create(_, Some(address)), .. 
}) => { - *address - } - _ => eyre::bail!("WETH contract address not found"), - }; - - // WETH deposit transaction - execute_transaction( - &mut database, - key_pair, - 1, - Transaction::Eip2930(TxEip2930 { - chain_id: CHAIN_ID, - nonce: 2, - gas_limit: 50000, - gas_price: 1_500_000_000, - to: TxKind::Call(weth_address), - value: U256::from(0.5 * ETH_TO_WEI as f64), - input: bytes!("d0e30db0"), - ..Default::default() - }), - )?; - - // Verify WETH balance - let mut evm = Evm::builder() - .with_db(&mut database) - .with_tx_env(TxEnv { - caller: sender_address, - gas_limit: 50_000_000, - transact_to: TransactTo::Call(weth_address), - data: WETH::balanceOfCall::new((sender_address,)).abi_encode().into(), - ..Default::default() - }) - .build(); - let result = evm.transact().map_err(|err| eyre::eyre!(err))?.result; - assert_eq!( - result.output(), - Some(&U256::from(0.5 * ETH_TO_WEI as f64).to_be_bytes_vec().into()) - ); - drop(evm); - - // Verify nonce - let account = database.get_account(sender_address)?.unwrap(); - assert_eq!(account.nonce, 3); - - // Revert block with WETH deposit transaction - database.revert_tip_block(U256::from(1))?; - - // Verify WETH balance after revert - let mut evm = Evm::builder() - .with_db(&mut database) - .with_tx_env(TxEnv { - caller: sender_address, - gas_limit: 50_000_000, - transact_to: TransactTo::Call(weth_address), - data: WETH::balanceOfCall::new((sender_address,)).abi_encode().into(), - ..Default::default() - }) - .build(); - let result = evm.transact().map_err(|err| eyre::eyre!(err))?.result; - assert_eq!(result.output(), Some(&U256::ZERO.to_be_bytes_vec().into())); - drop(evm); - - // Verify nonce after revert - let account = database.get_account(sender_address)?.unwrap(); - assert_eq!(account.nonce, 2); - - Ok(()) - } - - fn execute_transaction( - database: &mut Database, - key_pair: Keypair, - sequence: BlockNumber, - tx: Transaction, - ) -> eyre::Result<(SealedBlockWithSenders, Vec, Vec)> { - let signed_tx = 
sign_tx_with_key_pair(key_pair, tx); - - // Construct block header and data - let block_header = BlockHeader { - rollupChainId: U256::from(CHAIN_ID), - sequence: U256::from(sequence), - confirmBy: U256::from(SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs()), - gasLimit: U256::from(30_000_000), - rewardAddress: ROLLUP_SUBMITTER_ADDRESS, - }; - let block_data = alloy_rlp::encode(vec![signed_tx.envelope_encoded()]); - - // Execute block and insert into database - let (block, bundle, receipts, results) = - execute_block(database, &block_header, block_data.into())?; - let block = block.seal_slow(); - database.insert_block_with_bundle(&block, bundle)?; - - Ok((block, receipts, results)) - } -} From a590ed7ce5af3bbf6feb3cd5a626334311e341d7 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 2 May 2024 22:34:13 +0200 Subject: [PATCH 457/700] chore(discv5): pub methods (#8057) --- crates/net/discv5/src/config.rs | 2 +- crates/net/discv5/src/filter.rs | 22 +- crates/net/discv5/src/lib.rs | 343 ++++++++++++++++---------------- 3 files changed, 182 insertions(+), 185 deletions(-) diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 3a506902ed8e4..05c2863c85572 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -143,7 +143,7 @@ impl ConfigBuilder { } /// Sets the tcp port to advertise in the local [`Enr`](discv5::enr::Enr). - fn tcp_port(mut self, port: u16) -> Self { + pub fn tcp_port(mut self, port: u16) -> Self { self.tcp_port = port; self } diff --git a/crates/net/discv5/src/filter.rs b/crates/net/discv5/src/filter.rs index 2e20e2fbdabae..d62a7584a6328 100644 --- a/crates/net/discv5/src/filter.rs +++ b/crates/net/discv5/src/filter.rs @@ -35,14 +35,12 @@ impl MustIncludeKey { /// Returns [`FilterOutcome::Ok`] if [`Enr`](discv5::Enr) contains the configured kv-pair key. 
pub fn filter(&self, enr: &discv5::Enr) -> FilterOutcome { if enr.get_raw_rlp(self.key).is_none() { - return FilterOutcome::Ignore { reason: self.ignore_reason() } + return FilterOutcome::Ignore { + reason: format!("{} fork required", String::from_utf8_lossy(self.key)), + } } FilterOutcome::Ok } - - fn ignore_reason(&self) -> String { - format!("{} fork required", String::from_utf8_lossy(self.key)) - } } /// Filter requiring that peers not advertise kv-pairs using certain keys, e.g. b"eth2". @@ -69,20 +67,18 @@ impl MustNotIncludeKeys { pub fn filter(&self, enr: &discv5::Enr) -> FilterOutcome { for key in self.keys.iter() { if matches!(key.filter(enr), FilterOutcome::Ok) { - return FilterOutcome::Ignore { reason: self.ignore_reason() } + return FilterOutcome::Ignore { + reason: format!( + "{} forks not allowed", + self.keys.iter().map(|key| String::from_utf8_lossy(key.key)).format(",") + ), + } } } FilterOutcome::Ok } - fn ignore_reason(&self) -> String { - format!( - "{} forks not allowed", - self.keys.iter().map(|key| String::from_utf8_lossy(key.key)).format(",") - ) - } - /// Adds a key that must not be present for any kv-pair in a node record. pub fn add_disallowed_keys(&mut self, keys: &[&'static [u8]]) { for key in keys { diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index b8b2eab242542..8895f8a1638e3 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -161,7 +161,7 @@ impl Discv5 { // // 1. make local enr from listen config // - let (enr, bc_enr, fork_key, ip_mode) = Self::build_local_enr(sk, &discv5_config); + let (enr, bc_enr, fork_key, ip_mode) = build_local_enr(sk, &discv5_config); trace!(target: "net::discv5", ?enr, @@ -197,14 +197,14 @@ impl Discv5 { // // 3. add boot nodes // - Self::bootstrap(bootstrap_nodes, &discv5).await?; + bootstrap(bootstrap_nodes, &discv5).await?; let metrics = Discv5Metrics::default(); // // 4. 
start bg kbuckets maintenance // - Self::spawn_populate_kbuckets_bg( + spawn_populate_kbuckets_bg( lookup_interval, bootstrap_lookup_interval, bootstrap_lookup_countdown, @@ -219,169 +219,6 @@ impl Discv5 { )) } - fn build_local_enr( - sk: &SecretKey, - config: &Config, - ) -> (Enr, NodeRecord, Option<&'static [u8]>, IpMode) { - let mut builder = discv5::enr::Enr::builder(); - - let Config { discv5_config, fork, tcp_port, other_enr_kv_pairs, .. } = config; - - let (ip_mode, socket) = match discv5_config.listen_config { - ListenConfig::Ipv4 { ip, port } => { - if ip != Ipv4Addr::UNSPECIFIED { - builder.ip4(ip); - } - builder.udp4(port); - builder.tcp4(*tcp_port); - - (IpMode::Ip4, (ip, port).into()) - } - ListenConfig::Ipv6 { ip, port } => { - if ip != Ipv6Addr::UNSPECIFIED { - builder.ip6(ip); - } - builder.udp6(port); - builder.tcp6(*tcp_port); - - (IpMode::Ip6, (ip, port).into()) - } - ListenConfig::DualStack { ipv4, ipv4_port, ipv6, ipv6_port } => { - if ipv4 != Ipv4Addr::UNSPECIFIED { - builder.ip4(ipv4); - } - builder.udp4(ipv4_port); - builder.tcp4(*tcp_port); - - if ipv6 != Ipv6Addr::UNSPECIFIED { - builder.ip6(ipv6); - } - builder.udp6(ipv6_port); - - (IpMode::DualStack, (ipv6, ipv6_port).into()) - } - }; - - // identifies which network node is on - let network_stack_id = fork.as_ref().map(|(network_stack_id, fork_value)| { - builder.add_value_rlp(network_stack_id, alloy_rlp::encode(fork_value).into()); - *network_stack_id - }); - - // add other data - for (key, value) in other_enr_kv_pairs { - builder.add_value_rlp(key, value.clone().into()); - } - - // enr v4 not to get confused with discv4, independent versioning enr and - // discovery - let enr = builder.build(sk).expect("should build enr v4"); - - // backwards compatible enr - let bc_enr = NodeRecord::from_secret_key(socket, sk); - - (enr, bc_enr, network_stack_id, ip_mode) - } - - /// Bootstraps underlying [`discv5::Discv5`] node with configured peers. 
- async fn bootstrap( - bootstrap_nodes: HashSet, - discv5: &Arc, - ) -> Result<(), Error> { - trace!(target: "net::discv5", - ?bootstrap_nodes, - "adding bootstrap nodes .." - ); - - let mut enr_requests = vec![]; - for node in bootstrap_nodes { - match node { - BootNode::Enr(node) => { - if let Err(err) = discv5.add_enr(node) { - return Err(Error::AddNodeFailed(err)) - } - } - BootNode::Enode(enode) => { - let discv5 = discv5.clone(); - enr_requests.push(async move { - if let Err(err) = discv5.request_enr(enode.to_string()).await { - debug!(target: "net::discv5", - ?enode, - %err, - "failed adding boot node" - ); - } - }) - } - } - } - - // If a session is established, the ENR is added straight away to discv5 kbuckets - Ok(_ = join_all(enr_requests).await) - } - - /// Backgrounds regular look up queries, in order to keep kbuckets populated. - fn spawn_populate_kbuckets_bg( - lookup_interval: u64, - bootstrap_lookup_interval: u64, - bootstrap_lookup_countdown: u64, - metrics: Discv5Metrics, - discv5: Arc, - ) { - task::spawn({ - let local_node_id = discv5.local_enr().node_id(); - let lookup_interval = Duration::from_secs(lookup_interval); - let metrics = metrics.discovered_peers; - let mut kbucket_index = MAX_KBUCKET_INDEX; - let pulse_lookup_interval = Duration::from_secs(bootstrap_lookup_interval); - // todo: graceful shutdown - - async move { - // make many fast lookup queries at bootstrap, trying to fill kbuckets at furthest - // log2distance from local node - for i in (0..bootstrap_lookup_countdown).rev() { - let target = discv5::enr::NodeId::random(); - - trace!(target: "net::discv5", - %target, - bootstrap_boost_runs_countdown=i, - lookup_interval=format!("{:#?}", pulse_lookup_interval), - "starting bootstrap boost lookup query" - ); - - lookup(target, &discv5, &metrics).await; - - tokio::time::sleep(pulse_lookup_interval).await; - } - - // initiate regular lookups to populate kbuckets - loop { - // make sure node is connected to each subtree in the network 
by target - // selection (ref kademlia) - let target = get_lookup_target(kbucket_index, local_node_id); - - trace!(target: "net::discv5", - %target, - lookup_interval=format!("{:#?}", lookup_interval), - "starting periodic lookup query" - ); - - lookup(target, &discv5, &metrics).await; - - if kbucket_index > DEFAULT_MIN_TARGET_KBUCKET_INDEX { - // try to populate bucket one step closer - kbucket_index -= 1 - } else { - // start over with bucket furthest away - kbucket_index = MAX_KBUCKET_INDEX - } - - tokio::time::sleep(lookup_interval).await; - } - } - }); - } - /// Process an event from the underlying [`discv5::Discv5`] node. pub fn on_discv5_update(&mut self, update: discv5::Event) -> Option { match update { @@ -416,7 +253,7 @@ impl Discv5 { } /// Processes a discovered peer. Returns `true` if peer is added to - fn on_discovered_peer( + pub fn on_discovered_peer( &mut self, enr: &discv5::Enr, socket: SocketAddr, @@ -467,7 +304,7 @@ impl Discv5 { /// /// Note: [`discv5::Discv5`] won't initiate a session with any peer with a malformed node /// record, that advertises a reserved IP address on a WAN network. - fn try_into_reachable( + pub fn try_into_reachable( &self, enr: &discv5::Enr, socket: SocketAddr, @@ -490,13 +327,13 @@ impl Discv5 { /// Applies filtering rules on an ENR. Returns [`Ok`](FilterOutcome::Ok) if peer should be /// passed up to app, and [`Ignore`](FilterOutcome::Ignore) if peer should instead be dropped. - fn filter_discovered_peer(&self, enr: &discv5::Enr) -> FilterOutcome { + pub fn filter_discovered_peer(&self, enr: &discv5::Enr) -> FilterOutcome { self.discovered_peer_filter.filter(enr) } /// Returns the [`ForkId`] of the given [`Enr`](discv5::Enr) w.r.t. the local node's network /// stack, if field is set. - fn get_fork_id( + pub fn get_fork_id( &self, enr: &discv5::enr::Enr, ) -> Result { @@ -551,6 +388,170 @@ pub struct DiscoveredPeer { pub fork_id: Option, } +/// Builds the local ENR with the supplied key. 
+pub fn build_local_enr( + sk: &SecretKey, + config: &Config, +) -> (Enr, NodeRecord, Option<&'static [u8]>, IpMode) { + let mut builder = discv5::enr::Enr::builder(); + + let Config { discv5_config, fork, tcp_port, other_enr_kv_pairs, .. } = config; + + let (ip_mode, socket) = match discv5_config.listen_config { + ListenConfig::Ipv4 { ip, port } => { + if ip != Ipv4Addr::UNSPECIFIED { + builder.ip4(ip); + } + builder.udp4(port); + builder.tcp4(*tcp_port); + + (IpMode::Ip4, (ip, port).into()) + } + ListenConfig::Ipv6 { ip, port } => { + if ip != Ipv6Addr::UNSPECIFIED { + builder.ip6(ip); + } + builder.udp6(port); + builder.tcp6(*tcp_port); + + (IpMode::Ip6, (ip, port).into()) + } + ListenConfig::DualStack { ipv4, ipv4_port, ipv6, ipv6_port } => { + if ipv4 != Ipv4Addr::UNSPECIFIED { + builder.ip4(ipv4); + } + builder.udp4(ipv4_port); + builder.tcp4(*tcp_port); + + if ipv6 != Ipv6Addr::UNSPECIFIED { + builder.ip6(ipv6); + } + builder.udp6(ipv6_port); + + (IpMode::DualStack, (ipv6, ipv6_port).into()) + } + }; + + // identifies which network node is on + let network_stack_id = fork.as_ref().map(|(network_stack_id, fork_value)| { + builder.add_value_rlp(network_stack_id, alloy_rlp::encode(fork_value).into()); + *network_stack_id + }); + + // add other data + for (key, value) in other_enr_kv_pairs { + builder.add_value_rlp(key, value.clone().into()); + } + + // enr v4 not to get confused with discv4, independent versioning enr and + // discovery + let enr = builder.build(sk).expect("should build enr v4"); + + // backwards compatible enr + let bc_enr = NodeRecord::from_secret_key(socket, sk); + + (enr, bc_enr, network_stack_id, ip_mode) +} + +/// Bootstraps underlying [`discv5::Discv5`] node with configured peers. +pub async fn bootstrap( + bootstrap_nodes: HashSet, + discv5: &Arc, +) -> Result<(), Error> { + trace!(target: "net::discv5", + ?bootstrap_nodes, + "adding bootstrap nodes .." 
+ ); + + let mut enr_requests = vec![]; + for node in bootstrap_nodes { + match node { + BootNode::Enr(node) => { + if let Err(err) = discv5.add_enr(node) { + return Err(Error::AddNodeFailed(err)) + } + } + BootNode::Enode(enode) => { + let discv5 = discv5.clone(); + enr_requests.push(async move { + if let Err(err) = discv5.request_enr(enode.to_string()).await { + debug!(target: "net::discv5", + ?enode, + %err, + "failed adding boot node" + ); + } + }) + } + } + } + + // If a session is established, the ENR is added straight away to discv5 kbuckets + Ok(_ = join_all(enr_requests).await) +} + +/// Backgrounds regular look up queries, in order to keep kbuckets populated. +pub fn spawn_populate_kbuckets_bg( + lookup_interval: u64, + bootstrap_lookup_interval: u64, + bootstrap_lookup_countdown: u64, + metrics: Discv5Metrics, + discv5: Arc, +) { + task::spawn({ + let local_node_id = discv5.local_enr().node_id(); + let lookup_interval = Duration::from_secs(lookup_interval); + let metrics = metrics.discovered_peers; + let mut kbucket_index = MAX_KBUCKET_INDEX; + let pulse_lookup_interval = Duration::from_secs(bootstrap_lookup_interval); + // todo: graceful shutdown + + async move { + // make many fast lookup queries at bootstrap, trying to fill kbuckets at furthest + // log2distance from local node + for i in (0..bootstrap_lookup_countdown).rev() { + let target = discv5::enr::NodeId::random(); + + trace!(target: "net::discv5", + %target, + bootstrap_boost_runs_countdown=i, + lookup_interval=format!("{:#?}", pulse_lookup_interval), + "starting bootstrap boost lookup query" + ); + + lookup(target, &discv5, &metrics).await; + + tokio::time::sleep(pulse_lookup_interval).await; + } + + // initiate regular lookups to populate kbuckets + loop { + // make sure node is connected to each subtree in the network by target + // selection (ref kademlia) + let target = get_lookup_target(kbucket_index, local_node_id); + + trace!(target: "net::discv5", + %target, + 
lookup_interval=format!("{:#?}", lookup_interval), + "starting periodic lookup query" + ); + + lookup(target, &discv5, &metrics).await; + + if kbucket_index > DEFAULT_MIN_TARGET_KBUCKET_INDEX { + // try to populate bucket one step closer + kbucket_index -= 1 + } else { + // start over with bucket furthest away + kbucket_index = MAX_KBUCKET_INDEX + } + + tokio::time::sleep(lookup_interval).await; + } + } + }); +} + /// Gets the next lookup target, based on which bucket is currently being targeted. pub fn get_lookup_target( kbucket_index: usize, @@ -846,7 +847,7 @@ mod tests { let config = Config::builder(TCP_PORT).fork(NetworkStackId::ETH, fork_id).build(); let sk = SecretKey::new(&mut thread_rng()); - let (enr, _, _, _) = Discv5::build_local_enr(&sk, &config); + let (enr, _, _, _) = build_local_enr(&sk, &config); let decoded_fork_id = enr .get_decodable::(NetworkStackId::ETH) From aef1bcc4359c2db80b116cf6198369122b1adebe Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 3 May 2024 12:18:41 +0200 Subject: [PATCH 458/700] chore: make clippy happy (#8068) --- bin/reth/src/commands/db/mod.rs | 2 +- bin/reth/src/utils.rs | 4 ++-- crates/blockchain-tree/src/blockchain_tree.rs | 4 ++-- crates/consensus/auto-seal/src/lib.rs | 2 +- crates/consensus/beacon/src/engine/hooks/controller.rs | 2 +- crates/consensus/beacon/src/engine/mod.rs | 4 ++-- crates/exex/src/manager.rs | 2 +- crates/metrics/src/common/mpsc.rs | 2 +- crates/net/discv4/src/lib.rs | 4 ++-- crates/net/discv4/src/test_utils.rs | 2 +- crates/net/discv5/src/lib.rs | 6 +++--- crates/net/dns/src/lib.rs | 4 ++-- crates/net/downloaders/src/bodies/bodies.rs | 2 +- crates/net/downloaders/src/file_client.rs | 2 +- crates/net/downloaders/src/headers/reverse_headers.rs | 4 ++-- crates/net/eth-wire/src/multiplex.rs | 2 +- crates/net/eth-wire/src/muxdemux.rs | 2 +- crates/net/network/src/budget.rs | 1 + crates/net/network/src/eth_requests.rs | 6 +++--- crates/net/network/src/fetch/mod.rs | 2 +- 
crates/net/network/src/manager.rs | 2 +- crates/net/network/src/state.rs | 2 +- crates/net/network/src/transactions/fetcher.rs | 6 +++--- crates/node/events/src/node.rs | 2 +- crates/rpc/rpc-builder/src/lib.rs | 6 +++--- crates/rpc/rpc/src/eth/api/sign.rs | 2 +- crates/stages-api/src/pipeline/mod.rs | 2 +- crates/stages/src/stages/merkle.rs | 2 +- crates/storage/libmdbx-rs/src/lib.rs | 2 +- crates/storage/nippy-jar/src/lib.rs | 10 +++++----- crates/storage/nippy-jar/src/writer.rs | 2 +- .../provider/src/providers/static_file/writer.rs | 2 +- crates/storage/provider/src/test_utils/events.rs | 4 ++-- crates/transaction-pool/src/pool/txpool.rs | 4 ++-- 34 files changed, 54 insertions(+), 53 deletions(-) diff --git a/bin/reth/src/commands/db/mod.rs b/bin/reth/src/commands/db/mod.rs index aeaf1d7e883cf..6eedabcc7714a 100644 --- a/bin/reth/src/commands/db/mod.rs +++ b/bin/reth/src/commands/db/mod.rs @@ -159,7 +159,7 @@ impl Command { let provider_factory = ProviderFactory::new(db, self.chain.clone(), static_files_path.clone())?; - let mut tool = DbTool::new(provider_factory, self.chain.clone())?; + let tool = DbTool::new(provider_factory, self.chain.clone())?; tool.drop(db_path, static_files_path)?; } Subcommands::Clear(command) => { diff --git a/bin/reth/src/utils.rs b/bin/reth/src/utils.rs index 5c56476a84ea1..650fc9d700d94 100644 --- a/bin/reth/src/utils.rs +++ b/bin/reth/src/utils.rs @@ -132,7 +132,7 @@ impl DbTool { /// Drops the database and the static files at the given path. pub fn drop( - &mut self, + &self, db_path: impl AsRef, static_files_path: impl AsRef, ) -> Result<()> { @@ -149,7 +149,7 @@ impl DbTool { } /// Drops the provided table from the database. 
- pub fn drop_table(&mut self) -> Result<()> { + pub fn drop_table(&self) -> Result<()> { self.provider_factory.db_ref().update(|tx| tx.clear::())??; Ok(()) } diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index fc9e7685a1c35..b2b30f132d80a 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1216,7 +1216,7 @@ where /// /// The block, `revert_until`, is __non-inclusive__, i.e. `revert_until` stays in the database. fn revert_canonical_from_database( - &mut self, + &self, revert_until: BlockNumber, ) -> Result, CanonicalError> { // read data that is needed for new sidechain @@ -1239,7 +1239,7 @@ where } } - fn update_reorg_metrics(&mut self, reorg_depth: f64) { + fn update_reorg_metrics(&self, reorg_depth: f64) { self.metrics.reorgs.increment(1); self.metrics.latest_reorg_depth.set(reorg_depth); } diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 402a6c9834133..9f2f2c40298ae 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -338,7 +338,7 @@ impl StorageInner { /// /// This returns the poststate from execution and post-block changes, as well as the gas used. 
pub(crate) fn execute( - &mut self, + &self, block: &BlockWithSenders, executor: &mut EVMProcessor<'_, EvmConfig>, ) -> Result<(BundleStateWithReceipts, u64), BlockExecutionError> diff --git a/crates/consensus/beacon/src/engine/hooks/controller.rs b/crates/consensus/beacon/src/engine/hooks/controller.rs index 48343d4804f5e..47085be008768 100644 --- a/crates/consensus/beacon/src/engine/hooks/controller.rs +++ b/crates/consensus/beacon/src/engine/hooks/controller.rs @@ -124,7 +124,7 @@ impl EngineHooksController { } fn poll_next_hook_inner( - &mut self, + &self, cx: &mut Context<'_>, hook: &mut Box, args: EngineHookContext, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index f3aa249fa1ba2..3e12c5f8e3a89 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -446,7 +446,7 @@ where /// /// Returns `true` if the head needs to be updated. fn on_head_already_canonical( - &mut self, + &self, header: &SealedHeader, attrs: &mut Option, ) -> bool { @@ -804,7 +804,7 @@ where /// This also updates the safe and finalized blocks in the [CanonChainTracker], if they are /// consistent with the head block. fn ensure_consistent_forkchoice_state( - &mut self, + &self, state: ForkchoiceState, ) -> ProviderResult> { // Ensure that the finalized block, if not zero, is known and in the canonical chain diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 1037395b8544d..1de8c102e3be8 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -258,7 +258,7 @@ impl ExExManager { /// Updates the current buffer capacity and notifies all `is_ready` watchers of the manager's /// readiness to receive notifications. 
- fn update_capacity(&mut self) { + fn update_capacity(&self) { let capacity = self.max_capacity.saturating_sub(self.buffer.len()); self.current_capacity.store(capacity, Ordering::Relaxed); self.metrics.current_capacity.set(capacity as f64); diff --git a/crates/metrics/src/common/mpsc.rs b/crates/metrics/src/common/mpsc.rs index 3c35c745eaf98..98c670ef79905 100644 --- a/crates/metrics/src/common/mpsc.rs +++ b/crates/metrics/src/common/mpsc.rs @@ -173,7 +173,7 @@ impl MeteredSender { /// Calls the underlying [Sender](mpsc::Sender)'s `send`, incrementing the appropriate /// metrics depending on the result. - pub async fn send(&mut self, value: T) -> Result<(), SendError> { + pub async fn send(&self, value: T) -> Result<(), SendError> { match self.sender.send(value).await { Ok(()) => { self.metrics.messages_sent.increment(1); diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 1a942a5b97e4a..9a0cb9c11a4b0 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -992,7 +992,7 @@ impl Discv4Service { } /// Encodes the packet, sends it and returns the hash. - pub(crate) fn send_packet(&mut self, msg: Message, to: SocketAddr) -> B256 { + pub(crate) fn send_packet(&self, msg: Message, to: SocketAddr) -> B256 { let (payload, hash) = msg.encode(&self.secret_key); trace!(target: "discv4", r#type=?msg.msg_type(), ?to, ?hash, "sending packet"); let _ = self.egress.try_send((payload, to)).map_err(|err| { @@ -1277,7 +1277,7 @@ impl Discv4Service { /// Handler for incoming `EnrRequest` message fn on_enr_request( - &mut self, + &self, msg: EnrRequest, remote_addr: SocketAddr, id: PeerId, diff --git a/crates/net/discv4/src/test_utils.rs b/crates/net/discv4/src/test_utils.rs index dae3ea388d787..d4930f204fff8 100644 --- a/crates/net/discv4/src/test_utils.rs +++ b/crates/net/discv4/src/test_utils.rs @@ -114,7 +114,7 @@ impl MockDiscovery { } /// Encodes the packet, sends it and returns the hash. 
- fn send_packet(&mut self, msg: Message, to: SocketAddr) -> B256 { + fn send_packet(&self, msg: Message, to: SocketAddr) -> B256 { let (payload, hash) = msg.encode(&self.secret_key); let _ = self.egress.try_send((payload, to)); hash diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 8895f8a1638e3..e9bc79dce91a7 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -220,7 +220,7 @@ impl Discv5 { } /// Process an event from the underlying [`discv5::Discv5`] node. - pub fn on_discv5_update(&mut self, update: discv5::Event) -> Option { + pub fn on_discv5_update(&self, update: discv5::Event) -> Option { match update { discv5::Event::SocketUpdated(_) | discv5::Event::TalkRequest(_) | // `Discovered` not unique discovered peers @@ -254,7 +254,7 @@ impl Discv5 { /// Processes a discovered peer. Returns `true` if peer is added to pub fn on_discovered_peer( - &mut self, + &self, enr: &discv5::Enr, socket: SocketAddr, ) -> Option { @@ -724,7 +724,7 @@ mod tests { let remote_key = CombinedKey::generate_secp256k1(); let remote_enr = Enr::builder().tcp4(REMOTE_RLPX_PORT).build(&remote_key).unwrap(); - let mut discv5 = discv5_noop(); + let discv5 = discv5_noop(); // test let filtered_peer = discv5.on_discovered_peer(&remote_enr, remote_socket); diff --git a/crates/net/dns/src/lib.rs b/crates/net/dns/src/lib.rs index e5ddc0fd1851b..5000e524eb858 100644 --- a/crates/net/dns/src/lib.rs +++ b/crates/net/dns/src/lib.rs @@ -67,13 +67,13 @@ pub struct DnsDiscoveryHandle { impl DnsDiscoveryHandle { /// Starts syncing the given link to a tree. - pub fn sync_tree(&mut self, link: &str) -> Result<(), ParseDnsEntryError> { + pub fn sync_tree(&self, link: &str) -> Result<(), ParseDnsEntryError> { self.sync_tree_with_link(link.parse()?); Ok(()) } /// Starts syncing the given link to a tree. 
- pub fn sync_tree_with_link(&mut self, link: LinkEntry) { + pub fn sync_tree_with_link(&self, link: LinkEntry) { let _ = self.to_service.send(DnsDiscoveryCommand::SyncTree(link)); } diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 985c545e9b396..8f97e09c7dd43 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -68,7 +68,7 @@ where Provider: HeaderProvider + Unpin + 'static, { /// Returns the next contiguous request. - fn next_headers_request(&mut self) -> DownloadResult>> { + fn next_headers_request(&self) -> DownloadResult>> { let start_at = match self.in_progress_queue.last_requested_block_number { Some(num) => num + 1, None => *self.download_range.start(), diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index ee783a1a4f164..ef72a891be7d3 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -224,7 +224,7 @@ impl FileClient { } /// Returns an iterator over headers in the client. - pub fn headers_iter(&mut self) -> impl Iterator { + pub fn headers_iter(&self) -> impl Iterator { self.headers.values() } diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 273f97e589b47..a5cdb145b01ae 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -536,7 +536,7 @@ where /// Handles the error of a bad response /// /// This will re-submit the request. 
- fn on_headers_error(&mut self, err: Box) { + fn on_headers_error(&self, err: Box) { let HeadersResponseError { request, peer_id, error } = *err; self.penalize_peer(peer_id, &error); @@ -581,7 +581,7 @@ where } /// Starts a request future - fn submit_request(&mut self, request: HeadersRequest, priority: Priority) { + fn submit_request(&self, request: HeadersRequest, priority: Priority) { trace!(target: "downloaders::headers", ?request, "Submitting headers request"); self.in_progress_queue.push(self.request_fut(request, priority)); self.metrics.in_flight_requests.increment(1.); diff --git a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index 8677ae77c1058..82eccd5c8a18b 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -239,7 +239,7 @@ impl MultiplexInner { } /// Delegates a message to the matching protocol. - fn delegate_message(&mut self, cap: &SharedCapability, msg: BytesMut) -> bool { + fn delegate_message(&self, cap: &SharedCapability, msg: BytesMut) -> bool { for proto in &self.protocols { if proto.shared_cap == *cap { proto.send_raw(msg); diff --git a/crates/net/eth-wire/src/muxdemux.rs b/crates/net/eth-wire/src/muxdemux.rs index a9bbe2fdb4326..18112346ea31e 100644 --- a/crates/net/eth-wire/src/muxdemux.rs +++ b/crates/net/eth-wire/src/muxdemux.rs @@ -171,7 +171,7 @@ impl MuxDemuxStream { /// Checks if all clones of this shared stream have been dropped, if true then returns // /// function to drop the stream. - fn can_drop(&mut self) -> bool { + fn can_drop(&self) -> bool { for tx in self.demux.values() { if !tx.is_closed() { return false diff --git a/crates/net/network/src/budget.rs b/crates/net/network/src/budget.rs index 319c8e3113818..e20d882fe8275 100644 --- a/crates/net/network/src/budget.rs +++ b/crates/net/network/src/budget.rs @@ -46,6 +46,7 @@ macro_rules! 
poll_nested_stream_with_budget { loop { match $poll_stream { Poll::Ready(Some(item)) => { + #[allow(unused_mut)] let mut f = $on_ready_some; f(item); diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index 57e83391dee90..3268ff8987afe 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -139,7 +139,7 @@ where } fn on_headers_request( - &mut self, + &self, _peer_id: PeerId, request: GetBlockHeaders, response: oneshot::Sender>, @@ -150,7 +150,7 @@ where } fn on_bodies_request( - &mut self, + &self, _peer_id: PeerId, request: GetBlockBodies, response: oneshot::Sender>, @@ -187,7 +187,7 @@ where } fn on_receipts_request( - &mut self, + &self, _peer_id: PeerId, request: GetReceipts, response: oneshot::Sender>, diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index 3a529c97e22ee..9ad50edb0a276 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -130,7 +130,7 @@ impl StateFetcher { /// Returns the _next_ idle peer that's ready to accept a request, /// prioritizing those with the lowest timeout/latency and those that recently responded with /// adequate data. 
- fn next_best_peer(&mut self) -> Option { + fn next_best_peer(&self) -> Option { let mut idle = self.peers.iter().filter(|(_, peer)| peer.state.is_idle()); let mut best_peer = idle.next()?; diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 39d29ee715af8..0d2a3340816de 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -403,7 +403,7 @@ where } /// Handle an incoming request from the peer - fn on_eth_request(&mut self, peer_id: PeerId, req: PeerRequest) { + fn on_eth_request(&self, peer_id: PeerId, req: PeerRequest) { match req { PeerRequest::GetBlockHeaders { request, response } => { self.delegate_eth_request(IncomingEthRequest::GetBlockHeaders { diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 0020b4927dc1b..309184ca32553 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -234,7 +234,7 @@ where } /// Invoked when a new [`ForkId`] is activated. - pub(crate) fn update_fork_id(&mut self, fork_id: ForkId) { + pub(crate) fn update_fork_id(&self, fork_id: ForkId) { self.discovery.update_fork_id(fork_id) } diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index e82a20a31628a..f26b1abe2193b 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -239,7 +239,7 @@ impl TransactionFetcher { /// /// Returns left over hashes. pub fn pack_request( - &mut self, + &self, hashes_to_request: &mut RequestTxHashes, hashes_from_announcement: ValidAnnouncementData, ) -> RequestTxHashes { @@ -260,7 +260,7 @@ impl TransactionFetcher { /// response. If no, it's added to surplus hashes. If yes, it's added to hashes to the request /// and expected response size is accumulated. 
pub fn pack_request_eth68( - &mut self, + &self, hashes_to_request: &mut RequestTxHashes, hashes_from_announcement: impl HandleMempoolData + IntoIterator)>, @@ -328,7 +328,7 @@ impl TransactionFetcher { /// /// Returns left over hashes. pub fn pack_request_eth66( - &mut self, + &self, hashes_to_request: &mut RequestTxHashes, hashes_from_announcement: ValidAnnouncementData, ) -> RequestTxHashes { diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 2689226ea693e..ba7ae8da4600e 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -232,7 +232,7 @@ impl NodeState { } } - fn handle_network_event(&mut self, _: NetworkEvent) { + fn handle_network_event(&self, _: NetworkEvent) { // NOTE(onbjerg): This used to log established/disconnecting sessions, but this is already // logged in the networking component. I kept this stub in case we want to catch other // networking events later on. diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index cea80398efa0f..7d86a00562c68 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -1041,12 +1041,12 @@ where Network: NetworkInfo + Peers + Clone + 'static, { /// Instantiates AdminApi - pub fn admin_api(&mut self) -> AdminApi { + pub fn admin_api(&self) -> AdminApi { AdminApi::new(self.network.clone(), self.provider.chain_spec()) } /// Instantiates Web3Api - pub fn web3_api(&mut self) -> Web3Api { + pub fn web3_api(&self) -> Web3Api { Web3Api::new(self.network.clone()) } @@ -1443,7 +1443,7 @@ where } /// Instantiates RethApi - pub fn reth_api(&mut self) -> RethApi { + pub fn reth_api(&self) -> RethApi { RethApi::new(self.provider.clone(), Box::new(self.executor.clone())) } } diff --git a/crates/rpc/rpc/src/eth/api/sign.rs b/crates/rpc/rpc/src/eth/api/sign.rs index 66df0e8de148d..5cbdefa41c9ec 100644 --- a/crates/rpc/rpc/src/eth/api/sign.rs +++ b/crates/rpc/rpc/src/eth/api/sign.rs @@ -42,7 +42,7 @@ impl EthApi 
RethResult<()> { + pub fn produce_static_files(&self) -> RethResult<()> { let mut static_file_producer = self.static_file_producer.lock(); let provider = self.provider_factory.provider()?; diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index 186382e36c234..77fcf2e15ba5b 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -117,7 +117,7 @@ impl MerkleStage { /// Saves the hashing progress pub fn save_execution_checkpoint( - &mut self, + &self, provider: &DatabaseProviderRW, checkpoint: Option, ) -> Result<(), StageError> { diff --git a/crates/storage/libmdbx-rs/src/lib.rs b/crates/storage/libmdbx-rs/src/lib.rs index f8c2512082a69..ba8c6b0624418 100644 --- a/crates/storage/libmdbx-rs/src/lib.rs +++ b/crates/storage/libmdbx-rs/src/lib.rs @@ -5,7 +5,7 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![allow(missing_docs)] +#![allow(missing_docs, clippy::needless_pass_by_ref_mut)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] pub use crate::{ diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index 1cecdba40b019..1abbfba75cc26 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -366,7 +366,7 @@ impl NippyJar { /// Writes all data and configuration to a file and the offset index to another. pub fn freeze( - mut self, + self, columns: Vec>>>, total_rows: u64, ) -> Result { @@ -392,7 +392,7 @@ impl NippyJar { } /// Freezes [`PerfectHashingFunction`], [`InclusionFilter`] and the offset index to file. 
- fn freeze_filters(&mut self) -> Result<(), NippyJarError> { + fn freeze_filters(&self) -> Result<(), NippyJarError> { debug!(target: "nippy-jar", path=?self.index_path(), "Writing offsets and offsets index to file."); let mut file = File::create(self.index_path())?; @@ -405,7 +405,7 @@ impl NippyJar { /// Safety checks before creating and returning a [`File`] handle to write data to. fn check_before_freeze( - &mut self, + &self, columns: &[impl IntoIterator>>], ) -> Result<(), NippyJarError> { if columns.len() != self.columns { @@ -427,7 +427,7 @@ impl NippyJar { } /// Writes all necessary configuration to file. - fn freeze_config(&mut self) -> Result<(), NippyJarError> { + fn freeze_config(&self) -> Result<(), NippyJarError> { Ok(bincode::serialize_into(File::create(self.config_path())?, &self)?) } } @@ -1200,7 +1200,7 @@ mod tests { fn append_two_rows(num_columns: usize, file_path: &Path, col1: &[Vec], col2: &[Vec]) { // Create and add 1 row { - let mut nippy = NippyJar::new_without_header(num_columns, file_path); + let nippy = NippyJar::new_without_header(num_columns, file_path); nippy.freeze_config().unwrap(); assert_eq!(nippy.max_row_size, 0); assert_eq!(nippy.rows, 0); diff --git a/crates/storage/nippy-jar/src/writer.rs b/crates/storage/nippy-jar/src/writer.rs index e1f4af10b01e9..6417e60076cf1 100644 --- a/crates/storage/nippy-jar/src/writer.rs +++ b/crates/storage/nippy-jar/src/writer.rs @@ -43,7 +43,7 @@ pub struct NippyJarWriter { impl NippyJarWriter { /// Creates a [`NippyJarWriter`] from [`NippyJar`]. 
- pub fn new(mut jar: NippyJar) -> Result { + pub fn new(jar: NippyJar) -> Result { let (data_file, offsets_file, is_created) = Self::create_or_open_files(jar.data_path(), &jar.offsets_path())?; diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index c61736b5eb668..d1aa8560f24e1 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -225,7 +225,7 @@ impl StaticFileProviderRW { /// Verifies if the incoming block number matches the next expected block number /// for a static file. This ensures data continuity when adding new blocks. fn check_next_block_number( - &mut self, + &self, expected_block_number: u64, segment: StaticFileSegment, ) -> ProviderResult<()> { diff --git a/crates/storage/provider/src/test_utils/events.rs b/crates/storage/provider/src/test_utils/events.rs index 34c4266612e57..baa6bc470bf2c 100644 --- a/crates/storage/provider/src/test_utils/events.rs +++ b/crates/storage/provider/src/test_utils/events.rs @@ -12,14 +12,14 @@ pub struct TestCanonStateSubscriptions { impl TestCanonStateSubscriptions { /// Adds new block commit to the queue that can be consumed with /// [`TestCanonStateSubscriptions::subscribe_to_canonical_state`] - pub fn add_next_commit(&mut self, new: Arc) { + pub fn add_next_commit(&self, new: Arc) { let event = CanonStateNotification::Commit { new }; self.canon_notif_tx.lock().as_mut().unwrap().retain(|tx| tx.send(event.clone()).is_ok()) } /// Adds reorg to the queue that can be consumed with /// [`TestCanonStateSubscriptions::subscribe_to_canonical_state`] - pub fn add_next_reorg(&mut self, old: Arc, new: Arc) { + pub fn add_next_reorg(&self, old: Arc, new: Arc) { let event = CanonStateNotification::Reorg { old, new }; self.canon_notif_tx.lock().as_mut().unwrap().retain(|tx| tx.send(event.clone()).is_ok()) } diff --git 
a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 44a90f1cf912c..bcad71edbd4a0 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -428,7 +428,7 @@ impl TxPool { } /// Update sub-pools size metrics. - pub(crate) fn update_size_metrics(&mut self) { + pub(crate) fn update_size_metrics(&self) { let stats = self.size(); self.metrics.pending_pool_transactions.set(stats.pending as f64); self.metrics.pending_pool_size_bytes.set(stats.pending_size as f64); @@ -990,7 +990,7 @@ impl AllTransactions { } /// Updates the size metrics - pub(crate) fn update_size_metrics(&mut self) { + pub(crate) fn update_size_metrics(&self) { self.metrics.all_transactions_by_hash.set(self.by_hash.len() as f64); self.metrics.all_transactions_by_id.set(self.txs.len() as f64); } From ec45ae679fec8ca7e21d1c73f38e6c401ae71a7a Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 3 May 2024 13:09:55 +0200 Subject: [PATCH 459/700] chore: log only number & hash when skipping payload (#8069) --- crates/node-core/src/engine/skip_new_payload.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/crates/node-core/src/engine/skip_new_payload.rs b/crates/node-core/src/engine/skip_new_payload.rs index ea5cf61e956a9..fdcb4aeecc4be 100644 --- a/crates/node-core/src/engine/skip_new_payload.rs +++ b/crates/node-core/src/engine/skip_new_payload.rs @@ -44,7 +44,14 @@ where Some(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }) => { if this.skipped < this.threshold { *this.skipped += 1; - tracing::warn!(target: "engine::intercept", ?payload, ?cancun_fields, threshold=this.threshold, skipped=this.skipped, "Skipping new payload"); + tracing::warn!( + target: "engine::intercept", + block_number = payload.block_number(), + block_hash = %payload.block_hash(), + ?cancun_fields, + threshold=this.threshold, + skipped=this.skipped, "Skipping new payload" + ); let _ = 
tx.send(Ok(PayloadStatus::from_status(PayloadStatusEnum::Syncing))); continue } else { From 067b0ff420882b31b00629936071ccd11f19e775 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 3 May 2024 13:39:46 +0200 Subject: [PATCH 460/700] feat: integrate ExecutorProvider (#7798) --- Cargo.lock | 16 +- bin/reth/Cargo.toml | 1 + .../src/commands/debug_cmd/build_block.rs | 41 +- bin/reth/src/commands/debug_cmd/execution.rs | 9 +- .../commands/debug_cmd/in_memory_merkle.rs | 41 +- bin/reth/src/commands/debug_cmd/merkle.rs | 7 +- .../src/commands/debug_cmd/replay_engine.rs | 23 +- bin/reth/src/commands/import.rs | 9 +- bin/reth/src/commands/stage/dump/execution.rs | 19 +- bin/reth/src/commands/stage/dump/merkle.rs | 7 +- bin/reth/src/commands/stage/run.rs | 9 +- bin/reth/src/commands/stage/unwind.rs | 26 +- bin/reth/src/lib.rs | 1 + bin/reth/src/macros.rs | 20 + crates/blockchain-tree/Cargo.toml | 3 + crates/blockchain-tree/src/blockchain_tree.rs | 35 +- crates/blockchain-tree/src/chain.rs | 44 +- crates/blockchain-tree/src/externals.rs | 10 +- crates/blockchain-tree/src/shareable.rs | 27 +- crates/consensus/auto-seal/src/lib.rs | 202 ++-- crates/consensus/auto-seal/src/task.rs | 24 +- crates/consensus/beacon/Cargo.toml | 1 + .../consensus/beacon/src/engine/test_utils.rs | 47 +- crates/ethereum/evm/Cargo.toml | 1 - crates/ethereum/evm/src/execute.rs | 99 +- crates/ethereum/evm/src/lib.rs | 1 + crates/ethereum/evm/src/verify.rs | 53 ++ crates/evm/Cargo.toml | 8 + crates/evm/src/either.rs | 119 +++ crates/evm/src/execute.rs | 142 ++- crates/evm/src/lib.rs | 5 + crates/evm/src/test_utils.rs | 80 ++ crates/exex/src/context.rs | 13 +- crates/node-ethereum/src/evm.rs | 2 + crates/node-ethereum/src/lib.rs | 2 +- crates/node-ethereum/src/node.rs | 14 +- crates/node/api/src/node.rs | 7 + crates/node/builder/Cargo.toml | 1 + crates/node/builder/src/builder/mod.rs | 23 + crates/node/builder/src/builder/states.rs | 5 + crates/node/builder/src/components/builder.rs | 14 +- 
crates/node/builder/src/components/execute.rs | 19 +- crates/node/builder/src/components/mod.rs | 23 +- crates/node/builder/src/launch/mod.rs | 9 +- crates/node/builder/src/setup.rs | 37 +- crates/optimism/evm/Cargo.toml | 1 + crates/optimism/evm/src/execute.rs | 109 +-- .../mod.rs => optimism/evm/src/l1.rs} | 23 +- crates/optimism/evm/src/lib.rs | 4 + crates/optimism/evm/src/verify.rs | 58 ++ crates/optimism/node/src/node.rs | 15 +- crates/optimism/node/src/txpool.rs | 5 +- crates/payload/optimism/Cargo.toml | 2 + crates/payload/optimism/src/builder.rs | 2 +- crates/revm/Cargo.toml | 5 +- crates/revm/src/factory.rs | 56 -- crates/revm/src/lib.rs | 13 - crates/revm/src/optimism/processor.rs | 401 -------- crates/revm/src/processor.rs | 865 ------------------ crates/rpc/rpc-builder/Cargo.toml | 3 +- crates/rpc/rpc-builder/tests/it/auth.rs | 2 +- crates/rpc/rpc-builder/tests/it/utils.rs | 3 +- crates/rpc/rpc/Cargo.toml | 4 + crates/rpc/rpc/src/eth/api/block.rs | 2 +- crates/rpc/rpc/src/eth/api/transactions.rs | 26 +- crates/stages/Cargo.toml | 2 + crates/stages/src/lib.rs | 24 +- crates/stages/src/sets.rs | 72 +- crates/stages/src/stages/execution.rs | 60 +- crates/stages/src/stages/mod.rs | 10 +- .../bundle_state_with_receipts.rs | 17 + examples/custom-evm/src/main.rs | 17 +- testing/ef-tests/Cargo.toml | 6 +- testing/ef-tests/src/cases/blockchain_test.rs | 10 +- 74 files changed, 1088 insertions(+), 2028 deletions(-) create mode 100644 bin/reth/src/macros.rs create mode 100644 crates/ethereum/evm/src/verify.rs create mode 100644 crates/evm/src/either.rs create mode 100644 crates/evm/src/test_utils.rs rename crates/{revm/src/optimism/mod.rs => optimism/evm/src/l1.rs} (97%) create mode 100644 crates/optimism/evm/src/verify.rs delete mode 100644 crates/revm/src/factory.rs delete mode 100644 crates/revm/src/optimism/processor.rs delete mode 100644 crates/revm/src/processor.rs diff --git a/Cargo.lock b/Cargo.lock index 24b07b8ab3161..b2e179a955bc2 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -2670,8 +2670,8 @@ dependencies = [ "alloy-rlp", "rayon", "reth-db", + "reth-evm-ethereum", "reth-interfaces", - "reth-node-ethereum", "reth-primitives", "reth-provider", "reth-revm", @@ -6393,6 +6393,7 @@ dependencies = [ "reth-discv4", "reth-downloaders", "reth-ethereum-payload-builder", + "reth-evm", "reth-exex", "reth-interfaces", "reth-network", @@ -6490,6 +6491,7 @@ dependencies = [ "reth-downloaders", "reth-engine-primitives", "reth-ethereum-engine-primitives", + "reth-evm", "reth-evm-ethereum", "reth-interfaces", "reth-metrics", @@ -6537,6 +6539,7 @@ dependencies = [ "parking_lot 0.12.2", "reth-consensus", "reth-db", + "reth-evm", "reth-evm-ethereum", "reth-interfaces", "reth-metrics", @@ -6954,6 +6957,8 @@ dependencies = [ name = "reth-evm" version = "0.2.0-beta.6" dependencies = [ + "futures-util", + "parking_lot 0.12.2", "reth-interfaces", "reth-primitives", "revm", @@ -6967,7 +6972,6 @@ dependencies = [ "reth-evm", "reth-interfaces", "reth-primitives", - "reth-provider", "reth-revm", "revm-primitives", "tracing", @@ -6982,6 +6986,7 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-revm", + "revm", "revm-primitives", "tracing", ] @@ -7262,6 +7267,7 @@ dependencies = [ "reth-consensus", "reth-db", "reth-downloaders", + "reth-evm", "reth-exex", "reth-interfaces", "reth-network", @@ -7457,6 +7463,7 @@ dependencies = [ "reth-basic-payload-builder", "reth-engine-primitives", "reth-evm", + "reth-evm-optimism", "reth-payload-builder", "reth-primitives", "reth-provider", @@ -7645,6 +7652,7 @@ dependencies = [ "reth-consensus-common", "reth-evm", "reth-evm-ethereum", + "reth-evm-optimism", "reth-interfaces", "reth-metrics", "reth-network-api", @@ -7711,12 +7719,13 @@ dependencies = [ "pin-project", "reth-beacon-consensus", "reth-engine-primitives", + "reth-ethereum-engine-primitives", "reth-evm", + "reth-evm-ethereum", "reth-interfaces", "reth-ipc", "reth-metrics", "reth-network-api", - "reth-node-ethereum", 
"reth-payload-builder", "reth-primitives", "reth-provider", @@ -7826,6 +7835,7 @@ dependencies = [ "reth-db", "reth-downloaders", "reth-etl", + "reth-evm", "reth-evm-ethereum", "reth-exex", "reth-interfaces", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index c323017d0ab33..3f5d788347b8b 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -19,6 +19,7 @@ reth-primitives = { workspace = true, features = ["arbitrary", "clap"] } reth-db = { workspace = true, features = ["mdbx"] } reth-exex.workspace = true reth-provider = { workspace = true } +reth-evm.workspace = true reth-revm.workspace = true reth-stages.workspace = true reth-interfaces = { workspace = true, features = ["clap"] } diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 9d5942ae152de..22361aada5612 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -6,6 +6,7 @@ use crate::{ DatabaseArgs, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, }; use alloy_rlp::Decodable; use clap::Parser; @@ -20,10 +21,9 @@ use reth_blockchain_tree::{ use reth_cli_runner::CliContext; use reth_consensus::Consensus; use reth_db::{init_db, DatabaseEnv}; +use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; use reth_interfaces::RethResult; use reth_node_api::PayloadBuilderAttributes; -#[cfg(not(feature = "optimism"))] -use reth_node_ethereum::EthEvmConfig; use reth_payload_builder::database::CachedReads; use reth_primitives::{ constants::eip4844::{LoadKzgSettingsError, MAINNET_KZG_TRUSTED_SETUP}, @@ -31,13 +31,14 @@ use reth_primitives::{ revm_primitives::KzgSettings, stage::StageId, Address, BlobTransaction, BlobTransactionSidecar, Bytes, ChainSpec, PooledTransactionsElement, - SealedBlock, SealedBlockWithSenders, Transaction, TransactionSigned, TxEip4844, B256, U256, + Receipts, SealedBlock, SealedBlockWithSenders, Transaction, 
TransactionSigned, TxEip4844, B256, + U256, }; use reth_provider::{ - providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, ExecutorFactory, - ProviderFactory, StageCheckpointReader, StateProviderFactory, + providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, + BundleStateWithReceipts, ProviderFactory, StageCheckpointReader, StateProviderFactory, }; -use reth_revm::EvmProcessorFactory; +use reth_revm::database::StateProviderDatabase; #[cfg(feature = "optimism")] use reth_rpc_types::engine::OptimismPayloadAttributes; use reth_rpc_types::engine::{BlobsBundleV1, PayloadAttributes}; @@ -161,18 +162,11 @@ impl Command { let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); - #[cfg(feature = "optimism")] - let evm_config = reth_node_optimism::OptimismEvmConfig::default(); - - #[cfg(not(feature = "optimism"))] - let evm_config = EthEvmConfig::default(); + let executor = block_executor!(self.chain.clone()); // configure blockchain tree - let tree_externals = TreeExternals::new( - provider_factory.clone(), - Arc::clone(&consensus), - EvmProcessorFactory::new(self.chain.clone(), evm_config), - ); + let tree_externals = + TreeExternals::new(provider_factory.clone(), Arc::clone(&consensus), executor); let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default(), None)?; let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); @@ -309,11 +303,16 @@ impl Command { let block_with_senders = SealedBlockWithSenders::new(block.clone(), senders).unwrap(); - let executor_factory = EvmProcessorFactory::new(self.chain.clone(), evm_config); - let mut executor = executor_factory.with_state(blockchain_db.latest()?); - executor - .execute_and_verify_receipt(&block_with_senders.clone().unseal(), U256::MAX)?; - let state = executor.take_output_state(); + let db = StateProviderDatabase::new(blockchain_db.latest()?); + let executor = block_executor!(self.chain.clone()).executor(db); + + let 
BlockExecutionOutput { state, receipts, .. } = + executor.execute((&block_with_senders.clone().unseal(), U256::MAX).into())?; + let state = BundleStateWithReceipts::new( + state, + Receipts::from_block_receipt(receipts), + block.number, + ); debug!(target: "reth::cli", ?state, "Executed block"); let hashed_state = state.hash_state_slow(); diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index a83ea19fde267..33b07368a48bf 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -7,6 +7,7 @@ use crate::{ DatabaseArgs, NetworkArgs, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, utils::get_single_header, }; use clap::Parser; @@ -25,7 +26,6 @@ use reth_interfaces::p2p::{bodies::client::BodiesClient, headers::client::Header use reth_network::{NetworkEvents, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_node_core::init::init_genesis; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::{ fs, stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, PruneModes, B256, }; @@ -111,8 +111,7 @@ impl Command { let stage_conf = &config.stages; let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let factory = - reth_revm::EvmProcessorFactory::new(self.chain.clone(), EthEvmConfig::default()); + let executor = block_executor!(self.chain.clone()); let header_mode = HeaderSyncMode::Tip(tip_rx); let pipeline = Pipeline::builder() @@ -124,14 +123,14 @@ impl Command { Arc::clone(&consensus), header_downloader, body_downloader, - factory.clone(), + executor.clone(), stage_conf.etl.clone(), ) .set(SenderRecoveryStage { commit_threshold: stage_conf.sender_recovery.commit_threshold, }) .set(ExecutionStage::new( - factory, + executor, ExecutionStageThresholds { max_blocks: None, max_changes: None, diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 
e68231a768782..008530c53100b 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -7,6 +7,7 @@ use crate::{ DatabaseArgs, NetworkArgs, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, utils::{get_single_body, get_single_header}, }; use backon::{ConstantBuilder, Retryable}; @@ -14,16 +15,17 @@ use clap::Parser; use reth_cli_runner::CliContext; use reth_config::Config; use reth_db::{init_db, DatabaseEnv}; +use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; use reth_interfaces::executor::BlockValidationError; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; -use reth_node_ethereum::EthEvmConfig; -use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, ChainSpec}; +use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, ChainSpec, Receipts}; use reth_provider::{ - AccountExtReader, ExecutorFactory, HashingWriter, HeaderProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StaticFileProviderFactory, - StorageReader, + AccountExtReader, BundleStateWithReceipts, HashingWriter, HeaderProvider, + LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, + StaticFileProviderFactory, StorageReader, }; +use reth_revm::database::StateProviderDatabase; use reth_tasks::TaskExecutor; use reth_trie::{updates::TrieKey, StateRoot}; use std::{net::SocketAddr, path::PathBuf, sync::Arc}; @@ -162,24 +164,31 @@ impl Command { ) .await?; - let executor_factory = - reth_revm::EvmProcessorFactory::new(self.chain.clone(), EthEvmConfig::default()); - let mut executor = executor_factory.with_state(LatestStateProviderRef::new( + let db = StateProviderDatabase::new(LatestStateProviderRef::new( provider.tx_ref(), factory.static_file_provider(), )); + let executor = block_executor!(self.chain.clone()).executor(db); + let merkle_block_td = 
provider.header_td_by_number(merkle_block_number)?.unwrap_or_default(); - executor.execute_and_verify_receipt( - &block - .clone() - .unseal() - .with_recovered_senders() - .ok_or(BlockValidationError::SenderRecoveryError)?, - merkle_block_td + block.difficulty, + let BlockExecutionOutput { state, receipts, .. } = executor.execute( + ( + &block + .clone() + .unseal() + .with_recovered_senders() + .ok_or(BlockValidationError::SenderRecoveryError)?, + merkle_block_td + block.difficulty, + ) + .into(), )?; - let block_state = executor.take_output_state(); + let block_state = BundleStateWithReceipts::new( + state, + Receipts::from_block_receipt(receipts), + block.number, + ); // Unpacked `BundleState::state_root_slow` function let (in_memory_state_root, in_memory_updates) = diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 07075ff26768a..6d895fccf41a1 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -7,6 +7,7 @@ use crate::{ DatabaseArgs, NetworkArgs, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, utils::get_single_header, }; use backon::{ConstantBuilder, Retryable}; @@ -20,7 +21,6 @@ use reth_exex::ExExManagerHandle; use reth_interfaces::p2p::full_block::FullBlockClient; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::{ fs, stage::{StageCheckpoint, StageId}, @@ -201,10 +201,9 @@ impl Command { checkpoint.stage_checkpoint.is_some() }); - let factory = - reth_revm::EvmProcessorFactory::new(self.chain.clone(), EthEvmConfig::default()); + let executor = block_executor!(self.chain.clone()); let mut execution_stage = ExecutionStage::new( - factory, + executor, ExecutionStageThresholds { max_blocks: Some(1), max_changes: None, diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 
947c127452ed2..da2e458be2a18 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -5,6 +5,7 @@ use crate::{ DatabaseArgs, NetworkArgs, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, }; use clap::Parser; use eyre::Context; @@ -20,15 +21,12 @@ use reth_db::{init_db, DatabaseEnv}; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; use reth_node_core::engine::engine_store::{EngineMessageStore, StoredEngineApiMessage}; -#[cfg(not(feature = "optimism"))] -use reth_node_ethereum::{EthEngineTypes, EthEvmConfig}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::{fs, ChainSpec, PruneModes}; use reth_provider::{ providers::BlockchainProvider, CanonStateSubscriptions, ProviderFactory, StaticFileProviderFactory, }; -use reth_revm::EvmProcessorFactory; use reth_stages::Pipeline; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; @@ -126,18 +124,11 @@ impl Command { let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); - #[cfg(not(feature = "optimism"))] - let evm_config = EthEvmConfig::default(); - - #[cfg(feature = "optimism")] - let evm_config = reth_node_optimism::OptimismEvmConfig::default(); + let executor = block_executor!(self.chain.clone()); // Configure blockchain tree - let tree_externals = TreeExternals::new( - provider_factory.clone(), - Arc::clone(&consensus), - EvmProcessorFactory::new(self.chain.clone(), evm_config), - ); + let tree_externals = + TreeExternals::new(provider_factory.clone(), Arc::clone(&consensus), executor); let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default(), None)?; let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); @@ -184,8 +175,10 @@ impl Command { ) = PayloadBuilderService::new(payload_generator, blockchain_db.canonical_state_stream()); #[cfg(not(feature = "optimism"))] - let (payload_service, 
payload_builder): (_, PayloadBuilderHandle) = - PayloadBuilderService::new(payload_generator, blockchain_db.canonical_state_stream()); + let (payload_service, payload_builder): ( + _, + PayloadBuilderHandle, + ) = PayloadBuilderService::new(payload_generator, blockchain_db.canonical_state_stream()); ctx.task_executor.spawn_critical("payload builder service", payload_service); diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 3c191d8bbe79d..0d5b242751558 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -6,6 +6,7 @@ use crate::{ DatabaseArgs, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, version::SHORT_VERSION, }; use clap::Parser; @@ -26,7 +27,6 @@ use reth_interfaces::p2p::{ headers::downloader::{HeaderDownloader, SyncTarget}, }; use reth_node_core::init::init_genesis; -use reth_node_ethereum::EthEvmConfig; use reth_node_events::node::NodeEvent; use reth_primitives::{stage::StageId, ChainSpec, PruneModes, B256}; use reth_provider::{ @@ -269,8 +269,7 @@ where .expect("failed to set download range"); let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let factory = - reth_revm::EvmProcessorFactory::new(provider_factory.chain_spec(), EthEvmConfig::default()); + let executor = block_executor!(provider_factory.chain_spec()); let max_block = file_client.max_block().unwrap_or(0); @@ -285,14 +284,14 @@ where consensus.clone(), header_downloader, body_downloader, - factory.clone(), + executor.clone(), config.stages.etl.clone(), ) .set(SenderRecoveryStage { commit_threshold: config.stages.sender_recovery.commit_threshold, }) .set(ExecutionStage::new( - factory, + executor, ExecutionStageThresholds { max_blocks: config.stages.execution.max_blocks, max_changes: config.stages.execution.max_changes, diff --git a/bin/reth/src/commands/stage/dump/execution.rs b/bin/reth/src/commands/stage/dump/execution.rs index 571ce486a678c..d8f12b50af7c5 100644 --- 
a/bin/reth/src/commands/stage/dump/execution.rs +++ b/bin/reth/src/commands/stage/dump/execution.rs @@ -1,15 +1,12 @@ use super::setup; -use crate::utils::DbTool; -use eyre::Result; +use crate::{macros::block_executor, utils::DbTool}; use reth_db::{ cursor::DbCursorRO, database::Database, table::TableImporter, tables, transaction::DbTx, DatabaseEnv, }; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::stage::StageCheckpoint; use reth_provider::{ChainSpecProvider, ProviderFactory}; -use reth_revm::EvmProcessorFactory; use reth_stages::{stages::ExecutionStage, Stage, UnwindInput}; use tracing::info; @@ -19,7 +16,7 @@ pub(crate) async fn dump_execution_stage( to: u64, output_datadir: ChainPath, should_run: bool, -) -> Result<()> { +) -> eyre::Result<()> { let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; import_tables_with_range(&output_db, db_tool, from, to)?; @@ -127,10 +124,8 @@ async fn unwind_and_copy( ) -> eyre::Result<()> { let provider = db_tool.provider_factory.provider_rw()?; - let mut exec_stage = ExecutionStage::new_with_factory(EvmProcessorFactory::new( - db_tool.chain.clone(), - EthEvmConfig::default(), - )); + let executor = block_executor!(db_tool.chain.clone()); + let mut exec_stage = ExecutionStage::new_with_executor(executor); exec_stage.unwind( &provider, @@ -159,10 +154,8 @@ async fn dry_run( ) -> eyre::Result<()> { info!(target: "reth::cli", "Executing stage. 
[dry-run]"); - let mut exec_stage = ExecutionStage::new_with_factory(EvmProcessorFactory::new( - output_provider_factory.chain_spec(), - EthEvmConfig::default(), - )); + let executor = block_executor!(output_provider_factory.chain_spec()); + let mut exec_stage = ExecutionStage::new_with_executor(executor); let input = reth_stages::ExecInput { target: Some(to), checkpoint: Some(StageCheckpoint::new(from)) }; diff --git a/bin/reth/src/commands/stage/dump/merkle.rs b/bin/reth/src/commands/stage/dump/merkle.rs index 55a8ec76d1e4d..9b421be7ca3fe 100644 --- a/bin/reth/src/commands/stage/dump/merkle.rs +++ b/bin/reth/src/commands/stage/dump/merkle.rs @@ -1,11 +1,10 @@ use super::setup; -use crate::utils::DbTool; +use crate::{macros::block_executor, utils::DbTool}; use eyre::Result; use reth_config::config::EtlConfig; use reth_db::{database::Database, table::TableImporter, tables, DatabaseEnv}; use reth_exex::ExExManagerHandle; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::{stage::StageCheckpoint, BlockNumber, PruneModes}; use reth_provider::ProviderFactory; use reth_stages::{ @@ -81,9 +80,11 @@ async fn unwind_and_copy( MerkleStage::default_unwind().unwind(&provider, unwind)?; + let executor = block_executor!(db_tool.chain.clone()); + // Bring Plainstate to TO (hashing stage execution requires it) let mut exec_stage = ExecutionStage::new( - reth_revm::EvmProcessorFactory::new(db_tool.chain.clone(), EthEvmConfig::default()), + executor, ExecutionStageThresholds { max_blocks: Some(u64::MAX), max_changes: None, diff --git a/bin/reth/src/commands/stage/run.rs b/bin/reth/src/commands/stage/run.rs index d798c87d1f143..562b7e1b3e60e 100644 --- a/bin/reth/src/commands/stage/run.rs +++ b/bin/reth/src/commands/stage/run.rs @@ -9,6 +9,7 @@ use crate::{ DatabaseArgs, NetworkArgs, StageEnum, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, prometheus_exporter, version::SHORT_VERSION, }; @@ -19,7 
+20,6 @@ use reth_config::{config::EtlConfig, Config}; use reth_db::init_db; use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; use reth_exex::ExExManagerHandle; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::ChainSpec; use reth_provider::{ ProviderFactory, StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory, @@ -224,13 +224,10 @@ impl Command { } StageEnum::Senders => (Box::new(SenderRecoveryStage::new(batch_size)), None), StageEnum::Execution => { - let factory = reth_revm::EvmProcessorFactory::new( - self.chain.clone(), - EthEvmConfig::default(), - ); + let executor = block_executor!(self.chain.clone()); ( Box::new(ExecutionStage::new( - factory, + executor, ExecutionStageThresholds { max_blocks: Some(batch_size), max_changes: None, diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index 0c4260c0c6d3f..c6dea1a0596e2 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -1,12 +1,5 @@ //! 
Unwinding a certain block range -use crate::{ - args::{ - utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, - DatabaseArgs, - }, - dirs::{DataDirPath, MaybePlatformPath}, -}; use clap::{Parser, Subcommand}; use reth_beacon_consensus::BeaconConsensus; use reth_config::{Config, PruneConfig}; @@ -21,7 +14,6 @@ use reth_node_core::{ args::{get_secret_key, NetworkArgs}, dirs::ChainPath, }; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::{BlockHashOrNumber, ChainSpec, PruneModes, B256}; use reth_provider::{ BlockExecutionWriter, BlockNumReader, ChainSpecProvider, HeaderSyncMode, ProviderFactory, @@ -42,6 +34,15 @@ use std::{ops::RangeInclusive, sync::Arc}; use tokio::sync::watch; use tracing::info; +use crate::{ + args::{ + utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, + DatabaseArgs, + }, + dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, +}; + /// `reth stage unwind` command #[derive(Debug, Parser)] pub struct Command { @@ -178,10 +179,7 @@ impl Command { let stage_conf = &config.stages; let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let factory = reth_revm::EvmProcessorFactory::new( - provider_factory.chain_spec(), - EthEvmConfig::default(), - ); + let executor = block_executor!(provider_factory.chain_spec()); let header_mode = HeaderSyncMode::Tip(tip_rx); let pipeline = Pipeline::builder() @@ -193,14 +191,14 @@ impl Command { Arc::clone(&consensus), header_downloader, body_downloader, - factory.clone(), + executor.clone(), stage_conf.etl.clone(), ) .set(SenderRecoveryStage { commit_threshold: stage_conf.sender_recovery.commit_threshold, }) .set(ExecutionStage::new( - factory, + executor, ExecutionStageThresholds { max_blocks: None, max_changes: None, diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 42f26115c5d8e..9dd43bcd240ad 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -31,6 +31,7 @@ pub mod cli; pub mod commands; +mod macros; pub mod utils; /// Re-exported payload related 
types diff --git a/bin/reth/src/macros.rs b/bin/reth/src/macros.rs new file mode 100644 index 0000000000000..7ff81a0f90582 --- /dev/null +++ b/bin/reth/src/macros.rs @@ -0,0 +1,20 @@ +//! Helper macros + +/// Creates the block executor type based on the configured feature. +/// +/// Note(mattsse): This is incredibly horrible and will be replaced +#[cfg(not(feature = "optimism"))] +macro_rules! block_executor { + ($chain_spec:expr) => { + reth_node_ethereum::EthExecutorProvider::ethereum($chain_spec) + }; +} + +#[cfg(feature = "optimism")] +macro_rules! block_executor { + ($chain_spec:expr) => { + reth_node_optimism::OpExecutorProvider::optimism($chain_spec) + }; +} + +pub(crate) use block_executor; diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index ecb2e4ef3631e..912f593dc4c95 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -15,6 +15,8 @@ workspace = true reth-primitives.workspace = true reth-interfaces.workspace = true reth-db.workspace = true +reth-evm.workspace = true +reth-revm.workspace = true reth-provider.workspace = true reth-stages-api.workspace = true reth-trie = { workspace = true, features = ["metrics"] } @@ -40,6 +42,7 @@ reth-db = { workspace = true, features = ["test-utils"] } reth-interfaces = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true , features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } +reth-evm = { workspace = true, features = ["test-utils"] } reth-revm.workspace = true reth-evm-ethereum.workspace = true parking_lot.workspace = true diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index b2b30f132d80a..5346eafbdc38e 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -7,6 +7,7 @@ use crate::{ }; use reth_consensus::{Consensus, ConsensusError}; use 
reth_db::database::Database; +use reth_evm::execute::BlockExecutorProvider; use reth_interfaces::{ blockchain_tree::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, @@ -24,7 +25,7 @@ use reth_provider::{ chain::{ChainSplit, ChainSplitTarget}, BlockExecutionWriter, BlockNumReader, BlockWriter, BundleStateWithReceipts, CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, Chain, - ChainSpecProvider, DisplayBlocksChain, ExecutorFactory, HeaderProvider, ProviderError, + ChainSpecProvider, DisplayBlocksChain, HeaderProvider, ProviderError, }; use reth_stages_api::{MetricEvent, MetricEventsSender}; use std::{ @@ -57,13 +58,13 @@ use tracing::{debug, error, info, instrument, trace, warn}; /// * [BlockchainTree::make_canonical]: Check if we have the hash of a block that is the current /// canonical head and commit it to db. #[derive(Debug)] -pub struct BlockchainTree { +pub struct BlockchainTree { /// The state of the tree /// /// Tracks all the chains, the block indices, and the block buffer. state: TreeState, /// External components (the database, consensus engine etc.) - externals: TreeExternals, + externals: TreeExternals, /// Tree configuration config: BlockchainTreeConfig, /// Broadcast channel for canon state changes notifications. @@ -75,7 +76,7 @@ pub struct BlockchainTree { prune_modes: Option, } -impl BlockchainTree { +impl BlockchainTree { /// Subscribe to new blocks events. /// /// Note: Only canonical blocks are emitted by the tree. @@ -89,10 +90,10 @@ impl BlockchainTree { } } -impl BlockchainTree +impl BlockchainTree where DB: Database + Clone, - EVM: ExecutorFactory, + E: BlockExecutorProvider, { /// Builds the blockchain tree for the node. /// @@ -115,7 +116,7 @@ where /// storage space efficiently. It's important to validate this configuration to ensure it does /// not lead to unintended data loss. 
pub fn new( - externals: TreeExternals, + externals: TreeExternals, config: BlockchainTreeConfig, prune_modes: Option, ) -> RethResult { @@ -1273,7 +1274,8 @@ mod tests { use linked_hash_set::LinkedHashSet; use reth_consensus::test_utils::TestConsensus; use reth_db::{tables, test_utils::TempDatabase, transaction::DbTxMut, DatabaseEnv}; - use reth_evm_ethereum::EthEvmConfig; + use reth_evm::test_utils::MockExecutorProvider; + use reth_evm_ethereum::execute::EthExecutorProvider; #[cfg(not(feature = "optimism"))] use reth_primitives::proofs::calculate_receipt_root; #[cfg(feature = "optimism")] @@ -1289,19 +1291,15 @@ mod tests { MAINNET, }; use reth_provider::{ - test_utils::{ - blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec, - TestExecutorFactory, - }, + test_utils::{blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec}, ProviderFactory, }; - use reth_revm::EvmProcessorFactory; use reth_trie::StateRoot; use std::collections::HashMap; fn setup_externals( exec_res: Vec, - ) -> TreeExternals>, TestExecutorFactory> { + ) -> TreeExternals>, MockExecutorProvider> { let chain_spec = Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) @@ -1311,7 +1309,7 @@ mod tests { ); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec); let consensus = Arc::new(TestConsensus::default()); - let executor_factory = TestExecutorFactory::default(); + let executor_factory = MockExecutorProvider::default(); executor_factory.extend(exec_res); TreeExternals::new(provider_factory, consensus, executor_factory) @@ -1395,7 +1393,7 @@ mod tests { self } - fn assert(self, tree: &BlockchainTree) { + fn assert(self, tree: &BlockchainTree) { if let Some(chain_num) = self.chain_num { assert_eq!(tree.state.chains.len(), chain_num); } @@ -1439,8 +1437,7 @@ mod tests { ); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); let consensus = Arc::new(TestConsensus::default()); - let 
executor_factory = - EvmProcessorFactory::new(chain_spec.clone(), EthEvmConfig::default()); + let executor_provider = EthExecutorProvider::ethereum(chain_spec.clone()); { let provider_rw = provider_factory.provider_rw().unwrap(); @@ -1548,7 +1545,7 @@ mod tests { mock_block(3, Some(sidechain_block_1.hash()), Vec::from([mock_tx(2)]), 3); let mut tree = BlockchainTree::new( - TreeExternals::new(provider_factory, consensus, executor_factory), + TreeExternals::new(provider_factory, consensus, executor_provider), BlockchainTreeConfig::default(), None, ) diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index c091b800ab94a..637ea52e7e890 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -7,6 +7,7 @@ use super::externals::TreeExternals; use crate::BundleStateDataRef; use reth_consensus::{Consensus, ConsensusError}; use reth_db::database::Database; +use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; use reth_interfaces::{ blockchain_tree::{ error::{BlockchainTreeError, InsertBlockErrorKind}, @@ -15,13 +16,14 @@ use reth_interfaces::{ RethResult, }; use reth_primitives::{ - BlockHash, BlockNumber, ForkBlock, GotExpected, SealedBlockWithSenders, SealedHeader, U256, + BlockHash, BlockNumber, ForkBlock, GotExpected, Receipts, SealedBlockWithSenders, SealedHeader, + U256, }; use reth_provider::{ providers::{BundleStateProvider, ConsistentDbView}, - BundleStateDataProvider, BundleStateWithReceipts, Chain, ExecutorFactory, ProviderError, - StateRootProvider, + BundleStateDataProvider, BundleStateWithReceipts, Chain, ProviderError, StateRootProvider, }; +use reth_revm::database::StateProviderDatabase; use reth_trie::updates::TrieUpdates; use reth_trie_parallel::parallel_root::ParallelStateRoot; use std::{ @@ -66,18 +68,18 @@ impl AppendableChain { /// /// if [BlockValidationKind::Exhaustive] is specified, the method will verify the state root of /// the block. 
- pub fn new_canonical_fork( + pub fn new_canonical_fork( block: SealedBlockWithSenders, parent_header: &SealedHeader, canonical_block_hashes: &BTreeMap, canonical_fork: ForkBlock, - externals: &TreeExternals, + externals: &TreeExternals, block_attachment: BlockAttachment, block_validation_kind: BlockValidationKind, ) -> Result where DB: Database + Clone, - EF: ExecutorFactory, + E: BlockExecutorProvider, { let state = BundleStateWithReceipts::default(); let empty = BTreeMap::new(); @@ -104,18 +106,18 @@ impl AppendableChain { /// Create a new chain that forks off of an existing sidechain. /// /// This differs from [AppendableChain::new_canonical_fork] in that this starts a new fork. - pub(crate) fn new_chain_fork( + pub(crate) fn new_chain_fork( &self, block: SealedBlockWithSenders, side_chain_block_hashes: BTreeMap, canonical_block_hashes: &BTreeMap, canonical_fork: ForkBlock, - externals: &TreeExternals, + externals: &TreeExternals, block_validation_kind: BlockValidationKind, ) -> Result where DB: Database + Clone, - EF: ExecutorFactory, + E: BlockExecutorProvider, { let parent_number = block.number - 1; let parent = self.blocks().get(&parent_number).ok_or( @@ -166,18 +168,18 @@ impl AppendableChain { /// - [BlockAttachment] represents if the block extends the canonical chain, and thus we can /// cache the trie state updates. /// - [BlockValidationKind] determines if the state root __should__ be validated. - fn validate_and_execute( + fn validate_and_execute( block: SealedBlockWithSenders, parent_block: &SealedHeader, bundle_state_data_provider: BSDP, - externals: &TreeExternals, + externals: &TreeExternals, block_attachment: BlockAttachment, block_validation_kind: BlockValidationKind, ) -> RethResult<(BundleStateWithReceipts, Option)> where BSDP: BundleStateDataProvider, DB: Database + Clone, - EVM: ExecutorFactory, + E: BlockExecutorProvider, { // some checks are done before blocks comes here. 
externals.consensus.validate_header_against_parent(&block, parent_block)?; @@ -203,11 +205,17 @@ impl AppendableChain { let provider = BundleStateProvider::new(state_provider, bundle_state_data_provider); - let mut executor = externals.executor_factory.with_state(&provider); + let db = StateProviderDatabase::new(&provider); + let executor = externals.executor_factory.executor(db); let block_hash = block.hash(); let block = block.unseal(); - executor.execute_and_verify_receipt(&block, U256::MAX)?; - let bundle_state = executor.take_output_state(); + let state = executor.execute((&block, U256::MAX).into())?; + let BlockExecutionOutput { state, receipts, .. } = state; + let bundle_state = BundleStateWithReceipts::new( + state, + Receipts::from_block_receipt(receipts), + block.number, + ); // check state root if the block extends the canonical chain __and__ if state root // validation was requested. @@ -259,19 +267,19 @@ impl AppendableChain { /// __not__ the canonical head. #[track_caller] #[allow(clippy::too_many_arguments)] - pub(crate) fn append_block( + pub(crate) fn append_block( &mut self, block: SealedBlockWithSenders, side_chain_block_hashes: BTreeMap, canonical_block_hashes: &BTreeMap, - externals: &TreeExternals, + externals: &TreeExternals, canonical_fork: ForkBlock, block_attachment: BlockAttachment, block_validation_kind: BlockValidationKind, ) -> Result<(), InsertBlockErrorKind> where DB: Database + Clone, - EF: ExecutorFactory, + E: BlockExecutorProvider, { let parent_block = self.chain.tip(); diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index 36f3041738592..a311281c94253 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -19,27 +19,27 @@ use std::{collections::BTreeMap, sync::Arc}; /// - The executor factory to execute blocks with /// - The chain spec #[derive(Debug)] -pub struct TreeExternals { +pub struct TreeExternals { /// The provider factory, used 
to commit the canonical chain, or unwind it. pub(crate) provider_factory: ProviderFactory, /// The consensus engine. pub(crate) consensus: Arc, /// The executor factory to execute blocks with. - pub(crate) executor_factory: EVM, + pub(crate) executor_factory: E, } -impl TreeExternals { +impl TreeExternals { /// Create new tree externals. pub fn new( provider_factory: ProviderFactory, consensus: Arc, - executor_factory: EVM, + executor_factory: E, ) -> Self { Self { provider_factory, consensus, executor_factory } } } -impl TreeExternals { +impl TreeExternals { /// Fetches the latest canonical block hashes by walking backwards from the head. /// /// Returns the hashes sorted by increasing block numbers diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 7a0eb36fa49bd..061b49f4c450c 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -3,6 +3,7 @@ use super::BlockchainTree; use parking_lot::RwLock; use reth_db::database::Database; +use reth_evm::execute::BlockExecutorProvider; use reth_interfaces::{ blockchain_tree::{ error::{CanonicalError, InsertBlockError}, @@ -17,7 +18,7 @@ use reth_primitives::{ }; use reth_provider::{ BlockchainTreePendingStateProvider, BundleStateDataProvider, CanonStateSubscriptions, - ExecutorFactory, ProviderError, + ProviderError, }; use std::{ collections::{BTreeMap, HashSet}, @@ -27,22 +28,22 @@ use tracing::trace; /// Shareable blockchain tree that is behind a RwLock #[derive(Clone, Debug)] -pub struct ShareableBlockchainTree { +pub struct ShareableBlockchainTree { /// BlockchainTree - pub tree: Arc>>, + pub tree: Arc>>, } -impl ShareableBlockchainTree { +impl ShareableBlockchainTree { /// Create a new shareable database. 
- pub fn new(tree: BlockchainTree) -> Self { + pub fn new(tree: BlockchainTree) -> Self { Self { tree: Arc::new(RwLock::new(tree)) } } } -impl BlockchainTreeEngine for ShareableBlockchainTree +impl BlockchainTreeEngine for ShareableBlockchainTree where DB: Database + Clone, - EF: ExecutorFactory, + E: BlockExecutorProvider, { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { let mut tree = self.tree.write(); @@ -99,10 +100,10 @@ where } } -impl BlockchainTreeViewer for ShareableBlockchainTree +impl BlockchainTreeViewer for ShareableBlockchainTree where DB: Database + Clone, - EF: ExecutorFactory, + E: BlockExecutorProvider, { fn blocks(&self) -> BTreeMap> { trace!(target: "blockchain_tree", "Returning all blocks in blockchain tree"); @@ -181,10 +182,10 @@ where } } -impl BlockchainTreePendingStateProvider for ShareableBlockchainTree +impl BlockchainTreePendingStateProvider for ShareableBlockchainTree where DB: Database + Clone, - EF: ExecutorFactory, + E: BlockExecutorProvider, { fn find_pending_state_provider( &self, @@ -196,10 +197,10 @@ where } } -impl CanonStateSubscriptions for ShareableBlockchainTree +impl CanonStateSubscriptions for ShareableBlockchainTree where DB: Send + Sync, - EF: Send + Sync, + E: Send + Sync, { fn subscribe_to_canonical_state(&self) -> reth_provider::CanonStateNotifications { trace!(target: "blockchain_tree", "Registered subscriber for canonical state"); diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 9f2f2c40298ae..e954108c8c408 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -18,23 +18,18 @@ use reth_beacon_consensus::BeaconEngineMessage; use reth_consensus::{Consensus, ConsensusError}; use reth_engine_primitives::EngineTypes; -use reth_evm::ConfigureEvm; use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; use reth_primitives::{ - constants::{EMPTY_RECEIPTS, 
EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT}, + constants::{EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT}, eip4844::calculate_excess_blob_gas, - proofs, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, Bloom, - ChainSpec, Header, ReceiptWithBloom, SealedBlock, SealedHeader, TransactionSigned, Withdrawals, - B256, U256, + proofs, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Header, + Receipts, SealedBlock, SealedHeader, TransactionSigned, Withdrawals, B256, U256, }; use reth_provider::{ - BlockExecutor, BlockReaderIdExt, BundleStateWithReceipts, CanonStateNotificationSender, - StateProviderFactory, -}; -use reth_revm::{ - database::StateProviderDatabase, db::states::bundle_state::BundleRetention, - processor::EVMProcessor, State, + BlockReaderIdExt, BundleStateWithReceipts, CanonStateNotificationSender, StateProviderFactory, + StateRootProvider, }; +use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::TransactionPool; use std::{ collections::HashMap, @@ -50,6 +45,7 @@ mod task; pub use crate::client::AutoSealClient; pub use mode::{FixedBlockTimeMiner, MiningMode, ReadyTransactionMiner}; +use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; pub use task::MiningTask; /// A consensus implementation intended for local development and testing purposes. 
@@ -281,6 +277,18 @@ impl StorageInner { parent.next_block_base_fee(chain_spec.base_fee_params_at_timestamp(timestamp)) }); + let blob_gas_used = if chain_spec.is_cancun_active_at_timestamp(timestamp) { + let mut sum_blob_gas_used = 0; + for tx in transactions { + if let Some(blob_tx) = tx.transaction.as_eip4844() { + sum_blob_gas_used += blob_tx.blob_gas(); + } + } + Some(sum_blob_gas_used) + } else { + None + }; + let mut header = Header { parent_hash: self.best_hash, ommers_hash: proofs::calculate_ommers_root(ommers), @@ -298,7 +306,7 @@ impl StorageInner { mix_hash: Default::default(), nonce: 0, base_fee_per_gas, - blob_gas_used: None, + blob_gas_used, excess_blob_gas: None, extra_data: Default::default(), parent_beacon_block_root: None, @@ -334,111 +342,26 @@ impl StorageInner { header } - /// Executes the block with the given block and senders, on the provided [EVMProcessor]. - /// - /// This returns the poststate from execution and post-block changes, as well as the gas used. - pub(crate) fn execute( - &self, - block: &BlockWithSenders, - executor: &mut EVMProcessor<'_, EvmConfig>, - ) -> Result<(BundleStateWithReceipts, u64), BlockExecutionError> - where - EvmConfig: ConfigureEvm, - { - trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); - // TODO: there isn't really a parent beacon block root here, so not sure whether or not to - // call the 4788 beacon contract - - // set the first block to find the correct index in bundle state - executor.set_first_block(block.number); - - let (receipts, gas_used) = executor.execute_transactions(block, U256::ZERO)?; - - // Save receipts. - executor.save_receipts(receipts)?; - - // add post execution state change - // Withdrawals, rewards etc. 
- executor.apply_post_execution_state_change(block, U256::ZERO)?; - - // merge transitions - executor.db_mut().merge_transitions(BundleRetention::Reverts); - - // apply post block changes - Ok((executor.take_output_state(), gas_used)) - } - - /// Fills in the post-execution header fields based on the given BundleState and gas used. - /// In doing this, the state root is calculated and the final header is returned. - pub(crate) fn complete_header( - &self, - mut header: Header, - bundle_state: &BundleStateWithReceipts, - client: &S, - gas_used: u64, - blob_gas_used: Option, - #[cfg(feature = "optimism")] chain_spec: &ChainSpec, - ) -> Result { - let receipts = bundle_state.receipts_by_block(header.number); - header.receipts_root = if receipts.is_empty() { - EMPTY_RECEIPTS - } else { - let receipts_with_bloom = receipts - .iter() - .map(|r| (*r).clone().expect("receipts have not been pruned").into()) - .collect::>(); - header.logs_bloom = - receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); - #[cfg(feature = "optimism")] - { - proofs::calculate_receipt_root_optimism( - &receipts_with_bloom, - chain_spec, - header.timestamp, - ) - } - #[cfg(not(feature = "optimism"))] - { - proofs::calculate_receipt_root(&receipts_with_bloom) - } - }; - - header.gas_used = gas_used; - header.blob_gas_used = blob_gas_used; - - // calculate the state root - let state_root = client - .latest() - .map_err(BlockExecutionError::LatestBlock)? - .state_root(bundle_state.state()) - .unwrap(); - header.state_root = state_root; - Ok(header) - } - - /// Builds and executes a new block with the given transactions, on the provided [EVMProcessor]. + /// Builds and executes a new block with the given transactions, on the provided executor. /// /// This returns the header of the executed block, as well as the poststate from execution. - pub(crate) fn build_and_execute( + pub(crate) fn build_and_execute( &mut self, transactions: Vec, ommers: Vec
, withdrawals: Option, - client: &impl StateProviderFactory, + provider: &Provider, chain_spec: Arc, - evm_config: &EvmConfig, + executor: &Executor, ) -> Result<(SealedHeader, BundleStateWithReceipts), BlockExecutionError> where - EvmConfig: ConfigureEvm, + Executor: BlockExecutorProvider, + Provider: StateProviderFactory, { - let header = self.build_header_template( - &transactions, - &ommers, - withdrawals.as_ref(), - chain_spec.clone(), - ); + let header = + self.build_header_template(&transactions, &ommers, withdrawals.as_ref(), chain_spec); - let block = Block { + let mut block = Block { header, body: transactions, ommers: ommers.clone(), @@ -449,45 +372,46 @@ impl StorageInner { trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); - // now execute the block - let db = State::builder() - .with_database_boxed(Box::new(StateProviderDatabase::new( - client.latest().map_err(BlockExecutionError::LatestBlock)?, - ))) - .with_bundle_update() - .build(); - let mut executor = EVMProcessor::new_with_state(chain_spec.clone(), db, evm_config); + let mut db = StateProviderDatabase::new( + provider.latest().map_err(BlockExecutionError::LatestBlock)?, + ); - let (bundle_state, gas_used) = self.execute(&block, &mut executor)?; + // TODO(mattsse): At this point we don't know certain fields of the header, so we first + // execute it and then update the header this can be improved by changing the executor + // input, for now we intercept the errors and retry + loop { + match executor.executor(&mut db).execute((&block, U256::ZERO).into()) { + Err(BlockExecutionError::Validation(BlockValidationError::BlockGasUsed { + gas, + .. + })) => { + block.block.header.gas_used = gas.got; + } + Err(BlockExecutionError::Validation(BlockValidationError::ReceiptRootDiff( + err, + ))) => { + block.block.header.receipts_root = err.got; + } + _ => break, + }; + } - let Block { header, body, .. 
} = block.block; - let body = BlockBody { transactions: body, ommers, withdrawals }; + // now execute the block + let BlockExecutionOutput { state, receipts, .. } = + executor.executor(&mut db).execute((&block, U256::ZERO).into())?; + let bundle_state = BundleStateWithReceipts::new( + state, + Receipts::from_block_receipt(receipts), + block.number, + ); - let blob_gas_used = if chain_spec.is_cancun_active_at_timestamp(header.timestamp) { - let mut sum_blob_gas_used = 0; - for tx in &body.transactions { - if let Some(blob_tx) = tx.transaction.as_eip4844() { - sum_blob_gas_used += blob_tx.blob_gas(); - } - } - Some(sum_blob_gas_used) - } else { - None - }; + let Block { mut header, body, .. } = block.block; + let body = BlockBody { transactions: body, ommers, withdrawals }; trace!(target: "consensus::auto", ?bundle_state, ?header, ?body, "executed block, calculating state root and completing header"); - // fill in the rest of the fields - let header = self.complete_header( - header, - &bundle_state, - client, - gas_used, - blob_gas_used, - #[cfg(feature = "optimism")] - chain_spec.as_ref(), - )?; - + // calculate the state root + header.state_root = db.state_root(bundle_state.state())?; trace!(target: "consensus::auto", root=?header.state_root, ?body, "calculated root"); // finally insert into storage diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index 7e2a700ef4b44..42f1268f33125 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -2,7 +2,7 @@ use crate::{mode::MiningMode, Storage}; use futures_util::{future::BoxFuture, FutureExt}; use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; use reth_engine_primitives::EngineTypes; -use reth_evm::ConfigureEvm; +use reth_evm::execute::BlockExecutorProvider; use reth_primitives::{ Block, ChainSpec, IntoRecoveredTransaction, SealedBlockWithSenders, Withdrawals, }; @@ -22,7 +22,7 @@ use 
tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, error, warn}; /// A Future that listens for new ready transactions and puts new blocks into storage -pub struct MiningTask { +pub struct MiningTask { /// The configured chain spec chain_spec: Arc, /// The client used to interact with the state @@ -43,14 +43,14 @@ pub struct MiningTask>, - /// The type that defines how to configure the EVM. - evm_config: EvmConfig, + /// The type used for block execution + block_executor: Executor, } // === impl MiningTask === -impl - MiningTask +impl + MiningTask { /// Creates a new instance of the task #[allow(clippy::too_many_arguments)] @@ -62,7 +62,7 @@ impl storage: Storage, client: Client, pool: Pool, - evm_config: EvmConfig, + block_executor: Executor, ) -> Self { Self { chain_spec, @@ -75,7 +75,7 @@ impl canon_state_notification, queued: Default::default(), pipe_line_events: None, - evm_config, + block_executor, } } @@ -85,13 +85,13 @@ impl } } -impl Future for MiningTask +impl Future for MiningTask where Client: StateProviderFactory + CanonChainTracker + Clone + Unpin + 'static, Pool: TransactionPool + Unpin + 'static, ::Transaction: IntoRecoveredTransaction, Engine: EngineTypes + 'static, - EvmConfig: ConfigureEvm + Clone + Unpin + Send + Sync + 'static, + Executor: BlockExecutorProvider, { type Output = (); @@ -121,7 +121,7 @@ where let pool = this.pool.clone(); let events = this.pipe_line_events.take(); let canon_state_notification = this.canon_state_notification.clone(); - let evm_config = this.evm_config.clone(); + let executor = this.block_executor.clone(); // Create the mining future that creates a block, notifies the engine that drives // the pipeline @@ -145,7 +145,7 @@ where withdrawals.clone(), &client, chain_spec, - &evm_config, + &executor, ) { Ok((new_header, bundle_state)) => { // clear all transactions from pool diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 4e35d06f0f49a..8fb9d3ec3b4e6 100644 
--- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -51,6 +51,7 @@ reth-stages = { workspace = true, features = ["test-utils"] } reth-blockchain-tree = { workspace = true, features = ["test-utils"] } reth-db = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } +reth-evm = { workspace = true, features = ["test-utils"] } reth-rpc-types-compat.workspace = true reth-rpc.workspace = true reth-tracing.workspace = true diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 513987e7581f9..27fc6b44cfa51 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -14,9 +14,9 @@ use reth_downloaders::{ headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_evm_ethereum::EthEvmConfig; +use reth_evm::{either::Either, test_utils::MockExecutorProvider}; +use reth_evm_ethereum::execute::EthExecutorProvider; use reth_interfaces::{ - executor::BlockExecutionError, p2p::{bodies::client::BodiesClient, either::EitherDownloader, headers::client::HeadersClient}, sync::NoopSyncStateUpdater, test_utils::NoopFullBlockClient, @@ -24,13 +24,10 @@ use reth_interfaces::{ use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::{BlockNumber, ChainSpec, FinishedExExHeight, PruneModes, B256}; use reth_provider::{ - providers::BlockchainProvider, - test_utils::{create_test_provider_factory_with_chain_spec, TestExecutorFactory}, - BundleStateWithReceipts, ExecutorFactory, HeaderSyncMode, PrunableBlockExecutor, - StaticFileProviderFactory, + providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, + BundleStateWithReceipts, HeaderSyncMode, StaticFileProviderFactory, }; use reth_prune::Pruner; -use reth_revm::EvmProcessorFactory; use 
reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; @@ -155,31 +152,6 @@ impl Default for TestExecutorConfig { } } -/// A type that represents one of two possible executor factories. -#[derive(Debug, Clone)] -pub enum EitherExecutorFactory { - /// The first factory variant - Left(A), - /// The second factory variant - Right(B), -} - -impl ExecutorFactory for EitherExecutorFactory -where - A: ExecutorFactory, - B: ExecutorFactory, -{ - fn with_state<'a, SP: reth_provider::StateProvider + 'a>( - &'a self, - sp: SP, - ) -> Box + 'a> { - match self { - EitherExecutorFactory::Left(a) => a.with_state::<'a, SP>(sp), - EitherExecutorFactory::Right(b) => b.with_state::<'a, SP>(sp), - } - } -} - /// The basic configuration for a `TestConsensusEngine`, without generics for the client or /// consensus engine. #[derive(Debug)] @@ -366,14 +338,13 @@ where // use either test executor or real executor let executor_factory = match self.base_config.executor_config { TestExecutorConfig::Test(results) => { - let executor_factory = TestExecutorFactory::default(); + let executor_factory = MockExecutorProvider::default(); executor_factory.extend(results); - EitherExecutorFactory::Left(executor_factory) + Either::Left(executor_factory) + } + TestExecutorConfig::Real => { + Either::Right(EthExecutorProvider::ethereum(self.base_config.chain_spec.clone())) } - TestExecutorConfig::Real => EitherExecutorFactory::Right(EvmProcessorFactory::new( - self.base_config.chain_spec.clone(), - EthEvmConfig::default(), - )), }; let static_file_producer = StaticFileProducer::new( diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index ea7cfab8c2368..6fa61e34ff23a 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -16,7 +16,6 @@ reth-evm.workspace = true reth-primitives.workspace = true reth-revm.workspace = true reth-interfaces.workspace = true -reth-provider.workspace = true # 
Ethereum revm-primitives.workspace = true diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 4239fe44924a2..c3dd315f74381 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -1,10 +1,10 @@ //! Ethereum block executor. -use crate::EthEvmConfig; +use crate::{verify::verify_receipts, EthEvmConfig}; use reth_evm::{ execute::{ - BatchBlockOutput, BatchExecutor, EthBlockExecutionInput, EthBlockOutput, Executor, - ExecutorProvider, + BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, + BlockExecutorProvider, Executor, }, ConfigureEvm, ConfigureEvmEnv, }; @@ -13,15 +13,13 @@ use reth_interfaces::{ provider::ProviderError, }; use reth_primitives::{ - BlockWithSenders, ChainSpec, GotExpected, Hardfork, Header, PruneModes, Receipt, Receipts, - Withdrawals, U256, + BlockNumber, BlockWithSenders, ChainSpec, GotExpected, Hardfork, Header, PruneModes, Receipt, + Receipts, Withdrawals, MAINNET, U256, }; -use reth_provider::BundleStateWithReceipts; use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, db::states::bundle_state::BundleRetention, eth_dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, - processor::verify_receipt, stack::InspectorStack, state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, Evm, State, @@ -35,35 +33,33 @@ use tracing::debug; /// Provides executors to execute regular ethereum blocks #[derive(Debug, Clone)] -pub struct EthExecutorProvider { +pub struct EthExecutorProvider { chain_spec: Arc, evm_config: EvmConfig, inspector: Option, - prune_modes: PruneModes, } -impl EthExecutorProvider { +impl EthExecutorProvider { /// Creates a new default ethereum executor provider. pub fn ethereum(chain_spec: Arc) -> Self { Self::new(chain_spec, Default::default()) } + + /// Returns a new provider for the mainnet. 
+ pub fn mainnet() -> Self { + Self::ethereum(MAINNET.clone()) + } } impl EthExecutorProvider { /// Creates a new executor provider. pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, evm_config, inspector: None, prune_modes: PruneModes::none() } + Self { chain_spec, evm_config, inspector: None } } /// Configures an optional inspector stack for debugging. - pub fn with_inspector(mut self, inspector: InspectorStack) -> Self { - self.inspector = Some(inspector); - self - } - - /// Configures the prune modes for the executor. - pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { - self.prune_modes = prune_modes; + pub fn with_inspector(mut self, inspector: Option) -> Self { + self.inspector = inspector; self } } @@ -86,7 +82,7 @@ where } } -impl ExecutorProvider for EthExecutorProvider +impl BlockExecutorProvider for EthExecutorProvider where EvmConfig: ConfigureEvm, EvmConfig: ConfigureEvmEnv, @@ -102,14 +98,14 @@ where self.eth_executor(db) } - fn batch_executor(&self, db: DB) -> Self::BatchExecutor + fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor where DB: Database, { let executor = self.eth_executor(db); EthBatchExecutor { executor, - batch_record: BlockBatchRecord::new(self.prune_modes.clone()), + batch_record: BlockBatchRecord::new(prune_modes), stats: BlockExecutorStats::default(), } } @@ -318,9 +314,11 @@ where // transaction This was replaced with is_success flag. 
// See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 if self.chain_spec().is_byzantium_active_at_block(block.header.number) { - if let Err(error) = - verify_receipt(block.header.receipts_root, block.header.logs_bloom, receipts.iter()) - { + if let Err(error) = verify_receipts( + block.header.receipts_root, + block.header.logs_bloom, + receipts.iter(), + ) { debug!(target: "evm", %error, ?receipts, "receipts verification failed"); return Err(error) }; @@ -382,8 +380,8 @@ where EvmConfig: ConfigureEvmEnv, DB: Database, { - type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; - type Output = EthBlockOutput; + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BlockExecutionOutput; type Error = BlockExecutionError; /// Executes the block and commits the state changes. @@ -394,13 +392,13 @@ where /// /// State changes are committed to the database. fn execute(mut self, input: Self::Input<'_>) -> Result { - let EthBlockExecutionInput { block, total_difficulty } = input; + let BlockExecutionInput { block, total_difficulty } = input; let (receipts, gas_used) = self.execute_and_verify(block, total_difficulty)?; - // prepare the state for extraction - self.state.merge_transitions(BundleRetention::PlainState); + // NOTE: we need to merge keep the reverts for the bundle retention + self.state.merge_transitions(BundleRetention::Reverts); - Ok(EthBlockOutput { state: self.state.take_bundle(), receipts, gas_used }) + Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, gas_used }) } } @@ -433,12 +431,12 @@ where EvmConfig: ConfigureEvmEnv, DB: Database, { - type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; - type Output = BundleStateWithReceipts; + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BatchBlockExecutionOutput; type Error = BlockExecutionError; - fn execute_one(&mut self, input: Self::Input<'_>) -> Result { - let EthBlockExecutionInput { block, total_difficulty } = 
input; + fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { + let BlockExecutionInput { block, total_difficulty } = input; let (receipts, _gas_used) = self.executor.execute_and_verify(block, total_difficulty)?; // prepare the state according to the prune mode @@ -448,18 +446,30 @@ where // store receipts in the set self.batch_record.save_receipts(receipts)?; - Ok(BatchBlockOutput { size_hint: Some(self.executor.state.bundle_size_hint()) }) + if self.batch_record.first_block().is_none() { + self.batch_record.set_first_block(block.number); + } + + Ok(()) } fn finalize(mut self) -> Self::Output { self.stats.log_debug(); - BundleStateWithReceipts::new( + BatchBlockExecutionOutput::new( self.executor.state.take_bundle(), self.batch_record.take_receipts(), self.batch_record.first_block().unwrap_or_default(), ) } + + fn set_tip(&mut self, tip: BlockNumber) { + self.batch_record.set_tip(tip); + } + + fn size_hint(&self) -> Option { + Some(self.executor.state.bundle_state.size_hint()) + } } #[cfg(test)] @@ -468,7 +478,7 @@ mod tests { use reth_primitives::{ bytes, constants::{BEACON_ROOTS_ADDRESS, SYSTEM_ADDRESS}, - keccak256, Account, Block, Bytes, ChainSpecBuilder, ForkCondition, B256, MAINNET, + keccak256, Account, Block, Bytes, ChainSpecBuilder, ForkCondition, B256, }; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, @@ -497,12 +507,7 @@ mod tests { } fn executor_provider(chain_spec: Arc) -> EthExecutorProvider { - EthExecutorProvider { - chain_spec, - evm_config: Default::default(), - inspector: None, - prune_modes: Default::default(), - } + EthExecutorProvider { chain_spec, evm_config: Default::default(), inspector: None } } #[test] @@ -697,7 +702,8 @@ mod tests { let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = + provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); // 
attempt to execute the genesis block with non-zero parent beacon block root, expect err header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); @@ -777,7 +783,8 @@ mod tests { let provider = executor_provider(chain_spec); // execute header - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = + provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); // Now execute a block with the fixed header, ensure that it does not fail executor diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index adcfd700db0d7..88621a66aa9d8 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -16,6 +16,7 @@ use reth_primitives::{ }; use reth_revm::{Database, EvmBuilder}; pub mod execute; +pub mod verify; /// Ethereum-related EVM configuration. #[derive(Debug, Clone, Copy, Default)] diff --git a/crates/ethereum/evm/src/verify.rs b/crates/ethereum/evm/src/verify.rs new file mode 100644 index 0000000000000..6f552fe424224 --- /dev/null +++ b/crates/ethereum/evm/src/verify.rs @@ -0,0 +1,53 @@ +//! Helpers for verifying the receipts. + +use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; +use reth_primitives::{Bloom, GotExpected, Receipt, ReceiptWithBloom, B256}; + +/// Calculate the receipts root, and compare it against against the expected receipts root and logs +/// bloom. +pub fn verify_receipts<'a>( + expected_receipts_root: B256, + expected_logs_bloom: Bloom, + receipts: impl Iterator + Clone, +) -> Result<(), BlockExecutionError> { + // Calculate receipts root. + let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); + let receipts_root = reth_primitives::proofs::calculate_receipt_root(&receipts_with_bloom); + + // Create header log bloom. 
+ let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + + compare_receipts_root_and_logs_bloom( + receipts_root, + logs_bloom, + expected_receipts_root, + expected_logs_bloom, + )?; + + Ok(()) +} + +/// Compare the calculated receipts root with the expected receipts root, also compare +/// the calculated logs bloom with the expected logs bloom. +pub fn compare_receipts_root_and_logs_bloom( + calculated_receipts_root: B256, + calculated_logs_bloom: Bloom, + expected_receipts_root: B256, + expected_logs_bloom: Bloom, +) -> Result<(), BlockExecutionError> { + if calculated_receipts_root != expected_receipts_root { + return Err(BlockValidationError::ReceiptRootDiff( + GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(), + ) + .into()) + } + + if calculated_logs_bloom != expected_logs_bloom { + return Err(BlockValidationError::BloomLogDiff( + GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(), + ) + .into()) + } + + Ok(()) +} diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index f13c471a7a4bd..854dcd95a20f6 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -17,3 +17,11 @@ revm-primitives.workspace = true revm.workspace = true reth-interfaces.workspace = true +futures-util.workspace = true +parking_lot = { workspace = true, optional = true } + +[dev-dependencies] +parking_lot.workspace = true + +[features] +test-utils = ["dep:parking_lot"] \ No newline at end of file diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs new file mode 100644 index 0000000000000..d1ae4ed78ff47 --- /dev/null +++ b/crates/evm/src/either.rs @@ -0,0 +1,119 @@ +//! 
Helper type that represents one of two possible executor types + +use crate::execute::{ + BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, + BlockExecutorProvider, Executor, +}; +use reth_interfaces::{executor::BlockExecutionError, provider::ProviderError}; +use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt}; +use revm_primitives::db::Database; + +// re-export Either +pub use futures_util::future::Either; + +impl BlockExecutorProvider for Either +where + A: BlockExecutorProvider, + B: BlockExecutorProvider, +{ + type Executor> = Either, B::Executor>; + type BatchExecutor> = + Either, B::BatchExecutor>; + + fn executor(&self, db: DB) -> Self::Executor + where + DB: Database, + { + match self { + Either::Left(a) => Either::Left(a.executor(db)), + Either::Right(b) => Either::Right(b.executor(db)), + } + } + + fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor + where + DB: Database, + { + match self { + Either::Left(a) => Either::Left(a.batch_executor(db, prune_modes)), + Either::Right(b) => Either::Right(b.batch_executor(db, prune_modes)), + } + } +} + +impl Executor for Either +where + A: for<'a> Executor< + DB, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + Output = BlockExecutionOutput, + Error = BlockExecutionError, + >, + B: for<'a> Executor< + DB, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + Output = BlockExecutionOutput, + Error = BlockExecutionError, + >, + DB: Database, +{ + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BlockExecutionOutput; + type Error = BlockExecutionError; + + fn execute(self, input: Self::Input<'_>) -> Result { + match self { + Either::Left(a) => a.execute(input), + Either::Right(b) => b.execute(input), + } + } +} + +impl BatchExecutor for Either +where + A: for<'a> BatchExecutor< + DB, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + Output = BatchBlockExecutionOutput, + Error = 
BlockExecutionError, + >, + B: for<'a> BatchExecutor< + DB, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + Output = BatchBlockExecutionOutput, + Error = BlockExecutionError, + >, + DB: Database, +{ + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BatchBlockExecutionOutput; + type Error = BlockExecutionError; + + fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { + match self { + Either::Left(a) => a.execute_one(input), + Either::Right(b) => b.execute_one(input), + } + } + + fn finalize(self) -> Self::Output { + match self { + Either::Left(a) => a.finalize(), + Either::Right(b) => b.finalize(), + } + } + + fn set_tip(&mut self, tip: BlockNumber) { + match self { + Either::Left(a) => a.set_tip(tip), + Either::Right(b) => b.set_tip(tip), + } + } + + fn size_hint(&self) -> Option { + match self { + Either::Left(a) => a.size_hint(), + Either::Right(b) => b.size_hint(), + } + } +} diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index b8c1536029a9d..7b3e586467252 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -1,7 +1,7 @@ //! Traits for execution. -use reth_interfaces::provider::ProviderError; -use reth_primitives::U256; +use reth_interfaces::{executor::BlockExecutionError, provider::ProviderError}; +use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt, Receipts, U256}; use revm::db::BundleState; use revm_primitives::db::Database; @@ -21,8 +21,8 @@ pub trait Executor { fn execute(self, input: Self::Input<'_>) -> Result; } -/// An executor that can execute multiple blocks in a row and keep track of the state over the -/// entire batch. +/// A general purpose executor that can execute multiple inputs in sequence and keep track of the +/// state over the entire batch. pub trait BatchExecutor { /// The input type for the executor. 
type Input<'a>; @@ -32,17 +32,20 @@ pub trait BatchExecutor { type Error; /// Executes the next block in the batch and update the state internally. - fn execute_one(&mut self, input: Self::Input<'_>) -> Result; + fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error>; /// Finishes the batch and return the final state. fn finalize(self) -> Self::Output; -} -/// The output of an executed block in a batch. -#[derive(Debug, Clone, Copy)] -pub struct BatchBlockOutput { - /// The size hint of the batch's tracked state. - pub size_hint: Option, + /// Set the expected tip of the batch. + /// + /// This can be used to optimize state pruning during execution. + fn set_tip(&mut self, tip: BlockNumber); + + /// The size hint of the batch's tracked state size. + /// + /// This is used to optimize DB commits depending on the size of the state. + fn size_hint(&self) -> Option; } /// The output of an ethereum block. @@ -51,7 +54,7 @@ pub struct BatchBlockOutput { /// /// TODO(mattsse): combine with BundleStateWithReceipts #[derive(Debug)] -pub struct EthBlockOutput { +pub struct BlockExecutionOutput { /// The changed state of the block after execution. pub state: BundleState, /// All the receipts of the transactions in the block. @@ -60,42 +63,94 @@ pub struct EthBlockOutput { pub gas_used: u64, } +/// The output of a batch of ethereum blocks. +#[derive(Debug)] +pub struct BatchBlockExecutionOutput { + /// Bundle state with reverts. + pub bundle: BundleState, + /// The collection of receipts. + /// Outer vector stores receipts for each block sequentially. + /// The inner vector stores receipts ordered by transaction number. + /// + /// If receipt is None it means it is pruned. + pub receipts: Receipts, + /// First block of bundle state. + pub first_block: BlockNumber, +} + +impl BatchBlockExecutionOutput { + /// Create Bundle State. 
+ pub fn new(bundle: BundleState, receipts: Receipts, first_block: BlockNumber) -> Self { + Self { bundle, receipts, first_block } + } +} + /// A helper type for ethereum block inputs that consists of a block and the total difficulty. #[derive(Debug)] -pub struct EthBlockExecutionInput<'a, Block> { +pub struct BlockExecutionInput<'a, Block> { /// The block to execute. pub block: &'a Block, /// The total difficulty of the block. pub total_difficulty: U256, } -impl<'a, Block> EthBlockExecutionInput<'a, Block> { +impl<'a, Block> BlockExecutionInput<'a, Block> { /// Creates a new input. pub fn new(block: &'a Block, total_difficulty: U256) -> Self { Self { block, total_difficulty } } } -impl<'a, Block> From<(&'a Block, U256)> for EthBlockExecutionInput<'a, Block> { +impl<'a, Block> From<(&'a Block, U256)> for BlockExecutionInput<'a, Block> { fn from((block, total_difficulty): (&'a Block, U256)) -> Self { Self::new(block, total_difficulty) } } -/// A type that can create a new executor. -pub trait ExecutorProvider: Send + Sync + Clone { +/// A type that can create a new executor for block execution. +pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// An executor that can execute a single block given a database. - type Executor>: Executor; + /// + /// # Verification + /// + /// The on [Executor::execute] the executor is expected to validate the execution output of the + /// input, this includes: + /// - Cumulative gas used must match the input's gas used. + /// - Receipts must match the input's receipts root. + /// + /// It is not expected to validate the state trie root, this must be done by the caller using + /// the returned state. + type Executor>: for<'a> Executor< + DB, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + Output = BlockExecutionOutput, + Error = BlockExecutionError, + >; + /// An executor that can execute a batch of blocks given a database. 
+ type BatchExecutor>: for<'a> BatchExecutor< + DB, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + // TODO: change to bundle state with receipts + Output = BatchBlockExecutionOutput, + Error = BlockExecutionError, + >; - type BatchExecutor>: BatchExecutor; /// Creates a new executor for single block execution. + /// + /// This is used to execute a single block and get the changed state. fn executor(&self, db: DB) -> Self::Executor where DB: Database; - /// Creates a new batch executor - fn batch_executor(&self, db: DB) -> Self::BatchExecutor + /// Creates a new batch executor with the given database and pruning modes. + /// + /// Batch executor is used to execute multiple blocks in sequence and keep track of the state + /// during historical sync which involves executing multiple blocks in sequence. + /// + /// The pruning modes are used to determine which parts of the state should be kept during + /// execution. + fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor where DB: Database; } @@ -103,13 +158,14 @@ pub trait ExecutorProvider: Send + Sync + Clone { #[cfg(test)] mod tests { use super::*; + use reth_primitives::Block; use revm::db::{CacheDB, EmptyDBTyped}; use std::marker::PhantomData; #[derive(Clone, Default)] struct TestExecutorProvider; - impl ExecutorProvider for TestExecutorProvider { + impl BlockExecutorProvider for TestExecutorProvider { type Executor> = TestExecutor; type BatchExecutor> = TestExecutor; @@ -120,7 +176,7 @@ mod tests { TestExecutor(PhantomData) } - fn batch_executor(&self, _db: DB) -> Self::BatchExecutor + fn batch_executor(&self, _db: DB, _prune_modes: PruneModes) -> Self::BatchExecutor where DB: Database, { @@ -131,28 +187,35 @@ mod tests { struct TestExecutor(PhantomData); impl Executor for TestExecutor { - type Input<'a> = &'static str; - type Output = (); - type Error = String; + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BlockExecutionOutput; + type Error 
= BlockExecutionError; fn execute(self, _input: Self::Input<'_>) -> Result { - Ok(()) + Err(BlockExecutionError::UnavailableForTest) } } impl BatchExecutor for TestExecutor { - type Input<'a> = &'static str; - type Output = (); - type Error = String; - - fn execute_one( - &mut self, - _input: Self::Input<'_>, - ) -> Result { - Ok(BatchBlockOutput { size_hint: None }) + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BatchBlockExecutionOutput; + type Error = BlockExecutionError; + + fn execute_one(&mut self, _input: Self::Input<'_>) -> Result<(), Self::Error> { + Ok(()) + } + + fn finalize(self) -> Self::Output { + todo!() } - fn finalize(self) -> Self::Output {} + fn set_tip(&mut self, _tip: BlockNumber) { + todo!() + } + + fn size_hint(&self) -> Option { + None + } } #[test] @@ -160,6 +223,9 @@ mod tests { let provider = TestExecutorProvider; let db = CacheDB::>::default(); let executor = provider.executor(db); - executor.execute("test").unwrap(); + let block = + Block { header: Default::default(), body: vec![], ommers: vec![], withdrawals: None }; + let block = BlockWithSenders::new(block, Default::default()).unwrap(); + let _ = executor.execute(BlockExecutionInput::new(&block, U256::ZERO)); } } diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index d8e50b759ed2a..c69e33d652a67 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -12,8 +12,13 @@ use reth_primitives::{revm::env::fill_block_env, Address, ChainSpec, Header, Tra use revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, SpecId, TxEnv}; +pub mod either; pub mod execute; +#[cfg(any(test, feature = "test-utils"))] +/// test helpers for mocking executor +pub mod test_utils; + /// Trait for configuring the EVM for executing full blocks. 
pub trait ConfigureEvm: ConfigureEvmEnv { /// Associated type for the default external context that should be configured for the EVM. diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs new file mode 100644 index 0000000000000..e0ee4691704b2 --- /dev/null +++ b/crates/evm/src/test_utils.rs @@ -0,0 +1,80 @@ +//! Helpers for testing. + +use crate::execute::{ + BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, + BlockExecutorProvider, Executor, +}; +use parking_lot::Mutex; +use reth_interfaces::{executor::BlockExecutionError, provider::ProviderError}; +use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt}; +use revm_primitives::db::Database; +use std::sync::Arc; + +/// A [BlockExecutorProvider] that returns mocked execution results. +#[derive(Clone, Debug, Default)] +pub struct MockExecutorProvider { + exec_results: Arc>>, +} + +impl MockExecutorProvider { + /// Extend the mocked execution results + pub fn extend(&self, results: impl IntoIterator>) { + self.exec_results.lock().extend(results.into_iter().map(Into::into)); + } +} + +impl BlockExecutorProvider for MockExecutorProvider { + type Executor> = Self; + + type BatchExecutor> = Self; + + fn executor(&self, _: DB) -> Self::Executor + where + DB: Database, + { + self.clone() + } + + fn batch_executor(&self, _: DB, _: PruneModes) -> Self::BatchExecutor + where + DB: Database, + { + self.clone() + } +} + +impl Executor for MockExecutorProvider { + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BlockExecutionOutput; + type Error = BlockExecutionError; + + fn execute(self, _: Self::Input<'_>) -> Result { + let BatchBlockExecutionOutput { bundle, receipts, .. 
} = + self.exec_results.lock().pop().unwrap(); + Ok(BlockExecutionOutput { + state: bundle, + receipts: receipts.into_iter().flatten().flatten().collect(), + gas_used: 0, + }) + } +} + +impl BatchExecutor for MockExecutorProvider { + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BatchBlockExecutionOutput; + type Error = BlockExecutionError; + + fn execute_one(&mut self, _: Self::Input<'_>) -> Result<(), Self::Error> { + Ok(()) + } + + fn finalize(self) -> Self::Output { + self.exec_results.lock().pop().unwrap() + } + + fn set_tip(&mut self, _: BlockNumber) {} + + fn size_hint(&self) -> Option { + None + } +} diff --git a/crates/exex/src/context.rs b/crates/exex/src/context.rs index 733047400ac15..7cedb4977868b 100644 --- a/crates/exex/src/context.rs +++ b/crates/exex/src/context.rs @@ -53,11 +53,20 @@ impl FullNodeTypes for ExExContext { impl FullNodeComponents for ExExContext { type Pool = Node::Pool; type Evm = Node::Evm; + type Executor = Node::Executor; fn pool(&self) -> &Self::Pool { self.components.pool() } + fn evm_config(&self) -> &Self::Evm { + self.components.evm_config() + } + + fn block_executor(&self) -> &Self::Executor { + self.components.block_executor() + } + fn provider(&self) -> &Self::Provider { self.components.provider() } @@ -73,8 +82,4 @@ impl FullNodeComponents for ExExContext { fn task_executor(&self) -> &TaskExecutor { self.components.task_executor() } - - fn evm_config(&self) -> &Self::Evm { - self.components.evm_config() - } } diff --git a/crates/node-ethereum/src/evm.rs b/crates/node-ethereum/src/evm.rs index a5528d74a4b64..d710d8d8d4528 100644 --- a/crates/node-ethereum/src/evm.rs +++ b/crates/node-ethereum/src/evm.rs @@ -1,4 +1,6 @@ //! 
Ethereum EVM support +#[doc(inline)] +pub use reth_evm_ethereum::execute::EthExecutorProvider; #[doc(inline)] pub use reth_evm_ethereum::EthEvmConfig; diff --git a/crates/node-ethereum/src/lib.rs b/crates/node-ethereum/src/lib.rs index cea2e7be0d02a..44ec6836c8750 100644 --- a/crates/node-ethereum/src/lib.rs +++ b/crates/node-ethereum/src/lib.rs @@ -11,7 +11,7 @@ pub use reth_ethereum_engine_primitives::EthEngineTypes; pub mod evm; -pub use evm::EthEvmConfig; +pub use evm::{EthEvmConfig, EthExecutorProvider}; pub mod node; pub use node::EthereumNode; diff --git a/crates/node-ethereum/src/node.rs b/crates/node-ethereum/src/node.rs index 815b949de4aba..235130b426348 100644 --- a/crates/node-ethereum/src/node.rs +++ b/crates/node-ethereum/src/node.rs @@ -2,6 +2,7 @@ use crate::{EthEngineTypes, EthEvmConfig}; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; +use reth_evm_ethereum::execute::EthExecutorProvider; use reth_network::NetworkHandle; use reth_node_builder::{ components::{ @@ -76,9 +77,18 @@ where Node: FullNodeTypes, { type EVM = EthEvmConfig; + type Executor = EthExecutorProvider; - async fn build_evm(self, _ctx: &BuilderContext) -> eyre::Result { - Ok(EthEvmConfig::default()) + async fn build_evm( + self, + ctx: &BuilderContext, + ) -> eyre::Result<(Self::EVM, Self::Executor)> { + let chain_spec = ctx.chain_spec(); + let evm_config = EthEvmConfig::default(); + let executor = + EthExecutorProvider::new(chain_spec, evm_config).with_inspector(ctx.inspector_stack()); + + Ok((evm_config, executor)) } } diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 0a76f75046696..355a7ecab02a3 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -5,6 +5,7 @@ use reth_db::{ database::Database, database_metrics::{DatabaseMetadata, DatabaseMetrics}, }; +use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_payload_builder::PayloadBuilderHandle; use 
reth_provider::FullProvider; @@ -88,12 +89,18 @@ pub trait FullNodeComponents: FullNodeTypes + 'static { /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. type Evm: ConfigureEvm; + /// The type that knows how to execute blocks. + type Executor: BlockExecutorProvider; + /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; /// Returns the node's evm config. fn evm_config(&self) -> &Self::Evm; + /// Returns the node's executor type. + fn block_executor(&self) -> &Self::Executor; + /// Returns the provider of the node. fn provider(&self) -> &Self::Provider; diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 136c27d7c4007..68c1d5f0c3085 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -17,6 +17,7 @@ reth-auto-seal-consensus.workspace = true reth-beacon-consensus.workspace = true reth-blockchain-tree.workspace = true reth-exex.workspace = true +reth-evm.workspace = true reth-provider.workspace = true reth-revm.workspace = true reth-db.workspace = true diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index b6f0a191e3e53..0457bbe3e9310 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -27,6 +27,7 @@ use reth_node_core::{ }; use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, ChainSpec}; use reth_provider::{providers::BlockchainProvider, ChainSpecProvider}; +use reth_revm::stack::{InspectorStack, InspectorStackConfig}; use reth_tasks::TaskExecutor; use reth_transaction_pool::{PoolConfig, TransactionPool}; pub use states::*; @@ -460,6 +461,28 @@ impl BuilderContext { &self.config } + /// Returns an inspector stack if configured. + /// + /// This can be used to debug block execution. 
+ pub fn inspector_stack(&self) -> Option { + use reth_revm::stack::Hook; + let stack_config = InspectorStackConfig { + use_printer_tracer: self.config.debug.print_inspector, + hook: if let Some(hook_block) = self.config.debug.hook_block { + Hook::Block(hook_block) + } else if let Some(tx) = self.config.debug.hook_transaction { + Hook::Transaction(tx) + } else if self.config.debug.hook_all { + Hook::All + } else { + // no inspector + return None + }, + }; + + Some(InspectorStack::new(stack_config)) + } + /// Returns the data dir of the node. /// /// This gives access to all relevant files and directories of the node's datadir. diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index 753978de1968a..103e4f17467e5 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -98,6 +98,7 @@ impl> FullNodeTypes for NodeAdapter impl> FullNodeComponents for NodeAdapter { type Pool = C::Pool; type Evm = C::Evm; + type Executor = C::Executor; fn pool(&self) -> &Self::Pool { self.components.pool() @@ -107,6 +108,10 @@ impl> FullNodeComponents for NodeAdapter< self.components.evm_config() } + fn block_executor(&self) -> &Self::Executor { + self.components.block_executor() + } + fn provider(&self) -> &Self::Provider { &self.provider } diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index d17cdc8eea88d..abeb2ca054f04 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -7,6 +7,7 @@ use crate::{ }, BuilderContext, ConfigureEvm, FullNodeTypes, }; +use reth_evm::execute::BlockExecutorProvider; use reth_transaction_pool::TransactionPool; use std::{future::Future, marker::PhantomData}; @@ -232,7 +233,7 @@ where PayloadB: PayloadServiceBuilder, ExecB: ExecutorBuilder, { - type Components = Components; + type Components = Components; async fn build_components( self, @@ -246,12 
+247,12 @@ where _marker, } = self; - let evm_config = evm_builder.build_evm(context).await?; + let (evm_config, executor) = evm_builder.build_evm(context).await?; let pool = pool_builder.build_pool(context).await?; let network = network_builder.build_network(context, pool.clone()).await?; let payload_builder = payload_builder.spawn_payload_service(context, pool.clone()).await?; - Ok(Components { transaction_pool: pool, evm_config, network, payload_builder }) + Ok(Components { transaction_pool: pool, evm_config, network, payload_builder, executor }) } } @@ -287,15 +288,16 @@ pub trait NodeComponentsBuilder: Send { ) -> impl Future> + Send; } -impl NodeComponentsBuilder for F +impl NodeComponentsBuilder for F where Node: FullNodeTypes, F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: Future>> + Send, + Fut: Future>> + Send, Pool: TransactionPool + Unpin + 'static, EVM: ConfigureEvm, + Executor: BlockExecutorProvider, { - type Components = Components; + type Components = Components; fn build_components( self, diff --git a/crates/node/builder/src/components/execute.rs b/crates/node/builder/src/components/execute.rs index 01684e9c2bccb..891f8e01feecb 100644 --- a/crates/node/builder/src/components/execute.rs +++ b/crates/node/builder/src/components/execute.rs @@ -1,34 +1,41 @@ //! EVM component for the node builder. use crate::{BuilderContext, FullNodeTypes}; +use reth_evm::execute::BlockExecutorProvider; use reth_node_api::ConfigureEvm; use std::future::Future; /// A type that knows how to build the executor types. pub trait ExecutorBuilder: Send { - /// The EVM config to build. + /// The EVM config to use. + /// + /// This provides the node with the necessary configuration to configure an EVM. type EVM: ConfigureEvm; - // TODO(mattsse): integrate `Executor` + + /// The type that knows how to execute blocks. + type Executor: BlockExecutorProvider; /// Creates the EVM config. 
fn build_evm( self, ctx: &BuilderContext, - ) -> impl Future> + Send; + ) -> impl Future> + Send; } -impl ExecutorBuilder for F +impl ExecutorBuilder for F where Node: FullNodeTypes, EVM: ConfigureEvm, + Executor: BlockExecutorProvider, F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: Future> + Send, + Fut: Future> + Send, { type EVM = EVM; + type Executor = Executor; fn build_evm( self, ctx: &BuilderContext, - ) -> impl Future> { + ) -> impl Future> { self(ctx) } } diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index 24d83da0da57b..ef5ea49956556 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -13,6 +13,7 @@ pub use execute::*; pub use network::*; pub use payload::*; pub use pool::*; +use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_payload_builder::PayloadBuilderHandle; use reth_transaction_pool::TransactionPool; @@ -35,12 +36,18 @@ pub trait NodeComponents: Clone + Send + Sync + 'stati /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. type Evm: ConfigureEvm; + /// The type that knows how to execute blocks. + type Executor: BlockExecutorProvider; + /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; /// Returns the node's evm config. fn evm_config(&self) -> &Self::Evm; + /// Returns the node's executor type. + fn block_executor(&self) -> &Self::Executor; + /// Returns the handle to the network fn network(&self) -> &NetworkHandle; @@ -52,25 +59,29 @@ pub trait NodeComponents: Clone + Send + Sync + 'stati /// /// This provides access to all the components of the node. #[derive(Debug)] -pub struct Components { +pub struct Components { /// The transaction pool of the node. pub transaction_pool: Pool, /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. 
pub evm_config: EVM, + /// The node's executor type used to execute individual blocks and batches of blocks. + pub executor: Executor, /// The network implementation of the node. pub network: NetworkHandle, /// The handle to the payload builder service. pub payload_builder: PayloadBuilderHandle, } -impl NodeComponents for Components +impl NodeComponents for Components where Node: FullNodeTypes, Pool: TransactionPool + Unpin + 'static, EVM: ConfigureEvm, + Executor: BlockExecutorProvider, { type Pool = Pool; type Evm = EVM; + type Executor = Executor; fn pool(&self) -> &Self::Pool { &self.transaction_pool @@ -80,6 +91,10 @@ where &self.evm_config } + fn block_executor(&self) -> &Self::Executor { + &self.executor + } + fn network(&self) -> &NetworkHandle { &self.network } @@ -89,16 +104,18 @@ where } } -impl Clone for Components +impl Clone for Components where Node: FullNodeTypes, Pool: TransactionPool, EVM: ConfigureEvm, + Executor: BlockExecutorProvider, { fn clone(&self) -> Self { Self { transaction_pool: self.transaction_pool.clone(), evm_config: self.evm_config.clone(), + executor: self.executor.clone(), network: self.network.clone(), payload_builder: self.payload_builder.clone(), } diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 201965fa9b33e..e8c5b2967eb50 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -30,7 +30,6 @@ use reth_node_core::{ use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; use reth_primitives::format_ether; use reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions}; -use reth_revm::EvmProcessorFactory; use reth_rpc_engine_api::EngineApi; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; @@ -157,7 +156,7 @@ where let tree_externals = TreeExternals::new( ctx.provider_factory().clone(), consensus.clone(), - EvmProcessorFactory::new(ctx.chain_spec(), components.evm_config().clone()), + 
components.block_executor().clone(), ); let tree = BlockchainTree::new(tree_externals, tree_config, ctx.prune_modes())? .with_sync_metrics_tx(sync_metrics_tx.clone()) @@ -303,7 +302,7 @@ where consensus_engine_tx.clone(), canon_state_notification_sender, mining_mode, - node_adapter.components.evm_config().clone(), + node_adapter.components.block_executor().clone(), ) .build(); @@ -318,7 +317,7 @@ where ctx.prune_config(), max_block, static_file_producer, - node_adapter.components.evm_config().clone(), + node_adapter.components.block_executor().clone(), pipeline_exex_handle, ) .await?; @@ -341,7 +340,7 @@ where ctx.prune_config(), max_block, static_file_producer, - node_adapter.components.evm_config().clone(), + node_adapter.components.block_executor().clone(), pipeline_exex_handle, ) .await?; diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 03bf45893369b..8033ab1c68b49 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -1,6 +1,5 @@ //! Helpers for setting up parts of the node. 
-use crate::ConfigureEvm; use reth_config::{config::StageConfig, PruneConfig}; use reth_consensus::Consensus; use reth_db::database::Database; @@ -8,6 +7,7 @@ use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; +use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_interfaces::p2p::{ bodies::{client::BodiesClient, downloader::BodyDownloader}, @@ -18,7 +18,6 @@ use reth_node_core::{ primitives::{BlockNumber, B256}, }; use reth_provider::{HeaderSyncMode, ProviderFactory}; -use reth_revm::stack::{Hook, InspectorStackConfig}; use reth_stages::{ prelude::DefaultStages, stages::{ @@ -36,7 +35,7 @@ use tokio::sync::watch; /// Constructs a [Pipeline] that's wired to the network #[allow(clippy::too_many_arguments)] -pub async fn build_networked_pipeline( +pub async fn build_networked_pipeline( node_config: &NodeConfig, config: &StageConfig, client: Client, @@ -47,13 +46,13 @@ pub async fn build_networked_pipeline( prune_config: Option, max_block: Option, static_file_producer: StaticFileProducer, - evm_config: EvmConfig, + executor: Executor, exex_manager_handle: ExExManagerHandle, ) -> eyre::Result> where DB: Database + Unpin + Clone + 'static, Client: HeadersClient + BodiesClient + Clone + 'static, - EvmConfig: ConfigureEvm + Clone + 'static, + Executor: BlockExecutorProvider, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers) @@ -75,7 +74,7 @@ where metrics_tx, prune_config, static_file_producer, - evm_config, + executor, exex_manager_handle, ) .await?; @@ -85,7 +84,7 @@ where /// Builds the [Pipeline] with the given [ProviderFactory] and downloaders. 
#[allow(clippy::too_many_arguments)] -pub async fn build_pipeline( +pub async fn build_pipeline( node_config: &NodeConfig, provider_factory: ProviderFactory, stage_config: &StageConfig, @@ -96,14 +95,14 @@ pub async fn build_pipeline( metrics_tx: reth_stages::MetricEventsSender, prune_config: Option, static_file_producer: StaticFileProducer, - evm_config: EvmConfig, + executor: Executor, exex_manager_handle: ExExManagerHandle, ) -> eyre::Result> where DB: Database + Clone + 'static, H: HeaderDownloader + 'static, B: BodyDownloader + 'static, - EvmConfig: ConfigureEvm + Clone + 'static, + Executor: BlockExecutorProvider, { let mut builder = Pipeline::builder(); @@ -113,22 +112,6 @@ where } let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let factory = reth_revm::EvmProcessorFactory::new(node_config.chain.clone(), evm_config); - - let stack_config = InspectorStackConfig { - use_printer_tracer: node_config.debug.print_inspector, - hook: if let Some(hook_block) = node_config.debug.hook_block { - Hook::Block(hook_block) - } else if let Some(tx) = node_config.debug.hook_transaction { - Hook::Transaction(tx) - } else if node_config.debug.hook_all { - Hook::All - } else { - Hook::None - }, - }; - - let factory = factory.with_stack_config(stack_config); let prune_modes = prune_config.map(|prune| prune.segments).unwrap_or_default(); @@ -147,7 +130,7 @@ where Arc::clone(&consensus), header_downloader, body_downloader, - factory.clone(), + executor.clone(), stage_config.etl.clone(), ) .set(SenderRecoveryStage { @@ -155,7 +138,7 @@ where }) .set( ExecutionStage::new( - factory, + executor, ExecutionStageThresholds { max_blocks: stage_config.execution.max_blocks, max_changes: stage_config.execution.max_changes, diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index fbffa12455113..8e5afc5efce00 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -19,6 +19,7 @@ reth-interfaces.workspace = true reth-provider.workspace 
= true # Optimism +revm.workspace = true revm-primitives.workspace = true # misc diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 0a5e057806f49..2ea32782c7f06 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -1,10 +1,10 @@ //! Optimism block executor. -use crate::OptimismEvmConfig; +use crate::{l1::ensure_create2_deployer, verify::verify_receipts, OptimismEvmConfig}; use reth_evm::{ execute::{ - BatchBlockOutput, BatchExecutor, EthBlockExecutionInput, EthBlockOutput, Executor, - ExecutorProvider, + BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, + BlockExecutorProvider, Executor, }, ConfigureEvm, ConfigureEvmEnv, }; @@ -13,16 +13,12 @@ use reth_interfaces::{ provider::ProviderError, }; use reth_primitives::{ - proofs::calculate_receipt_root_optimism, BlockWithSenders, Bloom, Bytes, ChainSpec, - GotExpected, Hardfork, Header, PruneModes, Receipt, ReceiptWithBloom, Receipts, TxType, - Withdrawals, B256, U256, + BlockNumber, BlockWithSenders, Bytes, ChainSpec, GotExpected, Hardfork, Header, PruneModes, + Receipt, Receipts, TxType, Withdrawals, U256, }; -use reth_provider::BundleStateWithReceipts; use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, db::states::bundle_state::BundleRetention, - optimism::ensure_create2_deployer, - processor::compare_receipts_root_and_logs_bloom, stack::InspectorStack, state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, Evm, State, @@ -36,14 +32,13 @@ use tracing::{debug, trace}; /// Provides executors to execute regular ethereum blocks #[derive(Debug, Clone)] -pub struct OpExecutorProvider { +pub struct OpExecutorProvider { chain_spec: Arc, evm_config: EvmConfig, inspector: Option, - prune_modes: PruneModes, } -impl OpExecutorProvider { +impl OpExecutorProvider { /// Creates a new default optimism executor provider. 
pub fn optimism(chain_spec: Arc) -> Self { Self::new(chain_spec, Default::default()) @@ -53,7 +48,7 @@ impl OpExecutorProvider { impl OpExecutorProvider { /// Creates a new executor provider. pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, evm_config, inspector: None, prune_modes: PruneModes::none() } + Self { chain_spec, evm_config, inspector: None } } /// Configures an optional inspector stack for debugging. @@ -61,12 +56,6 @@ impl OpExecutorProvider { self.inspector = inspector; self } - - /// Configures the prune modes for the executor. - pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { - self.prune_modes = prune_modes; - self - } } impl OpExecutorProvider @@ -87,7 +76,7 @@ where } } -impl ExecutorProvider for OpExecutorProvider +impl BlockExecutorProvider for OpExecutorProvider where EvmConfig: ConfigureEvm, EvmConfig: ConfigureEvmEnv, @@ -102,14 +91,14 @@ where self.op_executor(db) } - fn batch_executor(&self, db: DB) -> Self::BatchExecutor + fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor where DB: Database, { let executor = self.op_executor(db); OpBatchExecutor { executor, - batch_record: BlockBatchRecord::new(self.prune_modes.clone()), + batch_record: BlockBatchRecord::new(prune_modes), stats: BlockExecutorStats::default(), } } @@ -370,7 +359,7 @@ where // transaction This was replaced with is_success flag. 
// See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 if self.chain_spec().is_byzantium_active_at_block(block.header.number) { - if let Err(error) = verify_receipt_optimism( + if let Err(error) = verify_receipts( block.header.receipts_root, block.header.logs_bloom, receipts.iter(), @@ -424,8 +413,8 @@ where EvmConfig: ConfigureEvmEnv, DB: Database, { - type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; - type Output = EthBlockOutput; + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BlockExecutionOutput; type Error = BlockExecutionError; /// Executes the block and commits the state changes. @@ -436,13 +425,13 @@ where /// /// State changes are committed to the database. fn execute(mut self, input: Self::Input<'_>) -> Result { - let EthBlockExecutionInput { block, total_difficulty } = input; + let BlockExecutionInput { block, total_difficulty } = input; let (receipts, gas_used) = self.execute_and_verify(block, total_difficulty)?; - // prepare the state for extraction - self.state.merge_transitions(BundleRetention::PlainState); + // NOTE: we need to merge keep the reverts for the bundle retention + self.state.merge_transitions(BundleRetention::Reverts); - Ok(EthBlockOutput { state: self.state.take_bundle(), receipts, gas_used }) + Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, gas_used }) } } @@ -478,12 +467,12 @@ where EvmConfig: ConfigureEvmEnv, DB: Database, { - type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; - type Output = BundleStateWithReceipts; + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BatchBlockExecutionOutput; type Error = BlockExecutionError; - fn execute_one(&mut self, input: Self::Input<'_>) -> Result { - let EthBlockExecutionInput { block, total_difficulty } = input; + fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { + let BlockExecutionInput { block, total_difficulty } = input; let (receipts, 
_gas_used) = self.executor.execute_and_verify(block, total_difficulty)?; // prepare the state according to the prune mode @@ -493,45 +482,30 @@ where // store receipts in the set self.batch_record.save_receipts(receipts)?; - Ok(BatchBlockOutput { size_hint: Some(self.executor.state.bundle_size_hint()) }) + if self.batch_record.first_block().is_none() { + self.batch_record.set_first_block(block.number); + } + + Ok(()) } fn finalize(mut self) -> Self::Output { - // TODO: track stats self.stats.log_debug(); - BundleStateWithReceipts::new( + BatchBlockExecutionOutput::new( self.executor.state.take_bundle(), self.batch_record.take_receipts(), self.batch_record.first_block().unwrap_or_default(), ) } -} -/// Verify the calculated receipts root against the expected receipts root. -pub fn verify_receipt_optimism<'a>( - expected_receipts_root: B256, - expected_logs_bloom: Bloom, - receipts: impl Iterator + Clone, - chain_spec: &ChainSpec, - timestamp: u64, -) -> Result<(), BlockExecutionError> { - // Calculate receipts root. - let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); - let receipts_root = - calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); - - // Create header log bloom. 
- let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); - - compare_receipts_root_and_logs_bloom( - receipts_root, - logs_bloom, - expected_receipts_root, - expected_logs_bloom, - )?; - - Ok(()) + fn set_tip(&mut self, tip: BlockNumber) { + self.batch_record.set_tip(tip); + } + + fn size_hint(&self) -> Option { + Some(self.executor.state.bundle_state.size_hint()) + } } #[cfg(test)] @@ -574,12 +548,7 @@ mod tests { } fn executor_provider(chain_spec: Arc) -> OpExecutorProvider { - OpExecutorProvider { - chain_spec, - evm_config: Default::default(), - inspector: None, - prune_modes: Default::default(), - } + OpExecutorProvider { chain_spec, evm_config: Default::default(), inspector: None } } #[test] @@ -626,7 +595,8 @@ mod tests { ); let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = + provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); @@ -706,7 +676,8 @@ mod tests { ); let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = + provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); diff --git a/crates/revm/src/optimism/mod.rs b/crates/optimism/evm/src/l1.rs similarity index 97% rename from crates/revm/src/optimism/mod.rs rename to crates/optimism/evm/src/l1.rs index 0dc6c687704eb..896cbc36ada5e 100644 --- a/crates/revm/src/optimism/mod.rs +++ b/crates/optimism/evm/src/l1.rs @@ -1,3 +1,5 @@ +//! 
Optimism-specific implementation and utilities for the executor + use reth_interfaces::{ executor::{self as reth_executor, BlockExecutionError}, RethError, @@ -10,14 +12,13 @@ use revm::{ use std::sync::Arc; use tracing::trace; -/// Optimism-specific processor implementation for the `EVMProcessor` -pub mod processor; - /// The address of the create2 deployer const CREATE_2_DEPLOYER_ADDR: Address = address!("13b0D85CcB8bf860b6b79AF3029fCA081AE9beF2"); + /// The codehash of the create2 deployer contract. const CREATE_2_DEPLOYER_CODEHASH: B256 = b256!("b0550b5b431e30d38000efb7107aaa0ade03d48a7198a140edda9d27134468b2"); + /// The raw bytecode of the create2 deployer contract. const CREATE_2_DEPLOYER_BYTECODE: [u8; 1584] = hex!("6080604052600436106100435760003560e01c8063076c37b21461004f578063481286e61461007157806356299481146100ba57806366cfa057146100da57600080fd5b3661004a57005b600080fd5b34801561005b57600080fd5b5061006f61006a366004610327565b6100fa565b005b34801561007d57600080fd5b5061009161008c366004610327565b61014a565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b3480156100c657600080fd5b506100916100d5366004610349565b61015d565b3480156100e657600080fd5b5061006f6100f53660046103ca565b610172565b61014582826040518060200161010f9061031a565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe082820381018352601f90910116604052610183565b505050565b600061015683836102e7565b9392505050565b600061016a8484846102f0565b949350505050565b61017d838383610183565b50505050565b6000834710156101f4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f437265617465323a20696e73756666696369656e742062616c616e636500000060448201526064015b60405180910390fd5b815160000361025f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f437265617465323a2062797465636f6465206c656e677468206973207a65726f60448201526064016101eb565b8282516020840186f5905073ffffffffffffffffffffffffff
ffffffffffffff8116610156576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f437265617465323a204661696c6564206f6e206465706c6f790000000000000060448201526064016101eb565b60006101568383305b6000604051836040820152846020820152828152600b8101905060ff815360559020949350505050565b61014e806104ad83390190565b6000806040838503121561033a57600080fd5b50508035926020909101359150565b60008060006060848603121561035e57600080fd5b8335925060208401359150604084013573ffffffffffffffffffffffffffffffffffffffff8116811461039057600080fd5b809150509250925092565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000806000606084860312156103df57600080fd5b8335925060208401359150604084013567ffffffffffffffff8082111561040557600080fd5b818601915086601f83011261041957600080fd5b81358181111561042b5761042b61039b565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156104715761047161039b565b8160405282815289602084870101111561048a57600080fd5b826020860160208301376000602084830101528095505050505050925092509256fe608060405234801561001057600080fd5b5061012e806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063249cb3fa14602d575b600080fd5b603c603836600460b1565b604e565b60405190815260200160405180910390f35b60008281526020818152604080832073ffffffffffffffffffffffffffffffffffffffff8516845290915281205460ff16608857600060aa565b7fa2ef4600d742022d532d4747cb3547474667d6f13804902513b2ec01c848f4b45b9392505050565b6000806040838503121560c357600080fd5b82359150602083013573ffffffffffffffffffffffffffffffffffffffff8116811460ed57600080fd5b80915050925092905056fea26469706673582212205ffd4e6cede7d06a5daf93d48d0541fc68189eeb16608c1999a82063b666eb1164736f6c63430008130033a2646970667358221220fdc4a0fe96e3b21c108ca155438d37c9143fb01278a3c1d274948bad89c564ba64736f6c63430008130033"); @@ -75,21 +76,21 @@ pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result( + 
expected_receipts_root: B256, + expected_logs_bloom: Bloom, + receipts: impl Iterator + Clone, + chain_spec: &ChainSpec, + timestamp: u64, +) -> Result<(), BlockExecutionError> { + // Calculate receipts root. + let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); + let receipts_root = + calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); + + // Create header log bloom. + let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + + compare_receipts_root_and_logs_bloom( + receipts_root, + logs_bloom, + expected_receipts_root, + expected_logs_bloom, + )?; + + Ok(()) +} + +/// Compare the calculated receipts root with the expected receipts root, also compare +/// the calculated logs bloom with the expected logs bloom. +pub fn compare_receipts_root_and_logs_bloom( + calculated_receipts_root: B256, + calculated_logs_bloom: Bloom, + expected_receipts_root: B256, + expected_logs_bloom: Bloom, +) -> Result<(), BlockExecutionError> { + if calculated_receipts_root != expected_receipts_root { + return Err(BlockValidationError::ReceiptRootDiff( + GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(), + ) + .into()) + } + + if calculated_logs_bloom != expected_logs_bloom { + return Err(BlockValidationError::BloomLogDiff( + GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(), + ) + .into()) + } + + Ok(()) +} diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 7e7d5470305a9..a2cbc287cddb6 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -7,7 +7,7 @@ use crate::{ }; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_evm::ConfigureEvm; -use reth_evm_optimism::OptimismEvmConfig; +use reth_evm_optimism::{OpExecutorProvider, OptimismEvmConfig}; use reth_network::{NetworkHandle, NetworkManager}; use reth_node_builder::{ 
components::{ @@ -97,9 +97,18 @@ where Node: FullNodeTypes, { type EVM = OptimismEvmConfig; + type Executor = OpExecutorProvider; - async fn build_evm(self, _ctx: &BuilderContext) -> eyre::Result { - Ok(OptimismEvmConfig::default()) + async fn build_evm( + self, + ctx: &BuilderContext, + ) -> eyre::Result<(Self::EVM, Self::Executor)> { + let chain_spec = ctx.chain_spec(); + let evm_config = OptimismEvmConfig::default(); + let executor = + OpExecutorProvider::new(chain_spec, evm_config).with_inspector(ctx.inspector_stack()); + + Ok((evm_config, executor)) } } diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 7ee1bb9ece3e2..db6a6266eebf3 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -1,8 +1,9 @@ //! OP transaction pool types use parking_lot::RwLock; +use reth_evm_optimism::RethL1BlockInfo; use reth_primitives::{Block, ChainSpec, GotExpected, InvalidTransactionError, SealedBlock}; use reth_provider::{BlockReaderIdExt, StateProviderFactory}; -use reth_revm::{optimism::RethL1BlockInfo, L1BlockInfo}; +use reth_revm::L1BlockInfo; use reth_transaction_pool::{ CoinbaseTipOrdering, EthPoolTransaction, EthPooledTransaction, EthTransactionValidator, Pool, TransactionOrigin, TransactionValidationOutcome, TransactionValidationTaskExecutor, @@ -75,7 +76,7 @@ where /// Update the L1 block info. 
fn update_l1_block_info(&self, block: &Block) { self.block_info.timestamp.store(block.timestamp, Ordering::Relaxed); - if let Ok(cost_addition) = reth_revm::optimism::extract_l1_info(block) { + if let Ok(cost_addition) = reth_evm_optimism::extract_l1_info(block) { *self.block_info.l1_block_info.write() = cost_addition; } } diff --git a/crates/payload/optimism/Cargo.toml b/crates/payload/optimism/Cargo.toml index ebc776e746a45..c58d0ecb583e2 100644 --- a/crates/payload/optimism/Cargo.toml +++ b/crates/payload/optimism/Cargo.toml @@ -21,6 +21,7 @@ reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true reth-engine-primitives.workspace = true reth-evm.workspace = true +reth-evm-optimism.workspace = true reth-payload-builder.workspace = true reth-basic-payload-builder.workspace = true @@ -39,4 +40,5 @@ optimism = [ "reth-revm/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", + "reth-evm-optimism/optimism", ] \ No newline at end of file diff --git a/crates/payload/optimism/src/builder.rs b/crates/payload/optimism/src/builder.rs index 8e8bfb8f0f732..2794ad96892a5 100644 --- a/crates/payload/optimism/src/builder.rs +++ b/crates/payload/optimism/src/builder.rs @@ -303,7 +303,7 @@ where // blocks will always have at least a single transaction in them (the L1 info transaction), // so we can safely assume that this will always be triggered upon the transition and that // the above check for empty blocks will never be hit on OP chains. 
- reth_revm::optimism::ensure_create2_deployer( + reth_evm_optimism::ensure_create2_deployer( chain_spec.clone(), attributes.payload_attributes.timestamp, &mut db, diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 5c62f324eb107..151d53a978f8b 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -17,7 +17,7 @@ reth-primitives.workspace = true reth-interfaces.workspace = true reth-provider.workspace = true reth-consensus-common.workspace = true -reth-evm.workspace = true +reth-evm = { workspace = true, optional = true } reth-trie = { workspace = true, optional = true } # revm @@ -28,10 +28,11 @@ revm-inspectors.workspace = true tracing.workspace = true [dev-dependencies] +reth-evm.workspace = true reth-trie.workspace = true [features] -test-utils = ["dep:reth-trie"] +test-utils = ["dep:reth-trie", "dep:reth-evm"] optimism = [ "revm/optimism", "reth-primitives/optimism", diff --git a/crates/revm/src/factory.rs b/crates/revm/src/factory.rs deleted file mode 100644 index fdaae52c0e787..0000000000000 --- a/crates/revm/src/factory.rs +++ /dev/null @@ -1,56 +0,0 @@ -use crate::{ - database::StateProviderDatabase, - processor::EVMProcessor, - stack::{InspectorStack, InspectorStackConfig}, -}; -use reth_evm::ConfigureEvm; -use reth_interfaces::executor::BlockExecutionError; -use reth_primitives::ChainSpec; -use reth_provider::{ExecutorFactory, PrunableBlockExecutor, StateProvider}; -use std::sync::Arc; - -/// Factory for creating [EVMProcessor]. -#[derive(Clone, Debug)] -pub struct EvmProcessorFactory { - chain_spec: Arc, - stack: Option, - /// Type that defines how the produced EVM should be configured. - evm_config: EvmConfig, -} - -impl EvmProcessorFactory { - /// Create new factory - pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, stack: None, evm_config } - } - - /// Sets the inspector stack for all generated executors. 
- pub fn with_stack(mut self, stack: InspectorStack) -> Self { - self.stack = Some(stack); - self - } - - /// Sets the inspector stack for all generated executors using the provided config. - pub fn with_stack_config(mut self, config: InspectorStackConfig) -> Self { - self.stack = Some(InspectorStack::new(config)); - self - } -} - -impl ExecutorFactory for EvmProcessorFactory -where - EvmConfig: ConfigureEvm + Send + Sync + Clone + 'static, -{ - fn with_state<'a, SP: StateProvider + 'a>( - &'a self, - sp: SP, - ) -> Box + 'a> { - let database_state = StateProviderDatabase::new(sp); - let mut evm = - EVMProcessor::new_with_db(self.chain_spec.clone(), database_state, &self.evm_config); - if let Some(stack) = &self.stack { - evm.set_stack(stack.clone()); - } - Box::new(evm) - } -} diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index f4ed01ada2f33..d8c5761d03ad6 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -11,20 +11,11 @@ /// Contains glue code for integrating reth database into revm's [Database]. pub mod database; -/// revm implementation of reth block and transaction executors. -mod factory; - pub mod batch; -/// new revm account state executor -pub mod processor; - /// State changes that are not related to transactions. pub mod state_change; -/// revm executor factory. -pub use factory::EvmProcessorFactory; - /// Ethereum DAO hardfork state change data. pub mod eth_dao_fork; @@ -33,10 +24,6 @@ pub mod eth_dao_fork; /// used in the main Reth executor. 
pub mod stack; -/// Optimism-specific implementation and utilities for the executor -#[cfg(feature = "optimism")] -pub mod optimism; - /// Common test helpers #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; diff --git a/crates/revm/src/optimism/processor.rs b/crates/revm/src/optimism/processor.rs deleted file mode 100644 index 9fe51d059cfa3..0000000000000 --- a/crates/revm/src/optimism/processor.rs +++ /dev/null @@ -1,401 +0,0 @@ -use crate::processor::{compare_receipts_root_and_logs_bloom, EVMProcessor}; -use reth_evm::ConfigureEvm; -use reth_interfaces::executor::{ - BlockExecutionError, BlockValidationError, OptimismBlockExecutionError, -}; -use reth_primitives::{ - proofs::calculate_receipt_root_optimism, revm_primitives::ResultAndState, BlockWithSenders, - Bloom, ChainSpec, Hardfork, Receipt, ReceiptWithBloom, TxType, B256, U256, -}; -use reth_provider::{BlockExecutor, BundleStateWithReceipts}; -use revm::DatabaseCommit; -use std::time::Instant; -use tracing::{debug, trace}; - -/// Verify the calculated receipts root against the expected receipts root. -pub fn verify_receipt_optimism<'a>( - expected_receipts_root: B256, - expected_logs_bloom: Bloom, - receipts: impl Iterator + Clone, - chain_spec: &ChainSpec, - timestamp: u64, -) -> Result<(), BlockExecutionError> { - // Calculate receipts root. - let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); - let receipts_root = - calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); - - // Create header log bloom. 
- let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); - - compare_receipts_root_and_logs_bloom( - receipts_root, - logs_bloom, - expected_receipts_root, - expected_logs_bloom, - )?; - - Ok(()) -} - -impl<'a, EvmConfig> BlockExecutor for EVMProcessor<'a, EvmConfig> -where - EvmConfig: ConfigureEvm, -{ - type Error = BlockExecutionError; - - fn execute_and_verify_receipt( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { - // execute block - let receipts = self.execute_inner(block, total_difficulty)?; - - // TODO Before Byzantium, receipts contained state root that would mean that expensive - // operation as hashing that is needed for state root got calculated in every - // transaction This was replaced with is_success flag. - // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 - if self.chain_spec.fork(Hardfork::Byzantium).active_at_block(block.header.number) { - let time = Instant::now(); - if let Err(error) = verify_receipt_optimism( - block.header.receipts_root, - block.header.logs_bloom, - receipts.iter(), - self.chain_spec.as_ref(), - block.timestamp, - ) { - debug!(target: "evm", %error, ?receipts, "receipts verification failed"); - return Err(error) - }; - self.stats.receipt_root_duration += time.elapsed(); - } - - self.batch_record.save_receipts(receipts) - } - - fn execute_transactions( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec, u64), BlockExecutionError> { - self.init_env(&block.header, total_difficulty); - - // perf: do not execute empty blocks - if block.body.is_empty() { - return Ok((Vec::new(), 0)) - } - - let is_regolith = - self.chain_spec.fork(Hardfork::Regolith).active_at_timestamp(block.timestamp); - - // Ensure that the create2deployer is force-deployed at the canyon transition. 
Optimism - // blocks will always have at least a single transaction in them (the L1 info transaction), - // so we can safely assume that this will always be triggered upon the transition and that - // the above check for empty blocks will never be hit on OP chains. - super::ensure_create2_deployer(self.chain_spec().clone(), block.timestamp, self.db_mut()) - .map_err(|_| { - BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::ForceCreate2DeployerFail, - ) - })?; - - let mut cumulative_gas_used = 0; - let mut receipts = Vec::with_capacity(block.body.len()); - for (sender, transaction) in block.transactions_with_sender() { - let time = Instant::now(); - // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, - // must be no greater than the block’s gasLimit. - let block_available_gas = block.header.gas_limit - cumulative_gas_used; - if transaction.gas_limit() > block_available_gas && - (is_regolith || !transaction.is_system_transaction()) - { - return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: transaction.gas_limit(), - block_available_gas, - } - .into()) - } - - // An optimism block should never contain blob transactions. - if matches!(transaction.tx_type(), TxType::Eip4844) { - return Err(BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::BlobTransactionRejected, - )) - } - - // Cache the depositor account prior to the state transition for the deposit nonce. - // - // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces - // were not introduced in Bedrock. In addition, regular transactions don't have deposit - // nonces, so we don't need to touch the DB for those. 
- let depositor = (is_regolith && transaction.is_deposit()) - .then(|| { - self.db_mut() - .load_cache_account(*sender) - .map(|acc| acc.account_info().unwrap_or_default()) - }) - .transpose() - .map_err(|_| { - BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::AccountLoadFailed(*sender), - ) - })?; - - // Execute transaction. - let ResultAndState { result, state } = self.transact(transaction, *sender)?; - trace!( - target: "evm", - ?transaction, ?result, ?state, - "Executed transaction" - ); - self.stats.execution_duration += time.elapsed(); - let time = Instant::now(); - - self.db_mut().commit(state); - - self.stats.apply_state_duration += time.elapsed(); - - // append gas used - cumulative_gas_used += result.gas_used(); - - // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push(Receipt { - tx_type: transaction.tx_type(), - // Success flag was added in `EIP-658: Embedding transaction status code in - // receipts`. - success: result.is_success(), - cumulative_gas_used, - // convert to reth log - logs: result.into_logs().into_iter().map(Into::into).collect(), - #[cfg(feature = "optimism")] - deposit_nonce: depositor.map(|account| account.nonce), - // The deposit receipt version was introduced in Canyon to indicate an update to how - // receipt hashes should be computed when set. The state transition process ensures - // this is only set for post-Canyon deposit transactions. 
- #[cfg(feature = "optimism")] - deposit_receipt_version: (transaction.is_deposit() && - self.chain_spec() - .is_fork_active_at_timestamp(Hardfork::Canyon, block.timestamp)) - .then_some(1), - }); - } - - Ok((receipts, cumulative_gas_used)) - } - - fn take_output_state(&mut self) -> BundleStateWithReceipts { - BundleStateWithReceipts::new( - self.evm.context.evm.db.take_bundle(), - self.batch_record.take_receipts(), - self.batch_record.first_block().unwrap_or_default(), - ) - } - - fn size_hint(&self) -> Option { - Some(self.evm.context.evm.db.bundle_size_hint()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - database::StateProviderDatabase, - test_utils::{StateProviderTest, TestEvmConfig}, - }; - use reth_primitives::{ - b256, Account, Address, Block, ChainSpecBuilder, Header, Signature, StorageKey, - StorageValue, Transaction, TransactionSigned, TxEip1559, BASE_MAINNET, - }; - use revm::L1_BLOCK_CONTRACT; - use std::{collections::HashMap, str::FromStr, sync::Arc}; - - fn create_op_state_provider() -> StateProviderTest { - let mut db = StateProviderTest::default(); - - let l1_block_contract_account = - Account { balance: U256::ZERO, bytecode_hash: None, nonce: 1 }; - - let mut l1_block_storage = HashMap::new(); - // base fee - l1_block_storage.insert(StorageKey::with_last_byte(1), StorageValue::from(1000000000)); - // l1 fee overhead - l1_block_storage.insert(StorageKey::with_last_byte(5), StorageValue::from(188)); - // l1 fee scalar - l1_block_storage.insert(StorageKey::with_last_byte(6), StorageValue::from(684000)); - // l1 free scalars post ecotone - l1_block_storage.insert( - StorageKey::with_last_byte(3), - StorageValue::from_str( - "0x0000000000000000000000000000000000001db0000d27300000000000000005", - ) - .unwrap(), - ); - - db.insert_account(L1_BLOCK_CONTRACT, l1_block_contract_account, None, l1_block_storage); - - db - } - - fn create_op_evm_processor<'a>( - chain_spec: Arc, - db: StateProviderTest, - ) -> EVMProcessor<'a, 
TestEvmConfig> { - static CONFIG: std::sync::OnceLock = std::sync::OnceLock::new(); - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - CONFIG.get_or_init(TestEvmConfig::default), - ); - executor.evm.context.evm.db.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); - executor - } - - #[test] - fn op_deposit_fields_pre_canyon() { - let header = Header { - timestamp: 1, - number: 1, - gas_limit: 1_000_000, - gas_used: 42_000, - receipts_root: b256!( - "83465d1e7d01578c0d609be33570f91242f013e9e295b0879905346abbd63731" - ), - ..Default::default() - }; - - let mut db = create_op_state_provider(); - - let addr = Address::ZERO; - let account = Account { balance: U256::MAX, ..Account::default() }; - db.insert_account(addr, account, None, HashMap::new()); - - let chain_spec = - Arc::new(ChainSpecBuilder::from(&*BASE_MAINNET).regolith_activated().build()); - - let tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(TxEip1559 { - chain_id: chain_spec.chain.id(), - nonce: 0, - gas_limit: 21_000, - to: addr.into(), - ..Default::default() - }), - Signature::default(), - ); - - let tx_deposit = TransactionSigned::from_transaction_and_signature( - Transaction::Deposit(reth_primitives::TxDeposit { - from: addr, - to: addr.into(), - gas_limit: 21_000, - ..Default::default() - }), - Signature::default(), - ); - - let mut executor = create_op_evm_processor(chain_spec, db); - - // Attempt to execute a block with one deposit and one non-deposit transaction - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header, - body: vec![tx, tx_deposit], - ommers: vec![], - withdrawals: None, - }, - senders: vec![addr, addr], - }, - U256::ZERO, - ) - .unwrap(); - - let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); - let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); - - // deposit_receipt_version is not present in pre canyon transactions - 
assert!(deposit_receipt.deposit_receipt_version.is_none()); - assert!(tx_receipt.deposit_receipt_version.is_none()); - - // deposit_nonce is present only in deposit transactions - assert!(deposit_receipt.deposit_nonce.is_some()); - assert!(tx_receipt.deposit_nonce.is_none()); - } - - #[test] - fn op_deposit_fields_post_canyon() { - // ensure_create2_deployer will fail if timestamp is set to less then 2 - let header = Header { - timestamp: 2, - number: 1, - gas_limit: 1_000_000, - gas_used: 42_000, - receipts_root: b256!( - "fffc85c4004fd03c7bfbe5491fae98a7473126c099ac11e8286fd0013f15f908" - ), - ..Default::default() - }; - - let mut db = create_op_state_provider(); - let addr = Address::ZERO; - let account = Account { balance: U256::MAX, ..Account::default() }; - - db.insert_account(addr, account, None, HashMap::new()); - - let chain_spec = - Arc::new(ChainSpecBuilder::from(&*BASE_MAINNET).canyon_activated().build()); - - let tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(TxEip1559 { - chain_id: chain_spec.chain.id(), - nonce: 0, - gas_limit: 21_000, - to: addr.into(), - ..Default::default() - }), - Signature::default(), - ); - - let tx_deposit = TransactionSigned::from_transaction_and_signature( - Transaction::Deposit(reth_primitives::TxDeposit { - from: addr, - to: addr.into(), - gas_limit: 21_000, - ..Default::default() - }), - Signature::optimism_deposit_tx_signature(), - ); - - let mut executor = create_op_evm_processor(chain_spec, db); - - // attempt to execute an empty block with parent beacon block root, this should not fail - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header, - body: vec![tx, tx_deposit], - ommers: vec![], - withdrawals: None, - }, - senders: vec![addr, addr], - }, - U256::ZERO, - ) - .expect("Executing a block while canyon is active should not fail"); - - let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); - let deposit_receipt = 
executor.receipts()[0][1].as_ref().unwrap(); - - // deposit_receipt_version is set to 1 for post canyon deposit transactions - assert_eq!(deposit_receipt.deposit_receipt_version, Some(1)); - assert!(tx_receipt.deposit_receipt_version.is_none()); - - // deposit_nonce is present only in deposit transactions - assert!(deposit_receipt.deposit_nonce.is_some()); - assert!(tx_receipt.deposit_nonce.is_none()); - } -} diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs deleted file mode 100644 index 487cec52805d2..0000000000000 --- a/crates/revm/src/processor.rs +++ /dev/null @@ -1,865 +0,0 @@ -#[cfg(not(feature = "optimism"))] -use revm::DatabaseCommit; -use revm::{ - db::StateDBBox, - inspector_handle_register, - interpreter::Host, - primitives::{CfgEnvWithHandlerCfg, ResultAndState}, - Evm, State, -}; -use std::{marker::PhantomData, sync::Arc, time::Instant}; -#[cfg(not(feature = "optimism"))] -use tracing::{debug, trace}; - -use reth_evm::ConfigureEvm; -use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; -#[cfg(feature = "optimism")] -use reth_primitives::revm::env::fill_op_tx_env; -#[cfg(not(feature = "optimism"))] -use reth_primitives::revm::env::fill_tx_env; -use reth_primitives::{ - Address, Block, BlockNumber, BlockWithSenders, Bloom, ChainSpec, GotExpected, Hardfork, Header, - PruneModes, Receipt, ReceiptWithBloom, Receipts, TransactionSigned, Withdrawals, B256, U256, -}; -#[cfg(not(feature = "optimism"))] -use reth_provider::BundleStateWithReceipts; -use reth_provider::{BlockExecutor, ProviderError, PrunableBlockExecutor, StateProvider}; - -use crate::{ - batch::{BlockBatchRecord, BlockExecutorStats}, - database::StateProviderDatabase, - eth_dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, - stack::{InspectorStack, InspectorStackConfig}, - state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, -}; - -/// EVMProcessor is a block executor that uses revm to execute blocks or multiple 
blocks. -/// -/// Output is obtained by calling `take_output_state` function. -/// -/// It is capable of pruning the data that will be written to the database -/// and implemented [PrunableBlockExecutor] traits. -/// -/// It implemented the [BlockExecutor] that give it the ability to take block -/// apply pre state (Cancun system contract call), execute transaction and apply -/// state change and then apply post execution changes (block reward, withdrawals, irregular DAO -/// hardfork state change). And if `execute_and_verify_receipt` is called it will verify the -/// receipt. -/// -/// InspectorStack are used for optional inspecting execution. And it contains -/// various duration of parts of execution. -#[allow(missing_debug_implementations)] -pub struct EVMProcessor<'a, EvmConfig> { - /// The configured chain-spec - pub(crate) chain_spec: Arc, - /// revm instance that contains database and env environment. - pub(crate) evm: Evm<'a, InspectorStack, StateDBBox<'a, ProviderError>>, - /// Keeps track of the recorded receipts and pruning configuration. - pub(crate) batch_record: BlockBatchRecord, - /// Execution stats - pub(crate) stats: BlockExecutorStats, - /// The type that is able to configure the EVM environment. - _phantom: PhantomData, -} - -impl<'a, EvmConfig> EVMProcessor<'a, EvmConfig> -where - EvmConfig: ConfigureEvm, -{ - /// Return chain spec. - pub fn chain_spec(&self) -> &Arc { - &self.chain_spec - } - - /// Creates a new executor from the given chain spec and database. - pub fn new_with_db( - chain_spec: Arc, - db: StateProviderDatabase, - evm_config: &'a EvmConfig, - ) -> Self { - let state = State::builder() - .with_database_boxed(Box::new(db)) - .with_bundle_update() - .without_state_clear() - .build(); - EVMProcessor::new_with_state(chain_spec, state, evm_config) - } - - /// Create a new EVM processor with the given revm state. 
- pub fn new_with_state( - chain_spec: Arc, - revm_state: StateDBBox<'a, ProviderError>, - evm_config: &'a EvmConfig, - ) -> Self { - let stack = InspectorStack::new(InspectorStackConfig::default()); - let evm = evm_config.evm_with_inspector(revm_state, stack); - EVMProcessor { - chain_spec, - evm, - batch_record: BlockBatchRecord::default(), - stats: BlockExecutorStats::default(), - _phantom: PhantomData, - } - } - - /// Configures the executor with the given inspectors. - pub fn set_stack(&mut self, stack: InspectorStack) { - self.evm.context.external = stack; - } - - /// Configure the executor with the given block. - pub fn set_first_block(&mut self, num: BlockNumber) { - self.batch_record.set_first_block(num); - } - - /// Saves the receipts to the batch record. - pub fn save_receipts(&mut self, receipts: Vec) -> Result<(), BlockExecutionError> { - self.batch_record.save_receipts(receipts) - } - - /// Returns the recorded receipts. - pub fn receipts(&self) -> &Receipts { - self.batch_record.receipts() - } - - /// Returns a reference to the database - pub fn db_mut(&mut self) -> &mut StateDBBox<'a, ProviderError> { - &mut self.evm.context.evm.db - } - - /// Initializes the config and block env. - pub(crate) fn init_env(&mut self, header: &Header, total_difficulty: U256) { - // Set state clear flag. - let state_clear_flag = - self.chain_spec.fork(Hardfork::SpuriousDragon).active_at_block(header.number); - - self.db_mut().set_state_clear_flag(state_clear_flag); - - let mut cfg = - CfgEnvWithHandlerCfg::new_with_spec_id(self.evm.cfg().clone(), self.evm.spec_id()); - EvmConfig::fill_cfg_and_block_env( - &mut cfg, - self.evm.block_mut(), - &self.chain_spec, - header, - total_difficulty, - ); - *self.evm.cfg_mut() = cfg.cfg_env; - - // This will update the spec in case it changed - self.evm.modify_spec_id(cfg.handler_cfg.spec_id); - } - - /// Applies the pre-block call to the EIP-4788 beacon block root contract. 
- /// - /// If cancun is not activated or the block is the genesis block, then this is a no-op, and no - /// state changes are made. - fn apply_beacon_root_contract_call( - &mut self, - block: &Block, - ) -> Result<(), BlockExecutionError> { - apply_beacon_root_contract_call( - &self.chain_spec, - block.timestamp, - block.number, - block.parent_beacon_block_root, - &mut self.evm, - )?; - Ok(()) - } - - /// Apply post execution state changes, including block rewards, withdrawals, and irregular DAO - /// hardfork state change. - pub fn apply_post_execution_state_change( - &mut self, - block: &Block, - total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { - let mut balance_increments = post_block_balance_increments( - &self.chain_spec, - block.number, - block.difficulty, - block.beneficiary, - block.timestamp, - total_difficulty, - &block.ommers, - block.withdrawals.as_ref().map(Withdrawals::as_ref), - ); - - // Irregular state change at Ethereum DAO hardfork - if self.chain_spec.fork(Hardfork::Dao).transitions_at_block(block.number) { - // drain balances from hardcoded addresses. - let drained_balance: u128 = self - .db_mut() - .drain_balances(DAO_HARDKFORK_ACCOUNTS) - .map_err(|_| BlockValidationError::IncrementBalanceFailed)? - .into_iter() - .sum(); - - // return balance to DAO beneficiary. - *balance_increments.entry(DAO_HARDFORK_BENEFICIARY).or_default() += drained_balance; - } - // increment balances - self.db_mut() - .increment_balances(balance_increments) - .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; - - Ok(()) - } - - /// Runs a single transaction in the configured environment and proceeds - /// to return the result and state diff (without applying it). - /// - /// Assumes the rest of the block environment has been filled via `init_block_env`. - pub fn transact( - &mut self, - transaction: &TransactionSigned, - sender: Address, - ) -> Result { - // Fill revm structure. 
- #[cfg(not(feature = "optimism"))] - fill_tx_env(self.evm.tx_mut(), transaction, sender); - - #[cfg(feature = "optimism")] - { - let mut envelope_buf = Vec::with_capacity(transaction.length_without_header()); - transaction.encode_enveloped(&mut envelope_buf); - fill_op_tx_env(self.evm.tx_mut(), transaction, sender, envelope_buf.into()); - } - - let hash = transaction.hash_ref(); - let should_inspect = self.evm.context.external.should_inspect(self.evm.env(), hash); - let out = if should_inspect { - // push inspector handle register. - self.evm.handler.append_handler_register_plain(inspector_handle_register); - let output = self.evm.transact(); - tracing::trace!( - target: "evm", - %hash, ?output, ?transaction, env = ?self.evm.context.evm.env, - "Executed transaction" - ); - // pop last handle register - self.evm.handler.pop_handle_register(); - output - } else { - // Main execution without needing the hash - self.evm.transact() - }; - - out.map_err(move |e| { - // Ensure hash is calculated for error log, if not already done - BlockValidationError::EVM { hash: transaction.recalculate_hash(), error: e.into() } - .into() - }) - } - - /// Execute the block, verify gas usage and apply post-block state changes. - pub(crate) fn execute_inner( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result, BlockExecutionError> { - self.init_env(&block.header, total_difficulty); - self.apply_beacon_root_contract_call(block)?; - let (receipts, cumulative_gas_used) = self.execute_transactions(block, total_difficulty)?; - - // Check if gas used matches the value set in header. 
- if block.gas_used != cumulative_gas_used { - let receipts = Receipts::from_block_receipt(receipts); - return Err(BlockValidationError::BlockGasUsed { - gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, - gas_spent_by_tx: receipts.gas_spent_by_tx()?, - } - .into()) - } - let time = Instant::now(); - self.apply_post_execution_state_change(block, total_difficulty)?; - self.stats.apply_post_execution_state_changes_duration += time.elapsed(); - - let time = Instant::now(); - let retention = self.batch_record.bundle_retention(block.number); - self.db_mut().merge_transitions(retention); - self.stats.merge_transitions_duration += time.elapsed(); - - if self.batch_record.first_block().is_none() { - self.batch_record.set_first_block(block.number); - } - - Ok(receipts) - } -} - -/// Default Ethereum implementation of the [BlockExecutor] trait for the [EVMProcessor]. -#[cfg(not(feature = "optimism"))] -impl<'a, EvmConfig> BlockExecutor for EVMProcessor<'a, EvmConfig> -where - EvmConfig: ConfigureEvm, -{ - type Error = BlockExecutionError; - - fn execute_and_verify_receipt( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { - // execute block - let receipts = self.execute_inner(block, total_difficulty)?; - - // TODO Before Byzantium, receipts contained state root that would mean that expensive - // operation as hashing that is needed for state root got calculated in every - // transaction This was replaced with is_success flag. 
- // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 - if self.chain_spec.fork(Hardfork::Byzantium).active_at_block(block.header.number) { - let time = Instant::now(); - if let Err(error) = - verify_receipt(block.header.receipts_root, block.header.logs_bloom, receipts.iter()) - { - debug!(target: "evm", %error, ?receipts, "receipts verification failed"); - return Err(error) - }; - self.stats.receipt_root_duration += time.elapsed(); - } - - self.batch_record.save_receipts(receipts)?; - Ok(()) - } - - fn execute_transactions( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec, u64), BlockExecutionError> { - self.init_env(&block.header, total_difficulty); - - // perf: do not execute empty blocks - if block.body.is_empty() { - return Ok((Vec::new(), 0)) - } - - let mut cumulative_gas_used = 0; - let mut receipts = Vec::with_capacity(block.body.len()); - for (sender, transaction) in block.transactions_with_sender() { - let time = Instant::now(); - // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, - // must be no greater than the block’s gasLimit. - let block_available_gas = block.header.gas_limit - cumulative_gas_used; - if transaction.gas_limit() > block_available_gas { - return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: transaction.gas_limit(), - block_available_gas, - } - .into()) - } - // Execute transaction. - let ResultAndState { result, state } = self.transact(transaction, *sender)?; - trace!( - target: "evm", - ?transaction, ?result, ?state, - "Executed transaction" - ); - self.stats.execution_duration += time.elapsed(); - let time = Instant::now(); - - self.db_mut().commit(state); - - self.stats.apply_state_duration += time.elapsed(); - - // append gas used - cumulative_gas_used += result.gas_used(); - - // Push transaction changeset and calculate header bloom filter for receipt. 
- receipts.push(Receipt { - tx_type: transaction.tx_type(), - // Success flag was added in `EIP-658: Embedding transaction status code in - // receipts`. - success: result.is_success(), - cumulative_gas_used, - // convert to reth log - logs: result.into_logs().into_iter().map(Into::into).collect(), - }); - } - - Ok((receipts, cumulative_gas_used)) - } - - fn take_output_state(&mut self) -> BundleStateWithReceipts { - self.stats.log_debug(); - BundleStateWithReceipts::new( - self.evm.context.evm.db.take_bundle(), - self.batch_record.take_receipts(), - self.batch_record.first_block().unwrap_or_default(), - ) - } - - fn size_hint(&self) -> Option { - Some(self.evm.context.evm.db.bundle_size_hint()) - } -} - -impl<'a, EvmConfig> PrunableBlockExecutor for EVMProcessor<'a, EvmConfig> -where - EvmConfig: ConfigureEvm, -{ - fn set_tip(&mut self, tip: BlockNumber) { - self.batch_record.set_tip(tip); - } - - fn set_prune_modes(&mut self, prune_modes: PruneModes) { - self.batch_record.set_prune_modes(prune_modes); - } -} - -/// Calculate the receipts root, and copmare it against against the expected receipts root and logs -/// bloom. -pub fn verify_receipt<'a>( - expected_receipts_root: B256, - expected_logs_bloom: Bloom, - receipts: impl Iterator + Clone, -) -> Result<(), BlockExecutionError> { - // Calculate receipts root. - let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); - let receipts_root = reth_primitives::proofs::calculate_receipt_root(&receipts_with_bloom); - - // Create header log bloom. - let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); - - compare_receipts_root_and_logs_bloom( - receipts_root, - logs_bloom, - expected_receipts_root, - expected_logs_bloom, - )?; - - Ok(()) -} - -/// Compare the calculated receipts root with the expected receipts root, also copmare -/// the calculated logs bloom with the expected logs bloom. 
-pub fn compare_receipts_root_and_logs_bloom( - calculated_receipts_root: B256, - calculated_logs_bloom: Bloom, - expected_receipts_root: B256, - expected_logs_bloom: Bloom, -) -> Result<(), BlockExecutionError> { - if calculated_receipts_root != expected_receipts_root { - return Err(BlockValidationError::ReceiptRootDiff( - GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(), - ) - .into()) - } - - if calculated_logs_bloom != expected_logs_bloom { - return Err(BlockValidationError::BloomLogDiff( - GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(), - ) - .into()) - } - - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test_utils::{StateProviderTest, TestEvmConfig}; - use reth_primitives::{ - bytes, - constants::{BEACON_ROOTS_ADDRESS, EIP1559_INITIAL_BASE_FEE, SYSTEM_ADDRESS}, - keccak256, Account, Bytes, ChainSpecBuilder, ForkCondition, Signature, Transaction, - TxEip1559, MAINNET, - }; - use revm::{Database, TransitionState}; - use std::collections::HashMap; - - static BEACON_ROOT_CONTRACT_CODE: Bytes = bytes!("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500"); - - fn create_state_provider_with_beacon_root_contract() -> StateProviderTest { - let mut db = StateProviderTest::default(); - - let beacon_root_contract_account = Account { - balance: U256::ZERO, - bytecode_hash: Some(keccak256(BEACON_ROOT_CONTRACT_CODE.clone())), - nonce: 1, - }; - - db.insert_account( - BEACON_ROOTS_ADDRESS, - beacon_root_contract_account, - Some(BEACON_ROOT_CONTRACT_CODE.clone()), - HashMap::new(), - ); - - db - } - - #[test] - fn eip_4788_non_genesis_call() { - let mut header = - Header { timestamp: 1, number: 1, excess_blob_gas: Some(0), ..Header::default() }; - - let db = create_state_provider_with_beacon_root_contract(); - - let chain_spec = Arc::new( - 
ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - // execute invalid header (no parent beacon block root) - let evm_config = TestEvmConfig::default(); - let mut executor = - EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); - - // attempt to execute a block without parent beacon block root, expect err - let err = executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .expect_err( - "Executing cancun block without parent beacon block root field should fail", - ); - assert_eq!( - err, - BlockExecutionError::Validation(BlockValidationError::MissingParentBeaconBlockRoot) - ); - - // fix header, set a gas limit - header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); - - // Now execute a block with the fixed header, ensure that it does not fail - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .unwrap(); - - // check the actual storage of the contract - it should be: - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be - // header.timestamp - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH - // should be parent_beacon_block_root - let history_buffer_length = 8191u64; - let timestamp_index = header.timestamp % history_buffer_length; - let parent_beacon_block_root_index = - timestamp_index % history_buffer_length + history_buffer_length; - - // get timestamp storage and compare - let timestamp_storage = - executor.db_mut().storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap(); - assert_eq!(timestamp_storage, U256::from(header.timestamp)); - - // 
get parent beacon block root storage and compare - let parent_beacon_block_root_storage = executor - .db_mut() - .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) - .expect("storage value should exist"); - assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); - } - - #[test] - fn eip_4788_no_code_cancun() { - // This test ensures that we "silently fail" when cancun is active and there is no code at - // BEACON_ROOTS_ADDRESS - let header = Header { - timestamp: 1, - number: 1, - parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - excess_blob_gas: Some(0), - ..Header::default() - }; - - let db = StateProviderTest::default(); - - // DON'T deploy the contract at genesis - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let evm_config = TestEvmConfig::default(); - let mut executor = - EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); - executor.init_env(&header, U256::ZERO); - - // get the env - let previous_env = executor.evm.context.evm.env.clone(); - - // attempt to execute an empty block with parent beacon block root, this should not fail - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .expect( - "Executing a block with no transactions while cancun is active should not fail", - ); - - // ensure that the env has not changed - assert_eq!(executor.evm.context.evm.env, previous_env); - } - - #[test] - fn eip_4788_empty_account_call() { - // This test ensures that we do not increment the nonce of an empty SYSTEM_ADDRESS account - // during the pre-block call - - let mut db = create_state_provider_with_beacon_root_contract(); - - // insert an empty SYSTEM_ADDRESS - db.insert_account(SYSTEM_ADDRESS, 
Account::default(), None, HashMap::new()); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let evm_config = TestEvmConfig::default(); - let mut executor = - EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); - - // construct the header for block one - let header = Header { - timestamp: 1, - number: 1, - parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - excess_blob_gas: Some(0), - ..Header::default() - }; - - executor.init_env(&header, U256::ZERO); - - // attempt to execute an empty block with parent beacon block root, this should not fail - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .expect( - "Executing a block with no transactions while cancun is active should not fail", - ); - - // ensure that the nonce of the system address account has not changed - let nonce = executor.db_mut().basic(SYSTEM_ADDRESS).unwrap().unwrap().nonce; - assert_eq!(nonce, 0); - } - - #[test] - fn eip_4788_genesis_call() { - let db = create_state_provider_with_beacon_root_contract(); - - // activate cancun at genesis - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(0)) - .build(), - ); - - let mut header = chain_spec.genesis_header(); - - let evm_config = TestEvmConfig::default(); - let mut executor = - EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); - executor.init_env(&header, U256::ZERO); - - // attempt to execute the genesis block with non-zero parent beacon block root, expect err - header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); - let _err = executor - .execute_and_verify_receipt( - &BlockWithSenders { - 
block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .expect_err( - "Executing genesis cancun block with non-zero parent beacon block root field should fail", - ); - - // fix header - header.parent_beacon_block_root = Some(B256::ZERO); - - // now try to process the genesis block again, this time ensuring that a system contract - // call does not occur - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .unwrap(); - - // there is no system contract call so there should be NO STORAGE CHANGES - // this means we'll check the transition state - let state = executor.evm.context.evm.inner.db; - let transition_state = - state.transition_state.expect("the evm should be initialized with bundle updates"); - - // assert that it is the default (empty) transition state - assert_eq!(transition_state, TransitionState::default()); - } - - #[test] - fn eip_4788_high_base_fee() { - // This test ensures that if we have a base fee, then we don't return an error when the - // system contract is called, due to the gas price being less than the base fee. 
- let header = Header { - timestamp: 1, - number: 1, - parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - base_fee_per_gas: Some(u64::MAX), - excess_blob_gas: Some(0), - ..Header::default() - }; - - let db = create_state_provider_with_beacon_root_contract(); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - // execute header - let evm_config = TestEvmConfig::default(); - let mut executor = - EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); - executor.init_env(&header, U256::ZERO); - - // ensure that the env is configured with a base fee - assert_eq!(executor.evm.block().basefee, U256::from(u64::MAX)); - - // Now execute a block with the fixed header, ensure that it does not fail - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .unwrap(); - - // check the actual storage of the contract - it should be: - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be - // header.timestamp - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH - // should be parent_beacon_block_root - let history_buffer_length = 8191u64; - let timestamp_index = header.timestamp % history_buffer_length; - let parent_beacon_block_root_index = - timestamp_index % history_buffer_length + history_buffer_length; - - // get timestamp storage and compare - let timestamp_storage = - executor.db_mut().storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap(); - assert_eq!(timestamp_storage, U256::from(header.timestamp)); - - // get parent beacon block root storage and compare - let parent_beacon_block_root_storage = executor - .db_mut() - .storage(BEACON_ROOTS_ADDRESS, 
U256::from(parent_beacon_block_root_index)) - .unwrap(); - assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); - } - - #[test] - fn test_transact_error_includes_correct_hash() { - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let db = StateProviderTest::default(); - let chain_id = chain_spec.chain.id(); - - // execute header - let evm_config = TestEvmConfig::default(); - let mut executor = - EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); - - // Create a test transaction that gonna fail - let transaction = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(TxEip1559 { - chain_id, - nonce: 1, - gas_limit: 21_000, - to: Address::ZERO.into(), - max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, - ..Default::default() - }), - Signature::default(), - ); - - let result = executor.transact(&transaction, Address::random()); - - let expected_hash = transaction.recalculate_hash(); - - // Check the error - match result { - Err(BlockExecutionError::Validation(BlockValidationError::EVM { hash, error: _ })) => { - assert_eq!(hash, expected_hash, "The EVM error does not include the correct transaction hash."); - }, - _ => panic!("Expected a BlockExecutionError::Validation error, but transaction did not fail as expected."), - } - } -} diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index ef79a7ed3df77..7e198c9989f6c 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -44,7 +44,8 @@ tracing.workspace = true reth-beacon-consensus.workspace = true reth-interfaces = { workspace = true, features = ["test-utils"] } reth-network-api.workspace = true -reth-node-ethereum.workspace = true +reth-evm-ethereum.workspace = true +reth-ethereum-engine-primitives.workspace = true reth-payload-builder = { workspace = true, features = 
["test-utils"] } reth-primitives.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } diff --git a/crates/rpc/rpc-builder/tests/it/auth.rs b/crates/rpc/rpc-builder/tests/it/auth.rs index 51ba2f145254b..4b95d11ed41c0 100644 --- a/crates/rpc/rpc-builder/tests/it/auth.rs +++ b/crates/rpc/rpc-builder/tests/it/auth.rs @@ -2,7 +2,7 @@ use crate::utils::launch_auth; use jsonrpsee::core::client::{ClientT, SubscriptionClientT}; -use reth_node_ethereum::EthEngineTypes; +use reth_ethereum_engine_primitives::EthEngineTypes; use reth_primitives::{Block, U64}; use reth_rpc::JwtSecret; use reth_rpc_api::clients::EngineApiClient; diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index c1180144253a8..403e12a1b317e 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -1,6 +1,7 @@ use reth_beacon_consensus::BeaconConsensusEngineHandle; +use reth_ethereum_engine_primitives::EthEngineTypes; +use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; -use reth_node_ethereum::{EthEngineTypes, EthEvmConfig}; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::MAINNET; use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 81788f0a39117..513c7da134e9b 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -29,6 +29,8 @@ revm-inspectors.workspace = true reth-evm.workspace = true reth-network-types.workspace = true +reth-evm-optimism = { workspace = true, optional = true } + # eth alloy-rlp.workspace = true alloy-dyn-abi = { workspace = true, features = ["eip712"] } @@ -89,4 +91,6 @@ optimism = [ "reth-primitives/optimism", "reth-rpc-types-compat/optimism", "reth-provider/optimism", + "dep:reth-evm-optimism", + "reth-evm-optimism/optimism", ] diff --git a/crates/rpc/rpc/src/eth/api/block.rs 
b/crates/rpc/rpc/src/eth/api/block.rs index 95b6b6bc7a33f..cfc3fe058cb9d 100644 --- a/crates/rpc/rpc/src/eth/api/block.rs +++ b/crates/rpc/rpc/src/eth/api/block.rs @@ -84,7 +84,7 @@ where #[cfg(feature = "optimism")] let (block_timestamp, l1_block_info) = { - let body = reth_revm::optimism::extract_l1_info(&block); + let body = reth_evm_optimism::extract_l1_info(&block); (block.timestamp, body.ok()) }; diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 51bde5bfaaf3b..75470e1fe8e78 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -49,17 +49,9 @@ use revm::{ }; use std::future::Future; -#[cfg(feature = "optimism")] -use crate::eth::api::optimism::OptimismTxMeta; -#[cfg(feature = "optimism")] -use crate::eth::optimism::OptimismEthApiError; use crate::eth::revm_utils::FillableTransaction; #[cfg(feature = "optimism")] -use reth_revm::optimism::RethL1BlockInfo; -#[cfg(feature = "optimism")] use reth_rpc_types::OptimismTransactionReceiptFields; -#[cfg(feature = "optimism")] -use revm::L1BlockInfo; use revm_primitives::db::{Database, DatabaseRef}; /// Helper alias type for the state's [CacheDB] @@ -1498,7 +1490,7 @@ where .ok_or(EthApiError::UnknownBlockNumber)?; let block = block.unseal(); - let l1_block_info = reth_revm::optimism::extract_l1_info(&block).ok(); + let l1_block_info = reth_evm_optimism::extract_l1_info(&block).ok(); let optimism_tx_meta = self.build_op_tx_meta(&tx, l1_block_info, block.timestamp)?; build_transaction_receipt_with_block_receipts( @@ -1510,17 +1502,19 @@ where ) } - /// Builds [OptimismTxMeta] object using the provided [TransactionSigned], - /// [L1BlockInfo] and `block_timestamp`. The [L1BlockInfo] is used to calculate - /// the l1 fee and l1 data gas for the transaction. - /// If the [L1BlockInfo] is not provided, the [OptimismTxMeta] will be empty. 
+ /// Builds op metadata object using the provided [TransactionSigned], L1 block info and + /// `block_timestamp`. The L1BlockInfo is used to calculate the l1 fee and l1 data gas for the + /// transaction. If the L1BlockInfo is not provided, the meta info will be empty. #[cfg(feature = "optimism")] pub(crate) fn build_op_tx_meta( &self, tx: &TransactionSigned, - l1_block_info: Option, + l1_block_info: Option, block_timestamp: u64, - ) -> EthResult { + ) -> EthResult { + use crate::eth::{api::optimism::OptimismTxMeta, optimism::OptimismEthApiError}; + use reth_evm_optimism::RethL1BlockInfo; + let Some(l1_block_info) = l1_block_info else { return Ok(OptimismTxMeta::default()) }; let (l1_fee, l1_data_gas) = if !tx.is_deposit() { @@ -1711,7 +1705,7 @@ pub(crate) fn build_transaction_receipt_with_block_receipts( meta: TransactionMeta, receipt: Receipt, all_receipts: &[Receipt], - #[cfg(feature = "optimism")] optimism_tx_meta: OptimismTxMeta, + #[cfg(feature = "optimism")] optimism_tx_meta: crate::eth::api::optimism::OptimismTxMeta, ) -> EthResult { // Note: we assume this transaction is valid, because it's mined (or part of pending block) and // we don't need to check for pre EIP-2 diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index f3bd16a5e2a18..ef91b2be2371a 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -24,6 +24,8 @@ reth-etl.workspace = true reth-config.workspace = true reth-stages-api = { workspace = true, features = ["test-utils"] } reth-consensus.workspace = true +reth-evm.workspace = true +reth-revm.workspace = true # async tokio = { workspace = true, features = ["sync"] } diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index 92c2b3a09a6fd..2c6aaff251063 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -16,7 +16,7 @@ //! # use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; //! # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloaderBuilder; //! 
# use reth_interfaces::test_utils::{TestBodiesClient, TestHeadersClient}; -//! # use reth_revm::EvmProcessorFactory; +//! # use reth_evm_ethereum::execute::EthExecutorProvider; //! # use reth_primitives::{MAINNET, B256, PruneModes}; //! # use reth_network_types::PeerId; //! # use reth_stages::Pipeline; @@ -45,7 +45,7 @@ //! # provider_factory.clone() //! # ); //! # let (tip_tx, tip_rx) = watch::channel(B256::default()); -//! # let executor_factory = EvmProcessorFactory::new(chain_spec.clone(), EthEvmConfig::default()); +//! # let executor_provider = EthExecutorProvider::mainnet(); //! # let static_file_producer = StaticFileProducer::new( //! # provider_factory.clone(), //! # provider_factory.static_file_provider(), @@ -55,17 +55,15 @@ //! # let pipeline = //! Pipeline::builder() //! .with_tip_sender(tip_tx) -//! .add_stages( -//! DefaultStages::new( -//! provider_factory.clone(), -//! HeaderSyncMode::Tip(tip_rx), -//! consensus, -//! headers_downloader, -//! bodies_downloader, -//! executor_factory, -//! EtlConfig::default(), -//! ) -//! ) +//! .add_stages(DefaultStages::new( +//! provider_factory.clone(), +//! HeaderSyncMode::Tip(tip_rx), +//! consensus, +//! headers_downloader, +//! bodies_downloader, +//! executor_provider, +//! EtlConfig::default(), +//! )) //! .build(provider_factory, static_file_producer); //! ``` //! diff --git a/crates/stages/src/sets.rs b/crates/stages/src/sets.rs index 99edf05b7212e..7ec85170fc0f3 100644 --- a/crates/stages/src/sets.rs +++ b/crates/stages/src/sets.rs @@ -12,44 +12,29 @@ //! ```no_run //! # use reth_stages::Pipeline; //! # use reth_stages::sets::{OfflineStages}; -//! # use reth_revm::EvmProcessorFactory; //! # use reth_primitives::{PruneModes, MAINNET}; //! # use reth_evm_ethereum::EthEvmConfig; //! # use reth_provider::StaticFileProviderFactory; //! # use reth_provider::test_utils::create_test_provider_factory; //! # use reth_static_file::StaticFileProducer; //! # use reth_config::config::EtlConfig; +//! 
# use reth_evm::execute::BlockExecutorProvider; //! -//! # let executor_factory = EvmProcessorFactory::new(MAINNET.clone(), EthEvmConfig::default()); -//! # let provider_factory = create_test_provider_factory(); -//! # let static_file_producer = StaticFileProducer::new( +//! # fn create(exec: impl BlockExecutorProvider) { +//! +//! let provider_factory = create_test_provider_factory(); +//! let static_file_producer = StaticFileProducer::new( //! provider_factory.clone(), //! provider_factory.static_file_provider(), //! PruneModes::default(), //! ); //! // Build a pipeline with all offline stages. -//! # let pipeline = Pipeline::builder() -//! .add_stages(OfflineStages::new(executor_factory, EtlConfig::default())) +//! let pipeline = Pipeline::builder() +//! .add_stages(OfflineStages::new(exec, EtlConfig::default())) //! .build(provider_factory, static_file_producer); -//! ``` //! -//! ```ignore -//! # use reth_stages::Pipeline; -//! # use reth_stages::{StageSet, sets::OfflineStages}; -//! # use reth_revm::EvmProcessorFactory; -//! # use reth_node_ethereum::EthEvmConfig; -//! # use reth_primitives::MAINNET; -//! # use reth_config::config::EtlConfig; -//! -//! // Build a pipeline with all offline stages and a custom stage at the end. -//! # let executor_factory = EvmProcessorFactory::new(MAINNET.clone(), EthEvmConfig::default()); -//! Pipeline::builder() -//! .add_stages( -//! OfflineStages::new(executor_factory, EtlConfig::default()).builder().add_stage(MyCustomStage) -//! ) -//! .build(); +//! # } //! 
``` - use crate::{ stages::{ AccountHashingStage, BodyStage, ExecutionStage, FinishStage, HeaderStage, @@ -61,10 +46,11 @@ use crate::{ use reth_config::config::EtlConfig; use reth_consensus::Consensus; use reth_db::database::Database; +use reth_evm::execute::BlockExecutorProvider; use reth_interfaces::p2p::{ bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, }; -use reth_provider::{ExecutorFactory, HeaderSyncGapProvider, HeaderSyncMode}; +use reth_provider::{HeaderSyncGapProvider, HeaderSyncMode}; use std::sync::Arc; /// A set containing all stages to run a fully syncing instance of reth. @@ -98,7 +84,7 @@ pub struct DefaultStages { etl_config: EtlConfig, } -impl DefaultStages { +impl DefaultStages { /// Create a new set of default stages with default values. pub fn new( provider: Provider, @@ -106,11 +92,11 @@ impl DefaultStages { consensus: Arc, header_downloader: H, body_downloader: B, - executor_factory: EF, + executor_factory: E, etl_config: EtlConfig, ) -> Self where - EF: ExecutorFactory, + E: BlockExecutorProvider, { Self { online: OnlineStages::new( @@ -127,14 +113,14 @@ impl DefaultStages { } } -impl DefaultStages +impl DefaultStages where - EF: ExecutorFactory, + E: BlockExecutorProvider, { /// Appends the default offline stages and default finish stage to the given builder. 
pub fn add_offline_stages( default_offline: StageSetBuilder, - executor_factory: EF, + executor_factory: E, etl_config: EtlConfig, ) -> StageSetBuilder { StageSetBuilder::default() @@ -144,12 +130,12 @@ where } } -impl StageSet for DefaultStages +impl StageSet for DefaultStages where Provider: HeaderSyncGapProvider + 'static, H: HeaderDownloader + 'static, B: BodyDownloader + 'static, - EF: ExecutorFactory, + E: BlockExecutorProvider, DB: Database + 'static, { fn builder(self) -> StageSetBuilder { @@ -269,7 +255,11 @@ impl OfflineStages { } } -impl StageSet for OfflineStages { +impl StageSet for OfflineStages +where + E: BlockExecutorProvider, + DB: Database, +{ fn builder(self) -> StageSetBuilder { ExecutionStages::new(self.executor_factory) .builder() @@ -281,23 +271,27 @@ impl StageSet for OfflineStages { /// A set containing all stages that are required to execute pre-existing block data. #[derive(Debug)] #[non_exhaustive] -pub struct ExecutionStages { +pub struct ExecutionStages { /// Executor factory that will create executors. - executor_factory: EF, + executor_factory: E, } -impl ExecutionStages { +impl ExecutionStages { /// Create a new set of execution stages with default values. 
- pub fn new(executor_factory: EF) -> Self { + pub fn new(executor_factory: E) -> Self { Self { executor_factory } } } -impl StageSet for ExecutionStages { +impl StageSet for ExecutionStages +where + DB: Database, + E: BlockExecutorProvider, +{ fn builder(self) -> StageSetBuilder { StageSetBuilder::default() .add_stage(SenderRecoveryStage::default()) - .add_stage(ExecutionStage::new_with_factory(self.executor_factory)) + .add_stage(ExecutionStage::new_with_executor(self.executor_factory)) } } diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 1771e2570d6dd..0db907211ddda 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -3,6 +3,7 @@ use num_traits::Zero; use reth_db::{ cursor::DbCursorRO, database::Database, static_file::HeaderMask, tables, transaction::DbTx, }; +use reth_evm::execute::{BatchBlockExecutionOutput, BatchExecutor, BlockExecutorProvider}; use reth_exex::{ExExManagerHandle, ExExNotification}; use reth_primitives::{ stage::{ @@ -12,9 +13,10 @@ use reth_primitives::{ }; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, - BlockReader, Chain, DatabaseProviderRW, ExecutorFactory, HeaderProvider, + BlockReader, BundleStateWithReceipts, Chain, DatabaseProviderRW, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, StatsReader, TransactionVariant, }; +use reth_revm::database::StateProviderDatabase; use reth_stages_api::{ BlockErrorKind, ExecInput, ExecOutput, MetricEvent, MetricEventsSender, Stage, StageError, UnwindInput, UnwindOutput, @@ -59,10 +61,10 @@ use tracing::*; /// to [tables::PlainStorageState] // false positive, we cannot derive it if !DB: Debug. 
#[allow(missing_debug_implementations)] -pub struct ExecutionStage { +pub struct ExecutionStage { metrics_tx: Option, - /// The stage's internal executor - executor_factory: EF, + /// The stage's internal block executor + executor_provider: E, /// The commit thresholds of the execution stage. thresholds: ExecutionStageThresholds, /// The highest threshold (in number of blocks) for switching between incremental @@ -76,10 +78,10 @@ pub struct ExecutionStage { exex_manager_handle: ExExManagerHandle, } -impl ExecutionStage { +impl ExecutionStage { /// Create new execution stage with specified config. pub fn new( - executor_factory: EF, + executor_provider: E, thresholds: ExecutionStageThresholds, external_clean_threshold: u64, prune_modes: PruneModes, @@ -88,19 +90,19 @@ impl ExecutionStage { Self { metrics_tx: None, external_clean_threshold, - executor_factory, + executor_provider, thresholds, prune_modes, exex_manager_handle, } } - /// Create an execution stage with the provided executor factory. + /// Create an execution stage with the provided executor. /// /// The commit threshold will be set to 10_000. - pub fn new_with_factory(executor_factory: EF) -> Self { + pub fn new_with_executor(executor_provider: E) -> Self { Self::new( - executor_factory, + executor_provider, ExecutionStageThresholds::default(), MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, PruneModes::none(), @@ -144,7 +146,10 @@ impl ExecutionStage { } } -impl ExecutionStage { +impl ExecutionStage +where + E: BlockExecutorProvider, +{ /// Execute the stage. 
pub fn execute_inner( &mut self, @@ -169,12 +174,11 @@ impl ExecutionStage { None }; - // Build executor - let mut executor = self.executor_factory.with_state(LatestStateProviderRef::new( + let db = StateProviderDatabase(LatestStateProviderRef::new( provider.tx_ref(), provider.static_file_provider().clone(), )); - executor.set_prune_modes(prune_modes); + let mut executor = self.executor_provider.batch_executor(db, prune_modes); executor.set_tip(max_block); // Progress tracking @@ -213,7 +217,8 @@ impl ExecutionStage { // Execute the block let execute_start = Instant::now(); - executor.execute_and_verify_receipt(&block, td).map_err(|error| StageError::Block { + + executor.execute_one((&block, td).into()).map_err(|error| StageError::Block { block: Box::new(block.header.clone().seal_slow()), error: BlockErrorKind::Execution(error), })?; @@ -245,7 +250,8 @@ impl ExecutionStage { } } let time = Instant::now(); - let state = executor.take_output_state(); + let BatchBlockExecutionOutput { bundle, receipts, first_block } = executor.finalize(); + let state = BundleStateWithReceipts::new(bundle, receipts, first_block); let write_preparation_duration = time.elapsed(); // Check if we should send a [`ExExNotification`] to execution extensions. 
@@ -383,7 +389,11 @@ fn calculate_gas_used_from_headers( Ok(gas_total) } -impl Stage for ExecutionStage { +impl Stage for ExecutionStage +where + DB: Database, + E: BlockExecutorProvider, +{ /// Return the id of the stage fn id(&self) -> StageId { StageId::Execution @@ -609,7 +619,7 @@ mod tests { use alloy_rlp::Decodable; use assert_matches::assert_matches; use reth_db::{models::AccountBeforeTx, transaction::DbTxMut}; - use reth_evm_ethereum::EthEvmConfig; + use reth_evm_ethereum::execute::EthExecutorProvider; use reth_interfaces::executor::BlockValidationError; use reth_primitives::{ address, hex_literal::hex, keccak256, stage::StageUnitCheckpoint, Account, Address, @@ -620,16 +630,14 @@ mod tests { test_utils::create_test_provider_factory, AccountReader, ReceiptProvider, StaticFileProviderFactory, }; - use reth_revm::EvmProcessorFactory; use std::collections::BTreeMap; - fn stage() -> ExecutionStage> { - let executor_factory = EvmProcessorFactory::new( - Arc::new(ChainSpecBuilder::mainnet().berlin_activated().build()), - EthEvmConfig::default(), - ); + fn stage() -> ExecutionStage { + let executor_provider = EthExecutorProvider::ethereum(Arc::new( + ChainSpecBuilder::mainnet().berlin_activated().build(), + )); ExecutionStage::new( - executor_factory, + executor_provider, ExecutionStageThresholds { max_blocks: Some(100), max_changes: None, @@ -864,7 +872,7 @@ mod tests { mode.receipts_log_filter = random_filter.clone(); } - let mut execution_stage: ExecutionStage> = stage(); + let mut execution_stage = stage(); execution_stage.prune_modes = mode.clone().unwrap_or_default(); let output = execution_stage.execute(&provider, input).unwrap(); diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index a40da1c496365..7bb88ff96e47b 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -50,7 +50,7 @@ mod tests { transaction::{DbTx, DbTxMut}, AccountsHistory, DatabaseEnv, }; - use reth_evm_ethereum::EthEvmConfig; + 
use reth_evm_ethereum::execute::EthExecutorProvider; use reth_exex::ExExManagerHandle; use reth_interfaces::test_utils::generators::{self, random_block}; use reth_primitives::{ @@ -61,7 +61,6 @@ mod tests { providers::StaticFileWriter, AccountExtReader, ProviderFactory, ReceiptProvider, StorageReader, }; - use reth_revm::EvmProcessorFactory; use reth_stages_api::{ExecInput, Stage}; use std::sync::Arc; @@ -140,10 +139,9 @@ mod tests { // Check execution and create receipts and changesets according to the pruning // configuration let mut execution_stage = ExecutionStage::new( - EvmProcessorFactory::new( - Arc::new(ChainSpecBuilder::mainnet().berlin_activated().build()), - EthEvmConfig::default(), - ), + EthExecutorProvider::ethereum(Arc::new( + ChainSpecBuilder::mainnet().berlin_activated().build(), + )), ExecutionStageThresholds { max_blocks: Some(100), max_changes: None, diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index baf5fa5977318..a57f18f114ed7 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -4,6 +4,7 @@ use reth_db::{ tables, transaction::{DbTx, DbTxMut}, }; +use reth_evm::execute::BatchBlockExecutionOutput; use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ logs_bloom, @@ -34,6 +35,22 @@ pub struct BundleStateWithReceipts { first_block: BlockNumber, } +// TODO(mattsse): unify the types, currently there's a cyclic dependency between +impl From for BundleStateWithReceipts { + fn from(value: BatchBlockExecutionOutput) -> Self { + let BatchBlockExecutionOutput { bundle, receipts, first_block } = value; + Self { bundle, receipts, first_block } + } +} + +// TODO(mattsse): unify the types, currently there's a cyclic dependency between +impl From for BatchBlockExecutionOutput { + fn from(value: 
BundleStateWithReceipts) -> Self { + let BundleStateWithReceipts { bundle, receipts, first_block } = value; + Self { bundle, receipts, first_block } + } +} + /// Type used to initialize revms bundle state. pub type BundleStateInit = HashMap, Option, HashMap)>; diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 31edf4f039528..d2c016add2f6c 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -19,7 +19,7 @@ use reth::{ }; use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeTypes}; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; -use reth_node_ethereum::{EthEvmConfig, EthereumNode}; +use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider, EthereumNode}; use reth_primitives::{Chain, ChainSpec, Genesis, Header, Transaction}; use reth_tracing::{RethTracer, Tracer}; use std::sync::Arc; @@ -106,7 +106,7 @@ impl ConfigureEvm for MyEvmConfig { } } -/// A regular ethereum evm and executor builder. +/// Builds a regular ethereum block executor that uses the custom EVM. 
#[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] pub struct MyExecutorBuilder; @@ -116,9 +116,16 @@ where Node: FullNodeTypes, { type EVM = MyEvmConfig; - - async fn build_evm(self, _ctx: &BuilderContext) -> eyre::Result { - Ok(MyEvmConfig::default()) + type Executor = EthExecutorProvider; + + async fn build_evm( + self, + ctx: &BuilderContext, + ) -> eyre::Result<(Self::EVM, Self::Executor)> { + Ok(( + MyEvmConfig::default(), + EthExecutorProvider::new(ctx.chain_spec(), MyEvmConfig::default()), + )) } } diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml index 3f219322710bb..2584c42d67c5b 100644 --- a/testing/ef-tests/Cargo.toml +++ b/testing/ef-tests/Cargo.toml @@ -22,12 +22,12 @@ reth-provider = { workspace = true, features = ["test-utils"] } reth-stages.workspace = true reth-interfaces.workspace = true reth-revm.workspace = true -reth-node-ethereum.workspace = true +reth-evm-ethereum.workspace = true alloy-rlp.workspace = true -tokio = "1.28.1" +tokio.workspace = true walkdir = "2.3.3" -serde = "1.0.163" +serde.workspace = true serde_json.workspace = true thiserror.workspace = true rayon.workspace = true diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index 424603cb4c219..27f62f886906c 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -7,7 +7,6 @@ use crate::{ use alloy_rlp::Decodable; use rayon::iter::{ParallelBridge, ParallelIterator}; use reth_db::test_utils::{create_test_rw_db, create_test_static_files_dir}; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::{BlockBody, SealedBlock, StaticFileSegment}; use reth_provider::{providers::StaticFileWriter, HashingWriter, ProviderFactory}; use reth_stages::{stages::ExecutionStage, ExecInput, Stage}; @@ -136,10 +135,11 @@ impl Case for BlockchainTestCase { // Execute the execution stage using the EVM processor factory for the test case // network. 
- let _ = ExecutionStage::new_with_factory(reth_revm::EvmProcessorFactory::new( - Arc::new(case.network.clone().into()), - EthEvmConfig::default(), - )) + let _ = ExecutionStage::new_with_executor( + reth_evm_ethereum::execute::EthExecutorProvider::ethereum(Arc::new( + case.network.clone().into(), + )), + ) .execute( &provider, ExecInput { target: last_block.as_ref().map(|b| b.number), checkpoint: None }, From 90f3161256f2dbfafedbc4f71266887ecfd41116 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 3 May 2024 14:15:04 +0200 Subject: [PATCH 461/700] chore: remove InspectorStack (#8073) --- Cargo.lock | 1 - crates/ethereum/evm/src/execute.rs | 39 +---- crates/node-core/src/args/debug.rs | 33 +--- crates/node-ethereum/src/node.rs | 3 +- crates/node/builder/Cargo.toml | 1 - crates/node/builder/src/builder/mod.rs | 23 --- crates/optimism/evm/src/execute.rs | 39 +---- crates/optimism/node/src/node.rs | 3 +- crates/revm/src/lib.rs | 5 - crates/revm/src/stack.rs | 202 ------------------------- 10 files changed, 15 insertions(+), 334 deletions(-) delete mode 100644 crates/revm/src/stack.rs diff --git a/Cargo.lock b/Cargo.lock index b2e179a955bc2..a3f0450ab57f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7278,7 +7278,6 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-prune", - "reth-revm", "reth-rpc", "reth-rpc-engine-api", "reth-stages", diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index c3dd315f74381..b65e7be17470d 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -20,7 +20,6 @@ use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, db::states::bundle_state::BundleRetention, eth_dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, - stack::InspectorStack, state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, Evm, State, }; @@ -36,7 +35,6 @@ use tracing::debug; pub struct EthExecutorProvider { chain_spec: Arc, evm_config: 
EvmConfig, - inspector: Option, } impl EthExecutorProvider { @@ -54,13 +52,7 @@ impl EthExecutorProvider { impl EthExecutorProvider { /// Creates a new executor provider. pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, evm_config, inspector: None } - } - - /// Configures an optional inspector stack for debugging. - pub fn with_inspector(mut self, inspector: Option) -> Self { - self.inspector = inspector; - self + Self { chain_spec, evm_config } } } @@ -78,7 +70,6 @@ where self.evm_config.clone(), State::builder().with_database(db).with_bundle_update().without_state_clear().build(), ) - .with_inspector(self.inspector.clone()) } } @@ -221,20 +212,12 @@ pub struct EthBlockExecutor { executor: EthEvmExecutor, /// The state to use for execution state: State, - /// Optional inspector stack for debugging - inspector: Option, } impl EthBlockExecutor { /// Creates a new Ethereum block executor. pub fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { - Self { executor: EthEvmExecutor { chain_spec, evm_config }, state, inspector: None } - } - - /// Sets the inspector stack for debugging. - pub fn with_inspector(mut self, inspector: Option) -> Self { - self.inspector = inspector; - self + Self { executor: EthEvmExecutor { chain_spec, evm_config }, state } } #[inline] @@ -292,19 +275,9 @@ where let env = self.evm_env_for_block(&block.header, total_difficulty); let (receipts, gas_used) = { - if let Some(inspector) = self.inspector.as_mut() { - let evm = self.executor.evm_config.evm_with_env_and_inspector( - &mut self.state, - env, - inspector, - ); - self.executor.execute_pre_and_transactions(block, evm)? - } else { - let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - - self.executor.execute_pre_and_transactions(block, evm)? - } - }; + let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); + self.executor.execute_pre_and_transactions(block, evm) + }?; // 3. 
apply post execution changes self.post_execution(block, total_difficulty)?; @@ -507,7 +480,7 @@ mod tests { } fn executor_provider(chain_spec: Arc) -> EthExecutorProvider { - EthExecutorProvider { chain_spec, evm_config: Default::default(), inspector: None } + EthExecutorProvider { chain_spec, evm_config: Default::default() } } #[test] diff --git a/crates/node-core/src/args/debug.rs b/crates/node-core/src/args/debug.rs index 3eda71ad0a7d2..d1c4e9b738546 100644 --- a/crates/node-core/src/args/debug.rs +++ b/crates/node-core/src/args/debug.rs @@ -1,7 +1,7 @@ //! clap [Args](clap::Args) for debugging purposes use clap::Args; -use reth_primitives::{TxHash, B256}; +use reth_primitives::B256; use std::path::PathBuf; /// Parameters for debugging purposes @@ -28,37 +28,6 @@ pub struct DebugArgs { #[arg(long = "debug.max-block", help_heading = "Debug")] pub max_block: Option, - /// Print opcode level traces directly to console during execution. - #[arg(long = "debug.print-inspector", help_heading = "Debug")] - pub print_inspector: bool, - - /// Hook on a specific block during execution. - #[arg( - long = "debug.hook-block", - help_heading = "Debug", - conflicts_with = "hook_transaction", - conflicts_with = "hook_all" - )] - pub hook_block: Option, - - /// Hook on a specific transaction during execution. - #[arg( - long = "debug.hook-transaction", - help_heading = "Debug", - conflicts_with = "hook_block", - conflicts_with = "hook_all" - )] - pub hook_transaction: Option, - - /// Hook on every transaction in a block. - #[arg( - long = "debug.hook-all", - help_heading = "Debug", - conflicts_with = "hook_block", - conflicts_with = "hook_transaction" - )] - pub hook_all: bool, - /// If provided, the engine will skip `n` consecutive FCUs. 
#[arg(long = "debug.skip-fcu", help_heading = "Debug")] pub skip_fcu: Option, diff --git a/crates/node-ethereum/src/node.rs b/crates/node-ethereum/src/node.rs index 235130b426348..87bc54d15a0ce 100644 --- a/crates/node-ethereum/src/node.rs +++ b/crates/node-ethereum/src/node.rs @@ -85,8 +85,7 @@ where ) -> eyre::Result<(Self::EVM, Self::Executor)> { let chain_spec = ctx.chain_spec(); let evm_config = EthEvmConfig::default(); - let executor = - EthExecutorProvider::new(chain_spec, evm_config).with_inspector(ctx.inspector_stack()); + let executor = EthExecutorProvider::new(chain_spec, evm_config); Ok((evm_config, executor)) } diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 68c1d5f0c3085..26635e536deb9 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -19,7 +19,6 @@ reth-blockchain-tree.workspace = true reth-exex.workspace = true reth-evm.workspace = true reth-provider.workspace = true -reth-revm.workspace = true reth-db.workspace = true reth-rpc-engine-api.workspace = true reth-rpc.workspace = true diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 0457bbe3e9310..b6f0a191e3e53 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -27,7 +27,6 @@ use reth_node_core::{ }; use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, ChainSpec}; use reth_provider::{providers::BlockchainProvider, ChainSpecProvider}; -use reth_revm::stack::{InspectorStack, InspectorStackConfig}; use reth_tasks::TaskExecutor; use reth_transaction_pool::{PoolConfig, TransactionPool}; pub use states::*; @@ -461,28 +460,6 @@ impl BuilderContext { &self.config } - /// Returns an inspector stack if configured. - /// - /// This can be used to debug block execution. 
- pub fn inspector_stack(&self) -> Option { - use reth_revm::stack::Hook; - let stack_config = InspectorStackConfig { - use_printer_tracer: self.config.debug.print_inspector, - hook: if let Some(hook_block) = self.config.debug.hook_block { - Hook::Block(hook_block) - } else if let Some(tx) = self.config.debug.hook_transaction { - Hook::Transaction(tx) - } else if self.config.debug.hook_all { - Hook::All - } else { - // no inspector - return None - }, - }; - - Some(InspectorStack::new(stack_config)) - } - /// Returns the data dir of the node. /// /// This gives access to all relevant files and directories of the node's datadir. diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 2ea32782c7f06..d19d441a8c06d 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -19,7 +19,6 @@ use reth_primitives::{ use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, db::states::bundle_state::BundleRetention, - stack::InspectorStack, state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, Evm, State, }; @@ -35,7 +34,6 @@ use tracing::{debug, trace}; pub struct OpExecutorProvider { chain_spec: Arc, evm_config: EvmConfig, - inspector: Option, } impl OpExecutorProvider { @@ -48,13 +46,7 @@ impl OpExecutorProvider { impl OpExecutorProvider { /// Creates a new executor provider. pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, evm_config, inspector: None } - } - - /// Configures an optional inspector stack for debugging. 
- pub fn with_inspector(mut self, inspector: Option) -> Self { - self.inspector = inspector; - self + Self { chain_spec, evm_config } } } @@ -72,7 +64,6 @@ where self.evm_config.clone(), State::builder().with_database(db).with_bundle_update().without_state_clear().build(), ) - .with_inspector(self.inspector.clone()) } } @@ -268,20 +259,12 @@ pub struct OpBlockExecutor { executor: OpEvmExecutor, /// The state to use for execution state: State, - /// Optional inspector stack for debugging - inspector: Option, } impl OpBlockExecutor { /// Creates a new Ethereum block executor. pub fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { - Self { executor: OpEvmExecutor { chain_spec, evm_config }, state, inspector: None } - } - - /// Sets the inspector stack for debugging. - pub fn with_inspector(mut self, inspector: Option) -> Self { - self.inspector = inspector; - self + Self { executor: OpEvmExecutor { chain_spec, evm_config }, state } } #[inline] @@ -337,19 +320,9 @@ where let env = self.evm_env_for_block(&block.header, total_difficulty); let (receipts, gas_used) = { - if let Some(inspector) = self.inspector.as_mut() { - let evm = self.executor.evm_config.evm_with_env_and_inspector( - &mut self.state, - env, - inspector, - ); - self.executor.execute_pre_and_transactions(block, evm)? - } else { - let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - - self.executor.execute_pre_and_transactions(block, evm)? - } - }; + let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); + self.executor.execute_pre_and_transactions(block, evm) + }?; // 3. 
apply post execution changes self.post_execution(block, total_difficulty)?; @@ -548,7 +521,7 @@ mod tests { } fn executor_provider(chain_spec: Arc) -> OpExecutorProvider { - OpExecutorProvider { chain_spec, evm_config: Default::default(), inspector: None } + OpExecutorProvider { chain_spec, evm_config: Default::default() } } #[test] diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index a2cbc287cddb6..7d715fecef1ff 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -105,8 +105,7 @@ where ) -> eyre::Result<(Self::EVM, Self::Executor)> { let chain_spec = ctx.chain_spec(); let evm_config = OptimismEvmConfig::default(); - let executor = - OpExecutorProvider::new(chain_spec, evm_config).with_inspector(ctx.inspector_stack()); + let executor = OpExecutorProvider::new(chain_spec, evm_config); Ok((evm_config, executor)) } diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index d8c5761d03ad6..375b230abbbb7 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -19,11 +19,6 @@ pub mod state_change; /// Ethereum DAO hardfork state change data. pub mod eth_dao_fork; -/// An inspector stack abstracting the implementation details of -/// each inspector and allowing to hook on block/transaction execution, -/// used in the main Reth executor. -pub mod stack; - /// Common test helpers #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; diff --git a/crates/revm/src/stack.rs b/crates/revm/src/stack.rs deleted file mode 100644 index 8f8bfa5ce0ddc..0000000000000 --- a/crates/revm/src/stack.rs +++ /dev/null @@ -1,202 +0,0 @@ -use revm::{ - inspectors::CustomPrintTracer, - interpreter::{CallInputs, CallOutcome, CreateInputs, CreateOutcome, Interpreter}, - primitives::{Address, Env, Log, B256, U256}, - Database, EvmContext, Inspector, -}; -use std::fmt::Debug; - -/// A hook to inspect the execution of the EVM. 
-#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] -pub enum Hook { - /// No hook. - #[default] - None, - /// Hook on a specific block. - Block(u64), - /// Hook on a specific transaction hash. - Transaction(B256), - /// Hooks on every transaction in a block. - All, -} - -impl Hook { - /// Returns `true` if this hook should be used. - #[inline] - pub fn is_enabled(&self, block_number: u64, tx_hash: &B256) -> bool { - match self { - Hook::None => false, - Hook::Block(block) => block_number == *block, - Hook::Transaction(hash) => hash == tx_hash, - Hook::All => true, - } - } -} - -/// An inspector that calls multiple inspectors in sequence. -#[derive(Clone, Default)] -pub struct InspectorStack { - /// An inspector that prints the opcode traces to the console. - pub custom_print_tracer: Option, - /// The provided hook - pub hook: Hook, -} - -impl Debug for InspectorStack { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("InspectorStack") - .field("custom_print_tracer", &self.custom_print_tracer.is_some()) - .field("hook", &self.hook) - .finish() - } -} - -impl InspectorStack { - /// Creates a new inspector stack with the given configuration. - #[inline] - pub fn new(config: InspectorStackConfig) -> Self { - Self { - hook: config.hook, - custom_print_tracer: config.use_printer_tracer.then(Default::default), - } - } - - /// Returns `true` if this inspector should be used. - #[inline] - pub fn should_inspect(&self, env: &Env, tx_hash: &B256) -> bool { - self.custom_print_tracer.is_some() && - self.hook.is_enabled(env.block.number.saturating_to(), tx_hash) - } -} - -/// Configuration for the inspectors. -#[derive(Clone, Copy, Debug, Default)] -pub struct InspectorStackConfig { - /// Enable revm inspector printer. - /// In execution this will print opcode level traces directly to console. - pub use_printer_tracer: bool, - - /// Hook on a specific block or transaction. 
- pub hook: Hook, -} - -/// Helper macro to call the same method on multiple inspectors without resorting to dynamic -/// dispatch. -#[macro_export] -macro_rules! call_inspectors { - ([$($inspector:expr),+ $(,)?], |$id:ident $(,)?| $call:expr $(,)?) => {{$( - if let Some($id) = $inspector { - $call - } - )+}} -} - -impl Inspector for InspectorStack -where - DB: Database, -{ - #[inline] - fn initialize_interp(&mut self, interp: &mut Interpreter, context: &mut EvmContext) { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - inspector.initialize_interp(interp, context); - }); - } - - #[inline] - fn step(&mut self, interp: &mut Interpreter, context: &mut EvmContext) { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - inspector.step(interp, context); - }); - } - - #[inline] - fn step_end(&mut self, interp: &mut Interpreter, context: &mut EvmContext) { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - inspector.step_end(interp, context); - }); - } - - #[inline] - fn log(&mut self, context: &mut EvmContext, log: &Log) { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - inspector.log(context, log); - }); - } - - #[inline] - fn call( - &mut self, - context: &mut EvmContext, - inputs: &mut CallInputs, - ) -> Option { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - if let Some(outcome) = inspector.call(context, inputs) { - return Some(outcome) - } - }); - - None - } - - #[inline] - fn call_end( - &mut self, - context: &mut EvmContext, - inputs: &CallInputs, - outcome: CallOutcome, - ) -> CallOutcome { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - let new_ret = inspector.call_end(context, inputs, outcome.clone()); - - // If the inspector returns a different ret or a revert with a non-empty message, - // we assume it wants to tell us something - if new_ret != outcome { - return new_ret - } - }); - - outcome - } - - #[inline] - fn create( - &mut self, - context: &mut 
EvmContext, - inputs: &mut CreateInputs, - ) -> Option { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - if let Some(out) = inspector.create(context, inputs) { - return Some(out) - } - }); - - None - } - - #[inline] - fn create_end( - &mut self, - context: &mut EvmContext, - inputs: &CreateInputs, - outcome: CreateOutcome, - ) -> CreateOutcome { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - let new_ret = inspector.create_end(context, inputs, outcome.clone()); - - // If the inspector returns a different ret or a revert with a non-empty message, - // we assume it wants to tell us something - if new_ret != outcome { - return new_ret - } - }); - - outcome - } - - #[inline] - fn selfdestruct(&mut self, contract: Address, target: Address, value: U256) { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - Inspector::::selfdestruct(inspector, contract, target, value); - }); - } -} From f20e4cbad8ae82aed527ecd809b1e9a553f46e6a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 3 May 2024 14:38:26 +0200 Subject: [PATCH 462/700] debt(discv5): discv5 integration into network (#8065) --- bin/reth/src/commands/p2p/mod.rs | 6 +++++- crates/net/discv4/src/lib.rs | 10 ---------- crates/net/discv5/src/config.rs | 12 +++++++++++- crates/net/discv5/src/lib.rs | 5 +++-- crates/net/network/src/config.rs | 16 ---------------- crates/node-core/src/args/network.rs | 14 +++----------- crates/node-core/src/node_config.rs | 7 ++++++- 7 files changed, 28 insertions(+), 42 deletions(-) diff --git a/bin/reth/src/commands/p2p/mod.rs b/bin/reth/src/commands/p2p/mod.rs index 1cc5d4f880c37..18cc6aba8304a 100644 --- a/bin/reth/src/commands/p2p/mod.rs +++ b/bin/reth/src/commands/p2p/mod.rs @@ -126,6 +126,7 @@ impl Command { let mut network_config_builder = config .network_config(self.nat, None, p2p_secret_key) .chain_spec(self.chain.clone()) + .disable_discv4_discovery_if(self.chain.chain.is_optimism()) 
.boot_nodes(self.chain.bootnodes().unwrap_or_default()); network_config_builder = self.discovery.apply_to_builder(network_config_builder); @@ -136,7 +137,10 @@ impl Command { data_dir.static_files(), )?)); - if self.discovery.enable_discv5_discovery { + if !self.discovery.disable_discovery && + (self.discovery.enable_discv5_discovery || + network_config.chain_spec.chain.is_optimism()) + { network_config = network_config.discovery_v5_with_config_builder(|builder| { let DiscoveryArgs { discv5_addr, diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 9a0cb9c11a4b0..77cc309ebf93d 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -94,16 +94,6 @@ pub const DEFAULT_DISCOVERY_ADDR: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); /// Note: the default TCP port is the same. pub const DEFAULT_DISCOVERY_PORT: u16 = 30303; -/// The default address for discv5 via UDP. -/// -/// Note: the default TCP address is the same. -pub const DEFAULT_DISCOVERY_V5_ADDR: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); - -/// The default port for discv5 via UDP. -/// -/// Default is port 9000. -pub const DEFAULT_DISCOVERY_V5_PORT: u16 = 9000; - /// The default address for discv4 via UDP: "0.0.0.0:30303" /// /// Note: The default TCP address is the same. diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 05c2863c85572..da7e58cb7f61a 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -3,7 +3,7 @@ use std::{ collections::HashSet, fmt::Debug, - net::{IpAddr, SocketAddr}, + net::{IpAddr, Ipv4Addr, SocketAddr}, }; use derive_more::Display; @@ -13,6 +13,16 @@ use reth_primitives::{Bytes, EnrForkIdEntry, ForkId, NodeRecord}; use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys, NetworkStackId}; +/// The default address for discv5 via UDP. +/// +/// Default is 0.0.0.0, all interfaces. See [`discv5::ListenConfig`] default. 
+pub const DEFAULT_DISCOVERY_V5_ADDR: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); + +/// The default port for discv5 via UDP. +/// +/// Default is port 9000. See [`discv5::ListenConfig`] default. +pub const DEFAULT_DISCOVERY_V5_PORT: u16 = 9000; + /// Default interval in seconds at which to run a lookup up query. /// /// Default is 60 seconds. diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index e9bc79dce91a7..a5ac1d808f345 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -38,8 +38,9 @@ pub mod network_stack_id; pub use discv5::{self, IpMode}; pub use config::{ - BootNode, Config, ConfigBuilder, DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, - DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL, + BootNode, Config, ConfigBuilder, DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_ADDR, + DEFAULT_DISCOVERY_V5_PORT, DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, + DEFAULT_SECONDS_LOOKUP_INTERVAL, }; pub use enr::enr_to_discv4_id; pub use error::Error; diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 9e898014fe539..40d88f991e53c 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -176,8 +176,6 @@ pub struct NetworkConfigBuilder { dns_discovery_config: Option, /// How to set up discovery version 4. discovery_v4_builder: Option, - /// Whether to enable discovery version 5. Disabled by default. - enable_discovery_v5: bool, /// All boot nodes to start network discovery with. boot_nodes: HashSet, /// Address to use for discovery @@ -220,7 +218,6 @@ impl NetworkConfigBuilder { secret_key, dns_discovery_config: Some(Default::default()), discovery_v4_builder: Some(Default::default()), - enable_discovery_v5: false, boot_nodes: Default::default(), discovery_addr: None, listener_addr: None, @@ -353,12 +350,6 @@ impl NetworkConfigBuilder { self } - /// Allows discv5 discovery. 
- pub fn discovery_v5(mut self) -> Self { - self.enable_discovery_v5 = true; - self - } - /// Sets the dns discovery config to use. pub fn dns_discovery(mut self, config: DnsDiscoveryConfig) -> Self { self.dns_discovery_config = Some(config); @@ -407,12 +398,6 @@ impl NetworkConfigBuilder { self } - /// Enable the Discv5 discovery. - pub fn enable_discv5_discovery(mut self) -> Self { - self.enable_discovery_v5 = true; - self - } - /// Disable the DNS discovery if the given condition is true. pub fn disable_dns_discovery_if(self, disable: bool) -> Self { if disable { @@ -469,7 +454,6 @@ impl NetworkConfigBuilder { secret_key, mut dns_discovery_config, discovery_v4_builder, - enable_discovery_v5: _, boot_nodes, discovery_addr, listener_addr, diff --git a/crates/node-core/src/args/network.rs b/crates/node-core/src/args/network.rs index df6f8ece8a54d..0d5206e7f77c6 100644 --- a/crates/node-core/src/args/network.rs +++ b/crates/node-core/src/args/network.rs @@ -3,13 +3,10 @@ use crate::version::P2P_CLIENT_VERSION; use clap::Args; use reth_config::Config; -use reth_discv4::{ - DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT, DEFAULT_DISCOVERY_V5_ADDR, - DEFAULT_DISCOVERY_V5_PORT, -}; +use reth_discv4::{DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT}; use reth_discv5::{ - DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, - DEFAULT_SECONDS_LOOKUP_INTERVAL, + DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_ADDR, DEFAULT_DISCOVERY_V5_PORT, + DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL, }; use reth_net_nat::NatResolver; use reth_network::{ @@ -272,11 +269,6 @@ impl DiscoveryArgs { network_config_builder = network_config_builder.disable_discv4_discovery(); } - if !self.disable_discovery && (self.enable_discv5_discovery || cfg!(feature = "optimism")) { - network_config_builder = network_config_builder.disable_discv4_discovery(); - network_config_builder = network_config_builder.enable_discv5_discovery(); - } - 
network_config_builder } diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 3f149a824c5b3..a4301b8046a79 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -462,6 +462,7 @@ impl NodeConfig { // set discovery port based on instance number self.network.port + self.instance - 1, )) + .disable_discv4_discovery_if(self.chain.chain.is_optimism()) .discovery_addr(SocketAddr::new( self.network.discovery.addr, // set discovery port based on instance number @@ -470,9 +471,13 @@ impl NodeConfig { let config = cfg_builder.build(client); - if !self.network.discovery.enable_discv5_discovery { + if self.network.discovery.disable_discovery || + !self.network.discovery.enable_discv5_discovery && + !config.chain_spec.chain.is_optimism() + { return config } + // work around since discv5 config builder can't be integrated into network config builder // due to unsatisfied trait bounds config.discovery_v5_with_config_builder(|builder| { From 43599f983c1a13c90a3dfc9a8032c7ce7f9e9306 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 3 May 2024 14:52:00 +0200 Subject: [PATCH 463/700] chore: move node-ethereum to ethereum/node (#8076) --- Cargo.toml | 4 ++-- crates/{node-ethereum => ethereum/node}/Cargo.toml | 0 crates/{node-ethereum => ethereum/node}/src/evm.rs | 0 crates/{node-ethereum => ethereum/node}/src/lib.rs | 0 crates/{node-ethereum => ethereum/node}/src/node.rs | 0 .../node}/tests/assets/genesis.json | 0 crates/{node-ethereum => ethereum/node}/tests/e2e/blobs.rs | 0 crates/{node-ethereum => ethereum/node}/tests/e2e/dev.rs | 0 crates/{node-ethereum => ethereum/node}/tests/e2e/eth.rs | 0 crates/{node-ethereum => ethereum/node}/tests/e2e/main.rs | 0 crates/{node-ethereum => ethereum/node}/tests/e2e/p2p.rs | 0 crates/{node-ethereum => ethereum/node}/tests/e2e/utils.rs | 0 crates/{node-ethereum => ethereum/node}/tests/it/builder.rs | 0 crates/{node-ethereum => ethereum/node}/tests/it/exex.rs | 0 
crates/{node-ethereum => ethereum/node}/tests/it/main.rs | 0 15 files changed, 2 insertions(+), 2 deletions(-) rename crates/{node-ethereum => ethereum/node}/Cargo.toml (100%) rename crates/{node-ethereum => ethereum/node}/src/evm.rs (100%) rename crates/{node-ethereum => ethereum/node}/src/lib.rs (100%) rename crates/{node-ethereum => ethereum/node}/src/node.rs (100%) rename crates/{node-ethereum => ethereum/node}/tests/assets/genesis.json (100%) rename crates/{node-ethereum => ethereum/node}/tests/e2e/blobs.rs (100%) rename crates/{node-ethereum => ethereum/node}/tests/e2e/dev.rs (100%) rename crates/{node-ethereum => ethereum/node}/tests/e2e/eth.rs (100%) rename crates/{node-ethereum => ethereum/node}/tests/e2e/main.rs (100%) rename crates/{node-ethereum => ethereum/node}/tests/e2e/p2p.rs (100%) rename crates/{node-ethereum => ethereum/node}/tests/e2e/utils.rs (100%) rename crates/{node-ethereum => ethereum/node}/tests/it/builder.rs (100%) rename crates/{node-ethereum => ethereum/node}/tests/it/exex.rs (100%) rename crates/{node-ethereum => ethereum/node}/tests/it/main.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index 28b0692dd7f22..12b31162d47b5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -49,7 +49,7 @@ members = [ "crates/rpc/rpc-types-compat/", "crates/engine-primitives/", "crates/ethereum/engine-primitives/", - "crates/node-ethereum/", + "crates/ethereum/node", "crates/node/builder/", "crates/optimism/consensus", "crates/optimism/node/", @@ -225,7 +225,7 @@ reth-e2e-test-utils = { path = "crates/e2e-test-utils" } reth-engine-primitives = { path = "crates/engine-primitives" } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } reth-node-builder = { path = "crates/node/builder" } -reth-node-ethereum = { path = "crates/node-ethereum" } +reth-node-ethereum = { path = "crates/ethereum/node" } reth-node-optimism = { path = "crates/optimism/node" } reth-evm-optimism = { path = "crates/optimism/evm" } reth-node-core = { path = 
"crates/node-core" } diff --git a/crates/node-ethereum/Cargo.toml b/crates/ethereum/node/Cargo.toml similarity index 100% rename from crates/node-ethereum/Cargo.toml rename to crates/ethereum/node/Cargo.toml diff --git a/crates/node-ethereum/src/evm.rs b/crates/ethereum/node/src/evm.rs similarity index 100% rename from crates/node-ethereum/src/evm.rs rename to crates/ethereum/node/src/evm.rs diff --git a/crates/node-ethereum/src/lib.rs b/crates/ethereum/node/src/lib.rs similarity index 100% rename from crates/node-ethereum/src/lib.rs rename to crates/ethereum/node/src/lib.rs diff --git a/crates/node-ethereum/src/node.rs b/crates/ethereum/node/src/node.rs similarity index 100% rename from crates/node-ethereum/src/node.rs rename to crates/ethereum/node/src/node.rs diff --git a/crates/node-ethereum/tests/assets/genesis.json b/crates/ethereum/node/tests/assets/genesis.json similarity index 100% rename from crates/node-ethereum/tests/assets/genesis.json rename to crates/ethereum/node/tests/assets/genesis.json diff --git a/crates/node-ethereum/tests/e2e/blobs.rs b/crates/ethereum/node/tests/e2e/blobs.rs similarity index 100% rename from crates/node-ethereum/tests/e2e/blobs.rs rename to crates/ethereum/node/tests/e2e/blobs.rs diff --git a/crates/node-ethereum/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs similarity index 100% rename from crates/node-ethereum/tests/e2e/dev.rs rename to crates/ethereum/node/tests/e2e/dev.rs diff --git a/crates/node-ethereum/tests/e2e/eth.rs b/crates/ethereum/node/tests/e2e/eth.rs similarity index 100% rename from crates/node-ethereum/tests/e2e/eth.rs rename to crates/ethereum/node/tests/e2e/eth.rs diff --git a/crates/node-ethereum/tests/e2e/main.rs b/crates/ethereum/node/tests/e2e/main.rs similarity index 100% rename from crates/node-ethereum/tests/e2e/main.rs rename to crates/ethereum/node/tests/e2e/main.rs diff --git a/crates/node-ethereum/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs similarity index 100% rename 
from crates/node-ethereum/tests/e2e/p2p.rs rename to crates/ethereum/node/tests/e2e/p2p.rs diff --git a/crates/node-ethereum/tests/e2e/utils.rs b/crates/ethereum/node/tests/e2e/utils.rs similarity index 100% rename from crates/node-ethereum/tests/e2e/utils.rs rename to crates/ethereum/node/tests/e2e/utils.rs diff --git a/crates/node-ethereum/tests/it/builder.rs b/crates/ethereum/node/tests/it/builder.rs similarity index 100% rename from crates/node-ethereum/tests/it/builder.rs rename to crates/ethereum/node/tests/it/builder.rs diff --git a/crates/node-ethereum/tests/it/exex.rs b/crates/ethereum/node/tests/it/exex.rs similarity index 100% rename from crates/node-ethereum/tests/it/exex.rs rename to crates/ethereum/node/tests/it/exex.rs diff --git a/crates/node-ethereum/tests/it/main.rs b/crates/ethereum/node/tests/it/main.rs similarity index 100% rename from crates/node-ethereum/tests/it/main.rs rename to crates/ethereum/node/tests/it/main.rs From 1cf65e339478c497309b43396672748a5df869cf Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 3 May 2024 15:23:16 +0200 Subject: [PATCH 464/700] feat(op): pass unverifiable ENRs to rlpx (#8059) --- crates/net/discv5/src/lib.rs | 58 +++++++++++++++++++++++++----------- 1 file changed, 40 insertions(+), 18 deletions(-) diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index a5ac1d808f345..8e156dde1bffe 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -237,19 +237,45 @@ impl Discv5 { None } discv5::Event::SessionEstablished(enr, remote_socket) => { - // covers `reth_discv4::DiscoveryUpdate` equivalents `DiscoveryUpdate::Added(_)` - // and `DiscoveryUpdate::DiscoveredAtCapacity(_) + // this branch is semantically similar to branches of + // `reth_discv4::DiscoveryUpdate`: `DiscoveryUpdate::Added(_)` and + // `DiscoveryUpdate::DiscoveredAtCapacity(_) // peer has been discovered as part of query, or, by incoming session (peer has // discovered us) - 
self.metrics.discovered_peers_advertised_networks.increment_once_by_network_type(&enr); - self.metrics.discovered_peers.increment_established_sessions_raw(1); self.on_discovered_peer(&enr, remote_socket) } - _ => None, + discv5::Event::UnverifiableEnr { + enr, + socket, + node_id: _, + } => { + // this branch is semantically similar to branches of + // `reth_discv4::DiscoveryUpdate`: `DiscoveryUpdate::Added(_)` and + // `DiscoveryUpdate::DiscoveredAtCapacity(_) + + // peer has been discovered as part of query, or, by an outgoing session (but peer + // is behind NAT and responds from a different socket) + + // NOTE: `discv5::Discv5` won't initiate a session with any peer with an + // unverifiable node record, for example one that advertises a reserved LAN IP + // address on a WAN network. This is in order to prevent DoS attacks, where some + // malicious peers may advertise a victim's socket. We will still try and connect + // to them over RLPx, to be compatible with EL discv5 implementations that don't + // enforce this security measure. 
+ + trace!(target: "net::discv5", + ?enr, + %socket, + "discovered unverifiable enr, source socket doesn't match socket advertised in ENR" + ); + + self.on_discovered_peer(&enr, socket) + } + _ => None } } @@ -259,10 +285,12 @@ impl Discv5 { enr: &discv5::Enr, socket: SocketAddr, ) -> Option { + self.metrics.discovered_peers_advertised_networks.increment_once_by_network_type(enr); + let node_record = match self.try_into_reachable(enr, socket) { Ok(enr_bc) => enr_bc, Err(err) => { - trace!(target: "net::discovery::discv5", + trace!(target: "net::discv5", %err, ?enr, "discovered peer is unreachable" @@ -274,7 +302,7 @@ impl Discv5 { } }; if let FilterOutcome::Ignore { reason } = self.filter_discovered_peer(enr) { - trace!(target: "net::discovery::discv5", + trace!(target: "net::discv5", ?enr, reason, "filtered out discovered peer" @@ -290,7 +318,7 @@ impl Discv5 { .then(|| self.get_fork_id(enr).ok()) .flatten(); - trace!(target: "net::discovery::discv5", + trace!(target: "net::discv5", ?fork_id, ?enr, "discovered peer" @@ -300,11 +328,7 @@ impl Discv5 { } /// Tries to convert an [`Enr`](discv5::Enr) into the backwards compatible type [`NodeRecord`], - /// w.r.t. local [`IpMode`]. Tries the socket from which the ENR was sent, if socket is missing - /// from ENR. - /// - /// Note: [`discv5::Discv5`] won't initiate a session with any peer with a malformed node - /// record, that advertises a reserved IP address on a WAN network. + /// w.r.t. local [`IpMode`]. Uses source socket as udp socket. 
pub fn try_into_reachable( &self, enr: &discv5::Enr, @@ -312,8 +336,6 @@ impl Discv5 { ) -> Result { let id = enr_to_discv4_id(enr).ok_or(Error::IncompatibleKeyType)?; - let udp_socket = self.ip_mode().get_contactable_addr(enr).unwrap_or(socket); - // since we, on bootstrap, set tcp4 in local ENR for `IpMode::Dual`, we prefer tcp4 here // too let Some(tcp_port) = (match self.ip_mode() { @@ -323,7 +345,7 @@ impl Discv5 { return Err(Error::IpVersionMismatchRlpx(self.ip_mode())) }; - Ok(NodeRecord { address: udp_socket.ip(), tcp_port, udp_port: udp_socket.port(), id }) + Ok(NodeRecord { address: socket.ip(), tcp_port, udp_port: socket.port(), id }) } /// Applies filtering rules on an ENR. Returns [`Ok`](FilterOutcome::Ok) if peer should be @@ -620,7 +642,7 @@ pub async fn lookup( } #[cfg(test)] -mod tests { +mod test { use super::*; use ::enr::{CombinedKey, EnrKey}; use reth_primitives::MAINNET; @@ -674,7 +696,7 @@ mod tests { let (node_2, mut stream_2, _) = start_discovery_node(30355).await; let node_2_enr = node_2.with_discv5(|discv5| discv5.local_enr()); - trace!(target: "net::discovery::tests", + trace!(target: "net::discv5::test", node_1_node_id=format!("{:#}", node_1_enr.node_id()), node_2_node_id=format!("{:#}", node_2_enr.node_id()), "started nodes" From d9f4adc2ebb373220e681acad537a58251dfe214 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Fri, 3 May 2024 16:50:40 +0200 Subject: [PATCH 465/700] chore(deps): bump alloy, evm-inspectors (#8077) --- Cargo.lock | 136 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 32 ++++++------- 2 files changed, 84 insertions(+), 84 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a3f0450ab57f5..1828b2a2faef3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -133,12 +133,12 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = 
"git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "c-kzg", "serde", "sha2 0.10.8", @@ -161,9 +161,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ab339ca7b4ea9115f0578c941abc80a171edf8e5eadd01e6c4237b68db8083" +checksum = "545885d9b0b2c30fd344ae291439b4bfe59e48dd62fbc862f8503d98088967dc" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -180,11 +180,11 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "arbitrary", "c-kzg", "derive_more", @@ -212,10 +212,10 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "serde", "serde_json", ] @@ -233,9 +233,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.7.1" 
+version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44294729c145cf7ae65feab544b5b81fb2bb7e2fd060214842eb3989a1e9d882" +checksum = "786689872ec4e7d354810ab0dffd48bb40b838c047522eb031cbd47d15634849" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -246,7 +246,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-primitives", "serde", @@ -258,13 +258,13 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-signer", "alloy-sol-types", "async-trait", @@ -275,9 +275,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", 
"alloy-primitives", "k256", "serde_json", @@ -289,9 +289,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c715249705afa1e32be79dabfd35e2ef0f1cc02ad2cf48c9d1e20026ee637b" +checksum = "525448f6afc1b70dd0f9d0a8145631bf2f5e434678ab23ab18409ca264cae6b3" dependencies = [ "alloy-rlp", "arbitrary", @@ -317,14 +317,14 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-rpc-types-trace", "alloy-transport", "alloy-transport-http", @@ -367,7 +367,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -387,14 +387,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", - "alloy-eips 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=ca54552)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-sol-types", "arbitrary", "itertools 0.12.1", @@ -427,24 +427,24 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "serde", ] [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-serde 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=af788af)", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", @@ -457,11 +457,11 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "serde", "serde_json", ] @@ -469,7 +469,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-primitives", "serde", @@ -489,7 +489,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-primitives", "async-trait", @@ -502,9 +502,9 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-network", "alloy-primitives", 
"alloy-signer", @@ -518,9 +518,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef9a94a27345fb31e3fcb5f5e9f592bb4847493b07fa1e47dd9fde2222f2e28" +checksum = "89c80a2cb97e7aa48611cbb63950336f9824a174cdf670527cc6465078a26ea1" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", @@ -537,9 +537,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31fe73cd259527e24dc2dbfe64bc95e5ddfcd2b2731f670a11ff72b2be2c25b" +checksum = "c58894b58ac50979eeac6249661991ac40b9d541830d9a725f7714cc9ef08c23" dependencies = [ "alloy-json-abi", "const-hex", @@ -554,18 +554,18 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c8d6e74e4feeaa2bcfdecfd3da247ab53c67bd654ba1907270c32e02b142331" +checksum = "7da8e71ea68e780cc203919e03f69f59e7afe92d2696fb1dcb6662f61e4031b6" dependencies = [ "winnow 0.6.7", ] [[package]] name = "alloy-sol-types" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afaffed78bfb17526375754931e045f96018aa810844b29c7aef823266dd4b4b" +checksum = "399287f68d1081ed8b1f4903c49687658b95b142207d7cb4ae2f4813915343ef" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -577,7 +577,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -595,7 +595,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -2951,7 +2951,7 @@ dependencies = [ name = "exex-rollup" version = "0.0.0" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-rlp", "alloy-sol-types", "eyre", @@ -6567,8 +6567,8 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.6" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-primitives", "arbitrary", "bytes", @@ -6773,9 +6773,9 @@ dependencies = [ name = "reth-e2e-test-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-network", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-signer", "alloy-signer-wallet", "eyre", @@ -7512,8 +7512,8 @@ name = "reth-primitives" version = "0.2.0-beta.6" dependencies = [ "alloy-chains", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-primitives", "alloy-rlp", "alloy-trie", @@ -7778,10 +7778,10 @@ dependencies = [ name = "reth-rpc-types" 
version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-rpc-types-anvil", "alloy-rpc-types-engine", "alloy-rpc-types-trace", @@ -7808,7 +7808,7 @@ name = "reth-rpc-types-compat" version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "reth-primitives", "reth-rpc-types", "serde_json", @@ -7914,7 +7914,7 @@ dependencies = [ name = "reth-testing-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "reth-primitives", "secp256k1", ] @@ -8048,10 +8048,10 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=3d2077e#3d2077ee665046c256448a8bd90d8e93ea85de56" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=d15add2#d15add2614fc359025f43bd7ad6096719580ba81" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", @@ -9155,9 +9155,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70aba06097b6eda3c15f6eebab8a6339e121475bcf08bbe6758807e716c372a1" +checksum = "5aa0cefd02f532035d83cfec82647c6eb53140b0485220760e669f4bad489e36" dependencies 
= [ "paste", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index 12b31162d47b5..0aca2afbbaaa8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -282,29 +282,29 @@ reth-testing-utils = { path = "testing/testing-utils" } # revm revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "3d2077e" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "d15add2" } # eth alloy-chains = "0.1.15" -alloy-primitives = "0.7.1" -alloy-dyn-abi = "0.7.1" -alloy-sol-types = "0.7.1" +alloy-primitives = "0.7.2" +alloy-dyn-abi = "0.7.2" +alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-provider = { git = 
"https://github.com/alloy-rs/alloy", rev = "af788af", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "ca54552" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "af788af" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } # misc auto_impl = "1" From 66f70838ae90cd6d031dfa058d7e5f6ed2bd9fad Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 3 May 2024 17:26:48 +0200 Subject: [PATCH 466/700] chore: move dao hardfork constants to evm-ethereum (#8078) --- .../src/eth_dao_fork.rs => ethereum/evm/src/dao_fork.rs} | 0 crates/ethereum/evm/src/execute.rs | 7 +++++-- crates/ethereum/evm/src/lib.rs | 3 +++ crates/revm/src/lib.rs | 3 --- 4 files changed, 8 insertions(+), 5 deletions(-) rename crates/{revm/src/eth_dao_fork.rs => ethereum/evm/src/dao_fork.rs} (100%) diff --git a/crates/revm/src/eth_dao_fork.rs b/crates/ethereum/evm/src/dao_fork.rs similarity index 100% rename from crates/revm/src/eth_dao_fork.rs rename to crates/ethereum/evm/src/dao_fork.rs diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index b65e7be17470d..ff3a4e76d0d85 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -1,6 +1,10 @@ //! Ethereum block executor. 
-use crate::{verify::verify_receipts, EthEvmConfig}; +use crate::{ + dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, + verify::verify_receipts, + EthEvmConfig, +}; use reth_evm::{ execute::{ BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, @@ -19,7 +23,6 @@ use reth_primitives::{ use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, db::states::bundle_state::BundleRetention, - eth_dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, Evm, State, }; diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 88621a66aa9d8..0c8506ff7cda5 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -18,6 +18,9 @@ use reth_revm::{Database, EvmBuilder}; pub mod execute; pub mod verify; +/// Ethereum DAO hardfork state change data. +pub mod dao_fork; + /// Ethereum-related EVM configuration. #[derive(Debug, Clone, Copy, Default)] #[non_exhaustive] diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index 375b230abbbb7..7f950afb0f962 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -16,9 +16,6 @@ pub mod batch; /// State changes that are not related to transactions. pub mod state_change; -/// Ethereum DAO hardfork state change data. 
-pub mod eth_dao_fork; - /// Common test helpers #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; From ead753db4c96f5f9f0fd2867106d88ef6d312405 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 3 May 2024 18:12:03 +0200 Subject: [PATCH 467/700] fix(cli): debug merkle script (#8067) --- bin/reth/src/commands/debug_cmd/merkle.rs | 348 ++++++++++------------ 1 file changed, 152 insertions(+), 196 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 6d895fccf41a1..75ad1870f70d2 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -17,28 +17,25 @@ use reth_cli_runner::CliContext; use reth_config::Config; use reth_consensus::Consensus; use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx, DatabaseEnv}; -use reth_exex::ExExManagerHandle; +use reth_evm::execute::{BatchBlockExecutionOutput, BatchExecutor, BlockExecutorProvider}; use reth_interfaces::p2p::full_block::FullBlockClient; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; -use reth_primitives::{ - fs, - stage::{StageCheckpoint, StageId}, - BlockHashOrNumber, ChainSpec, PruneModes, +use reth_primitives::{fs, stage::StageCheckpoint, BlockHashOrNumber, ChainSpec, PruneModes}; +use reth_provider::{ + BlockNumReader, BlockWriter, BundleStateWithReceipts, HeaderProvider, LatestStateProviderRef, + OriginalValuesKnown, ProviderError, ProviderFactory, }; -use reth_provider::{BlockWriter, ProviderFactory, StageCheckpointReader}; +use reth_revm::database::StateProviderDatabase; use reth_stages::{ - stages::{ - AccountHashingStage, ExecutionStage, ExecutionStageThresholds, MerkleStage, - StorageHashingStage, MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, - }, + stages::{AccountHashingStage, MerkleStage, StorageHashingStage}, ExecInput, Stage, }; use reth_tasks::TaskExecutor; use std::{net::SocketAddr, path::PathBuf, sync::Arc}; -use tracing::{debug, info, warn}; +use 
tracing::*; -/// `reth merkle-debug` command +/// `reth debug merkle` command #[derive(Debug, Parser)] pub struct Command { /// The path to the data dir for all reth files and subdirectories. @@ -140,6 +137,8 @@ impl Command { ) .await?; + let executor_provider = block_executor!(self.chain.clone()); + // Initialize the fetch client info!(target: "reth::cli", target_block_number=self.to, "Downloading tip of block range"); let fetch_client = network.fetch_client().await?; @@ -160,224 +159,181 @@ impl Command { let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); let block_range_client = FullBlockClient::new(fetch_client, consensus); - // get the execution checkpoint - let execution_checkpoint_block = - provider_rw.get_stage_checkpoint(StageId::Execution)?.unwrap_or_default().block_number; - assert!(execution_checkpoint_block < self.to, "Nothing to run"); + // get best block number + let best_block_number = provider_rw.best_block_number()?; + assert!(best_block_number < self.to, "Nothing to run"); // get the block range from the network - info!(target: "reth::cli", target_block_number=?self.to, "Downloading range of blocks"); - let block_range = block_range_client - .get_full_block_range(to_header.hash_slow(), self.to - execution_checkpoint_block) + let block_range = best_block_number..=self.to; + info!(target: "reth::cli", ?block_range, "Downloading range of blocks"); + let blocks = block_range_client + .get_full_block_range(to_header.hash_slow(), self.to - best_block_number) .await; - // recover senders - let blocks_with_senders = - block_range.into_iter().map(|block| block.try_seal_with_senders()); - - // insert the blocks - for senders_res in blocks_with_senders { - let sealed_block = match senders_res { - Ok(senders) => senders, - Err(err) => { - warn!(target: "reth::cli", "Error sealing block with senders: {err:?}. 
Skipping..."); - continue - } - }; - provider_rw.insert_block(sealed_block, None)?; - } - - // Check if any of hashing or merkle stages aren't on the same block number as - // Execution stage or have any intermediate progress. - let should_reset_stages = - [StageId::AccountHashing, StageId::StorageHashing, StageId::MerkleExecute] - .into_iter() - .map(|stage_id| provider_rw.get_stage_checkpoint(stage_id)) - .collect::, _>>()? - .into_iter() - .map(Option::unwrap_or_default) - .any(|checkpoint| { - checkpoint.block_number != execution_checkpoint_block || - checkpoint.stage_checkpoint.is_some() - }); - - let executor = block_executor!(self.chain.clone()); - let mut execution_stage = ExecutionStage::new( - executor, - ExecutionStageThresholds { - max_blocks: Some(1), - max_changes: None, - max_cumulative_gas: None, - max_duration: None, - }, - MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, - PruneModes::all(), - ExExManagerHandle::empty(), - ); + let mut td = provider_rw + .header_td_by_number(best_block_number)? 
+ .ok_or(ProviderError::TotalDifficultyNotFound(best_block_number))?; let mut account_hashing_stage = AccountHashingStage::default(); let mut storage_hashing_stage = StorageHashingStage::default(); let mut merkle_stage = MerkleStage::default_execution(); - for block in execution_checkpoint_block + 1..=self.to { - tracing::trace!(target: "reth::cli", block, "Executing block"); - let progress = - if (!should_reset_stages || block > execution_checkpoint_block + 1) && block > 0 { - Some(block - 1) - } else { - None - }; - - execution_stage.execute( - &provider_rw, - ExecInput { - target: Some(block), - checkpoint: block.checked_sub(1).map(StageCheckpoint::new), - }, + for block in blocks.into_iter().rev() { + let block_number = block.number; + let sealed_block = block + .try_seal_with_senders() + .map_err(|block| eyre::eyre!("Error sealing block with senders: {block:?}"))?; + trace!(target: "reth::cli", block_number, "Executing block"); + + provider_rw.insert_block(sealed_block.clone(), None)?; + + td += sealed_block.difficulty; + let mut executor = executor_provider.batch_executor( + StateProviderDatabase::new(LatestStateProviderRef::new( + provider_rw.tx_ref(), + provider_rw.static_file_provider().clone(), + )), + PruneModes::none(), + ); + executor.execute_one((&sealed_block.clone().unseal(), td).into())?; + let BatchBlockExecutionOutput { bundle, receipts, first_block } = executor.finalize(); + BundleStateWithReceipts::new(bundle, receipts, first_block).write_to_storage( + provider_rw.tx_ref(), + None, + OriginalValuesKnown::Yes, )?; + let checkpoint = Some(StageCheckpoint::new(block_number - 1)); + let mut account_hashing_done = false; while !account_hashing_done { - let output = account_hashing_stage.execute( - &provider_rw, - ExecInput { - target: Some(block), - checkpoint: progress.map(StageCheckpoint::new), - }, - )?; + let output = account_hashing_stage + .execute(&provider_rw, ExecInput { target: Some(block_number), checkpoint })?; account_hashing_done = 
output.done; } let mut storage_hashing_done = false; while !storage_hashing_done { - let output = storage_hashing_stage.execute( - &provider_rw, - ExecInput { - target: Some(block), - checkpoint: progress.map(StageCheckpoint::new), - }, - )?; + let output = storage_hashing_stage + .execute(&provider_rw, ExecInput { target: Some(block_number), checkpoint })?; storage_hashing_done = output.done; } - let incremental_result = merkle_stage.execute( - &provider_rw, - ExecInput { target: Some(block), checkpoint: progress.map(StageCheckpoint::new) }, - ); + let incremental_result = merkle_stage + .execute(&provider_rw, ExecInput { target: Some(block_number), checkpoint }); - if incremental_result.is_err() { - tracing::warn!(target: "reth::cli", block, "Incremental calculation failed, retrying from scratch"); - let incremental_account_trie = provider_rw - .tx_ref() - .cursor_read::()? - .walk_range(..)? - .collect::, _>>()?; - let incremental_storage_trie = provider_rw - .tx_ref() - .cursor_dup_read::()? - .walk_range(..)? - .collect::, _>>()?; - - let clean_input = ExecInput { target: Some(block), checkpoint: None }; - loop { - let clean_result = merkle_stage.execute(&provider_rw, clean_input); - assert!(clean_result.is_ok(), "Clean state root calculation failed"); - if clean_result.unwrap().done { - break - } + if incremental_result.is_ok() { + debug!(target: "reth::cli", block_number, "Successfully computed incremental root"); + continue + } + + warn!(target: "reth::cli", block_number, "Incremental calculation failed, retrying from scratch"); + let incremental_account_trie = provider_rw + .tx_ref() + .cursor_read::()? + .walk_range(..)? + .collect::, _>>()?; + let incremental_storage_trie = provider_rw + .tx_ref() + .cursor_dup_read::()? + .walk_range(..)? 
+ .collect::, _>>()?; + + let clean_input = ExecInput { target: Some(sealed_block.number), checkpoint: None }; + loop { + let clean_result = merkle_stage.execute(&provider_rw, clean_input); + assert!(clean_result.is_ok(), "Clean state root calculation failed"); + if clean_result.unwrap().done { + break } + } - let clean_account_trie = provider_rw - .tx_ref() - .cursor_read::()? - .walk_range(..)? - .collect::, _>>()?; - let clean_storage_trie = provider_rw - .tx_ref() - .cursor_dup_read::()? - .walk_range(..)? - .collect::, _>>()?; - - tracing::info!(target: "reth::cli", block, "Comparing incremental trie vs clean trie"); - - // Account trie - let mut incremental_account_mismatched = Vec::new(); - let mut clean_account_mismatched = Vec::new(); - let mut incremental_account_trie_iter = - incremental_account_trie.into_iter().peekable(); - let mut clean_account_trie_iter = clean_account_trie.into_iter().peekable(); - while incremental_account_trie_iter.peek().is_some() || - clean_account_trie_iter.peek().is_some() - { - match (incremental_account_trie_iter.next(), clean_account_trie_iter.next()) { - (Some(incremental), Some(clean)) => { - similar_asserts::assert_eq!( - incremental.0, - clean.0, - "Nibbles don't match" - ); - if incremental.1 != clean.1 && - clean.0 .0.len() > self.skip_node_depth.unwrap_or_default() - { - incremental_account_mismatched.push(incremental); - clean_account_mismatched.push(clean); - } - } - (Some(incremental), None) => { - tracing::warn!(target: "reth::cli", next = ?incremental, "Incremental account trie has more entries"); - } - (None, Some(clean)) => { - tracing::warn!(target: "reth::cli", next = ?clean, "Clean account trie has more entries"); - } - (None, None) => { - tracing::info!(target: "reth::cli", "Exhausted all account trie entries"); + let clean_account_trie = provider_rw + .tx_ref() + .cursor_read::()? + .walk_range(..)? + .collect::, _>>()?; + let clean_storage_trie = provider_rw + .tx_ref() + .cursor_dup_read::()? 
+ .walk_range(..)? + .collect::, _>>()?; + + info!(target: "reth::cli", block_number, "Comparing incremental trie vs clean trie"); + + // Account trie + let mut incremental_account_mismatched = Vec::new(); + let mut clean_account_mismatched = Vec::new(); + let mut incremental_account_trie_iter = incremental_account_trie.into_iter().peekable(); + let mut clean_account_trie_iter = clean_account_trie.into_iter().peekable(); + while incremental_account_trie_iter.peek().is_some() || + clean_account_trie_iter.peek().is_some() + { + match (incremental_account_trie_iter.next(), clean_account_trie_iter.next()) { + (Some(incremental), Some(clean)) => { + similar_asserts::assert_eq!(incremental.0, clean.0, "Nibbles don't match"); + if incremental.1 != clean.1 && + clean.0 .0.len() > self.skip_node_depth.unwrap_or_default() + { + incremental_account_mismatched.push(incremental); + clean_account_mismatched.push(clean); } } + (Some(incremental), None) => { + warn!(target: "reth::cli", next = ?incremental, "Incremental account trie has more entries"); + } + (None, Some(clean)) => { + warn!(target: "reth::cli", next = ?clean, "Clean account trie has more entries"); + } + (None, None) => { + info!(target: "reth::cli", "Exhausted all account trie entries"); + } } + } - // Stoarge trie - let mut first_mismatched_storage = None; - let mut incremental_storage_trie_iter = - incremental_storage_trie.into_iter().peekable(); - let mut clean_storage_trie_iter = clean_storage_trie.into_iter().peekable(); - while incremental_storage_trie_iter.peek().is_some() || - clean_storage_trie_iter.peek().is_some() - { - match (incremental_storage_trie_iter.next(), clean_storage_trie_iter.next()) { - (Some(incremental), Some(clean)) => { - if incremental != clean && - clean.1.nibbles.len() > self.skip_node_depth.unwrap_or_default() - { - first_mismatched_storage = Some((incremental, clean)); - break - } - } - (Some(incremental), None) => { - tracing::warn!(target: "reth::cli", next = ?incremental, 
"Incremental storage trie has more entries"); - } - (None, Some(clean)) => { - tracing::warn!(target: "reth::cli", next = ?clean, "Clean storage trie has more entries") - } - (None, None) => { - tracing::info!(target: "reth::cli", "Exhausted all storage trie entries.") + // Stoarge trie + let mut first_mismatched_storage = None; + let mut incremental_storage_trie_iter = incremental_storage_trie.into_iter().peekable(); + let mut clean_storage_trie_iter = clean_storage_trie.into_iter().peekable(); + while incremental_storage_trie_iter.peek().is_some() || + clean_storage_trie_iter.peek().is_some() + { + match (incremental_storage_trie_iter.next(), clean_storage_trie_iter.next()) { + (Some(incremental), Some(clean)) => { + if incremental != clean && + clean.1.nibbles.len() > self.skip_node_depth.unwrap_or_default() + { + first_mismatched_storage = Some((incremental, clean)); + break } } + (Some(incremental), None) => { + warn!(target: "reth::cli", next = ?incremental, "Incremental storage trie has more entries"); + } + (None, Some(clean)) => { + warn!(target: "reth::cli", next = ?clean, "Clean storage trie has more entries") + } + (None, None) => { + info!(target: "reth::cli", "Exhausted all storage trie entries.") + } } - - similar_asserts::assert_eq!( - ( - incremental_account_mismatched, - first_mismatched_storage.as_ref().map(|(incremental, _)| incremental) - ), - ( - clean_account_mismatched, - first_mismatched_storage.as_ref().map(|(_, clean)| clean) - ), - "Mismatched trie nodes" - ); } + + similar_asserts::assert_eq!( + ( + incremental_account_mismatched, + first_mismatched_storage.as_ref().map(|(incremental, _)| incremental) + ), + ( + clean_account_mismatched, + first_mismatched_storage.as_ref().map(|(_, clean)| clean) + ), + "Mismatched trie nodes" + ); } + info!(target: "reth::cli", ?block_range, "Successfully validated incremental roots"); + Ok(()) } } From f8cd8c56a297853403a6ad2e740e07677bf4abfc Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 4 
May 2024 13:50:23 +0200 Subject: [PATCH 468/700] feat: add helper functions for batch executor (#8087) --- crates/evm/src/execute.rs | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 7b3e586467252..e7ce09e798056 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -34,6 +34,27 @@ pub trait BatchExecutor { /// Executes the next block in the batch and update the state internally. fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error>; + /// Executes multiple inputs in the batch and update the state internally. + fn execute_many<'a, I>(&mut self, inputs: I) -> Result<(), Self::Error> + where + I: IntoIterator>, + { + for input in inputs { + self.execute_one(input)?; + } + Ok(()) + } + + /// Executes the entire batch and return the final state. + fn execute_batch<'a, I>(mut self, batch: I) -> Result + where + I: IntoIterator>, + Self: Sized, + { + self.execute_many(batch)?; + Ok(self.finalize()) + } + /// Finishes the batch and return the final state. fn finalize(self) -> Self::Output; From 82e4ad9e764ac4003139d819246e5fa40b60b953 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Sat, 4 May 2024 13:07:18 +0100 Subject: [PATCH 469/700] docs(book): update CLI (#8093) --- book/cli/reth/db/checksum.md | 124 +++++++++++++++++++++++++++ book/cli/reth/init-state.md | 158 +++++++++++++++++++++++++++++++++++ book/cli/reth/node.md | 15 +--- 3 files changed, 285 insertions(+), 12 deletions(-) create mode 100644 book/cli/reth/db/checksum.md create mode 100644 book/cli/reth/init-state.md diff --git a/book/cli/reth/db/checksum.md b/book/cli/reth/db/checksum.md new file mode 100644 index 0000000000000..6f080c74ba890 --- /dev/null +++ b/book/cli/reth/db/checksum.md @@ -0,0 +1,124 @@ +# reth db checksum + +Calculates the content checksum of a table + +```bash +$ reth db checksum --help +Usage: reth db checksum [OPTIONS]
+ +Arguments: +
+ The table name + +Options: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, goerli, holesky, dev + + [default: mainnet] + + --instance + Add a new instance of a node. + + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. + + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. + + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + + [default: 1] + + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + [default: always] + + Possible values: + - always: Colors on + - auto: Colors on + - never: Colors off + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` \ No newline at end of file diff --git a/book/cli/reth/init-state.md b/book/cli/reth/init-state.md new file mode 100644 index 0000000000000..0254a43f58c47 --- /dev/null +++ b/book/cli/reth/init-state.md @@ -0,0 +1,158 @@ +# reth init-state + +Initialize the database from a state dump file + +```bash +$ reth init-state --help +Usage: reth init-state [OPTIONS] + +Options: + --datadir + The path to the data dir for all reth files and subdirectories. 
+ + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, goerli, holesky, dev + + [default: mainnet] + + --state + JSONL file with state dump. + + Must contain accounts in following format, additional account fields are ignored. Can + also contain { "root": \ } as first line. + { + "balance": "\", + "nonce": \, + "code": "\", + "storage": { + "\": "\", + .. + }, + "address": "\", + } + + Allows init at a non-genesis block. Caution! Blocks must be manually imported up until + and including the non-genesis block to init chain at. See 'import' command. + + --instance + Add a new instance of a node. + + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. + + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. + + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + + [default: 1] + + -h, --help + Print help (see a summary with '-h') + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. 
assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + [default: always] + + Possible values: + - always: Colors on + - auto: Colors on + - never: Colors off + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` \ No newline at end of file diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index d1972a6085228..edf0993d7a3bb 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -438,21 +438,12 @@ Debug: --debug.max-block Runs the sync only up to the specified block - --debug.print-inspector - Print opcode level traces directly to console during execution - - --debug.hook-block - Hook on a specific block during execution - - --debug.hook-transaction - Hook on a specific transaction during execution - - --debug.hook-all - Hook on every transaction in a block - --debug.skip-fcu If provided, the engine will skip `n` consecutive FCUs + --debug.skip-new-payload + If provided, the engine will skip `n` consecutive new payloads + --debug.engine-api-store The path to store engine API messages at. 
If specified, all of the intercepted engine API messages will be written to specified location From ac1d5324ec597ad6494a3025f97c5d4dd501945e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 4 May 2024 14:35:23 +0200 Subject: [PATCH 470/700] chore: rm unused file (#8096) --- crates/rpc/rpc-types/src/admin.rs | 123 ------------------------------ 1 file changed, 123 deletions(-) delete mode 100644 crates/rpc/rpc-types/src/admin.rs diff --git a/crates/rpc/rpc-types/src/admin.rs b/crates/rpc/rpc-types/src/admin.rs deleted file mode 100644 index aeb44fab615c1..0000000000000 --- a/crates/rpc/rpc-types/src/admin.rs +++ /dev/null @@ -1,123 +0,0 @@ -use crate::{NodeRecord, PeerId}; -use alloy_genesis::ChainConfig; -use alloy_primitives::{B256, U256}; -use serde::{Deserialize, Serialize}; -use std::{ - collections::BTreeMap, - net::{IpAddr, SocketAddr}, -}; - -/// Represents the `admin_nodeInfo` response, which can be queried for all the information -/// known about the running node at the networking granularity. -/// -/// Note: this format is not standardized. Reth follows Geth's format, -/// see: -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct NodeInfo { - /// Enode of the node in URL format. - pub enode: NodeRecord, - /// ID of the local node. - pub id: PeerId, - /// IP of the local node. - pub ip: IpAddr, - /// Address exposed for listening for the local node. - #[serde(rename = "listenAddr")] - pub listen_addr: SocketAddr, - /// Ports exposed by the node for discovery and listening. - pub ports: Ports, - /// Name of the network - pub name: String, - /// Networking protocols being run by the local node. - pub protocols: Protocols, -} - -impl NodeInfo { - /// Creates a new instance of `NodeInfo`. 
- pub fn new(enr: NodeRecord, status: NetworkStatus, config: ChainConfig) -> NodeInfo { - NodeInfo { - enode: enr, - id: enr.id, - ip: enr.address, - listen_addr: enr.tcp_addr(), - ports: Ports { discovery: enr.udp_port, listener: enr.tcp_port }, - name: status.client_version, - protocols: Protocols { - eth: EthProtocolInfo::new(status.eth_protocol_info, config), - other: Default::default(), - }, - } - } -} - -/// All supported protocols -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct Protocols { - /// Info about `eth` sub-protocol - pub eth: EthProtocolInfo, - /// Placeholder for any other protocols - #[serde(flatten, default)] - pub other: BTreeMap, -} - -/// Ports exposed by the node for discovery and listening. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct Ports { - /// Port exposed for node discovery. - pub discovery: u16, - /// Port exposed for listening. - pub listener: u16, -} - -/// The status of the network being ran by the local node. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct NetworkStatus { - /// The local node client version. - pub client_version: String, - /// The current ethereum protocol version - pub protocol_version: u64, - /// Information about the Ethereum Wire Protocol. - pub eth_protocol_info: EthProtocolInfo, -} - -/// Information about the Ethereum Wire Protocol (ETH) -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct EthProtocolInfo { - /// The current difficulty at the head of the chain. - pub difficulty: U256, - /// The block hash of the head of the chain. - pub head: B256, - /// Network ID in base 10. - pub network: u64, - /// Genesis block of the current chain. - pub genesis: B256, - /// Configuration of the chain. - pub config: ChainConfig, -} - -impl EthProtocolInfo { - /// Creates a new instance of `EthProtocolInfo`. 
- pub fn new(info: EthProtocolInfo, config: ChainConfig) -> EthProtocolInfo { - EthProtocolInfo { - difficulty: info.difficulty, - head: info.head, - network: info.network, - genesis: info.genesis, - config, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_parse_node_info_roundtrip() { - let sample = r#"{"enode":"enode://44826a5d6a55f88a18298bca4773fca5749cdc3a5c9f308aa7d810e9b31123f3e7c5fba0b1d70aac5308426f47df2a128a6747040a3815cc7dd7167d03be320d@[::]:30303","id":"44826a5d6a55f88a18298bca4773fca5749cdc3a5c9f308aa7d810e9b31123f3e7c5fba0b1d70aac5308426f47df2a128a6747040a3815cc7dd7167d03be320d","ip":"::","listenAddr":"[::]:30303","name":"reth","ports":{"discovery":30303,"listener":30303},"protocols":{"eth":{"difficulty":17334254859343145000,"genesis":"0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3","head":"0xb83f73fbe6220c111136aefd27b160bf4a34085c65ba89f24246b3162257c36a","network":1, "config": {"chainId": 17000,"homesteadBlock": 0,"daoForkSupport": true,"eip150Block": 0,"eip155Block": 0,"eip158Block": 0,"byzantiumBlock": 0,"constantinopleBlock": 0,"petersburgBlock": 0,"istanbulBlock": 0,"berlinBlock": 0,"londonBlock": 0,"shanghaiTime": 1696000704,"cancunTime": 1707305664,"terminalTotalDifficulty": 0,"terminalTotalDifficultyPassed": true,"ethash": {}}}}}"#; - - let info: NodeInfo = serde_json::from_str(sample).unwrap(); - let serialized = serde_json::to_string_pretty(&info).unwrap(); - let de_serialized: NodeInfo = serde_json::from_str(&serialized).unwrap(); - assert_eq!(info, de_serialized) - } -} From 2c70e2ab3bd0266103348b462af1abe497397bee Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 4 May 2024 14:40:30 +0200 Subject: [PATCH 471/700] feat: rm reth-primitives dep (#8097) --- Cargo.lock | 2 +- crates/net/types/Cargo.toml | 3 ++- crates/net/types/src/lib.rs | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1828b2a2faef3..cafe4b83ad41c 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -7208,8 +7208,8 @@ dependencies = [ name = "reth-network-types" version = "0.2.0-beta.6" dependencies = [ + "alloy-primitives", "enr", - "reth-primitives", "reth-rpc-types", "secp256k1", "serde_with", diff --git a/crates/net/types/Cargo.toml b/crates/net/types/Cargo.toml index 841a76dfe5744..9092236b1a1a7 100644 --- a/crates/net/types/Cargo.toml +++ b/crates/net/types/Cargo.toml @@ -14,7 +14,8 @@ workspace = true [dependencies] # reth reth-rpc-types.workspace = true -reth-primitives.workspace = true + +alloy-primitives.workspace = true # eth enr.workspace = true diff --git a/crates/net/types/src/lib.rs b/crates/net/types/src/lib.rs index ccd9757c94560..8d75af9333d8c 100644 --- a/crates/net/types/src/lib.rs +++ b/crates/net/types/src/lib.rs @@ -126,7 +126,7 @@ impl std::fmt::Display for AnyNode { AnyNode::NodeRecord(record) => write!(f, "{record}"), AnyNode::Enr(enr) => write!(f, "{enr}"), AnyNode::PeerId(peer_id) => { - write!(f, "enode://{}", reth_primitives::hex::encode(peer_id.as_slice())) + write!(f, "enode://{}", alloy_primitives::hex::encode(peer_id.as_slice())) } } } From bff14c603f26f5d0308ad930a16b7c002fbf3511 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 4 May 2024 14:40:39 +0200 Subject: [PATCH 472/700] chore: rm leftover peer.rs file (#8098) --- crates/primitives/src/peer.rs | 227 ---------------------------------- 1 file changed, 227 deletions(-) delete mode 100644 crates/primitives/src/peer.rs diff --git a/crates/primitives/src/peer.rs b/crates/primitives/src/peer.rs deleted file mode 100644 index f66361f39b204..0000000000000 --- a/crates/primitives/src/peer.rs +++ /dev/null @@ -1,227 +0,0 @@ -use enr::Enr; -use reth_rpc_types::NodeRecord; -use secp256k1::{constants::UNCOMPRESSED_PUBLIC_KEY_SIZE, PublicKey, SecretKey}; -use std::{net::IpAddr, str::FromStr}; - -// Re-export PeerId for ease of use. 
-pub use reth_rpc_types::PeerId; - -/// This tag should be set to indicate to libsecp256k1 that the following bytes denote an -/// uncompressed pubkey. -/// -/// `SECP256K1_TAG_PUBKEY_UNCOMPRESSED` = `0x04` -/// -/// See: -const SECP256K1_TAG_PUBKEY_UNCOMPRESSED: u8 = 4; - -/// Converts a [secp256k1::PublicKey] to a [PeerId] by stripping the -/// `SECP256K1_TAG_PUBKEY_UNCOMPRESSED` tag and storing the rest of the slice in the [PeerId]. -#[inline] -pub fn pk2id(pk: &PublicKey) -> PeerId { - PeerId::from_slice(&pk.serialize_uncompressed()[1..]) -} - -/// Converts a [PeerId] to a [secp256k1::PublicKey] by prepending the [PeerId] bytes with the -/// SECP256K1_TAG_PUBKEY_UNCOMPRESSED tag. -#[inline] -pub fn id2pk(id: PeerId) -> Result { - // NOTE: B512 is used as a PeerId because 512 bits is enough to represent an uncompressed - // public key. - let mut s = [0u8; UNCOMPRESSED_PUBLIC_KEY_SIZE]; - s[0] = SECP256K1_TAG_PUBKEY_UNCOMPRESSED; - s[1..].copy_from_slice(id.as_slice()); - PublicKey::from_slice(&s) -} - -/// A peer that can come in ENR or [NodeRecord] form. -#[derive( - Debug, Clone, Eq, PartialEq, Hash, serde_with::SerializeDisplay, serde_with::DeserializeFromStr, -)] -pub enum AnyNode { - /// An "enode:" peer with full ip - NodeRecord(NodeRecord), - /// An "enr:" - Enr(Enr), - /// An incomplete "enode" with only a peer id - PeerId(PeerId), -} - -impl AnyNode { - /// Returns the peer id of the node. - pub fn peer_id(&self) -> PeerId { - match self { - AnyNode::NodeRecord(record) => record.id, - AnyNode::Enr(enr) => pk2id(&enr.public_key()), - AnyNode::PeerId(peer_id) => *peer_id, - } - } - - /// Returns the full node record if available. 
- pub fn node_record(&self) -> Option { - match self { - AnyNode::NodeRecord(record) => Some(*record), - AnyNode::Enr(enr) => { - let node_record = NodeRecord { - address: enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from))?, - tcp_port: enr.tcp4().or_else(|| enr.tcp6())?, - udp_port: enr.udp4().or_else(|| enr.udp6())?, - id: pk2id(&enr.public_key()), - } - .into_ipv4_mapped(); - Some(node_record) - } - _ => None, - } - } -} - -impl From for AnyNode { - fn from(value: NodeRecord) -> Self { - Self::NodeRecord(value) - } -} - -impl From> for AnyNode { - fn from(value: Enr) -> Self { - Self::Enr(value) - } -} - -impl FromStr for AnyNode { - type Err = String; - - fn from_str(s: &str) -> Result { - if let Some(rem) = s.strip_prefix("enode://") { - if let Ok(record) = NodeRecord::from_str(s) { - return Ok(AnyNode::NodeRecord(record)) - } - // incomplete enode - if let Ok(peer_id) = PeerId::from_str(rem) { - return Ok(AnyNode::PeerId(peer_id)) - } - return Err(format!("invalid public key: {rem}")) - } - if s.starts_with("enr:") { - return Enr::from_str(s).map(AnyNode::Enr) - } - Err("missing 'enr:' prefix for base64-encoded record".to_string()) - } -} - -impl std::fmt::Display for AnyNode { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - AnyNode::NodeRecord(record) => write!(f, "{record}"), - AnyNode::Enr(enr) => write!(f, "{enr}"), - AnyNode::PeerId(peer_id) => { - write!(f, "enode://{}", crate::hex::encode(peer_id.as_slice())) - } - } - } -} - -/// Generic wrapper with peer id -#[derive(Debug)] -pub struct WithPeerId(PeerId, pub T); - -impl From<(PeerId, T)> for WithPeerId { - fn from(value: (PeerId, T)) -> Self { - Self(value.0, value.1) - } -} - -impl WithPeerId { - /// Wraps the value with the peerid. 
- pub fn new(peer: PeerId, value: T) -> Self { - Self(peer, value) - } - - /// Get the peer id - pub fn peer_id(&self) -> PeerId { - self.0 - } - - /// Get the underlying data - pub fn data(&self) -> &T { - &self.1 - } - - /// Returns ownership of the underlying data. - pub fn into_data(self) -> T { - self.1 - } - - /// Transform the data - pub fn transform>(self) -> WithPeerId { - WithPeerId(self.0, self.1.into()) - } - - /// Split the wrapper into [PeerId] and data tuple - pub fn split(self) -> (PeerId, T) { - (self.0, self.1) - } - - /// Maps the inner value to a new value using the given function. - pub fn map U>(self, op: F) -> WithPeerId { - WithPeerId(self.0, op(self.1)) - } -} - -impl WithPeerId> { - /// returns `None` if the inner value is `None`, otherwise returns `Some(WithPeerId)`. - pub fn transpose(self) -> Option> { - self.1.map(|v| WithPeerId(self.0, v)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use secp256k1::SECP256K1; - - #[test] - fn test_node_record_parse() { - let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; - let node: AnyNode = url.parse().unwrap(); - assert_eq!(node, AnyNode::NodeRecord(NodeRecord { - address: IpAddr::V4([10,3,58,6].into()), - tcp_port: 30303, - udp_port: 30301, - id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), - })); - assert_eq!(node.to_string(), url) - } - - #[test] - fn test_peer_id_parse() { - let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0"; - let node: AnyNode = url.parse().unwrap(); - assert_eq!(node, AnyNode::PeerId("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap())); - 
assert_eq!(node.to_string(), url); - - let url = "enode://"; - let err = url.parse::().unwrap_err(); - assert_eq!(err, "invalid public key: "); - } - - // - #[test] - fn test_enr_parse() { - let url = "enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8"; - let node: AnyNode = url.parse().unwrap(); - assert_eq!( - node.peer_id(), - "0xca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f" - .parse::() - .unwrap() - ); - assert_eq!(node.to_string(), url); - } - - #[test] - fn pk2id2pk() { - let prikey = SecretKey::new(&mut secp256k1::rand::thread_rng()); - let pubkey = PublicKey::from_secret_key(SECP256K1, &prikey); - assert_eq!(pubkey, id2pk(pk2id(&pubkey)).unwrap()); - } -} From 4b78706ed6ea7d6a9dcaed7a09c5bd5dafef5a30 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Sat, 4 May 2024 14:55:03 +0200 Subject: [PATCH 473/700] chore(cli): fix displayed block range in merkle debug script (#8091) --- bin/reth/src/commands/debug_cmd/merkle.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 75ad1870f70d2..c42cbdd4df667 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -164,7 +164,7 @@ impl Command { assert!(best_block_number < self.to, "Nothing to run"); // get the block range from the network - let block_range = best_block_number..=self.to; + let block_range = best_block_number + 1..=self.to; info!(target: "reth::cli", ?block_range, "Downloading range of blocks"); let blocks = block_range_client .get_full_block_range(to_header.hash_slow(), self.to - best_block_number) From d01996103bcba6efa6b2eadcd587d444d0fcfe65 Mon Sep 17 00:00:00 2001 From: guha-rahul <52607971+guha-rahul@users.noreply.github.com> 
Date: Sat, 4 May 2024 22:04:34 +0530 Subject: [PATCH 474/700] replace U64 fields with primitive u64 (#8099) Co-authored-by: Matthias Seitz --- crates/rpc/rpc-types/src/mev.rs | 133 +++++++++++++++++++++---------- crates/rpc/rpc/src/eth/bundle.rs | 2 +- 2 files changed, 92 insertions(+), 43 deletions(-) diff --git a/crates/rpc/rpc-types/src/mev.rs b/crates/rpc/rpc-types/src/mev.rs index ae94375dbc296..9126c09635dbe 100644 --- a/crates/rpc/rpc-types/src/mev.rs +++ b/crates/rpc/rpc-types/src/mev.rs @@ -1,12 +1,11 @@ //! MEV bundle type bindings use crate::{BlockId, BlockNumberOrTag, Log}; -use alloy_primitives::{Address, Bytes, TxHash, B256, U256, U64}; +use alloy_primitives::{Address, Bytes, TxHash, B256, U256}; use serde::{ ser::{SerializeSeq, Serializer}, Deserialize, Deserializer, Serialize, }; - /// A bundle of transactions to send to the matchmaker. /// /// Note: this is for `mev_sendBundle` and not `eth_sendBundle`. @@ -35,28 +34,33 @@ pub struct SendBundleRequest { #[serde(rename_all = "camelCase")] pub struct Inclusion { /// The first block the bundle is valid for. - pub block: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub block: u64, /// The last block the bundle is valid for. - #[serde(skip_serializing_if = "Option::is_none")] - pub max_block: Option, + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] + pub max_block: Option, } impl Inclusion { /// Creates a new inclusion with the given min block.. pub fn at_block(block: u64) -> Self { - Self { block: U64::from(block), max_block: None } + Self { block, max_block: None } } /// Returns the block number of the first block the bundle is valid for. #[inline] pub fn block_number(&self) -> u64 { - self.block.to() + self.block } /// Returns the block number of the last block the bundle is valid for. 
#[inline] pub fn max_block_number(&self) -> Option { - self.max_block.as_ref().map(|b| b.to()) + self.max_block.as_ref().map(|b| *b) } } @@ -100,8 +104,10 @@ pub struct Validity { #[serde(rename_all = "camelCase")] pub struct Refund { /// The index of the transaction in the bundle. + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] pub body_idx: u64, /// The minimum percent of the bundle's earnings to redistribute. + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] pub percent: u64, } @@ -113,6 +119,7 @@ pub struct RefundConfig { /// The address to refund. pub address: Address, /// The minimum percent of the bundle's earnings to redistribute. + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] pub percent: u64, } @@ -312,26 +319,42 @@ pub struct SimBundleOverrides { /// Block used for simulation state. Defaults to latest block. /// Block header data will be derived from parent block by default. /// Specify other params to override the default values. - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub parent_block: Option, /// Block number used for simulation, defaults to parentBlock.number + 1 - #[serde(skip_serializing_if = "Option::is_none")] - pub block_number: Option, + #[serde(default, with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint")] + pub block_number: Option, /// Coinbase used for simulation, defaults to parentBlock.coinbase - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub coinbase: Option
, /// Timestamp used for simulation, defaults to parentBlock.timestamp + 12 - #[serde(skip_serializing_if = "Option::is_none")] - pub timestamp: Option, + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] + pub timestamp: Option, /// Gas limit used for simulation, defaults to parentBlock.gasLimit - #[serde(skip_serializing_if = "Option::is_none")] - pub gas_limit: Option, + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] + pub gas_limit: Option, /// Base fee used for simulation, defaults to parentBlock.baseFeePerGas - #[serde(skip_serializing_if = "Option::is_none")] - pub base_fee: Option, + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] + pub base_fee: Option, /// Timeout in seconds, defaults to 5 - #[serde(skip_serializing_if = "Option::is_none")] - pub timeout: Option, + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] + pub timeout: Option, } /// Response from the matchmaker after sending a simulation request. @@ -341,20 +364,25 @@ pub struct SimBundleResponse { /// Whether the simulation was successful. pub success: bool, /// Error message if the simulation failed. - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub error: Option, /// The block number of the simulated block. - pub state_block: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub state_block: u64, /// The gas price of the simulated block. - pub mev_gas_price: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub mev_gas_price: u64, /// The profit of the simulated block. 
- pub profit: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub profit: u64, /// The refundable value of the simulated block. - pub refundable_value: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub refundable_value: u64, /// The gas used by the simulated block. - pub gas_used: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub gas_used: u64, /// Logs returned by mev_simBundle. - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub logs: Option>, } @@ -363,18 +391,18 @@ pub struct SimBundleResponse { #[serde(rename_all = "camelCase")] pub struct SimBundleLogs { /// Logs for transactions in bundle. - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub tx_logs: Option>, /// Logs for bundles in bundle. - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub bundle_logs: Option>, } impl SendBundleRequest { /// Create a new bundle request. pub fn new( - block_num: U64, - max_block: Option, + block_num: u64, + max_block: Option, protocol_version: ProtocolVersion, bundle_body: Vec, ) -> Self { @@ -404,8 +432,12 @@ pub struct PrivateTransactionRequest { pub tx: Bytes, /// Hex-encoded number string, optional. Highest block number in which the transaction should /// be included. - #[serde(skip_serializing_if = "Option::is_none")] - pub max_block_number: Option, + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] + pub max_block_number: Option, /// Preferences for private transaction. 
#[serde(default, skip_serializing_if = "PrivateTransactionPreferences::is_empty")] pub preferences: PrivateTransactionPreferences, @@ -415,10 +447,10 @@ pub struct PrivateTransactionRequest { #[derive(Serialize, Deserialize, Default, Debug, Clone, PartialEq, Eq)] pub struct PrivateTransactionPreferences { /// Requirements for the bundle to be included in the block. - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub validity: Option, /// Preferences on what data should be shared about the bundle and its transactions - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub privacy: Option, } @@ -593,18 +625,27 @@ pub struct EthSendBundle { /// A list of hex-encoded signed transactions pub txs: Vec, /// hex-encoded block number for which this bundle is valid - pub block_number: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub block_number: u64, /// unix timestamp when this bundle becomes active - #[serde(skip_serializing_if = "Option::is_none")] + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] pub min_timestamp: Option, /// unix timestamp how long this bundle stays valid - #[serde(skip_serializing_if = "Option::is_none")] + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] pub max_timestamp: Option, /// list of hashes of possibly reverting txs #[serde(default, skip_serializing_if = "Vec::is_empty")] pub reverting_tx_hashes: Vec, /// UUID that can be used to cancel/replace this bundle - #[serde(rename = "replacementUuid", skip_serializing_if = "Option::is_none")] + #[serde(default, rename = "replacementUuid", skip_serializing_if = "Option::is_none")] pub replacement_uuid: Option, } @@ -625,11 +666,16 @@ pub struct EthCallBundle { /// A list of 
hex-encoded signed transactions pub txs: Vec, /// hex encoded block number for which this bundle is valid on - pub block_number: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub block_number: u64, /// Either a hex encoded number or a block tag for which state to base this simulation on pub state_block_number: BlockNumberOrTag, /// the timestamp to use for this bundle simulation, in seconds since the unix epoch - #[serde(skip_serializing_if = "Option::is_none")] + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] pub timestamp: Option, } @@ -654,8 +700,10 @@ pub struct EthCallBundleResponse { /// Results of individual transactions within the bundle pub results: Vec, /// The block number used as a base for this simulation + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] pub state_block_number: u64, /// The total gas used by all transactions in the bundle + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] pub total_gas_used: u64, } @@ -678,6 +726,7 @@ pub struct EthCallBundleTransactionResult { #[serde(with = "u256_numeric_string")] pub gas_price: U256, /// The amount of gas used by the transaction + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] pub gas_used: u64, /// The address to which the transaction is sent (optional) pub to_address: Option
, @@ -827,7 +876,7 @@ mod tests { let bundle = SendBundleRequest { protocol_version: ProtocolVersion::V0_1, - inclusion: Inclusion { block: U64::from(1), max_block: None }, + inclusion: Inclusion { block: 1, max_block: None }, bundle_body, validity, privacy, diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index c2d56df3132da..0523141eb2488 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -52,7 +52,7 @@ where EthBundleError::EmptyBundleTransactions.to_string(), )) } - if block_number.to::() == 0 { + if block_number == 0 { return Err(EthApiError::InvalidParams( EthBundleError::BundleMissingBlockNumber.to_string(), )) From 101e99f57fec3734bb5cfc9a0cb3cd8c429e341a Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Sun, 5 May 2024 12:31:12 +0200 Subject: [PATCH 475/700] ci: remove check-cfg job (#8106) --- .github/workflows/lint.yml | 21 +++------------------ crates/net/eth-wire/tests/fuzz_roundtrip.rs | 2 +- crates/net/network/tests/it/main.rs | 1 - 3 files changed, 4 insertions(+), 20 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index b939e159d4bcc..4f3632875afcc 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -27,8 +27,7 @@ jobs: - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - - run: - cargo clippy --bin "${{ matrix.binary }}" --workspace --features "${{ matrix.network }} asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" + - run: cargo clippy --bin "${{ matrix.binary }}" --workspace --features "${{ matrix.network }} asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" env: RUSTFLAGS: -D warnings @@ -95,9 +94,7 @@ jobs: env: # Keep in sync with ./book.yml:jobs.build # This should only add `-D warnings` - RUSTDOCFLAGS: - --cfg docsrs --show-type-layout 
--generate-link-to-definition --enable-index-page - -Zunstable-options -D warnings + RUSTDOCFLAGS: --cfg docsrs --show-type-layout --generate-link-to-definition --enable-index-page -Zunstable-options -D warnings fmt: name: fmt @@ -126,23 +123,11 @@ jobs: with: cmd: jq empty etc/grafana/dashboards/overview.json - check-cfg: - name: check-cfg - runs-on: ubuntu-latest - timeout-minutes: 30 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@nightly - - uses: Swatinem/rust-cache@v2 - with: - cache-on-failure: true - - run: cargo +nightly -Zcheck-cfg c - lint-success: name: lint success runs-on: ubuntu-latest if: always() - needs: [clippy-binaries, clippy, crate-checks, docs, fmt, codespell, grafana, check-cfg] + needs: [clippy-binaries, clippy, crate-checks, docs, fmt, codespell, grafana] timeout-minutes: 30 steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/crates/net/eth-wire/tests/fuzz_roundtrip.rs b/crates/net/eth-wire/tests/fuzz_roundtrip.rs index 9bd75e3f3492d..1fc5ea0bf578f 100644 --- a/crates/net/eth-wire/tests/fuzz_roundtrip.rs +++ b/crates/net/eth-wire/tests/fuzz_roundtrip.rs @@ -48,7 +48,7 @@ macro_rules! 
fuzz_type_and_name { }; } -#[cfg(any(test, feature = "bench"))] +#[cfg(test)] pub mod fuzz_rlp { use crate::roundtrip_encoding; use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; diff --git a/crates/net/network/tests/it/main.rs b/crates/net/network/tests/it/main.rs index 2bed287d6ec7c..1b4494abdc1c6 100644 --- a/crates/net/network/tests/it/main.rs +++ b/crates/net/network/tests/it/main.rs @@ -4,7 +4,6 @@ mod multiplex; mod requests; mod session; mod startup; -#[cfg(not(feature = "optimism"))] mod txgossip; fn main() {} From fa59ec8078b6c388b75b4f971013be451b8ab128 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 5 May 2024 11:23:14 +0000 Subject: [PATCH 476/700] chore(deps): weekly `cargo update` (#8104) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> Co-authored-by: Matthias Seitz --- Cargo.lock | 112 ++++++++++++++++++++++++++++------------------------- 1 file changed, 59 insertions(+), 53 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cafe4b83ad41c..68d67a451bf7c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -148,7 +148,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-primitives", @@ -156,7 +156,6 @@ dependencies = [ "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", "c-kzg", "serde", - "sha2 0.10.8", ] [[package]] @@ -199,7 +198,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -207,6 
+206,7 @@ dependencies = [ "c-kzg", "once_cell", "serde", + "sha2 0.10.8", ] [[package]] @@ -223,7 +223,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -409,7 +409,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -479,7 +479,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "serde", @@ -648,47 +648,48 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.13" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" +checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = 
"038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" [[package]] name = "anstyle-parse" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -1015,9 +1016,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "backon" @@ -1735,9 +1736,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" [[package]] name = "comfy-table" @@ -3109,7 +3110,7 @@ dependencies = [ [[package]] name = "foundry-blob-explorers" version = "0.1.0" -source = 
"git+https://github.com/foundry-rs/block-explorers#cd824d3fc53feca59ca6a2fc76f191fbb3ac2011" +source = "git+https://github.com/foundry-rs/block-explorers#adcb750e8d8e57f7decafca433118bf7836ffd55" dependencies = [ "alloy-chains", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -4243,6 +4244,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "is_terminal_polyfill" +version = "1.70.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" + [[package]] name = "itertools" version = "0.10.5" @@ -4662,9 +4669,9 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.44.1" +version = "0.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e92532fc3c4fb292ae30c371815c9b10103718777726ea5497abc268a4761866" +checksum = "80cae6cb75f89dbca53862f9ebe0b9f463aa7b302762fcfaafb9e51dcc9b0f7e" dependencies = [ "either", "fnv", @@ -4673,6 +4680,7 @@ dependencies = [ "instant", "libp2p-core", "libp2p-identity", + "lru", "multistream-select", "once_cell", "rand 0.8.5", @@ -5298,9 +5306,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", "num-integer", @@ -5321,9 +5329,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", @@ -5571,9 +5579,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" 
-version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" +checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" dependencies = [ "memchr", "thiserror", @@ -8470,9 +8478,9 @@ dependencies = [ [[package]] name = "scc" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec96560eea317a9cc4e0bb1f6a2c93c09a19b8c4fc5cb3fcc0ec1c094cd783e2" +checksum = "76ad2bbb0ae5100a07b7a6f2ed7ab5fd0045551a4c507989b7a620046ea3efdc" dependencies = [ "sdd", ] @@ -8611,9 +8619,9 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.199" +version = "1.0.200" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9f6e76df036c77cd94996771fb40db98187f096dd0b9af39c6c6e452ba966a" +checksum = "ddc6f9cc94d67c0e21aaf7eda3a010fd3af78ebf6e096aa6e2e13c79749cce4f" dependencies = [ "serde_derive", ] @@ -8629,9 +8637,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.199" +version = "1.0.200" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11bd257a6541e141e42ca6d24ae26f7714887b47e89aa739099104c7e4d3b7fc" +checksum = "856f046b9400cee3c8c94ed572ecdb752444c24528c035cd35882aad6f492bcb" dependencies = [ "proc-macro2", "quote", @@ -9238,9 +9246,9 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test-fuzz" -version = "5.0.0" +version = "5.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b139530208017f9d5a113784ed09cf1b8b22dee95eb99d51d89af1a3c2d6594e" +checksum = "f8224048089fb4c76b0569e76e00bf6cdaf06790eb5290e9582a0c485094e0a8" dependencies = [ "serde", "test-fuzz-internal", @@ -9250,9 +9258,9 @@ dependencies = [ [[package]] name = "test-fuzz-internal" -version = "5.0.0" 
+version = "5.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e78ed8148311b6a02578dee5fd77600bf8805b77b2cb8382a9435348080985" +checksum = "43cd6c1a291bd5f843f5dfb813c2fd7ad8e38de06722a14eeb54636c983485cc" dependencies = [ "bincode", "cargo_metadata", @@ -9261,9 +9269,9 @@ dependencies = [ [[package]] name = "test-fuzz-macro" -version = "5.0.0" +version = "5.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f9bc8c69f276df24e4d1c082e52ea057544495916c4aa0708b82e47f55f364" +checksum = "fffbe4466c9f941baa7dd177856ebda245d08b2aa2e3b6890d6dd8c54d6ceebe" dependencies = [ "darling 0.20.8", "itertools 0.12.1", @@ -9276,9 +9284,9 @@ dependencies = [ [[package]] name = "test-fuzz-runtime" -version = "5.0.0" +version = "5.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b657ccc932fde05dbac5c460bffa40809937adaa5558863fe8174526e1b3bc9" +checksum = "8fc507e8ea4887c091e1a57b65458c57b3a8fce1b6ed53afee77a174cfe41c17" dependencies = [ "hex", "num-traits", @@ -9512,9 +9520,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", @@ -9523,7 +9531,6 @@ dependencies = [ "pin-project-lite", "slab", "tokio", - "tracing", ] [[package]] @@ -9876,12 +9883,11 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.91" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad7eb6319ebadebca3dacf1f85a93bc54b73dd81b9036795f73de7ddfe27d5a" +checksum = "2a0e5d82932dfbf36df38de5df0cfe846d13430b3ae3fdc48b2e91ed692c8df7" dependencies = [ "glob", - "once_cell", "serde", "serde_derive", 
"serde_json", @@ -10543,18 +10549,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.32" +version = "0.7.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "087eca3c1eaf8c47b94d02790dd086cd594b912d2043d4de4bfdd466b3befb7c" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = "0.7.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "6f4b6c273f496d8fd4eaf18853e6b448760225dc030ff2c485a786859aea6393" dependencies = [ "proc-macro2", "quote", From 199503531c4e66cba702844a5a8224620ee0e877 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 5 May 2024 20:26:29 +0200 Subject: [PATCH 477/700] chore: bump alloy 0bb7604 (#8107) --- Cargo.lock | 108 +++++++++++------------ Cargo.toml | 26 +++--- crates/e2e-test-utils/src/engine_api.rs | 13 ++- crates/e2e-test-utils/src/node.rs | 2 +- crates/rpc/rpc-types-compat/src/block.rs | 1 + examples/exex/rollup/src/execution.rs | 4 +- 6 files changed, 83 insertions(+), 71 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 68d67a451bf7c..f054a15ba11fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -133,16 +133,14 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "c-kzg", "serde", - 
"sha2 0.10.8", - "thiserror", ] [[package]] @@ -179,11 +177,11 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "arbitrary", "c-kzg", "derive_more", @@ -193,6 +191,7 @@ dependencies = [ "proptest", "proptest-derive", "serde", + "sha2 0.10.8", ] [[package]] @@ -212,10 +211,10 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "serde", "serde_json", ] @@ -246,7 +245,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "serde", @@ -258,13 +257,13 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", - "alloy-eips 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-signer", "alloy-sol-types", "async-trait", @@ -275,9 +274,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-primitives", "k256", "serde_json", @@ -317,14 +316,14 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-rpc-types-trace", "alloy-transport", "alloy-transport-http", @@ -367,7 +366,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = 
"git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -387,14 +386,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-sol-types", "arbitrary", "itertools 0.12.1", @@ -427,24 +426,24 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "serde", ] [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ - "alloy-consensus 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=af788af)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", @@ -457,11 +456,11 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "serde", "serde_json", ] @@ -469,7 +468,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "serde", @@ -489,7 +488,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" 
dependencies = [ "alloy-primitives", "async-trait", @@ -502,9 +501,9 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-network", "alloy-primitives", "alloy-signer", @@ -577,7 +576,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -595,13 +594,14 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-json-rpc", "alloy-transport", "reqwest 0.12.4", "serde_json", "tower", + "tracing", "url", ] @@ -2952,7 +2952,7 @@ dependencies = [ name = "exex-rollup" version = "0.0.0" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-rlp", "alloy-sol-types", "eyre", @@ -6575,8 +6575,8 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.6" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-genesis 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-primitives", "arbitrary", "bytes", @@ -6781,9 +6781,9 @@ dependencies = [ name = "reth-e2e-test-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-network", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-signer", "alloy-signer-wallet", "eyre", @@ -7520,8 +7520,8 @@ name = "reth-primitives" version = "0.2.0-beta.6" dependencies = [ "alloy-chains", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-primitives", "alloy-rlp", "alloy-trie", @@ -7786,10 +7786,10 @@ dependencies = [ name = "reth-rpc-types" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-rpc-types-anvil", "alloy-rpc-types-engine", "alloy-rpc-types-trace", @@ -7816,7 +7816,7 @@ name = "reth-rpc-types-compat" version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "reth-primitives", "reth-rpc-types", "serde_json", @@ -7922,7 +7922,7 @@ dependencies = [ name = "reth-testing-utils" version = "0.2.0-beta.6" dependencies = [ - 
"alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "reth-primitives", "secp256k1", ] @@ -8056,10 +8056,10 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=d15add2#d15add2614fc359025f43bd7ad6096719580ba81" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=42f01d0#42f01d08219f1b4fcb409b82377ec999919002de" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", diff --git a/Cargo.toml b/Cargo.toml index 0aca2afbbaaa8..55ff517205965 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -282,7 +282,7 @@ reth-testing-utils = { path = "testing/testing-utils" } # revm revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "d15add2" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "42f01d0" } # eth alloy-chains = "0.1.15" @@ -291,20 +291,20 @@ alloy-dyn-abi = "0.7.2" alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } -alloy-provider = { git 
= "https://github.com/alloy-rs/alloy", rev = "af788af", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "af788af" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "0bb7604" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } # misc auto_impl = "1" diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index fecd9b8b7f4cb..13b735aea8bdb 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -1,5 +1,8 @@ use crate::traits::PayloadEnvelopeExt; -use jsonrpsee::http_client::{transport::HttpBackend, HttpClient}; +use jsonrpsee::{ + 
core::client::ClientT, + http_client::{transport::HttpBackend, HttpClient}, +}; use reth::{ api::{EngineTypes, PayloadBuilderAttributes}, providers::CanonStateNotificationStream, @@ -29,6 +32,14 @@ impl EngineApiTestContext { Ok(EngineApiClient::::get_payload_v3(&self.engine_api_client, payload_id).await?) } + /// Retrieves a v3 payload from the engine api as serde value + pub async fn get_payload_v3_value( + &self, + payload_id: PayloadId, + ) -> eyre::Result { + Ok(self.engine_api_client.request("engine_getPayloadV3", (payload_id,)).await?) + } + /// Submits a payload to the engine api pub async fn submit_payload( &self, diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index b2ccf899e1412..668af6034336f 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -112,7 +112,7 @@ where // wait for the payload builder to have finished building self.payload.wait_for_built_payload(eth_attr.payload_id()).await; // trigger resolve payload via engine api - self.engine_api.get_payload_v3(eth_attr.payload_id()).await?; + self.engine_api.get_payload_v3_value(eth_attr.payload_id()).await?; // ensure we're also receiving the built payload as event Ok((self.payload.expect_built_payload().await?, eth_attr)) } diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index b342f8a30b295..1c2a44ebb6b18 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -141,6 +141,7 @@ pub fn from_primitive_with_hash(primitive_header: reth_primitives::SealedHeader) excess_blob_gas: excess_blob_gas.map(u128::from), parent_beacon_block_root, total_difficulty: None, + requests_root: None, } } diff --git a/examples/exex/rollup/src/execution.rs b/examples/exex/rollup/src/execution.rs index 98a8e378c7bd1..f7a98382e07b9 100644 --- a/examples/exex/rollup/src/execution.rs +++ b/examples/exex/rollup/src/execution.rs @@ -1,4 +1,4 @@ -use 
alloy_consensus::{SidecarCoder, SimpleCoder}; +use alloy_consensus::{Blob, SidecarCoder, SimpleCoder}; use alloy_rlp::Decodable as _; use eyre::OptionExt; use reth::transaction_pool::TransactionPool; @@ -157,7 +157,7 @@ async fn decode_transactions( .map(|(blob, commitment)| (blob, kzg_to_versioned_hash((*commitment).into()))) // Filter only blobs that are present in the block data .filter(|(_, hash)| blob_hashes.contains(hash)) - .map(|(blob, _)| blob) + .map(|(blob, _)| Blob::from(*blob)) .collect::>(); if blobs.len() != blob_hashes.len() { eyre::bail!("some blobs not found") From 8f8b29b3ceba2bae7e6c40fa8e3b3f3873fcdee0 Mon Sep 17 00:00:00 2001 From: jn Date: Mon, 6 May 2024 03:14:54 -0700 Subject: [PATCH 478/700] refactor: replace futures_util pin and tokio_pin with std pin (#8109) --- Cargo.lock | 1 - crates/cli/runner/Cargo.toml | 1 - crates/cli/runner/src/lib.rs | 12 +++++++----- crates/net/eth-wire/src/multiplex.rs | 6 +++--- crates/net/network/src/listener.rs | 8 +++++--- crates/net/network/src/manager.rs | 6 +++--- crates/rpc/ipc/src/server/mod.rs | 18 ++++++++++-------- crates/tasks/src/lib.rs | 8 ++++---- 8 files changed, 32 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f054a15ba11fc..4b33054916085 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6565,7 +6565,6 @@ dependencies = [ name = "reth-cli-runner" version = "0.2.0-beta.6" dependencies = [ - "futures", "reth-tasks", "tokio", "tracing", diff --git a/crates/cli/runner/Cargo.toml b/crates/cli/runner/Cargo.toml index 697621cee0500..3182b738b7669 100644 --- a/crates/cli/runner/Cargo.toml +++ b/crates/cli/runner/Cargo.toml @@ -15,7 +15,6 @@ workspace = true reth-tasks.workspace = true # async -futures.workspace = true tokio = { workspace = true, features = ["macros", "rt-multi-thread", "signal"] } # misc diff --git a/crates/cli/runner/src/lib.rs b/crates/cli/runner/src/lib.rs index 31a1356c62bcb..94536d0cb91b6 100644 --- a/crates/cli/runner/src/lib.rs +++ 
b/crates/cli/runner/src/lib.rs @@ -10,9 +10,8 @@ //! Entrypoint for running commands. -use futures::pin_mut; use reth_tasks::{TaskExecutor, TaskManager}; -use std::future::Future; +use std::{future::Future, pin::pin}; use tracing::{debug, error, trace}; /// Executes CLI commands. @@ -141,7 +140,7 @@ where E: Send + Sync + From + 'static, { { - pin_mut!(fut); + let fut = pin!(fut); tokio::select! { err = tasks => { return Err(err.into()) @@ -166,7 +165,9 @@ where { let mut stream = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())?; let sigterm = stream.recv(); - pin_mut!(sigterm, ctrl_c, fut); + let sigterm = pin!(sigterm); + let ctrl_c = pin!(ctrl_c); + let fut = pin!(fut); tokio::select! { _ = ctrl_c => { @@ -181,7 +182,8 @@ where #[cfg(not(unix))] { - pin_mut!(ctrl_c, fut); + let ctrl_c = pin!(ctrl_c); + let fut = pin!(fut); tokio::select! { _ = ctrl_c => { diff --git a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index 82eccd5c8a18b..04b7cda37e50a 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -12,7 +12,7 @@ use std::{ fmt, future::Future, io, - pin::Pin, + pin::{pin, Pin}, task::{ready, Context, Poll}, }; @@ -23,7 +23,7 @@ use crate::{ CanDisconnect, DisconnectReason, EthStream, P2PStream, Status, UnauthedEthStream, }; use bytes::{Bytes, BytesMut}; -use futures::{pin_mut, Sink, SinkExt, Stream, StreamExt, TryStream, TryStreamExt}; +use futures::{Sink, SinkExt, Stream, StreamExt, TryStream, TryStreamExt}; use reth_primitives::ForkFilter; use tokio::sync::{mpsc, mpsc::UnboundedSender}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -159,7 +159,7 @@ impl RlpxProtocolMultiplexer { }; let f = handshake(proxy); - pin_mut!(f); + let mut f = pin!(f); // this polls the connection and the primary stream concurrently until the handshake is // complete diff --git a/crates/net/network/src/listener.rs b/crates/net/network/src/listener.rs index 
1575b3933b46d..4cc2196558264 100644 --- a/crates/net/network/src/listener.rs +++ b/crates/net/network/src/listener.rs @@ -104,8 +104,10 @@ impl Stream for TcpListenerStream { #[cfg(test)] mod tests { use super::*; - use futures::pin_mut; - use std::net::{Ipv4Addr, SocketAddrV4}; + use std::{ + net::{Ipv4Addr, SocketAddrV4}, + pin::pin, + }; use tokio::macros::support::poll_fn; #[tokio::test(flavor = "multi_thread")] @@ -117,7 +119,7 @@ mod tests { let local_addr = listener.local_address(); tokio::task::spawn(async move { - pin_mut!(listener); + let mut listener = pin!(listener); match poll_fn(|cx| listener.as_mut().poll(cx)).await { ListenerEvent::Incoming { .. } => {} _ => { diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 0d2a3340816de..d516625c64074 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -35,7 +35,7 @@ use crate::{ transactions::NetworkTransactionEvent, FetchClient, NetworkBuilder, }; -use futures::{pin_mut, Future, StreamExt}; +use futures::{Future, StreamExt}; use parking_lot::Mutex; use reth_eth_wire::{ capability::{Capabilities, CapabilityMessage}, @@ -53,7 +53,7 @@ use reth_tokio_util::EventListeners; use secp256k1::SecretKey; use std::{ net::SocketAddr, - pin::Pin, + pin::{pin, Pin}, sync::{ atomic::{AtomicU64, AtomicUsize, Ordering}, Arc, @@ -902,7 +902,7 @@ where shutdown_hook: impl FnOnce(&mut Self), ) { let network = self; - pin_mut!(network, shutdown); + let mut network = pin!(network); let mut graceful_guard = None; tokio::select! 
{ diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index ed0eadb4a6df5..04608745484aa 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -19,7 +19,7 @@ use jsonrpsee::{ use std::{ future::Future, io, - pin::Pin, + pin::{pin, Pin}, sync::Arc, task::{Context, Poll}, }; @@ -155,7 +155,7 @@ where let connection_guard = ConnectionGuard::new(self.cfg.max_connections as usize); let stopped = stop_handle.clone().shutdown(); - tokio::pin!(stopped); + let mut stopped = pin!(stopped); let (drop_on_completion, mut process_connection_awaiter) = mpsc::channel::<()>(1); @@ -223,7 +223,7 @@ where S: Future + Unpin, { let accept = listener.accept(); - tokio::pin!(accept); + let accept = pin!(accept); match futures_util::future::select(accept, stopped).await { Either::Left((res, stop)) => match res { @@ -506,11 +506,11 @@ async fn to_ipc_service( pending_calls: Default::default(), items: Default::default(), }; - tokio::pin!(conn, rx_item); - let stopped = stop_handle.shutdown(); - tokio::pin!(stopped); + let mut conn = pin!(conn); + let mut rx_item = pin!(rx_item); + let mut stopped = pin!(stopped); loop { tokio::select! 
{ @@ -522,7 +522,7 @@ async fn to_ipc_service( conn.push_back(item); } } - _ = &mut stopped=> { + _ = &mut stopped => { // shutdown break } @@ -844,6 +844,7 @@ mod tests { PendingSubscriptionSink, RpcModule, SubscriptionMessage, }; use reth_tracing::init_test_tracing; + use std::pin::pin; use tokio::sync::broadcast; use tokio_stream::wrappers::BroadcastStream; @@ -854,7 +855,8 @@ mod tests { let sink = pending.accept().await.unwrap(); let closed = sink.closed(); - futures::pin_mut!(closed, stream); + let mut closed = pin!(closed); + let mut stream = pin!(stream); loop { match select(closed, stream.next()).await { diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs index 0f93e5bc5f4b5..3e526a344f8c5 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -19,12 +19,12 @@ use crate::{ use dyn_clone::DynClone; use futures_util::{ future::{select, BoxFuture}, - pin_mut, Future, FutureExt, TryFutureExt, + Future, FutureExt, TryFutureExt, }; use std::{ any::Any, fmt::{Display, Formatter}, - pin::Pin, + pin::{pin, Pin}, sync::{ atomic::{AtomicUsize, Ordering}, Arc, @@ -334,7 +334,7 @@ impl TaskExecutor { async move { // Create an instance of IncCounterOnDrop with the counter to increment let _inc_counter_on_drop = IncCounterOnDrop::new(finished_regular_tasks_metrics); - pin_mut!(fut); + let fut = pin!(fut); let _ = select(on_shutdown, fut).await; } } @@ -409,7 +409,7 @@ impl TaskExecutor { let task = async move { // Create an instance of IncCounterOnDrop with the counter to increment let _inc_counter_on_drop = IncCounterOnDrop::new(finished_critical_tasks_metrics); - pin_mut!(task); + let task = pin!(task); let _ = select(on_shutdown, task).await; }; From f83a872dd6045a27fc06a95c06363139570edecb Mon Sep 17 00:00:00 2001 From: alpharush <0xalpharush@protonmail.com> Date: Mon, 6 May 2024 05:16:27 -0500 Subject: [PATCH 479/700] feat: improve exex examples' validation (#8116) --- examples/exex/op-bridge/src/main.rs | 21 ++++++++++++++++++--- 
examples/exex/rollup/src/main.rs | 12 ++++++++---- 2 files changed, 26 insertions(+), 7 deletions(-) diff --git a/examples/exex/op-bridge/src/main.rs b/examples/exex/op-bridge/src/main.rs index 0f48b0a5f43ed..02c87ba154bbe 100644 --- a/examples/exex/op-bridge/src/main.rs +++ b/examples/exex/op-bridge/src/main.rs @@ -3,7 +3,7 @@ use futures::Future; use reth_exex::{ExExContext, ExExEvent}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; -use reth_primitives::{Log, SealedBlockWithSenders, TransactionSigned}; +use reth_primitives::{address, Address, Log, SealedBlockWithSenders, TransactionSigned}; use reth_provider::Chain; use reth_tracing::tracing::info; use rusqlite::Connection; @@ -11,6 +11,15 @@ use rusqlite::Connection; sol!(L1StandardBridge, "l1_standard_bridge_abi.json"); use crate::L1StandardBridge::{ETHBridgeFinalized, ETHBridgeInitiated, L1StandardBridgeEvents}; +const OP_BRIDGES: [Address; 6] = [ + address!("3154Cf16ccdb4C6d922629664174b904d80F2C35"), + address!("3a05E5d33d7Ab3864D53aaEc93c8301C1Fa49115"), + address!("697402166Fbf2F22E970df8a6486Ef171dbfc524"), + address!("99C9fc46f92E8a1c0deC1b1747d010903E884bE1"), + address!("735aDBbE72226BD52e818E7181953f42E3b0FF21"), + address!("3B95bC951EE0f553ba487327278cAc44f29715E5"), +]; + /// Initializes the ExEx. /// /// Opens up a SQLite database and creates the tables (if they don't exist). 
@@ -213,8 +222,14 @@ fn decode_chain_into_events( .zip(receipts.iter().flatten()) .map(move |(tx, receipt)| (block, tx, receipt)) }) - // Get all logs - .flat_map(|(block, tx, receipt)| receipt.logs.iter().map(move |log| (block, tx, log))) + // Get all logs from expected bridge contracts + .flat_map(|(block, tx, receipt)| { + receipt + .logs + .iter() + .filter(|log| OP_BRIDGES.contains(&log.address)) + .map(move |log| (block, tx, log)) + }) // Decode and filter bridge events .filter_map(|(block, tx, log)| { L1StandardBridgeEvents::decode_raw_log(log.topics(), &log.data.data, true) diff --git a/examples/exex/rollup/src/main.rs b/examples/exex/rollup/src/main.rs index f3e7f00bc67d6..f1af0c1ae0f23 100644 --- a/examples/exex/rollup/src/main.rs +++ b/examples/exex/rollup/src/main.rs @@ -243,10 +243,14 @@ fn decode_chain_into_rollup_events( .zip(receipts.iter().flatten()) .map(move |(tx, receipt)| (block, tx, receipt)) }) - // Filter only transactions to the rollup contract - .filter(|(_, tx, _)| tx.to() == Some(ROLLUP_CONTRACT_ADDRESS)) - // Get all logs - .flat_map(|(block, tx, receipt)| receipt.logs.iter().map(move |log| (block, tx, log))) + // Get all logs from rollup contract + .flat_map(|(block, tx, receipt)| { + receipt + .logs + .iter() + .filter(|log| log.address == ROLLUP_CONTRACT_ADDRESS) + .map(move |log| (block, tx, log)) + }) // Decode and filter rollup events .filter_map(|(block, tx, log)| { RollupContractEvents::decode_raw_log(log.topics(), &log.data.data, true) From 68920b830f21ebbd3b28797813876a4ba173bce7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 6 May 2024 13:08:20 +0200 Subject: [PATCH 480/700] feat: add exex for in memory state (#8108) --- Cargo.lock | 12 ++++++ Cargo.toml | 4 +- examples/README.md | 11 ++--- examples/exex/in-memory-state/Cargo.toml | 15 +++++++ examples/exex/in-memory-state/src/main.rs | 49 +++++++++++++++++++++++ 5 files changed, 83 insertions(+), 8 deletions(-) create mode 100644 
examples/exex/in-memory-state/Cargo.toml create mode 100644 examples/exex/in-memory-state/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 4b33054916085..1dc869a93021a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2912,6 +2912,18 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "exex-in-memory-state" +version = "0.0.0" +dependencies = [ + "eyre", + "reth", + "reth-exex", + "reth-node-api", + "reth-node-ethereum", + "reth-tracing", +] + [[package]] name = "exex-minimal" version = "0.0.0" diff --git a/Cargo.toml b/Cargo.toml index 55ff517205965..7eed06b7f76d0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,9 +87,7 @@ members = [ "examples/txpool-tracing/", "examples/polygon-p2p/", "examples/custom-inspector/", - "examples/exex/minimal/", - "examples/exex/op-bridge/", - "examples/exex/rollup/", + "examples/exex/*", "examples/db-access", "testing/ef-tests/", "testing/testing-utils", diff --git a/examples/README.md b/examples/README.md index 0885aa294e7ab..4c135f880feb8 100644 --- a/examples/README.md +++ b/examples/README.md @@ -23,11 +23,12 @@ to make a PR! 
## ExEx -| Example | Description | -| ---------------------------------- | --------------------------------------------------------------------------------- | -| [Minimal ExEx](./exex/minimal) | Illustrates how to build a simple ExEx | -| [OP Bridge ExEx](./exex/op-bridge) | Illustrates an ExEx that decodes Optimism deposit and withdrawal receipts from L1 | -| [Rollup](./exex/rollup) | Illustrates a rollup ExEx that derives the state from L1 | +| Example | Description | +|-------------------------------------------|-----------------------------------------------------------------------------------| +| [Minimal ExEx](./exex/minimal) | Illustrates how to build a simple ExEx | +| [OP Bridge ExEx](./exex/op-bridge) | Illustrates an ExEx that decodes Optimism deposit and withdrawal receipts from L1 | +| [Rollup](./exex/rollup) | Illustrates a rollup ExEx that derives the state from L1 | +| [In Memory State](./exex/in-memory-state) | Illustrates an ExEx that tracks the plain state in memory | ## RPC diff --git a/examples/exex/in-memory-state/Cargo.toml b/examples/exex/in-memory-state/Cargo.toml new file mode 100644 index 0000000000000..c7fd34ea59eda --- /dev/null +++ b/examples/exex/in-memory-state/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "exex-in-memory-state" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth.workspace = true +reth-exex.workspace = true +reth-node-api.workspace = true +reth-node-ethereum.workspace = true +reth-tracing.workspace = true + +eyre.workspace = true diff --git a/examples/exex/in-memory-state/src/main.rs b/examples/exex/in-memory-state/src/main.rs new file mode 100644 index 0000000000000..451bb9c429247 --- /dev/null +++ b/examples/exex/in-memory-state/src/main.rs @@ -0,0 +1,49 @@ +#![warn(unused_crate_dependencies)] + +use reth::providers::BundleStateWithReceipts; +use reth_exex::{ExExContext, ExExEvent, ExExNotification}; +use reth_node_api::FullNodeComponents; +use 
reth_node_ethereum::EthereumNode; +use reth_tracing::tracing::info; + +/// An ExEx that keeps track of the entire state in memory +async fn track_state(mut ctx: ExExContext) -> eyre::Result<()> { + // keeps the entire plain state of the chain in memory + let mut state = BundleStateWithReceipts::default(); + + while let Some(notification) = ctx.notifications.recv().await { + match ¬ification { + ExExNotification::ChainCommitted { new } => { + info!(committed_chain = ?new.range(), "Received commit"); + } + ExExNotification::ChainReorged { old, new } => { + // revert to block before the reorg + state.revert_to(new.first().number - 1); + info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg"); + } + ExExNotification::ChainReverted { old } => { + state.revert_to(old.first().number - 1); + info!(reverted_chain = ?old.range(), "Received revert"); + } + }; + + if let Some(committed_chain) = notification.committed_chain() { + // extend the state with the new chain + state.extend(committed_chain.state().clone()); + ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + } + } + Ok(()) +} + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let handle = builder + .node(EthereumNode::default()) + .install_exex("in-memory-state", |ctx| async move { Ok(track_state(ctx)) }) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) +} From 00a02f5b5c4c6d36d9d6f38fd445311a7d9da0b5 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 6 May 2024 13:14:57 +0200 Subject: [PATCH 481/700] chore: remote `try_` prefix from block to payload conversion methods (#8117) --- crates/consensus/beacon/src/engine/mod.rs | 14 +++++++------- crates/ethereum/engine-primitives/src/payload.rs | 4 ++-- crates/payload/optimism/src/payload.rs | 4 ++-- crates/rpc/rpc-builder/tests/it/auth.rs | 4 ++-- crates/rpc/rpc-engine-api/tests/it/payload.rs | 8 ++++---- crates/rpc/rpc-types-compat/src/engine/mod.rs | 2 +- 
.../rpc/rpc-types-compat/src/engine/payload.rs | 16 ++++++++-------- 7 files changed, 26 insertions(+), 26 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 3e12c5f8e3a89..d3c5bfe09d6d3 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1906,7 +1906,7 @@ mod tests { use reth_primitives::{stage::StageCheckpoint, ChainSpecBuilder, MAINNET}; use reth_provider::{BlockWriter, ProviderFactory}; use reth_rpc_types::engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; - use reth_rpc_types_compat::engine::payload::try_block_to_payload_v1; + use reth_rpc_types_compat::engine::payload::block_to_payload_v1; use reth_stages::{ExecOutput, PipelineError, StageError}; use std::{collections::VecDeque, sync::Arc}; use tokio::sync::oneshot::error::TryRecvError; @@ -1968,7 +1968,7 @@ mod tests { assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); // consensus engine is still idle because no FCUs were received - let _ = env.send_new_payload(try_block_to_payload_v1(SealedBlock::default()), None).await; + let _ = env.send_new_payload(block_to_payload_v1(SealedBlock::default()), None).await; assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); @@ -2425,7 +2425,7 @@ mod tests { // Send new payload let res = env .send_new_payload( - try_block_to_payload_v1(random_block(&mut rng, 0, None, None, Some(0))), + block_to_payload_v1(random_block(&mut rng, 0, None, None, Some(0))), None, ) .await; @@ -2436,7 +2436,7 @@ mod tests { // Send new payload let res = env .send_new_payload( - try_block_to_payload_v1(random_block(&mut rng, 1, None, None, Some(0))), + block_to_payload_v1(random_block(&mut rng, 1, None, None, Some(0))), None, ) .await; @@ -2492,7 +2492,7 @@ mod tests { // Send new payload let result = env - .send_new_payload_retry_on_syncing(try_block_to_payload_v1(block2.clone()), None) + 
.send_new_payload_retry_on_syncing(block_to_payload_v1(block2.clone()), None) .await .unwrap(); @@ -2606,7 +2606,7 @@ mod tests { // Send new payload let parent = rng.gen(); let block = random_block(&mut rng, 2, Some(parent), None, Some(0)); - let res = env.send_new_payload(try_block_to_payload_v1(block), None).await; + let res = env.send_new_payload(block_to_payload_v1(block), None).await; let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); @@ -2673,7 +2673,7 @@ mod tests { // Send new payload let result = env - .send_new_payload_retry_on_syncing(try_block_to_payload_v1(block2.clone()), None) + .send_new_payload_retry_on_syncing(block_to_payload_v1(block2.clone()), None) .await .unwrap(); diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 264355ac22a83..55a97c96ddc51 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -11,7 +11,7 @@ use reth_rpc_types::engine::{ PayloadId, }; use reth_rpc_types_compat::engine::payload::{ - block_to_payload_v3, convert_block_to_payload_field_v2, try_block_to_payload_v1, + block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; use revm_primitives::{BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; use std::convert::Infallible; @@ -91,7 +91,7 @@ impl<'a> BuiltPayload for &'a EthBuiltPayload { // V1 engine_getPayloadV1 response impl From for ExecutionPayloadV1 { fn from(value: EthBuiltPayload) -> Self { - try_block_to_payload_v1(value.block) + block_to_payload_v1(value.block) } } diff --git a/crates/payload/optimism/src/payload.rs b/crates/payload/optimism/src/payload.rs index b90d05d5f7e85..9cd47ef4256f9 100644 --- a/crates/payload/optimism/src/payload.rs +++ b/crates/payload/optimism/src/payload.rs @@ -16,7 +16,7 @@ use reth_rpc_types::engine::{ 
OptimismPayloadAttributes, PayloadId, }; use reth_rpc_types_compat::engine::payload::{ - block_to_payload_v3, convert_block_to_payload_field_v2, try_block_to_payload_v1, + block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; use revm::primitives::HandlerCfg; use std::sync::Arc; @@ -230,7 +230,7 @@ impl<'a> BuiltPayload for &'a OptimismBuiltPayload { // V1 engine_getPayloadV1 response impl From for ExecutionPayloadV1 { fn from(value: OptimismBuiltPayload) -> Self { - try_block_to_payload_v1(value.block) + block_to_payload_v1(value.block) } } diff --git a/crates/rpc/rpc-builder/tests/it/auth.rs b/crates/rpc/rpc-builder/tests/it/auth.rs index 4b95d11ed41c0..b5416bf67743e 100644 --- a/crates/rpc/rpc-builder/tests/it/auth.rs +++ b/crates/rpc/rpc-builder/tests/it/auth.rs @@ -8,7 +8,7 @@ use reth_rpc::JwtSecret; use reth_rpc_api::clients::EngineApiClient; use reth_rpc_types::engine::{ForkchoiceState, PayloadId, TransitionConfiguration}; use reth_rpc_types_compat::engine::payload::{ - convert_block_to_payload_input_v2, try_block_to_payload_v1, + block_to_payload_v1, convert_block_to_payload_input_v2, }; #[allow(unused_must_use)] async fn test_basic_engine_calls(client: &C) @@ -17,7 +17,7 @@ where C: EngineApiClient, { let block = Block::default().seal_slow(); - EngineApiClient::new_payload_v1(client, try_block_to_payload_v1(block.clone())).await; + EngineApiClient::new_payload_v1(client, block_to_payload_v1(block.clone())).await; EngineApiClient::new_payload_v2(client, convert_block_to_payload_input_v2(block)).await; EngineApiClient::fork_choice_updated_v1(client, ForkchoiceState::default(), None).await; EngineApiClient::get_payload_v1(client, PayloadId::new([0, 0, 0, 0, 0, 0, 0, 0])).await; diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 0979af400cca9..22219584c7e14 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ 
-13,8 +13,8 @@ use reth_rpc_types::engine::{ ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadV1, PayloadError, }; use reth_rpc_types_compat::engine::payload::{ - convert_to_payload_body_v1, try_block_to_payload, try_block_to_payload_v1, - try_into_sealed_block, try_payload_v1_to_block, + block_to_payload, block_to_payload_v1, convert_to_payload_body_v1, try_into_sealed_block, + try_payload_v1_to_block, }; fn transform_block Block>(src: SealedBlock, f: F) -> ExecutionPayload { @@ -23,7 +23,7 @@ fn transform_block Block>(src: SealedBlock, f: F) -> Executi // Recalculate roots transformed.header.transactions_root = proofs::calculate_transaction_root(&transformed.body); transformed.header.ommers_hash = proofs::calculate_ommers_root(&transformed.ommers); - try_block_to_payload(SealedBlock { + block_to_payload(SealedBlock { header: transformed.header.seal_slow(), body: transformed.body, ommers: transformed.ommers, @@ -89,7 +89,7 @@ fn payload_validation() { ); // Invalid encoded transactions - let mut payload_with_invalid_txs: ExecutionPayloadV1 = try_block_to_payload_v1(block.clone()); + let mut payload_with_invalid_txs: ExecutionPayloadV1 = block_to_payload_v1(block.clone()); payload_with_invalid_txs.transactions.iter_mut().for_each(|tx| { *tx = Bytes::new().into(); diff --git a/crates/rpc/rpc-types-compat/src/engine/mod.rs b/crates/rpc/rpc-types-compat/src/engine/mod.rs index e14b8350051ca..aa7456250262c 100644 --- a/crates/rpc/rpc-types-compat/src/engine/mod.rs +++ b/crates/rpc/rpc-types-compat/src/engine/mod.rs @@ -1,3 +1,3 @@ //! 
Standalone functions for engine specific rpc type conversions pub mod payload; -pub use payload::{try_block_to_payload_v1, try_into_sealed_block, try_payload_v1_to_block}; +pub use payload::{block_to_payload_v1, try_into_sealed_block, try_payload_v1_to_block}; diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index fdacab4e6225a..3ab9a74b9717a 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -84,21 +84,21 @@ pub fn try_payload_v3_to_block(payload: ExecutionPayloadV3) -> Result ExecutionPayload { +pub fn block_to_payload(value: SealedBlock) -> ExecutionPayload { if value.header.parent_beacon_block_root.is_some() { // block with parent beacon block root: V3 ExecutionPayload::V3(block_to_payload_v3(value)) } else if value.withdrawals.is_some() { // block with withdrawals: V2 - ExecutionPayload::V2(try_block_to_payload_v2(value)) + ExecutionPayload::V2(block_to_payload_v2(value)) } else { // otherwise V1 - ExecutionPayload::V1(try_block_to_payload_v1(value)) + ExecutionPayload::V1(block_to_payload_v1(value)) } } /// Converts [SealedBlock] to [ExecutionPayloadV1] -pub fn try_block_to_payload_v1(value: SealedBlock) -> ExecutionPayloadV1 { +pub fn block_to_payload_v1(value: SealedBlock) -> ExecutionPayloadV1 { let transactions = value.raw_transactions(); ExecutionPayloadV1 { parent_hash: value.parent_hash, @@ -119,7 +119,7 @@ pub fn try_block_to_payload_v1(value: SealedBlock) -> ExecutionPayloadV1 { } /// Converts [SealedBlock] to [ExecutionPayloadV2] -pub fn try_block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { +pub fn block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { let transactions = value.raw_transactions(); ExecutionPayloadV2 { @@ -176,9 +176,9 @@ pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { pub fn convert_block_to_payload_field_v2(value: SealedBlock) -> ExecutionPayloadFieldV2 
{ // if there are withdrawals, return V2 if value.withdrawals.is_some() { - ExecutionPayloadFieldV2::V2(try_block_to_payload_v2(value)) + ExecutionPayloadFieldV2::V2(block_to_payload_v2(value)) } else { - ExecutionPayloadFieldV2::V1(try_block_to_payload_v1(value)) + ExecutionPayloadFieldV2::V1(block_to_payload_v1(value)) } } @@ -205,7 +205,7 @@ pub fn convert_payload_input_v2_to_payload(value: ExecutionPayloadInputV2) -> Ex pub fn convert_block_to_payload_input_v2(value: SealedBlock) -> ExecutionPayloadInputV2 { ExecutionPayloadInputV2 { withdrawals: value.withdrawals.clone().map(Withdrawals::into_inner), - execution_payload: try_block_to_payload_v1(value), + execution_payload: block_to_payload_v1(value), } } From 1d9894fbee15bcfb7b3f7b630440db2397f1a674 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 6 May 2024 14:25:20 +0200 Subject: [PATCH 482/700] chore: bump alloy 17c5650 (#8118) --- Cargo.lock | 104 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 26 +++++++------- 2 files changed, 65 insertions(+), 65 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1dc869a93021a..1a984e04de8be 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -133,12 +133,12 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "c-kzg", "serde", ] @@ -177,11 +177,11 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "arbitrary", "c-kzg", "derive_more", @@ -211,10 +211,10 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "serde", "serde_json", ] @@ -245,7 +245,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-primitives", "serde", @@ -257,13 +257,13 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-signer", "alloy-sol-types", "async-trait", @@ -274,9 +274,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-primitives", "k256", "serde_json", @@ -316,14 +316,14 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-rpc-types-trace", "alloy-transport", "alloy-transport-http", @@ -366,7 +366,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -386,14 +386,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-sol-types", "arbitrary", "itertools 0.12.1", @@ -426,24 +426,24 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "serde", ] [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-eips 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", @@ -456,11 +456,11 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "serde", "serde_json", ] @@ -468,7 +468,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-primitives", "serde", @@ -488,7 +488,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-primitives", "async-trait", @@ -501,9 +501,9 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-network", "alloy-primitives", "alloy-signer", @@ -576,7 +576,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -594,7 +594,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -2964,7 +2964,7 @@ dependencies = [ name = "exex-rollup" version = "0.0.0" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-rlp", "alloy-sol-types", "eyre", @@ -6586,8 +6586,8 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.6" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-primitives", "arbitrary", "bytes", @@ -6792,9 +6792,9 @@ dependencies = [ name = "reth-e2e-test-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-consensus 
0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-network", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-signer", "alloy-signer-wallet", "eyre", @@ -7531,8 +7531,8 @@ name = "reth-primitives" version = "0.2.0-beta.6" dependencies = [ "alloy-chains", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-primitives", "alloy-rlp", "alloy-trie", @@ -7797,10 +7797,10 @@ dependencies = [ name = "reth-rpc-types" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-rpc-types-anvil", "alloy-rpc-types-engine", "alloy-rpc-types-trace", @@ -7827,7 +7827,7 @@ name = "reth-rpc-types-compat" version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "reth-primitives", "reth-rpc-types", "serde_json", @@ -7933,7 +7933,7 @@ dependencies = [ name = "reth-testing-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "reth-primitives", "secp256k1", ] @@ -8067,10 +8067,10 @@ dependencies = [ 
[[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=42f01d0#42f01d08219f1b4fcb409b82377ec999919002de" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=089efac#089efacf72e7583630841b7027c46a3cb2f9c28b" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", diff --git a/Cargo.toml b/Cargo.toml index 7eed06b7f76d0..e48db171291d1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -280,7 +280,7 @@ reth-testing-utils = { path = "testing/testing-utils" } # revm revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "42f01d0" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "089efac" } # eth alloy-chains = "0.1.15" @@ -289,20 +289,20 @@ alloy-dyn-abi = "0.7.2" alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-rpc-types-anvil = { git = 
"https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "0bb7604" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "17c5650" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } # misc auto_impl = "1" From 7fd091536f6f46d084fca460697035d558504981 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Mon, 6 May 2024 19:40:55 +0700 Subject: [PATCH 483/700] fix(rpc_server): remember to set ipc config for auth server (#8120) Co-authored-by: dzung --- crates/node-core/src/args/rpc_server.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/node-core/src/args/rpc_server.rs b/crates/node-core/src/args/rpc_server.rs index 1a60aa31af01d..e19a88737435d 100644 --- a/crates/node-core/src/args/rpc_server.rs +++ 
b/crates/node-core/src/args/rpc_server.rs @@ -475,7 +475,9 @@ impl RethRpcConfig for RpcServerArgs { let mut builder = AuthServerConfig::builder(jwt_secret).socket_addr(address); if self.auth_ipc { - builder = builder.ipc_endpoint(self.auth_ipc_path.clone()); + builder = builder + .ipc_endpoint(self.auth_ipc_path.clone()) + .with_ipc_config(self.ipc_server_builder()); } Ok(builder.build()) } From 5e778317fb6ef0809fbd399a583ac6fb9befad2b Mon Sep 17 00:00:00 2001 From: Rupam Dey <117000803+rupam-04@users.noreply.github.com> Date: Mon, 6 May 2024 18:37:25 +0530 Subject: [PATCH 484/700] convert ```OptimismBlockExecution``` error variant into a general purpose error variant (#8100) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + crates/blockchain-tree/src/blockchain_tree.rs | 4 +- crates/ethereum/evm/src/execute.rs | 5 +- .../interfaces/src/blockchain_tree/error.rs | 3 +- crates/interfaces/src/error.rs | 2 +- crates/interfaces/src/executor.rs | 43 +++---- crates/optimism/evm/Cargo.toml | 1 + crates/optimism/evm/src/error.rs | 29 +++++ crates/optimism/evm/src/execute.rs | 26 ++--- crates/optimism/evm/src/l1.rs | 109 +++++++----------- crates/optimism/evm/src/lib.rs | 2 + crates/storage/provider/src/chain.rs | 2 +- 12 files changed, 111 insertions(+), 116 deletions(-) create mode 100644 crates/optimism/evm/src/error.rs diff --git a/Cargo.lock b/Cargo.lock index 1a984e04de8be..4fa6fc73d0a69 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7007,6 +7007,7 @@ dependencies = [ "reth-revm", "revm", "revm-primitives", + "thiserror", "tracing", ] diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 5346eafbdc38e..64d311549dd01 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -2162,7 +2162,7 @@ mod tests { .assert(&tree); // unwind canonical - assert_eq!(tree.unwind(block1.number), Ok(())); + assert!(tree.unwind(block1.number).is_ok()); // Trie state: // b2 
b2a (pending block) // / / @@ -2226,7 +2226,7 @@ mod tests { .assert(&tree); // update canonical block to b2, this would make b2a be removed - assert_eq!(tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(12), Ok(())); + assert!(tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(12).is_ok()); assert_eq!( tree.is_block_known(block2.num_hash()).unwrap(), diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index ff3a4e76d0d85..db361f35d54c0 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -523,9 +523,10 @@ mod tests { .expect_err( "Executing cancun block without parent beacon block root field should fail", ); + assert_eq!( - err, - BlockExecutionError::Validation(BlockValidationError::MissingParentBeaconBlockRoot) + err.as_validation().unwrap().clone(), + BlockValidationError::MissingParentBeaconBlockRoot ); // fix header, set a gas limit diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index b636985766a10..b805c6ee8e778 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -293,8 +293,7 @@ impl InsertBlockErrorKind { BlockExecutionError::CanonicalCommit { .. } | BlockExecutionError::AppendChainDoesntConnect { .. } | BlockExecutionError::UnavailableForTest => false, - #[cfg(feature = "optimism")] - BlockExecutionError::OptimismBlockExecution(_) => false, + BlockExecutionError::Other(_) => false, } } InsertBlockErrorKind::Tree(err) => { diff --git a/crates/interfaces/src/error.rs b/crates/interfaces/src/error.rs index df307ae091f75..38498c312ab1d 100644 --- a/crates/interfaces/src/error.rs +++ b/crates/interfaces/src/error.rs @@ -16,7 +16,7 @@ pub type RethResult = Result; /// This enum encapsulates various error types that can occur during blockchain interactions. 
/// /// It allows for structured error handling based on the nature of the encountered issue. -#[derive(Debug, thiserror::Error, Clone, PartialEq, Eq)] +#[derive(Debug, thiserror::Error)] pub enum RethError { /// Error encountered during block execution. #[error(transparent)] diff --git a/crates/interfaces/src/executor.rs b/crates/interfaces/src/executor.rs index e8f7f40b152ed..04b9832f092df 100644 --- a/crates/interfaces/src/executor.rs +++ b/crates/interfaces/src/executor.rs @@ -80,7 +80,7 @@ pub enum BlockValidationError { } /// BlockExecutor Errors -#[derive(Error, Debug, Clone, PartialEq, Eq)] +#[derive(Error, Debug)] pub enum BlockExecutionError { /// Validation error, transparently wrapping `BlockValidationError` #[error(transparent)] @@ -118,35 +118,28 @@ pub enum BlockExecutionError { /// Error when fetching latest block state. #[error(transparent)] LatestBlock(#[from] ProviderError), - /// Optimism Block Executor Errors - #[cfg(feature = "optimism")] #[error(transparent)] - OptimismBlockExecution(#[from] OptimismBlockExecutionError), -} - -/// Optimism Block Executor Errors -#[cfg(feature = "optimism")] -#[derive(Error, Debug, Clone, PartialEq, Eq)] -pub enum OptimismBlockExecutionError { - /// Error when trying to parse L1 block info - #[error("could not get L1 block info from L2 block: {message:?}")] - L1BlockInfoError { - /// The inner error message - message: String, - }, - /// Thrown when force deploy of create2deployer code fails. - #[error("failed to force create2deployer account code")] - ForceCreate2DeployerFail, - /// Thrown when a blob transaction is included in a sequencer's block. - #[error("blob transaction included in sequencer block")] - BlobTransactionRejected, - /// Thrown when a database account could not be loaded. - #[error("failed to load account {0}")] - AccountLoadFailed(reth_primitives::Address), + Other(Box), } impl BlockExecutionError { + /// Create a new `BlockExecutionError::Other` variant. 
+ pub fn other(error: E) -> Self + where + E: std::error::Error + Send + Sync + 'static, + { + Self::Other(Box::new(error)) + } + + /// Returns the inner `BlockValidationError` if the error is a validation error. + pub const fn as_validation(&self) -> Option<&BlockValidationError> { + match self { + Self::Validation(err) => Some(err), + _ => None, + } + } + /// Returns `true` if the error is fatal. /// /// This represents an unrecoverable database related error. diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 8e5afc5efce00..4e5fd2f19ff59 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -23,6 +23,7 @@ revm.workspace = true revm-primitives.workspace = true # misc +thiserror.workspace = true tracing.workspace = true [dev-dependencies] diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs new file mode 100644 index 0000000000000..de923d44ca4b8 --- /dev/null +++ b/crates/optimism/evm/src/error.rs @@ -0,0 +1,29 @@ +//! Error types for the Optimism EVM module. + +use reth_interfaces::executor::BlockExecutionError; + +/// Optimism Block Executor Errors +#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] +pub enum OptimismBlockExecutionError { + /// Error when trying to parse L1 block info + #[error("could not get L1 block info from L2 block: {message:?}")] + L1BlockInfoError { + /// The inner error message + message: String, + }, + /// Thrown when force deploy of create2deployer code fails. + #[error("failed to force create2deployer account code")] + ForceCreate2DeployerFail, + /// Thrown when a blob transaction is included in a sequencer's block. + #[error("blob transaction included in sequencer block")] + BlobTransactionRejected, + /// Thrown when a database account could not be loaded. 
+ #[error("failed to load account {0}")] + AccountLoadFailed(reth_primitives::Address), +} + +impl From for BlockExecutionError { + fn from(err: OptimismBlockExecutionError) -> Self { + BlockExecutionError::other(err) + } +} diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index d19d441a8c06d..c6bb5c7cf2338 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -1,6 +1,9 @@ //! Optimism block executor. -use crate::{l1::ensure_create2_deployer, verify::verify_receipts, OptimismEvmConfig}; +use crate::{ + l1::ensure_create2_deployer, verify::verify_receipts, OptimismBlockExecutionError, + OptimismEvmConfig, +}; use reth_evm::{ execute::{ BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, @@ -9,7 +12,7 @@ use reth_evm::{ ConfigureEvm, ConfigureEvmEnv, }; use reth_interfaces::{ - executor::{BlockExecutionError, BlockValidationError, OptimismBlockExecutionError}, + executor::{BlockExecutionError, BlockValidationError}, provider::ProviderError, }; use reth_primitives::{ @@ -141,13 +144,8 @@ where // blocks will always have at least a single transaction in them (the L1 info transaction), // so we can safely assume that this will always be triggered upon the transition and that // the above check for empty blocks will never be hit on OP chains. - ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()).map_err( - |_| { - BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::ForceCreate2DeployerFail, - ) - }, - )?; + ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()) + .map_err(|_| OptimismBlockExecutionError::ForceCreate2DeployerFail)?; let mut cumulative_gas_used = 0; let mut receipts = Vec::with_capacity(block.body.len()); @@ -167,9 +165,7 @@ where // An optimism block should never contain blob transactions. 
if matches!(transaction.tx_type(), TxType::Eip4844) { - return Err(BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::BlobTransactionRejected, - )); + return Err(OptimismBlockExecutionError::BlobTransactionRejected.into()); } // Cache the depositor account prior to the state transition for the deposit nonce. @@ -184,11 +180,7 @@ where .map(|acc| acc.account_info().unwrap_or_default()) }) .transpose() - .map_err(|_| { - BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::AccountLoadFailed(*sender), - ) - })?; + .map_err(|_| OptimismBlockExecutionError::AccountLoadFailed(*sender))?; let mut buf = Vec::with_capacity(transaction.length_without_header()); transaction.encode_enveloped(&mut buf); diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 896cbc36ada5e..7b605448ff309 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -1,9 +1,7 @@ //! Optimism-specific implementation and utilities for the executor -use reth_interfaces::{ - executor::{self as reth_executor, BlockExecutionError}, - RethError, -}; +use crate::OptimismBlockExecutionError; +use reth_interfaces::{executor::BlockExecutionError, RethError}; use reth_primitives::{address, b256, hex, Address, Block, Bytes, ChainSpec, Hardfork, B256, U256}; use revm::{ primitives::{Bytecode, HashMap, SpecId}, @@ -29,20 +27,19 @@ const L1_BLOCK_ECOTONE_SELECTOR: [u8; 4] = hex!("440a5e20"); /// transaction in the L2 block. /// /// Returns an error if the L1 info transaction is not found, if the block is empty. 
-pub fn extract_l1_info(block: &Block) -> Result { +pub fn extract_l1_info(block: &Block) -> Result { let l1_info_tx_data = block .body .first() - .ok_or(reth_executor::OptimismBlockExecutionError::L1BlockInfoError { + .ok_or(OptimismBlockExecutionError::L1BlockInfoError { message: "could not find l1 block info tx in the L2 block".to_string(), }) .map(|tx| tx.input())?; if l1_info_tx_data.len() < 4 { - return Err(reth_executor::OptimismBlockExecutionError::L1BlockInfoError { + return Err(OptimismBlockExecutionError::L1BlockInfoError { message: "invalid l1 block info transaction calldata in the L2 block".to_string(), - } - .into()) + }) } // If the first 4 bytes of the calldata are the L1BlockInfoEcotone selector, then we parse the @@ -56,7 +53,7 @@ pub fn extract_l1_info(block: &Block) -> Result Result { +pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result { // The setL1BlockValues tx calldata must be exactly 260 bytes long, considering that // we already removed the first 4 bytes (the function selector). 
Detailed breakdown: // 32 bytes for the block number @@ -68,33 +65,25 @@ pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result Result Result { +pub fn parse_l1_info_tx_ecotone(data: &[u8]) -> Result { if data.len() != 160 { - return Err(reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "unexpected l1 block info tx calldata length found".to_string(), - }, - )) + return Err(OptimismBlockExecutionError::L1BlockInfoError { + message: "unexpected l1 block info tx calldata length found".to_string(), + }) } let l1_blob_base_fee_scalar = U256::try_from_be_slice(&data[8..12]).ok_or( - reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "could not convert l1 blob base fee scalar".to_string(), - }, - ), + OptimismBlockExecutionError::L1BlockInfoError { + message: "could not convert l1 blob base fee scalar".to_string(), + }, )?; let l1_base_fee_scalar = U256::try_from_be_slice(&data[12..16]).ok_or( - reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "could not convert l1 base fee scalar".to_string(), - }, - ), + OptimismBlockExecutionError::L1BlockInfoError { + message: "could not convert l1 base fee scalar".to_string(), + }, )?; let l1_base_fee = U256::try_from_be_slice(&data[32..64]).ok_or( - reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "could not convert l1 blob base fee".to_string(), - }, - ), + OptimismBlockExecutionError::L1BlockInfoError { + message: "could not convert l1 blob base fee".to_string(), + }, )?; let l1_blob_base_fee = U256::try_from_be_slice(&data[64..96]).ok_or( - reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "could not 
convert l1 blob base fee".to_string(), - }, - ), + OptimismBlockExecutionError::L1BlockInfoError { + message: "could not convert l1 blob base fee".to_string(), + }, )?; let mut l1block = L1BlockInfo::default(); @@ -216,11 +195,10 @@ impl RethL1BlockInfo for L1BlockInfo { } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Bedrock, timestamp) { SpecId::BEDROCK } else { - return Err(reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "Optimism hardforks are not active".to_string(), - }, - )) + return Err(OptimismBlockExecutionError::L1BlockInfoError { + message: "Optimism hardforks are not active".to_string(), + } + .into()) }; Ok(self.calculate_tx_l1_cost(input, spec_id)) } @@ -236,11 +214,10 @@ impl RethL1BlockInfo for L1BlockInfo { } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Bedrock, timestamp) { SpecId::BEDROCK } else { - return Err(reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "Optimism hardforks are not active".to_string(), - }, - )) + return Err(OptimismBlockExecutionError::L1BlockInfoError { + message: "Optimism hardforks are not active".to_string(), + } + .into()) }; Ok(self.data_gas(input, spec_id)) } diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index c51265983e985..748eeab7b3726 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -22,7 +22,9 @@ pub use execute::*; pub mod l1; pub use l1::*; +mod error; pub mod verify; +pub use error::OptimismBlockExecutionError; /// Optimism-related EVM configuration. 
#[derive(Debug, Default, Clone, Copy)] diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index a596d93eace4d..9b9c66d4bc6b4 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -498,7 +498,7 @@ mod tests { let chain2 = Chain { blocks: BTreeMap::from([(3, block3), (4, block4)]), ..Default::default() }; - assert_eq!(chain1.append_chain(chain2.clone()), Ok(())); + assert!(chain1.append_chain(chain2.clone()).is_ok()); // chain1 got changed so this will fail assert!(chain1.append_chain(chain2).is_err()); From b77473cfab52cd6aae06834df59dbc0cd8edee7f Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 6 May 2024 16:54:28 +0200 Subject: [PATCH 485/700] fix: add checks for hardfork-specific fields to ensure_well_formed_payload (#7993) --- crates/payload/validator/src/lib.rs | 45 ++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 4 deletions(-) diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index c3b25aef90ecd..6b95b0425763b 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -38,6 +38,12 @@ impl ExecutionPayloadValidator { self.chain_spec().is_cancun_active_at_timestamp(timestamp) } + /// Returns true if the Shanghai hardfork is active at the given timestamp. + #[inline] + fn is_shanghai_active_at_timestamp(&self, timestamp: u64) -> bool { + self.chain_spec().is_shanghai_active_at_timestamp(timestamp) + } + /// Cancun specific checks for EIP-4844 blob transactions. 
/// /// Ensures that the number of blob versioned hashes matches the number hashes included in the @@ -114,11 +120,42 @@ impl ExecutionPayloadValidator { }) } - let cancun_active = self.is_cancun_active_at_timestamp(sealed_block.timestamp); + if self.is_cancun_active_at_timestamp(sealed_block.timestamp) { + if sealed_block.header.blob_gas_used.is_none() { + // cancun active but blob gas used not present + return Err(PayloadError::PostCancunBlockWithoutBlobGasUsed) + } + if sealed_block.header.excess_blob_gas.is_none() { + // cancun active but excess blob gas not present + return Err(PayloadError::PostCancunBlockWithoutExcessBlobGas) + } + if cancun_fields.as_ref().is_none() { + // cancun active but cancun fields not present + return Err(PayloadError::PostCancunWithoutCancunFields) + } + } else { + if sealed_block.has_blob_transactions() { + // cancun not active but blob transactions present + return Err(PayloadError::PreCancunBlockWithBlobTransactions) + } + if sealed_block.header.blob_gas_used.is_some() { + // cancun not active but blob gas used present + return Err(PayloadError::PreCancunBlockWithBlobGasUsed) + } + if sealed_block.header.excess_blob_gas.is_some() { + // cancun not active but excess blob gas present + return Err(PayloadError::PreCancunBlockWithExcessBlobGas) + } + if cancun_fields.as_ref().is_some() { + // cancun not active but cancun fields present + return Err(PayloadError::PreCancunWithCancunFields) + } + } - if !cancun_active && sealed_block.has_blob_transactions() { - // cancun not active but blob transactions present - return Err(PayloadError::PreCancunBlockWithBlobTransactions) + let shanghai_active = self.is_shanghai_active_at_timestamp(sealed_block.timestamp); + if !shanghai_active && sealed_block.withdrawals.is_some() { + // shanghai not active but withdrawals present + return Err(PayloadError::PreShanghaiBlockWithWitdrawals); } // EIP-4844 checks From 614e1bccd0cfdd723bee1fcfd4577c9af23f8c1a Mon Sep 17 00:00:00 2001 From: Matthias Seitz 
Date: Mon, 6 May 2024 17:00:35 +0200 Subject: [PATCH 486/700] chore: move NodeRecord type (#8121) --- Cargo.lock | 12 +- Cargo.toml | 1 + crates/net/types/Cargo.toml | 15 +- crates/net/types/src/lib.rs | 8 +- crates/net/types/src/node_record.rs | 362 ++++++++++++++++++++++++++++ crates/primitives/Cargo.toml | 1 + crates/primitives/src/net.rs | 2 +- crates/rpc/rpc-types/Cargo.toml | 15 +- crates/rpc/rpc-types/src/lib.rs | 1 + crates/rpc/rpc-types/src/net.rs | 357 --------------------------- crates/rpc/rpc-types/src/peer.rs | 5 - 11 files changed, 391 insertions(+), 388 deletions(-) create mode 100644 crates/net/types/src/node_record.rs diff --git a/Cargo.lock b/Cargo.lock index 4fa6fc73d0a69..14a5dd752bbb0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7229,10 +7229,14 @@ name = "reth-network-types" version = "0.2.0-beta.6" dependencies = [ "alloy-primitives", + "alloy-rlp", "enr", - "reth-rpc-types", + "rand 0.8.5", "secp256k1", + "serde_json", "serde_with", + "thiserror", + "url", ] [[package]] @@ -7558,6 +7562,7 @@ dependencies = [ "rayon", "reth-codecs", "reth-ethereum-forks", + "reth-network-types", "reth-rpc-types", "revm", "revm-primitives", @@ -7798,29 +7803,24 @@ dependencies = [ name = "reth-rpc-types" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-primitives", - "alloy-rlp", "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-rpc-types-anvil", "alloy-rpc-types-engine", "alloy-rpc-types-trace", "arbitrary", "bytes", - "enr", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", "proptest", "proptest-derive", "rand 0.8.5", - "secp256k1", "serde", "serde_json", "serde_with", "similar-asserts", "thiserror", - "url", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index e48db171291d1..dc693e94fb5fc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -338,6 +338,7 @@ smallvec = "1" dyn-clone = "1.0.17" sha2 = { version = "0.10", default-features = false } 
paste = "1.0" +url = "2.3" # proc-macros proc-macro2 = "1.0" diff --git a/crates/net/types/Cargo.toml b/crates/net/types/Cargo.toml index 9092236b1a1a7..9be9a2f3a2289 100644 --- a/crates/net/types/Cargo.toml +++ b/crates/net/types/Cargo.toml @@ -12,17 +12,22 @@ description = "Network types and utils" workspace = true [dependencies] -# reth -reth-rpc-types.workspace = true - -alloy-primitives.workspace = true # eth +alloy-primitives = { workspace = true, features = ["rlp"] } +alloy-rlp = { workspace = true, features = ["derive"] } enr.workspace = true # crypto -secp256k1 = { workspace = true, features = ["global-context", "recovery", "rand"] } +secp256k1.workspace = true # misc serde_with.workspace = true +thiserror.workspace = true +url.workspace = true +[dev-dependencies] +alloy-primitives = { workspace = true, features = ["rand"] } +rand.workspace = true +secp256k1 = { workspace = true, features = ["rand"] } +serde_json.workspace = true diff --git a/crates/net/types/src/lib.rs b/crates/net/types/src/lib.rs index 8d75af9333d8c..e4b9f28a4fdcf 100644 --- a/crates/net/types/src/lib.rs +++ b/crates/net/types/src/lib.rs @@ -11,12 +11,18 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use alloy_primitives::B512; use secp256k1::{constants::UNCOMPRESSED_PUBLIC_KEY_SIZE, PublicKey, SecretKey}; use std::{net::IpAddr, str::FromStr}; // Re-export PeerId for ease of use. pub use enr::Enr; -pub use reth_rpc_types::{NodeRecord, PeerId}; + +/// Alias for a peer identifier +pub type PeerId = B512; + +pub mod node_record; +pub use node_record::{NodeRecord, NodeRecordParseError}; /// This tag should be set to indicate to libsecp256k1 that the following bytes denote an /// uncompressed pubkey. diff --git a/crates/net/types/src/node_record.rs b/crates/net/types/src/node_record.rs new file mode 100644 index 0000000000000..5a6706201a73b --- /dev/null +++ b/crates/net/types/src/node_record.rs @@ -0,0 +1,362 @@ +//! 
Commonly used NodeRecord type for peers. + +use std::{ + fmt, + fmt::Write, + net::{IpAddr, Ipv4Addr, SocketAddr}, + num::ParseIntError, + str::FromStr, +}; + +use crate::{pk2id, PeerId}; +use alloy_rlp::{RlpDecodable, RlpEncodable}; +use enr::Enr; +use secp256k1::{SecretKey, SECP256K1}; +use serde_with::{DeserializeFromStr, SerializeDisplay}; + +/// Represents a ENR in discovery. +/// +/// Note: this is only an excerpt of the [`NodeRecord`] data structure. +#[derive( + Clone, + Copy, + Debug, + Eq, + PartialEq, + Hash, + SerializeDisplay, + DeserializeFromStr, + RlpEncodable, + RlpDecodable, +)] +pub struct NodeRecord { + /// The Address of a node. + pub address: IpAddr, + /// TCP port of the port that accepts connections. + pub tcp_port: u16, + /// UDP discovery port. + pub udp_port: u16, + /// Public key of the discovery service + pub id: PeerId, +} + +impl NodeRecord { + /// Derive the [`NodeRecord`] from the secret key and addr + pub fn from_secret_key(addr: SocketAddr, sk: &SecretKey) -> Self { + let pk = secp256k1::PublicKey::from_secret_key(SECP256K1, sk); + let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]); + Self::new(addr, id) + } + + /// Converts the `address` into an [`Ipv4Addr`] if the `address` is a mapped + /// [Ipv6Addr](std::net::Ipv6Addr). + /// + /// Returns `true` if the address was converted. + /// + /// See also [std::net::Ipv6Addr::to_ipv4_mapped] + pub fn convert_ipv4_mapped(&mut self) -> bool { + // convert IPv4 mapped IPv6 address + if let IpAddr::V6(v6) = self.address { + if let Some(v4) = v6.to_ipv4_mapped() { + self.address = v4.into(); + return true + } + } + false + } + + /// Same as [Self::convert_ipv4_mapped] but consumes the type + pub fn into_ipv4_mapped(mut self) -> Self { + self.convert_ipv4_mapped(); + self + } + + /// Creates a new record from a socket addr and peer id. 
+ #[allow(dead_code)] + pub fn new(addr: SocketAddr, id: PeerId) -> Self { + Self { address: addr.ip(), tcp_port: addr.port(), udp_port: addr.port(), id } + } + + /// The TCP socket address of this node + #[must_use] + pub fn tcp_addr(&self) -> SocketAddr { + SocketAddr::new(self.address, self.tcp_port) + } + + /// The UDP socket address of this node + #[must_use] + pub fn udp_addr(&self) -> SocketAddr { + SocketAddr::new(self.address, self.udp_port) + } +} + +impl fmt::Display for NodeRecord { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("enode://")?; + alloy_primitives::hex::encode(self.id.as_slice()).fmt(f)?; + f.write_char('@')?; + match self.address { + IpAddr::V4(ip) => { + ip.fmt(f)?; + } + IpAddr::V6(ip) => { + // encapsulate with brackets + f.write_char('[')?; + ip.fmt(f)?; + f.write_char(']')?; + } + } + f.write_char(':')?; + self.tcp_port.fmt(f)?; + if self.tcp_port != self.udp_port { + f.write_str("?discport=")?; + self.udp_port.fmt(f)?; + } + + Ok(()) + } +} + +/// Possible error types when parsing a [`NodeRecord`] +#[derive(Debug, thiserror::Error)] +pub enum NodeRecordParseError { + /// Invalid url + #[error("Failed to parse url: {0}")] + InvalidUrl(String), + /// Invalid id + #[error("Failed to parse id")] + InvalidId(String), + /// Invalid discport + #[error("Failed to discport query: {0}")] + Discport(ParseIntError), +} + +impl FromStr for NodeRecord { + type Err = NodeRecordParseError; + + fn from_str(s: &str) -> Result { + use url::{Host, Url}; + + let url = Url::parse(s).map_err(|e| NodeRecordParseError::InvalidUrl(e.to_string()))?; + + let address = match url.host() { + Some(Host::Ipv4(ip)) => IpAddr::V4(ip), + Some(Host::Ipv6(ip)) => IpAddr::V6(ip), + Some(Host::Domain(ip)) => IpAddr::V4( + Ipv4Addr::from_str(ip) + .map_err(|e| NodeRecordParseError::InvalidUrl(e.to_string()))?, + ), + _ => return Err(NodeRecordParseError::InvalidUrl(format!("invalid host: {url:?}"))), + }; + let port = url + .port() + 
.ok_or_else(|| NodeRecordParseError::InvalidUrl("no port specified".to_string()))?; + + let udp_port = if let Some(discovery_port) = url + .query_pairs() + .find_map(|(maybe_disc, port)| (maybe_disc.as_ref() == "discport").then_some(port)) + { + discovery_port.parse::().map_err(NodeRecordParseError::Discport)? + } else { + port + }; + + let id = url + .username() + .parse::() + .map_err(|e| NodeRecordParseError::InvalidId(e.to_string()))?; + + Ok(Self { address, id, tcp_port: port, udp_port }) + } +} + +impl TryFrom<&Enr> for NodeRecord { + type Error = NodeRecordParseError; + + fn try_from(enr: &Enr) -> Result { + let Some(address) = enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from)) + else { + return Err(NodeRecordParseError::InvalidUrl("ip missing".to_string())) + }; + + let Some(udp_port) = enr.udp4().or_else(|| enr.udp6()) else { + return Err(NodeRecordParseError::InvalidUrl("udp port missing".to_string())) + }; + + let Some(tcp_port) = enr.tcp4().or_else(|| enr.tcp6()) else { + return Err(NodeRecordParseError::InvalidUrl("tcp port missing".to_string())) + }; + + let id = pk2id(&enr.public_key()); + + Ok(NodeRecord { address, tcp_port, udp_port, id }.into_ipv4_mapped()) + } +} + +#[cfg(test)] +mod tests { + use std::net::Ipv6Addr; + + use alloy_rlp::Decodable; + use rand::{thread_rng, Rng, RngCore}; + + use super::*; + + #[test] + fn test_mapped_ipv6() { + let mut rng = thread_rng(); + + let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); + let v6 = v4.to_ipv6_mapped(); + + let record = NodeRecord { + address: v6.into(), + tcp_port: rng.gen(), + udp_port: rng.gen(), + id: rng.gen(), + }; + + assert!(record.clone().convert_ipv4_mapped()); + assert_eq!(record.into_ipv4_mapped().address, IpAddr::from(v4)); + } + + #[test] + fn test_mapped_ipv4() { + let mut rng = thread_rng(); + let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); + + let record = NodeRecord { + address: v4.into(), + tcp_port: rng.gen(), + udp_port: rng.gen(), + id: rng.gen(), + }; + + 
assert!(!record.clone().convert_ipv4_mapped()); + assert_eq!(record.into_ipv4_mapped().address, IpAddr::from(v4)); + } + + #[test] + fn test_noderecord_codec_ipv4() { + let mut rng = thread_rng(); + for _ in 0..100 { + let mut ip = [0u8; 4]; + rng.fill_bytes(&mut ip); + let record = NodeRecord { + address: IpAddr::V4(ip.into()), + tcp_port: rng.gen(), + udp_port: rng.gen(), + id: rng.gen(), + }; + + let decoded = NodeRecord::decode(&mut alloy_rlp::encode(record).as_slice()).unwrap(); + assert_eq!(record, decoded); + } + } + + #[test] + fn test_noderecord_codec_ipv6() { + let mut rng = thread_rng(); + for _ in 0..100 { + let mut ip = [0u8; 16]; + rng.fill_bytes(&mut ip); + let record = NodeRecord { + address: IpAddr::V6(ip.into()), + tcp_port: rng.gen(), + udp_port: rng.gen(), + id: rng.gen(), + }; + + let decoded = NodeRecord::decode(&mut alloy_rlp::encode(record).as_slice()).unwrap(); + assert_eq!(record, decoded); + } + } + + #[test] + fn test_url_parse() { + let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; + let node: NodeRecord = url.parse().unwrap(); + assert_eq!(node, NodeRecord { + address: IpAddr::V4([10,3,58,6].into()), + tcp_port: 30303, + udp_port: 30301, + id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), + }) + } + + #[test] + fn test_node_display() { + let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303"; + let node: NodeRecord = url.parse().unwrap(); + assert_eq!(url, &format!("{node}")); + } + + #[test] + fn test_node_display_discport() { + let url = 
"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; + let node: NodeRecord = url.parse().unwrap(); + assert_eq!(url, &format!("{node}")); + } + + #[test] + fn test_node_serialize() { + let cases = vec![ + // IPv4 + ( + NodeRecord { + address: IpAddr::V4([10, 3, 58, 6].into()), + tcp_port: 30303u16, + udp_port: 30301u16, + id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(), + }, + "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"" + ), + // IPv6 + ( + NodeRecord { + address: Ipv6Addr::new(0x2001, 0xdb8, 0x3c4d, 0x15, 0x0, 0x0, 0xabcd, 0xef12).into(), + tcp_port: 52150u16, + udp_port: 52151u16, + id: PeerId::from_str("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439").unwrap(), + }, + "\"enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150?discport=52151\"", + ) + ]; + + for (node, expected) in cases { + let ser = serde_json::to_string::(&node).expect("couldn't serialize"); + assert_eq!(ser, expected); + } + } + + #[test] + fn test_node_deserialize() { + let cases = vec![ + // IPv4 + ( + "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"", + NodeRecord { + address: IpAddr::V4([10, 3, 58, 6].into()), + tcp_port: 30303u16, + udp_port: 30301u16, + id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(), 
+ } + ), + // IPv6 + ( + "\"enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150?discport=52151\"", + NodeRecord { + address: Ipv6Addr::new(0x2001, 0xdb8, 0x3c4d, 0x15, 0x0, 0x0, 0xabcd, 0xef12).into(), + tcp_port: 52150u16, + udp_port: 52151u16, + id: PeerId::from_str("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439").unwrap(), + } + ), + ]; + + for (url, expected) in cases { + let node: NodeRecord = serde_json::from_str(url).expect("couldn't deserialize"); + assert_eq!(node, expected); + } + } +} diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index deaee23006835..675c7167f6e0c 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-codecs.workspace = true reth-ethereum-forks.workspace = true +reth-network-types.workspace = true reth-rpc-types.workspace = true revm.workspace = true revm-primitives = { workspace = true, features = ["serde"] } diff --git a/crates/primitives/src/net.rs b/crates/primitives/src/net.rs index 068e47e5b25ec..778e2658bc435 100644 --- a/crates/primitives/src/net.rs +++ b/crates/primitives/src/net.rs @@ -1,4 +1,4 @@ -pub use reth_rpc_types::{NodeRecord, NodeRecordParseError}; +pub use reth_network_types::{NodeRecord, NodeRecordParseError}; // Ethereum bootnodes come from // OP bootnodes come from diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml index 5f87e9482daa7..1426b50f868e3 100644 --- a/crates/rpc/rpc-types/Cargo.toml +++ b/crates/rpc/rpc-types/Cargo.toml @@ -12,8 +12,8 @@ description = "Reth RPC types" workspace = true [dependencies] + # ethereum -alloy-rlp = { workspace = true, features = ["arrayvec", "derive"] } alloy-primitives = { workspace = true, features = ["rand", "rlp", "serde"] } alloy-rpc-types = { 
workspace = true, features = ["jsonrpsee-types"] } alloy-rpc-types-anvil.workspace = true @@ -21,8 +21,6 @@ alloy-rpc-types-trace.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["jsonrpsee-types"] } ethereum_ssz_derive = { version = "0.5", optional = true } ethereum_ssz = { version = "0.5", optional = true } -alloy-genesis.workspace = true -enr = { workspace = true, features = ["serde", "rust-secp256k1"] } # misc thiserror.workspace = true @@ -30,19 +28,10 @@ serde = { workspace = true, features = ["derive"] } serde_with = "3.3" serde_json.workspace = true jsonrpsee-types = { workspace = true, optional = true } -url = "2.3" -# necessary so we don't hit a "undeclared 'std'": -# https://github.com/paradigmxyz/reth/pull/177#discussion_r1021172198 -secp256k1.workspace = true - -# arbitrary -arbitrary = { workspace = true, features = ["derive"], optional = true } -proptest = { workspace = true, optional = true } -proptest-derive = { workspace = true, optional = true } [features] default = ["jsonrpsee-types"] -arbitrary = ["dep:arbitrary", "dep:proptest-derive", "dep:proptest", "alloy-primitives/arbitrary", "alloy-rpc-types/arbitrary"] +arbitrary = ["alloy-primitives/arbitrary", "alloy-rpc-types/arbitrary"] ssz = ["dep:ethereum_ssz" ,"dep:ethereum_ssz_derive", "alloy-primitives/ssz", "alloy-rpc-types/ssz", "alloy-rpc-types-engine/ssz"] diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs index 0adcab0f3301c..01ed0f911097f 100644 --- a/crates/rpc/rpc-types/src/lib.rs +++ b/crates/rpc/rpc-types/src/lib.rs @@ -8,6 +8,7 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] pub mod beacon; mod eth; diff --git a/crates/rpc/rpc-types/src/net.rs b/crates/rpc/rpc-types/src/net.rs index d72d00fa5e917..b434bcbf84933 100644 --- a/crates/rpc/rpc-types/src/net.rs +++ b/crates/rpc/rpc-types/src/net.rs 
@@ -1,19 +1,5 @@ -use crate::{pk_to_id, PeerId}; -use alloy_rlp::{RlpDecodable, RlpEncodable}; use alloy_rpc_types::admin::EthProtocolInfo; -use enr::Enr; -use secp256k1::{SecretKey, SECP256K1}; use serde::{Deserialize, Serialize}; -use serde_with::{DeserializeFromStr, SerializeDisplay}; -use std::{ - fmt, - fmt::Write, - net::{IpAddr, Ipv4Addr, SocketAddr}, - num::ParseIntError, - str::FromStr, -}; -use thiserror::Error; -use url::{Host, Url}; /// The status of the network being ran by the local node. #[derive(Clone, Debug, Serialize, Deserialize)] @@ -25,346 +11,3 @@ pub struct NetworkStatus { /// Information about the Ethereum Wire Protocol. pub eth_protocol_info: EthProtocolInfo, } - -/// Represents a ENR in discovery. -/// -/// Note: this is only an excerpt of the [`NodeRecord`] data structure. -#[derive( - Clone, - Copy, - Debug, - Eq, - PartialEq, - Hash, - SerializeDisplay, - DeserializeFromStr, - RlpEncodable, - RlpDecodable, -)] -pub struct NodeRecord { - /// The Address of a node. - pub address: IpAddr, - /// TCP port of the port that accepts connections. - pub tcp_port: u16, - /// UDP discovery port. - pub udp_port: u16, - /// Public key of the discovery service - pub id: PeerId, -} - -impl NodeRecord { - /// Derive the [`NodeRecord`] from the secret key and addr - pub fn from_secret_key(addr: SocketAddr, sk: &SecretKey) -> Self { - let pk = secp256k1::PublicKey::from_secret_key(SECP256K1, sk); - let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]); - Self::new(addr, id) - } - - /// Converts the `address` into an [`Ipv4Addr`] if the `address` is a mapped - /// [Ipv6Addr](std::net::Ipv6Addr). - /// - /// Returns `true` if the address was converted. 
- /// - /// See also [std::net::Ipv6Addr::to_ipv4_mapped] - pub fn convert_ipv4_mapped(&mut self) -> bool { - // convert IPv4 mapped IPv6 address - if let IpAddr::V6(v6) = self.address { - if let Some(v4) = v6.to_ipv4_mapped() { - self.address = v4.into(); - return true - } - } - false - } - - /// Same as [Self::convert_ipv4_mapped] but consumes the type - pub fn into_ipv4_mapped(mut self) -> Self { - self.convert_ipv4_mapped(); - self - } - - /// Creates a new record from a socket addr and peer id. - #[allow(dead_code)] - pub fn new(addr: SocketAddr, id: PeerId) -> Self { - Self { address: addr.ip(), tcp_port: addr.port(), udp_port: addr.port(), id } - } - - /// The TCP socket address of this node - #[must_use] - pub fn tcp_addr(&self) -> SocketAddr { - SocketAddr::new(self.address, self.tcp_port) - } - - /// The UDP socket address of this node - #[must_use] - pub fn udp_addr(&self) -> SocketAddr { - SocketAddr::new(self.address, self.udp_port) - } -} - -impl fmt::Display for NodeRecord { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("enode://")?; - alloy_primitives::hex::encode(self.id.as_slice()).fmt(f)?; - f.write_char('@')?; - match self.address { - IpAddr::V4(ip) => { - ip.fmt(f)?; - } - IpAddr::V6(ip) => { - // encapsulate with brackets - f.write_char('[')?; - ip.fmt(f)?; - f.write_char(']')?; - } - } - f.write_char(':')?; - self.tcp_port.fmt(f)?; - if self.tcp_port != self.udp_port { - f.write_str("?discport=")?; - self.udp_port.fmt(f)?; - } - - Ok(()) - } -} - -/// Possible error types when parsing a [`NodeRecord`] -#[derive(Debug, Error)] -pub enum NodeRecordParseError { - /// Invalid url - #[error("Failed to parse url: {0}")] - InvalidUrl(String), - /// Invalid id - #[error("Failed to parse id")] - InvalidId(String), - /// Invalid discport - #[error("Failed to discport query: {0}")] - Discport(ParseIntError), -} - -impl FromStr for NodeRecord { - type Err = NodeRecordParseError; - - fn from_str(s: &str) -> Result { - let url 
= Url::parse(s).map_err(|e| NodeRecordParseError::InvalidUrl(e.to_string()))?; - - let address = match url.host() { - Some(Host::Ipv4(ip)) => IpAddr::V4(ip), - Some(Host::Ipv6(ip)) => IpAddr::V6(ip), - Some(Host::Domain(ip)) => IpAddr::V4( - Ipv4Addr::from_str(ip) - .map_err(|e| NodeRecordParseError::InvalidUrl(e.to_string()))?, - ), - _ => return Err(NodeRecordParseError::InvalidUrl(format!("invalid host: {url:?}"))), - }; - let port = url - .port() - .ok_or_else(|| NodeRecordParseError::InvalidUrl("no port specified".to_string()))?; - - let udp_port = if let Some(discovery_port) = url - .query_pairs() - .find_map(|(maybe_disc, port)| (maybe_disc.as_ref() == "discport").then_some(port)) - { - discovery_port.parse::().map_err(NodeRecordParseError::Discport)? - } else { - port - }; - - let id = url - .username() - .parse::() - .map_err(|e| NodeRecordParseError::InvalidId(e.to_string()))?; - - Ok(Self { address, id, tcp_port: port, udp_port }) - } -} - -impl TryFrom<&Enr> for NodeRecord { - type Error = NodeRecordParseError; - - fn try_from(enr: &Enr) -> Result { - let Some(address) = enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from)) - else { - return Err(NodeRecordParseError::InvalidUrl("ip missing".to_string())) - }; - - let Some(udp_port) = enr.udp4().or_else(|| enr.udp6()) else { - return Err(NodeRecordParseError::InvalidUrl("udp port missing".to_string())) - }; - - let Some(tcp_port) = enr.tcp4().or_else(|| enr.tcp6()) else { - return Err(NodeRecordParseError::InvalidUrl("tcp port missing".to_string())) - }; - - let id = pk_to_id(&enr.public_key()); - - Ok(NodeRecord { address, tcp_port, udp_port, id }.into_ipv4_mapped()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_rlp::Decodable; - use rand::{thread_rng, Rng, RngCore}; - use std::net::Ipv6Addr; - - #[test] - fn test_mapped_ipv6() { - let mut rng = thread_rng(); - - let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); - let v6 = v4.to_ipv6_mapped(); - - let record = NodeRecord { - 
address: v6.into(), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - assert!(record.clone().convert_ipv4_mapped()); - assert_eq!(record.into_ipv4_mapped().address, IpAddr::from(v4)); - } - - #[test] - fn test_mapped_ipv4() { - let mut rng = thread_rng(); - let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); - - let record = NodeRecord { - address: v4.into(), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - assert!(!record.clone().convert_ipv4_mapped()); - assert_eq!(record.into_ipv4_mapped().address, IpAddr::from(v4)); - } - - #[test] - fn test_noderecord_codec_ipv4() { - let mut rng = thread_rng(); - for _ in 0..100 { - let mut ip = [0u8; 4]; - rng.fill_bytes(&mut ip); - let record = NodeRecord { - address: IpAddr::V4(ip.into()), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - let decoded = NodeRecord::decode(&mut alloy_rlp::encode(record).as_slice()).unwrap(); - assert_eq!(record, decoded); - } - } - - #[test] - fn test_noderecord_codec_ipv6() { - let mut rng = thread_rng(); - for _ in 0..100 { - let mut ip = [0u8; 16]; - rng.fill_bytes(&mut ip); - let record = NodeRecord { - address: IpAddr::V6(ip.into()), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - let decoded = NodeRecord::decode(&mut alloy_rlp::encode(record).as_slice()).unwrap(); - assert_eq!(record, decoded); - } - } - - #[test] - fn test_url_parse() { - let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; - let node: NodeRecord = url.parse().unwrap(); - assert_eq!(node, NodeRecord { - address: IpAddr::V4([10,3,58,6].into()), - tcp_port: 30303, - udp_port: 30301, - id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), - }) - } - - #[test] - fn test_node_display() { - let url = 
"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303"; - let node: NodeRecord = url.parse().unwrap(); - assert_eq!(url, &format!("{node}")); - } - - #[test] - fn test_node_display_discport() { - let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; - let node: NodeRecord = url.parse().unwrap(); - assert_eq!(url, &format!("{node}")); - } - - #[test] - fn test_node_serialize() { - let cases = vec![ - // IPv4 - ( - NodeRecord{ - address: IpAddr::V4([10, 3, 58, 6].into()), - tcp_port: 30303u16, - udp_port: 30301u16, - id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(), - }, - "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"" - ), - // IPv6 - ( - NodeRecord{ - address: Ipv6Addr::new(0x2001, 0xdb8, 0x3c4d, 0x15, 0x0, 0x0, 0xabcd, 0xef12).into(), - tcp_port: 52150u16, - udp_port: 52151u16, - id: PeerId::from_str("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439").unwrap(), - }, - "\"enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150?discport=52151\"", - ) - ]; - - for (node, expected) in cases { - let ser = serde_json::to_string::(&node).expect("couldn't serialize"); - assert_eq!(ser, expected); - } - } - - #[test] - fn test_node_deserialize() { - let cases = vec![ - // IPv4 - ( - 
"\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"", - NodeRecord{ - address: IpAddr::V4([10, 3, 58, 6].into()), - tcp_port: 30303u16, - udp_port: 30301u16, - id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(), - } - ), - // IPv6 - ( - "\"enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150?discport=52151\"", - NodeRecord{ - address: Ipv6Addr::new(0x2001, 0xdb8, 0x3c4d, 0x15, 0x0, 0x0, 0xabcd, 0xef12).into(), - tcp_port: 52150u16, - udp_port: 52151u16, - id: PeerId::from_str("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439").unwrap(), - } - ), - ]; - - for (url, expected) in cases { - let node: NodeRecord = serde_json::from_str(url).expect("couldn't deserialize"); - assert_eq!(node, expected); - } - } -} diff --git a/crates/rpc/rpc-types/src/peer.rs b/crates/rpc/rpc-types/src/peer.rs index 44dbe5d71f24c..a07e61d00285d 100644 --- a/crates/rpc/rpc-types/src/peer.rs +++ b/crates/rpc/rpc-types/src/peer.rs @@ -2,8 +2,3 @@ use alloy_primitives::B512; /// Alias for a peer identifier pub type PeerId = B512; - -/// Converts a [`secp256k1::PublicKey`] to a [`PeerId`]. 
-pub fn pk_to_id(pk: &secp256k1::PublicKey) -> PeerId { - PeerId::from_slice(&pk.serialize_uncompressed()[1..]) -} From c70b17a5548c5893042f89614fd5095de8f236aa Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Mon, 6 May 2024 18:50:40 +0200 Subject: [PATCH 487/700] feat: prague engine api types (#8119) --- Cargo.lock | 154 +++++++++--------- Cargo.toml | 40 +++-- crates/engine-primitives/src/lib.rs | 43 ++++- crates/optimism/node/src/engine.rs | 2 +- crates/rpc/rpc-engine-api/src/engine_api.rs | 65 +++++++- .../rpc-types-compat/src/engine/payload.rs | 14 +- crates/rpc/rpc-types/src/beacon/payload.rs | 4 + 7 files changed, 216 insertions(+), 106 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 14a5dd752bbb0..36fa163dde544 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -71,7 +71,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", - "getrandom 0.2.14", + "getrandom 0.2.15", "once_cell", "version_check", "zerocopy", @@ -133,12 +133,12 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "c-kzg", "serde", ] @@ -146,7 +146,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" 
dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-primitives", @@ -171,17 +171,17 @@ dependencies = [ "itoa", "serde", "serde_json", - "winnow 0.6.7", + "winnow 0.6.8", ] [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "arbitrary", "c-kzg", "derive_more", @@ -197,7 +197,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -211,10 +211,10 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "serde", "serde_json", ] @@ -222,7 +222,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" dependencies = [ "alloy-primitives", "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -245,7 +245,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-primitives", "serde", @@ -257,13 +257,13 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-signer", "alloy-sol-types", "async-trait", @@ -274,9 +274,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-primitives", "k256", "serde_json", @@ -300,7 +300,7 @@ dependencies = [ "derive_arbitrary", "derive_more", "ethereum_ssz", - "getrandom 0.2.14", + "getrandom 0.2.15", "hex-literal", "itoa", "k256", @@ -316,14 +316,14 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = 
"git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-rpc-types-trace", "alloy-transport", "alloy-transport-http", @@ -366,7 +366,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -386,14 +386,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-sol-types", "arbitrary", "itertools 0.12.1", @@ -408,7 +408,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" 
version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" dependencies = [ "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -426,24 +426,24 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "serde", ] [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", @@ -456,11 +456,11 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "serde", "serde_json", ] @@ -468,7 +468,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-primitives", "serde", @@ -478,7 +478,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" dependencies = [ "alloy-primitives", "serde", @@ -488,7 +488,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-primitives", "async-trait", @@ -501,9 +501,9 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-consensus 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-network", "alloy-primitives", "alloy-signer", @@ -557,7 +557,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8e71ea68e780cc203919e03f69f59e7afe92d2696fb1dcb6662f61e4031b6" dependencies = [ - "winnow 0.6.7", + "winnow 0.6.8", ] [[package]] @@ -576,7 +576,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -594,7 +594,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -1546,9 +1546,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.96" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "065a29261d53ba54260972629f9ca6bffa69bac13cd1fed61420f7fa68b9f8bd" +checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4" dependencies = [ "jobserver", "libc", @@ -2964,7 +2964,7 @@ dependencies = [ name = "exex-rollup" version = "0.0.0" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-rlp", "alloy-sol-types", "eyre", @@ -3318,9 +3318,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" +checksum = 
"c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "js-sys", @@ -4594,7 +4594,7 @@ dependencies = [ "either", "futures", "futures-timer", - "getrandom 0.2.14", + "getrandom 0.2.15", "instant", "libp2p-allow-block-list", "libp2p-connection-limits", @@ -6132,7 +6132,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.14", + "getrandom 0.2.15", ] [[package]] @@ -6226,7 +6226,7 @@ version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ - "getrandom 0.2.14", + "getrandom 0.2.15", "libredox", "thiserror", ] @@ -6586,8 +6586,8 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.6" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-primitives", "arbitrary", "bytes", @@ -6792,9 +6792,9 @@ dependencies = [ name = "reth-e2e-test-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-network", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-signer", "alloy-signer-wallet", "eyre", @@ -7536,8 +7536,8 @@ name = "reth-primitives" version = "0.2.0-beta.6" dependencies = [ "alloy-chains", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", - "alloy-genesis 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-primitives", "alloy-rlp", "alloy-trie", @@ -7804,7 +7804,7 @@ name = "reth-rpc-types" version = "0.2.0-beta.6" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-rpc-types-anvil", "alloy-rpc-types-engine", "alloy-rpc-types-trace", @@ -7828,7 +7828,7 @@ name = "reth-rpc-types-compat" version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "reth-primitives", "reth-rpc-types", "serde_json", @@ -7934,7 +7934,7 @@ dependencies = [ name = "reth-testing-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "reth-primitives", "secp256k1", ] @@ -8068,10 +8068,10 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=089efac#089efacf72e7583630841b7027c46a3cb2f9c28b" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=7d810bc#7d810bc44c08fe8ec90ebef556883c2531ebf111" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", @@ -8173,7 +8173,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.14", + "getrandom 0.2.15", "libc", "spin 0.9.8", "untrusted 
0.9.0", @@ -8575,11 +8575,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", "core-foundation", "core-foundation-sys", "libc", @@ -8588,9 +8588,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -9598,7 +9598,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.7", + "winnow 0.6.8", ] [[package]] @@ -10060,7 +10060,7 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ - "getrandom 0.2.14", + "getrandom 0.2.15", ] [[package]] @@ -10463,9 +10463,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b9415ee827af173ebb3f15f9083df5a122eb93572ec28741fb153356ea2578" +checksum = "c3c52e9c97a68071b23e836c9380edae937f17b9c4667bd021973efc689f618d" dependencies = [ "memchr", ] diff --git a/Cargo.toml b/Cargo.toml index dc693e94fb5fc..f8d3dcac2f631 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -278,9 +278,14 @@ reth-node-events = { path = "crates/node/events" } reth-testing-utils = { path = "testing/testing-utils" } # revm -revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } -revm-primitives = { version = "3.1.0", features = 
["std"], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "089efac" } +revm = { version = "8.0.0", features = [ + "std", + "secp256k1", +], default-features = false } +revm-primitives = { version = "3.1.0", features = [ + "std", +], default-features = false } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "7d810bc" } # eth alloy-chains = "0.1.15" @@ -289,20 +294,20 @@ alloy-dyn-abi = "0.7.2" alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "17c5650" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", 
rev = "17c5650" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "c3ea7bc" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } # misc auto_impl = "1" @@ -377,7 +382,10 @@ secp256k1 = { version = "0.28", default-features = false, features = [ "recovery", ] } # TODO: Remove `k256` feature: https://github.com/sigp/enr/pull/74 -enr = { version = "0.12.0", default-features = false, features = ["k256", "rust-secp256k1"] } +enr = { version = "0.12.0", default-features = false, features = [ + "k256", + "rust-secp256k1", +] } # for eip-4844 c-kzg = "1.0.0" diff --git a/crates/engine-primitives/src/lib.rs b/crates/engine-primitives/src/lib.rs index e144d0fcd9f10..99edf521c0b62 100644 --- a/crates/engine-primitives/src/lib.rs +++ b/crates/engine-primitives/src/lib.rs @@ -115,6 +115,29 @@ pub fn validate_payload_timestamp( // the payload does not fall within the time frame of the Cancun fork. return Err(EngineObjectValidationError::UnsupportedFork) } + + let is_prague = chain_spec.is_prague_active_at_timestamp(timestamp); + if version == EngineApiMessageVersion::V4 && !is_prague { + // From the Engine API spec: + // + // + // For `engine_getPayloadV4`: + // + // 1. Client software **MUST** return `-38005: Unsupported fork` error if the `timestamp` of + // the built payload does not fall within the time frame of the Prague fork. + // + // For `engine_forkchoiceUpdatedV4`: + // + // 2. 
Client software **MUST** return `-38005: Unsupported fork` error if the + // `payloadAttributes` is set and the `payloadAttributes.timestamp` does not fall within + // the time frame of the Prague fork. + // + // For `engine_newPayloadV4`: + // + // 2. Client software **MUST** return `-38005: Unsupported fork` error if the `timestamp` of + // the payload does not fall within the time frame of the Prague fork. + return Err(EngineObjectValidationError::UnsupportedFork) + } Ok(()) } @@ -128,7 +151,7 @@ pub fn validate_withdrawals_presence( timestamp: u64, has_withdrawals: bool, ) -> Result<(), EngineObjectValidationError> { - let is_shanghai = chain_spec.is_shanghai_active_at_timestamp(timestamp); + let is_shanghai_active = chain_spec.is_shanghai_active_at_timestamp(timestamp); match version { EngineApiMessageVersion::V1 => { @@ -136,17 +159,17 @@ pub fn validate_withdrawals_presence( return Err(message_validation_kind .to_error(VersionSpecificValidationError::WithdrawalsNotSupportedInV1)) } - if is_shanghai { + if is_shanghai_active { return Err(message_validation_kind .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) } } - EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 => { - if is_shanghai && !has_withdrawals { + EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 | EngineApiMessageVersion::V4 => { + if is_shanghai_active && !has_withdrawals { return Err(message_validation_kind .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) } - if !is_shanghai && has_withdrawals { + if !is_shanghai_active && has_withdrawals { return Err(message_validation_kind .to_error(VersionSpecificValidationError::HasWithdrawalsPreShanghai)) } @@ -237,7 +260,7 @@ pub fn validate_parent_beacon_block_root_presence( )) } } - EngineApiMessageVersion::V3 => { + EngineApiMessageVersion::V3 | EngineApiMessageVersion::V4 => { if !has_parent_beacon_block_root { return Err(validation_kind 
.to_error(VersionSpecificValidationError::NoParentBeaconBlockRootPostCancun)) @@ -321,10 +344,14 @@ pub enum EngineApiMessageVersion { V1, /// Version 2 /// - /// Added for shanghai hardfork. + /// Added in the Shanghai hardfork. V2, /// Version 3 /// - /// Added for cancun hardfork. + /// Added in the Cancun hardfork. V3, + /// Version 4 + /// + /// Added in the Prague hardfork. + V4, } diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index f5c53d98e0d83..7382d2184dedb 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -74,7 +74,7 @@ pub fn validate_withdrawals_presence( .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) } } - EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 => { + EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 | EngineApiMessageVersion::V4 => { if is_shanghai && !has_withdrawals { return Err(message_validation_kind .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index eb3b1bfc7bd8d..0e4476bb71b09 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -12,8 +12,8 @@ use reth_provider::{BlockReader, EvmEnvProvider, HeaderProvider, StateProviderFa use reth_rpc_api::EngineApiServer; use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, - ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, - PayloadStatus, TransitionConfiguration, CAPABILITIES, + ExecutionPayloadV1, ExecutionPayloadV3, ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, + PayloadId, PayloadStatus, TransitionConfiguration, CAPABILITIES, }; use reth_rpc_types_compat::engine::payload::{ convert_payload_input_v2_to_payload, convert_to_payload_body_v1, @@ -148,6 +148,30 @@ where 
Ok(self.inner.beacon_consensus.new_payload(payload, Some(cancun_fields)).await?) } + /// See also + pub async fn new_payload_v4( + &self, + payload: ExecutionPayloadV4, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + ) -> EngineApiResult { + let payload = ExecutionPayload::from(payload); + let payload_or_attrs = + PayloadOrAttributes::<'_, EngineT::PayloadAttributes>::from_execution_payload( + &payload, + Some(parent_beacon_block_root), + ); + EngineT::validate_version_specific_fields( + &self.inner.chain_spec, + EngineApiMessageVersion::V4, + payload_or_attrs, + )?; + + let cancun_fields = CancunPayloadFields { versioned_hashes, parent_beacon_block_root }; + + Ok(self.inner.beacon_consensus.new_payload(payload, Some(cancun_fields)).await?) + } + /// Sends a message to the beacon consensus engine to update the fork choice _without_ /// withdrawals. /// @@ -280,7 +304,42 @@ where .map_err(|_| EngineApiError::UnknownPayload)? .try_into() .map_err(|_| { - warn!("could not transform built payload into ExecutionPayloadV2"); + warn!("could not transform built payload into ExecutionPayloadV3"); + EngineApiError::UnknownPayload + }) + } + + /// Returns the most recent version of the payload that is available in the corresponding + /// payload build process at the time of receiving this call. + /// + /// See also + /// + /// Note: + /// > Provider software MAY stop the corresponding build process after serving this call. + pub async fn get_payload_v4( + &self, + payload_id: PayloadId, + ) -> EngineApiResult { + // First we fetch the payload attributes to check the timestamp + let attributes = self.get_payload_attributes(payload_id).await?; + + // validate timestamp according to engine rules + validate_payload_timestamp( + &self.inner.chain_spec, + EngineApiMessageVersion::V4, + attributes.timestamp(), + )?; + + // Now resolve the payload + self.inner + .payload_store + .resolve(payload_id) + .await + .ok_or(EngineApiError::UnknownPayload)? 
+ .map_err(|_| EngineApiError::UnknownPayload)? + .try_into() + .map_err(|_| { + warn!("could not transform built payload into ExecutionPayloadV4"); EngineApiError::UnknownPayload }) } diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index 3ab9a74b9717a..f504c169cb7b0 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -8,7 +8,8 @@ use reth_primitives::{ }; use reth_rpc_types::engine::{ payload::{ExecutionPayloadBodyV1, ExecutionPayloadFieldV2, ExecutionPayloadInputV2}, - ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, PayloadError, + ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, + ExecutionPayloadV4, PayloadError, }; /// Converts [ExecutionPayloadV1] to [Block] @@ -83,8 +84,18 @@ pub fn try_payload_v3_to_block(payload: ExecutionPayloadV3) -> Result Result { + // this performs the same conversion as the underlying V3 payload. 
+ // + // the new request lists (`deposit_requests`, `withdrawal_requests`) are EL -> CL only, so we do + // not do anything special here to handle them + try_payload_v3_to_block(payload.payload_inner) +} + /// Converts [SealedBlock] to [ExecutionPayload] pub fn block_to_payload(value: SealedBlock) -> ExecutionPayload { + // todo(onbjerg): check for requests_root here and return payload v4 if value.header.parent_beacon_block_root.is_some() { // block with parent beacon block root: V3 ExecutionPayload::V3(block_to_payload_v3(value)) @@ -224,6 +235,7 @@ pub fn try_into_block( ExecutionPayload::V1(payload) => try_payload_v1_to_block(payload)?, ExecutionPayload::V2(payload) => try_payload_v2_to_block(payload)?, ExecutionPayload::V3(payload) => try_payload_v3_to_block(payload)?, + ExecutionPayload::V4(payload) => try_payload_v4_to_block(payload)?, }; base_payload.header.parent_beacon_block_root = parent_beacon_block_root; diff --git a/crates/rpc/rpc-types/src/beacon/payload.rs b/crates/rpc/rpc-types/src/beacon/payload.rs index a4898b723fa12..2bc4cde781b64 100644 --- a/crates/rpc/rpc-types/src/beacon/payload.rs +++ b/crates/rpc/rpc-types/src/beacon/payload.rs @@ -498,6 +498,10 @@ impl<'a> From<&'a ExecutionPayload> for BeaconExecutionPayload<'a> { ExecutionPayload::V3(payload) => { BeaconExecutionPayload::V3(BeaconExecutionPayloadV3::from(payload)) } + ExecutionPayload::V4(_payload) => { + // TODO(onbjerg): Implement `ExecutionPayloadV4` support + todo!() + } } } } From 16f85c43397d1ee113bcdeab0e8850dba6aa1ac5 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 6 May 2024 19:36:08 +0200 Subject: [PATCH 488/700] fix(discv5): decouple rlpx & discv5 ipmode (#8080) Co-authored-by: Matthias Seitz --- bin/reth/src/commands/p2p/mod.rs | 22 ++- crates/net/discv5/src/config.rs | 206 +++++++++++++++++++++++---- crates/net/discv5/src/error.rs | 3 + crates/net/discv5/src/lib.rs | 59 ++++---- crates/net/network/src/config.rs | 7 +- crates/net/network/src/discovery.rs | 2 +- 
crates/node-core/src/args/network.rs | 41 ++++-- crates/node-core/src/node_config.rs | 25 +++- 8 files changed, 278 insertions(+), 87 deletions(-) diff --git a/bin/reth/src/commands/p2p/mod.rs b/bin/reth/src/commands/p2p/mod.rs index 18cc6aba8304a..c3ad0231b030a 100644 --- a/bin/reth/src/commands/p2p/mod.rs +++ b/bin/reth/src/commands/p2p/mod.rs @@ -18,7 +18,11 @@ use reth_discv4::NatResolver; use reth_interfaces::p2p::bodies::client::BodiesClient; use reth_primitives::{BlockHashOrNumber, ChainSpec, NodeRecord}; use reth_provider::ProviderFactory; -use std::{net::SocketAddr, path::PathBuf, sync::Arc}; +use std::{ + net::{SocketAddrV4, SocketAddrV6}, + path::PathBuf, + sync::Arc, +}; /// `reth p2p` command #[derive(Debug, Parser)] @@ -143,19 +147,23 @@ impl Command { { network_config = network_config.discovery_v5_with_config_builder(|builder| { let DiscoveryArgs { - discv5_addr, - discv5_port, + discv5_addr: discv5_addr_ipv4, + discv5_addr_ipv6, + discv5_port: discv5_port_ipv4, + discv5_port_ipv6, discv5_lookup_interval, discv5_bootstrap_lookup_interval, discv5_bootstrap_lookup_countdown, .. 
} = self.discovery; + builder .discv5_config( - discv5::ConfigBuilder::new(ListenConfig::from(Into::::into(( - discv5_addr, - discv5_port, - )))) + discv5::ConfigBuilder::new(ListenConfig::from_two_sockets( + discv5_addr_ipv4.map(|addr| SocketAddrV4::new(addr, discv5_port_ipv4)), + discv5_addr_ipv6 + .map(|addr| SocketAddrV6::new(addr, discv5_port_ipv6, 0, 0)), + )) .build(), ) .lookup_interval(discv5_lookup_interval) diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index da7e58cb7f61a..2a246d3d57d0c 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -3,20 +3,26 @@ use std::{ collections::HashSet, fmt::Debug, - net::{IpAddr, Ipv4Addr, SocketAddr}, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, }; use derive_more::Display; use discv5::ListenConfig; use multiaddr::{Multiaddr, Protocol}; use reth_primitives::{Bytes, EnrForkIdEntry, ForkId, NodeRecord}; +use tracing::warn; use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys, NetworkStackId}; -/// The default address for discv5 via UDP. +/// The default address for discv5 via UDP is IPv4. /// /// Default is 0.0.0.0, all interfaces. See [`discv5::ListenConfig`] default. -pub const DEFAULT_DISCOVERY_V5_ADDR: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); +pub const DEFAULT_DISCOVERY_V5_ADDR: Ipv4Addr = Ipv4Addr::UNSPECIFIED; + +/// The default IPv6 address for discv5 via UDP. +/// +/// Default is ::, all interfaces. +pub const DEFAULT_DISCOVERY_V5_ADDR_IPV6: Ipv6Addr = Ipv6Addr::UNSPECIFIED; /// The default port for discv5 via UDP. /// @@ -40,7 +46,7 @@ pub const DEFAULT_COUNT_BOOTSTRAP_LOOKUPS: u64 = 100; pub const DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL: u64 = 5; /// Builds a [`Config`]. -#[derive(Debug, Default)] +#[derive(Debug)] pub struct ConfigBuilder { /// Config used by [`discv5::Discv5`]. Contains the discovery listen socket. 
discv5_config: Option, @@ -51,10 +57,11 @@ pub struct ConfigBuilder { /// /// Defaults to L1 mainnet if not set. fork: Option<(&'static [u8], ForkId)>, - /// RLPx TCP port to advertise. Note: so long as `reth_network` handles [`NodeRecord`]s as - /// opposed to [`Enr`](enr::Enr)s, TCP is limited to same IP address as UDP, since - /// [`NodeRecord`] doesn't supply an extra field for and alternative TCP address. - tcp_port: u16, + /// RLPx TCP socket to advertise. + /// + /// NOTE: IP address of RLPx socket overwrites IP address of same IP version in + /// [`discv5::ListenConfig`]. + tcp_socket: SocketAddr, /// List of `(key, rlp-encoded-value)` tuples that should be advertised in local node record /// (in addition to tcp port, udp port and fork). other_enr_kv_pairs: Vec<(&'static [u8], Bytes)>, @@ -77,7 +84,7 @@ impl ConfigBuilder { discv5_config, bootstrap_nodes, fork, - tcp_port, + tcp_socket, other_enr_kv_pairs, lookup_interval, bootstrap_lookup_interval, @@ -89,7 +96,7 @@ impl ConfigBuilder { discv5_config: Some(discv5_config), bootstrap_nodes, fork: fork.map(|(key, fork_id)| (key, fork_id.fork_id)), - tcp_port, + tcp_socket, other_enr_kv_pairs, lookup_interval: Some(lookup_interval), bootstrap_lookup_interval: Some(bootstrap_lookup_interval), @@ -152,9 +159,11 @@ impl ConfigBuilder { self } - /// Sets the tcp port to advertise in the local [`Enr`](discv5::enr::Enr). - pub fn tcp_port(mut self, port: u16) -> Self { - self.tcp_port = port; + /// Sets the tcp socket to advertise in the local [`Enr`](discv5::enr::Enr). The IP address of + /// this socket will overwrite the discovery address of the same IP version, if one is + /// configured. 
+ pub fn tcp_socket(mut self, socket: SocketAddr) -> Self { + self.tcp_socket = socket; self } @@ -201,7 +210,7 @@ impl ConfigBuilder { discv5_config, bootstrap_nodes, fork, - tcp_port, + tcp_socket, other_enr_kv_pairs, lookup_interval, bootstrap_lookup_interval, @@ -209,9 +218,12 @@ impl ConfigBuilder { discovered_peer_filter, } = self; - let discv5_config = discv5_config + let mut discv5_config = discv5_config .unwrap_or_else(|| discv5::ConfigBuilder::new(ListenConfig::default()).build()); + discv5_config.listen_config = + amend_listen_config_wrt_rlpx(&discv5_config.listen_config, tcp_socket.ip()); + let fork = fork.map(|(key, fork_id)| (key, fork_id.into())); let lookup_interval = lookup_interval.unwrap_or(DEFAULT_SECONDS_LOOKUP_INTERVAL); @@ -227,7 +239,7 @@ impl ConfigBuilder { discv5_config, bootstrap_nodes, fork, - tcp_port, + tcp_socket, other_enr_kv_pairs, lookup_interval, bootstrap_lookup_interval, @@ -248,8 +260,11 @@ pub struct Config { /// Fork kv-pair to set in local node record. Identifies which network/chain/fork the node /// belongs, e.g. `(b"opstack", ChainId)` or `(b"eth", [ForkId])`. pub(super) fork: Option<(&'static [u8], EnrForkIdEntry)>, - /// RLPx TCP port to advertise. - pub(super) tcp_port: u16, + /// RLPx TCP socket to advertise. + /// + /// NOTE: IP address of RLPx socket overwrites IP address of same IP version in + /// [`discv5::ListenConfig`]. + pub(super) tcp_socket: SocketAddr, /// Additional kv-pairs (besides tcp port, udp port and fork) that should be advertised to /// peers by including in local node record. pub(super) other_enr_kv_pairs: Vec<(&'static [u8], Bytes)>, @@ -266,9 +281,20 @@ pub struct Config { } impl Config { - /// Returns a new [`ConfigBuilder`], with the RLPx TCP port set to the given port. - pub fn builder(rlpx_tcp_port: u16) -> ConfigBuilder { - ConfigBuilder::default().tcp_port(rlpx_tcp_port) + /// Returns a new [`ConfigBuilder`], with the RLPx TCP port and IP version configured w.r.t. + /// the given socket. 
+ pub fn builder(rlpx_tcp_socket: SocketAddr) -> ConfigBuilder { + ConfigBuilder { + discv5_config: None, + bootstrap_nodes: HashSet::new(), + fork: None, + tcp_socket: rlpx_tcp_socket, + other_enr_kv_pairs: Vec::new(), + lookup_interval: None, + bootstrap_lookup_interval: None, + bootstrap_lookup_countdown: None, + discovered_peer_filter: None, + } } } @@ -286,12 +312,104 @@ impl Config { /// Returns the RLPx (TCP) socket contained in the [`discv5::Config`]. This socket will be /// advertised to peers in the local [`Enr`](discv5::enr::Enr). - pub fn rlpx_socket(&self) -> SocketAddr { - let port = self.tcp_port; - match self.discv5_config.listen_config { - ListenConfig::Ipv4 { ip, .. } => (ip, port).into(), - ListenConfig::Ipv6 { ip, .. } => (ip, port).into(), - ListenConfig::DualStack { ipv4, .. } => (ipv4, port).into(), + pub fn rlpx_socket(&self) -> &SocketAddr { + &self.tcp_socket + } +} + +/// Returns the IPv4 discovery socket if one is configured. +pub fn ipv4(listen_config: &ListenConfig) -> Option { + match listen_config { + ListenConfig::Ipv4 { ip, port } | + ListenConfig::DualStack { ipv4: ip, ipv4_port: port, .. } => { + Some(SocketAddrV4::new(*ip, *port)) + } + ListenConfig::Ipv6 { .. } => None, + } +} + +/// Returns the IPv6 discovery socket if one is configured. +pub fn ipv6(listen_config: &ListenConfig) -> Option { + match listen_config { + ListenConfig::Ipv4 { .. } => None, + ListenConfig::Ipv6 { ip, port } | + ListenConfig::DualStack { ipv6: ip, ipv6_port: port, .. } => { + Some(SocketAddrV6::new(*ip, *port, 0, 0)) + } + } +} + +/// Returns the amended [`discv5::ListenConfig`] based on the RLPx IP address. The ENR is limited +/// to one IP address per IP version (atm, may become spec'd how to advertise different addresses). +/// The RLPx address overwrites the discv5 address w.r.t. IP version. 
+pub fn amend_listen_config_wrt_rlpx( + listen_config: &ListenConfig, + rlpx_addr: IpAddr, +) -> ListenConfig { + let discv5_socket_ipv4 = ipv4(listen_config); + let discv5_socket_ipv6 = ipv6(listen_config); + + let discv5_port_ipv4 = + discv5_socket_ipv4.map(|socket| socket.port()).unwrap_or(DEFAULT_DISCOVERY_V5_PORT); + let discv5_addr_ipv4 = discv5_socket_ipv4.map(|socket| *socket.ip()); + let discv5_port_ipv6 = + discv5_socket_ipv6.map(|socket| socket.port()).unwrap_or(DEFAULT_DISCOVERY_V5_PORT); + let discv5_addr_ipv6 = discv5_socket_ipv6.map(|socket| *socket.ip()); + + let (discv5_socket_ipv4, discv5_socket_ipv6) = discv5_sockets_wrt_rlpx_addr( + rlpx_addr, + discv5_addr_ipv4, + discv5_port_ipv4, + discv5_addr_ipv6, + discv5_port_ipv6, + ); + + ListenConfig::from_two_sockets(discv5_socket_ipv4, discv5_socket_ipv6) +} + +/// Returns the sockets that can be used for discv5 with respect to the RLPx address. ENR specs only +/// acknowledge one address per IP version. +pub fn discv5_sockets_wrt_rlpx_addr( + rlpx_addr: IpAddr, + discv5_addr_ipv4: Option, + discv5_port_ipv4: u16, + discv5_addr_ipv6: Option, + discv5_port_ipv6: u16, +) -> (Option, Option) { + match rlpx_addr { + IpAddr::V4(rlpx_addr) => { + let discv5_socket_ipv6 = + discv5_addr_ipv6.map(|ip| SocketAddrV6::new(ip, discv5_port_ipv6, 0, 0)); + + if let Some(discv5_addr) = discv5_addr_ipv4 { + warn!(target: "discv5", + %discv5_addr, + %rlpx_addr, + "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version" + ); + } + + // overwrite discv5 ipv4 addr with RLPx address. this is since there is no + // spec'd way to advertise a different address for rlpx and discovery in the + // ENR. 
+ (Some(SocketAddrV4::new(rlpx_addr, discv5_port_ipv4)), discv5_socket_ipv6) + } + IpAddr::V6(rlpx_addr) => { + let discv5_socket_ipv4 = + discv5_addr_ipv4.map(|ip| SocketAddrV4::new(ip, discv5_port_ipv4)); + + if let Some(discv5_addr) = discv5_addr_ipv6 { + warn!(target: "discv5", + %discv5_addr, + %rlpx_addr, + "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version" + ); + } + + // overwrite discv5 ipv6 addr with RLPx address. this is since there is no + // spec'd way to advertise a different address for rlpx and discovery in the + // ENR. + (discv5_socket_ipv4, Some(SocketAddrV6::new(rlpx_addr, discv5_port_ipv6, 0, 0))) } } } @@ -351,7 +469,7 @@ mod test { fn parse_boot_nodes() { const OP_SEPOLIA_CL_BOOTNODES: &str ="enr:-J64QBwRIWAco7lv6jImSOjPU_W266lHXzpAS5YOh7WmgTyBZkgLgOwo_mxKJq3wz2XRbsoBItbv1dCyjIoNq67mFguGAYrTxM42gmlkgnY0gmlwhBLSsHKHb3BzdGFja4S0lAUAiXNlY3AyNTZrMaEDmoWSi8hcsRpQf2eJsNUx-sqv6fH4btmo2HsAzZFAKnKDdGNwgiQGg3VkcIIkBg,enr:-J64QFa3qMsONLGphfjEkeYyF6Jkil_jCuJmm7_a42ckZeUQGLVzrzstZNb1dgBp1GGx9bzImq5VxJLP-BaptZThGiWGAYrTytOvgmlkgnY0gmlwhGsV-zeHb3BzdGFja4S0lAUAiXNlY3AyNTZrMaEDahfSECTIS_cXyZ8IyNf4leANlZnrsMEWTkEYxf4GMCmDdGNwgiQGg3VkcIIkBg"; - let config = Config::builder(30303) + let config = Config::builder((Ipv4Addr::UNSPECIFIED, 30303).into()) .add_cl_serialized_signed_boot_nodes(OP_SEPOLIA_CL_BOOTNODES) .build(); @@ -371,7 +489,7 @@ mod test { #[test] fn parse_enodes() { - let config = Config::builder(30303) + let config = Config::builder((Ipv4Addr::UNSPECIFIED, 30303).into()) .add_serialized_unsigned_boot_nodes(BOOT_NODES_OP_MAINNET_AND_BASE_MAINNET) .build(); @@ -382,4 +500,34 @@ mod test { assert!(bootstrap_nodes.contains(&node.to_string())); } } + + #[test] + fn overwrite_ipv4_addr() { + let rlpx_addr: Ipv4Addr = "192.168.0.1".parse().unwrap(); + + let listen_config = ListenConfig::default(); + + let amended_config = amend_listen_config_wrt_rlpx(&listen_config, rlpx_addr.into()); + + let 
config_socket_ipv4 = ipv4(&amended_config).unwrap(); + + assert_eq!(*config_socket_ipv4.ip(), rlpx_addr); + assert_eq!(config_socket_ipv4.port(), DEFAULT_DISCOVERY_V5_PORT); + assert_eq!(ipv6(&amended_config), ipv6(&listen_config)); + } + + #[test] + fn overwrite_ipv6_addr() { + let rlpx_addr: Ipv6Addr = "fe80::1".parse().unwrap(); + + let listen_config = ListenConfig::default(); + + let amended_config = amend_listen_config_wrt_rlpx(&listen_config, rlpx_addr.into()); + + let config_socket_ipv6 = ipv6(&amended_config).unwrap(); + + assert_eq!(*config_socket_ipv6.ip(), rlpx_addr); + assert_eq!(config_socket_ipv6.port(), DEFAULT_DISCOVERY_V5_PORT); + assert_eq!(ipv4(&amended_config), ipv4(&listen_config)); + } } diff --git a/crates/net/discv5/src/error.rs b/crates/net/discv5/src/error.rs index 1656208986950..27763146481c2 100644 --- a/crates/net/discv5/src/error.rs +++ b/crates/net/discv5/src/error.rs @@ -35,4 +35,7 @@ pub enum Error { /// An error from underlying [`discv5::Discv5`] node. #[error("sigp/discv5 error, {0}")] Discv5Error(discv5::Error), + /// The [`ListenConfig`](discv5::ListenConfig) has been misconfigured. 
+ #[error("misconfigured listen config, RLPx TCP address must also be supported by discv5")] + ListenConfigMisconfigured, } diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 8e156dde1bffe..826556fb07f40 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -39,8 +39,8 @@ pub use discv5::{self, IpMode}; pub use config::{ BootNode, Config, ConfigBuilder, DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_ADDR, - DEFAULT_DISCOVERY_V5_PORT, DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, - DEFAULT_SECONDS_LOOKUP_INTERVAL, + DEFAULT_DISCOVERY_V5_ADDR_IPV6, DEFAULT_DISCOVERY_V5_PORT, + DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL, }; pub use enr::enr_to_discv4_id; pub use error::Error; @@ -66,8 +66,8 @@ pub const DEFAULT_MIN_TARGET_KBUCKET_INDEX: usize = 0; pub struct Discv5 { /// sigp/discv5 node. discv5: Arc, - /// [`IpMode`] of the the node. - ip_mode: IpMode, + /// [`IpMode`] of the RLPx network. + rlpx_ip_mode: IpMode, /// Key used in kv-pair to ID chain, e.g. 'opstack' or 'eth'. fork_key: Option<&'static [u8]>, /// Filter applied to a discovered peers before passing it up to app. @@ -162,7 +162,7 @@ impl Discv5 { // // 1. make local enr from listen config // - let (enr, bc_enr, fork_key, ip_mode) = build_local_enr(sk, &discv5_config); + let (enr, bc_enr, fork_key, rlpx_ip_mode) = build_local_enr(sk, &discv5_config); trace!(target: "net::discv5", ?enr, @@ -214,7 +214,7 @@ impl Discv5 { ); Ok(( - Self { discv5, ip_mode, fork_key, discovered_peer_filter, metrics }, + Self { discv5, rlpx_ip_mode, fork_key, discovered_peer_filter, metrics }, discv5_updates, bc_enr, )) @@ -328,7 +328,7 @@ impl Discv5 { } /// Tries to convert an [`Enr`](discv5::Enr) into the backwards compatible type [`NodeRecord`], - /// w.r.t. local [`IpMode`]. Uses source socket as udp socket. + /// w.r.t. local RLPx [`IpMode`]. Uses source socket as udp socket.
pub fn try_into_reachable( &self, enr: &discv5::Enr, @@ -336,13 +336,15 @@ impl Discv5 { ) -> Result { let id = enr_to_discv4_id(enr).ok_or(Error::IncompatibleKeyType)?; - // since we, on bootstrap, set tcp4 in local ENR for `IpMode::Dual`, we prefer tcp4 here - // too - let Some(tcp_port) = (match self.ip_mode() { - IpMode::Ip4 | IpMode::DualStack => enr.tcp4(), + if enr.tcp4().is_none() && enr.tcp6().is_none() { + return Err(Error::UnreachableRlpx) + } + let Some(tcp_port) = (match self.rlpx_ip_mode { + IpMode::Ip4 => enr.tcp4(), IpMode::Ip6 => enr.tcp6(), + _ => unimplemented!("dual-stack support not implemented for rlpx"), }) else { - return Err(Error::IpVersionMismatchRlpx(self.ip_mode())) + return Err(Error::IpVersionMismatchRlpx(self.rlpx_ip_mode)) }; Ok(NodeRecord { address: socket.ip(), tcp_port, udp_port: socket.port(), id }) @@ -385,9 +387,9 @@ impl Discv5 { // Complementary //////////////////////////////////////////////////////////////////////////////////////////////// - /// Returns the [`IpMode`] of the local node. + /// Returns the RLPx [`IpMode`] of the local node. pub fn ip_mode(&self) -> IpMode { - self.ip_mode + self.rlpx_ip_mode } /// Returns the key to use to identify the [`ForkId`] kv-pair on the [`Enr`](discv5::Enr). @@ -418,43 +420,45 @@ pub fn build_local_enr( ) -> (Enr, NodeRecord, Option<&'static [u8]>, IpMode) { let mut builder = discv5::enr::Enr::builder(); - let Config { discv5_config, fork, tcp_port, other_enr_kv_pairs, .. } = config; + let Config { discv5_config, fork, tcp_socket, other_enr_kv_pairs, .. 
} = config; - let (ip_mode, socket) = match discv5_config.listen_config { + let socket = match discv5_config.listen_config { ListenConfig::Ipv4 { ip, port } => { if ip != Ipv4Addr::UNSPECIFIED { builder.ip4(ip); } builder.udp4(port); - builder.tcp4(*tcp_port); + builder.tcp4(tcp_socket.port()); - (IpMode::Ip4, (ip, port).into()) + (ip, port).into() } ListenConfig::Ipv6 { ip, port } => { if ip != Ipv6Addr::UNSPECIFIED { builder.ip6(ip); } builder.udp6(port); - builder.tcp6(*tcp_port); + builder.tcp6(tcp_socket.port()); - (IpMode::Ip6, (ip, port).into()) + (ip, port).into() } ListenConfig::DualStack { ipv4, ipv4_port, ipv6, ipv6_port } => { if ipv4 != Ipv4Addr::UNSPECIFIED { builder.ip4(ipv4); } builder.udp4(ipv4_port); - builder.tcp4(*tcp_port); + builder.tcp4(tcp_socket.port()); if ipv6 != Ipv6Addr::UNSPECIFIED { builder.ip6(ipv6); } builder.udp6(ipv6_port); - (IpMode::DualStack, (ipv6, ipv6_port).into()) + (ipv6, ipv6_port).into() } }; + let rlpx_ip_mode = if tcp_socket.is_ipv4() { IpMode::Ip4 } else { IpMode::Ip6 }; + // identifies which network node is on let network_stack_id = fork.as_ref().map(|(network_stack_id, fork_value)| { builder.add_value_rlp(network_stack_id, alloy_rlp::encode(fork_value).into()); @@ -473,7 +477,7 @@ pub fn build_local_enr( // backwards compatible enr let bc_enr = NodeRecord::from_secret_key(socket, sk); - (enr, bc_enr, network_stack_id, ip_mode) + (enr, bc_enr, network_stack_id, rlpx_ip_mode) } /// Bootstraps underlying [`discv5::Discv5`] node with configured peers. 
@@ -660,7 +664,7 @@ mod test { ) .unwrap(), ), - ip_mode: IpMode::Ip4, + rlpx_ip_mode: IpMode::Ip4, fork_key: None, discovered_peer_filter: MustNotIncludeKeys::default(), metrics: Discv5Metrics::default(), @@ -673,9 +677,10 @@ mod test { let secret_key = SecretKey::new(&mut thread_rng()); let discv5_addr: SocketAddr = format!("127.0.0.1:{udp_port_discv5}").parse().unwrap(); + let rlpx_addr: SocketAddr = "127.0.0.1:30303".parse().unwrap(); let discv5_listen_config = ListenConfig::from(discv5_addr); - let discv5_config = Config::builder(30303) + let discv5_config = Config::builder(rlpx_addr) .discv5_config(discv5::ConfigBuilder::new(discv5_listen_config).build()) .build(); @@ -867,7 +872,9 @@ mod test { const TCP_PORT: u16 = 30303; let fork_id = MAINNET.latest_fork_id(); - let config = Config::builder(TCP_PORT).fork(NetworkStackId::ETH, fork_id).build(); + let config = Config::builder((Ipv4Addr::UNSPECIFIED, TCP_PORT).into()) + .fork(NetworkStackId::ETH, fork_id) + .build(); let sk = SecretKey::new(&mut thread_rng()); let (enr, _, _, _) = build_local_enr(&sk, &config); diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 40d88f991e53c..9e0f055ead104 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -121,16 +121,15 @@ impl NetworkConfig { self, f: impl FnOnce(reth_discv5::ConfigBuilder) -> reth_discv5::Config, ) -> Self { - let rlpx_port = self.listener_addr.port(); let network_stack_id = NetworkStackId::id(&self.chain_spec); let fork_id = self.chain_spec.latest_fork_id(); let boot_nodes = self.boot_nodes.clone(); - let mut builder = - reth_discv5::Config::builder(rlpx_port).add_unsigned_boot_nodes(boot_nodes.into_iter()); + let mut builder = reth_discv5::Config::builder(self.listener_addr) + .add_unsigned_boot_nodes(boot_nodes.into_iter()); if let Some(id) = network_stack_id { - builder = builder.fork(id, fork_id); + builder = builder.fork(id, fork_id) } self.set_discovery_v5(f(builder)) diff 
--git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index 67d6594547add..bb456d4eaea2e 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -369,7 +369,7 @@ mod tests { let discv4_config = Discv4ConfigBuilder::default().external_ip_resolver(None).build(); let discv5_listen_config = discv5::ListenConfig::from(discv5_addr); - let discv5_config = reth_discv5::Config::builder(0) + let discv5_config = reth_discv5::Config::builder(discv5_addr) .discv5_config(discv5::ConfigBuilder::new(discv5_listen_config).build()) .build(); diff --git a/crates/node-core/src/args/network.rs b/crates/node-core/src/args/network.rs index 0d5206e7f77c6..7b1b9d0d586f7 100644 --- a/crates/node-core/src/args/network.rs +++ b/crates/node-core/src/args/network.rs @@ -5,7 +5,7 @@ use clap::Args; use reth_config::Config; use reth_discv4::{DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT}; use reth_discv5::{ - DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_ADDR, DEFAULT_DISCOVERY_V5_PORT, + DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_PORT, DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL, }; use reth_net_nat::NatResolver; @@ -19,7 +19,11 @@ use reth_network::{ }; use reth_primitives::{mainnet_nodes, ChainSpec, NodeRecord}; use secp256k1::SecretKey; -use std::{net::IpAddr, path::PathBuf, sync::Arc}; +use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr}, + path::PathBuf, + sync::Arc, +}; /// Parameters for configuring the network more granularity via CLI #[derive(Debug, Clone, Args, PartialEq, Eq)] @@ -227,31 +231,40 @@ pub struct DiscoveryArgs { #[arg(id = "discovery.port", long = "discovery.port", value_name = "DISCOVERY_PORT", default_value_t = DEFAULT_DISCOVERY_PORT)] pub port: u16, - /// The UDP address to use for devp2p peer discovery version 5. 
- #[arg(id = "discovery.v5.addr", long = "discovery.v5.addr", value_name = "DISCOVERY_V5_ADDR", - default_value_t = DEFAULT_DISCOVERY_V5_ADDR)] - pub discv5_addr: IpAddr, + /// The UDP IPv4 address to use for devp2p peer discovery version 5. + #[arg(id = "discovery.v5.addr", long = "discovery.v5.addr", value_name = "DISCOVERY_V5_ADDR", default_value = None)] + pub discv5_addr: Option, + + /// The UDP IPv6 address to use for devp2p peer discovery version 5. + #[arg(id = "discovery.v5.addr.ipv6", long = "discovery.v5.addr.ipv6", value_name = "DISCOVERY_V5_ADDR_IPV6", default_value = None)] + pub discv5_addr_ipv6: Option, - /// The UDP port to use for devp2p peer discovery version 5. + /// The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is + /// IPv4, or `--discv5.addr` is set. #[arg(id = "discovery.v5.port", long = "discovery.v5.port", value_name = "DISCOVERY_V5_PORT", default_value_t = DEFAULT_DISCOVERY_V5_PORT)] pub discv5_port: u16, + /// The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is + /// IPv6, or `--discv5.addr.ipv6` is set. + #[arg(id = "discovery.v5.port.ipv6", long = "discovery.v5.port.ipv6", value_name = "DISCOVERY_V5_PORT_IPV6", + default_value = None, default_value_t = DEFAULT_DISCOVERY_V5_PORT)] + pub discv5_port_ipv6: u16, + /// The interval in seconds at which to carry out periodic lookup queries, for the whole /// run of the program. - #[arg(id = "discovery.v5.lookup-interval", long = "discovery.v5.lookup-interval", value_name = "DISCOVERY_V5_LOOKUP_INTERVAL", - default_value_t = DEFAULT_SECONDS_LOOKUP_INTERVAL)] + #[arg(id = "discovery.v5.lookup-interval", long = "discovery.v5.lookup-interval", value_name = "DISCOVERY_V5_LOOKUP_INTERVAL", default_value_t = DEFAULT_SECONDS_LOOKUP_INTERVAL)] pub discv5_lookup_interval: u64, /// The interval in seconds at which to carry out boost lookup queries, for a fixed number of /// times, at bootstrap. 
- #[arg(id = "discovery.v5.bootstrap.lookup-interval", long = "discovery.v5.bootstrap.lookup-interval", value_name = "DISCOVERY_V5_bootstrap_lookup_interval", + #[arg(id = "discovery.v5.bootstrap.lookup-interval", long = "discovery.v5.bootstrap.lookup-interval", value_name = "DISCOVERY_V5_bootstrap_lookup_interval", default_value_t = DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL)] pub discv5_bootstrap_lookup_interval: u64, /// The number of times to carry out boost lookup queries at bootstrap. - #[arg(id = "discovery.v5.bootstrap.lookup-countdown", long = "discovery.v5.bootstrap.lookup-countdown", value_name = "DISCOVERY_V5_bootstrap_lookup_countdown", - default_value_t = DEFAULT_COUNT_BOOTSTRAP_LOOKUPS)] + #[arg(id = "discovery.v5.bootstrap.lookup-countdown", long = "discovery.v5.bootstrap.lookup-countdown", value_name = "DISCOVERY_V5_bootstrap_lookup_countdown", + default_value_t = DEFAULT_COUNT_BOOTSTRAP_LOOKUPS)] pub discv5_bootstrap_lookup_countdown: u64, } @@ -289,8 +302,10 @@ impl Default for DiscoveryArgs { enable_discv5_discovery: cfg!(feature = "optimism"), addr: DEFAULT_DISCOVERY_ADDR, port: DEFAULT_DISCOVERY_PORT, - discv5_addr: DEFAULT_DISCOVERY_V5_ADDR, + discv5_addr: None, + discv5_addr_ipv6: None, discv5_port: DEFAULT_DISCOVERY_V5_PORT, + discv5_port_ipv6: DEFAULT_DISCOVERY_V5_PORT, discv5_lookup_interval: DEFAULT_SECONDS_LOOKUP_INTERVAL, discv5_bootstrap_lookup_interval: DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, discv5_bootstrap_lookup_countdown: DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index a4301b8046a79..dd7bd1ccb3f40 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -26,7 +26,11 @@ use reth_provider::{ }; use reth_tasks::TaskExecutor; use secp256k1::SecretKey; -use std::{net::SocketAddr, path::PathBuf, sync::Arc}; +use std::{ + net::{SocketAddr, SocketAddrV4, SocketAddrV6}, + path::PathBuf, + sync::Arc, +}; use tracing::*; /// 
The default prometheus recorder handle. We use a global static to ensure that it is only @@ -482,19 +486,26 @@ impl NodeConfig { // due to unsatisfied trait bounds config.discovery_v5_with_config_builder(|builder| { let DiscoveryArgs { - discv5_addr, - discv5_port, + discv5_addr: discv5_addr_ipv4, + discv5_addr_ipv6, + discv5_port: discv5_port_ipv4, + discv5_port_ipv6, discv5_lookup_interval, discv5_bootstrap_lookup_interval, discv5_bootstrap_lookup_countdown, .. } = self.network.discovery; + + let discv5_port_ipv4 = discv5_port_ipv4 + self.instance - 1; + let discv5_port_ipv6 = discv5_port_ipv6 + self.instance - 1; + builder .discv5_config( - discv5::ConfigBuilder::new(ListenConfig::from(Into::::into(( - discv5_addr, - discv5_port + self.instance - 1, - )))) + discv5::ConfigBuilder::new(ListenConfig::from_two_sockets( + discv5_addr_ipv4.map(|addr| SocketAddrV4::new(addr, discv5_port_ipv4)), + discv5_addr_ipv6 + .map(|addr| SocketAddrV6::new(addr, discv5_port_ipv6, 0, 0)), + )) .build(), ) .lookup_interval(discv5_lookup_interval) From 47dade7c12cc73dc839461be4b134329ea769404 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 6 May 2024 14:14:49 -0400 Subject: [PATCH 489/700] fix: set mainnet timestamps in op chainspecs for consensus checks (#8129) --- crates/primitives/src/chain/spec.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 823548d272165..de56ff1fe8b67 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -286,7 +286,9 @@ pub static OP_MAINNET: Lazy> = Lazy::new(|| { ), (Hardfork::Bedrock, ForkCondition::Block(105235063)), (Hardfork::Regolith, ForkCondition::Timestamp(0)), + (Hardfork::Shanghai, ForkCondition::Timestamp(1704992401)), (Hardfork::Canyon, ForkCondition::Timestamp(1704992401)), + (Hardfork::Cancun, ForkCondition::Timestamp(1710374401)), (Hardfork::Ecotone, 
ForkCondition::Timestamp(1710374401)), ]), base_fee_params: BaseFeeParamsKind::Variable( From 077f7310c707b07d52086a7887f3e92438106e42 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 6 May 2024 21:29:00 +0200 Subject: [PATCH 490/700] fix: ensure valid parent hash in prepare_invalid_response (#8123) Co-authored-by: Roman Krasiuk --- crates/consensus/beacon/src/engine/mod.rs | 31 +++++++++++++++++++---- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index d3c5bfe09d6d3..4e3550cd3f64c 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -702,7 +702,7 @@ where /// - null if client software cannot determine the ancestor of the invalid payload satisfying /// the above conditions. fn latest_valid_hash_for_invalid_payload( - &self, + &mut self, parent_hash: B256, insert_err: Option<&InsertBlockErrorKind>, ) -> Option { @@ -712,12 +712,31 @@ where } // Check if parent exists in side chain or in canonical chain. + // TODO: handle find_block_by_hash errors. if matches!(self.blockchain.find_block_by_hash(parent_hash, BlockSource::Any), Ok(Some(_))) { Some(parent_hash) } else { - // TODO: attempt to iterate over ancestors in the invalid cache + // iterate over ancestors in the invalid cache // until we encounter the first valid ancestor + let mut current_hash = parent_hash; + let mut current_header = self.invalid_headers.get(¤t_hash); + while let Some(header) = current_header { + current_hash = header.parent_hash; + current_header = self.invalid_headers.get(¤t_hash); + + // If current_header is None, then the current_hash does not have an invalid + // ancestor in the cache, check its presence in blockchain tree + if current_header.is_none() && + matches!( + // TODO: handle find_block_by_hash errors. 
+ self.blockchain.find_block_by_hash(current_hash, BlockSource::Any), + Ok(Some(_)) + ) + { + return Some(current_hash) + } + } None } } @@ -725,7 +744,7 @@ where /// Prepares the invalid payload response for the given hash, checking the /// database for the parent hash and populating the payload status with the latest valid hash /// according to the engine api spec. - fn prepare_invalid_response(&self, mut parent_hash: B256) -> PayloadStatus { + fn prepare_invalid_response(&mut self, mut parent_hash: B256) -> PayloadStatus { // Edge case: the `latestValid` field is the zero hash if the parent block is the terminal // PoW block, which we need to identify by looking at the parent's block difficulty if let Ok(Some(parent)) = self.blockchain.header_by_hash_or_number(parent_hash.into()) { @@ -734,10 +753,12 @@ where } } + let valid_parent_hash = + self.latest_valid_hash_for_invalid_payload(parent_hash, None).unwrap_or_default(); PayloadStatus::from_status(PayloadStatusEnum::Invalid { validation_error: PayloadValidationError::LinksToRejectedPayload.to_string(), }) - .with_latest_valid_hash(parent_hash) + .with_latest_valid_hash(valid_parent_hash) } /// Checks if the given `check` hash points to an invalid header, inserting the given `head` @@ -1089,7 +1110,7 @@ where /// /// This validation **MUST** be instantly run in all cases even during active sync process. 
fn ensure_well_formed_payload( - &self, + &mut self, payload: ExecutionPayload, cancun_fields: Option, ) -> Result { From c79c18874593f2554a09be82edac72a0c4230b19 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 6 May 2024 21:51:02 +0200 Subject: [PATCH 491/700] fix(discv5): no address cli arg (#8130) --- crates/net/network/src/config.rs | 7 ++++++- crates/node-core/src/node_config.rs | 27 +++++++++++++++++++++++---- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 9e0f055ead104..c2a7b32389dcd 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -141,11 +141,16 @@ impl NetworkConfig { self } - /// Sets the address for the incoming connection listener. + /// Sets the address for the incoming RLPx connection listener. pub fn set_listener_addr(mut self, listener_addr: SocketAddr) -> Self { self.listener_addr = listener_addr; self } + + /// Returns the address for the incoming RLPx connection listener. 
+ pub fn listener_addr(&self) -> &SocketAddr { + &self.listener_addr + } } impl NetworkConfig diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index dd7bd1ccb3f40..5cb28c87307a0 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -27,7 +27,7 @@ use reth_provider::{ use reth_tasks::TaskExecutor; use secp256k1::SecretKey; use std::{ - net::{SocketAddr, SocketAddrV4, SocketAddrV6}, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, path::PathBuf, sync::Arc, }; @@ -482,13 +482,14 @@ impl NodeConfig { return config } + let rlpx_addr = config.listener_addr().ip(); // work around since discv5 config builder can't be integrated into network config builder // due to unsatisfied trait bounds config.discovery_v5_with_config_builder(|builder| { let DiscoveryArgs { - discv5_addr: discv5_addr_ipv4, + discv5_addr, discv5_addr_ipv6, - discv5_port: discv5_port_ipv4, + discv5_port, discv5_port_ipv6, discv5_lookup_interval, discv5_bootstrap_lookup_interval, @@ -496,7 +497,9 @@ impl NodeConfig { .. } = self.network.discovery; - let discv5_port_ipv4 = discv5_port_ipv4 + self.instance - 1; + let discv5_addr_ipv4 = discv5_addr.or_else(|| ipv4(rlpx_addr)); + let discv5_addr_ipv6 = discv5_addr_ipv6.or_else(|| ipv6(rlpx_addr)); + let discv5_port_ipv4 = discv5_port + self.instance - 1; let discv5_port_ipv6 = discv5_port_ipv6 + self.instance - 1; builder @@ -548,3 +551,19 @@ impl Default for NodeConfig { } } } + +/// Returns the address if this is an [`Ipv4Addr`]. +pub fn ipv4(ip: IpAddr) -> Option { + match ip { + IpAddr::V4(ip) => Some(ip), + IpAddr::V6(_) => None, + } +} + +/// Returns the address if this is an [`Ipv6Addr`]. 
+pub fn ipv6(ip: IpAddr) -> Option { + match ip { + IpAddr::V4(_) => None, + IpAddr::V6(ip) => Some(ip), + } +} From 6f72c0ab5a28b18355089764d672ea08688e7e11 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 6 May 2024 21:51:11 +0200 Subject: [PATCH 492/700] cli(op): simplify init-state cmd (#8126) --- bin/reth/src/commands/init_state.rs | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/bin/reth/src/commands/init_state.rs b/bin/reth/src/commands/init_state.rs index e0558be321b2e..ef640e01cf120 100644 --- a/bin/reth/src/commands/init_state.rs +++ b/bin/reth/src/commands/init_state.rs @@ -10,7 +10,7 @@ use crate::{ use clap::Parser; use reth_config::config::EtlConfig; use reth_db::{database::Database, init_db}; -use reth_node_core::init::{init_from_state_dump, init_genesis}; +use reth_node_core::init::init_from_state_dump; use reth_primitives::{ChainSpec, B256}; use reth_provider::ProviderFactory; @@ -44,7 +44,7 @@ pub struct InitStateCommand { /// JSONL file with state dump. /// - /// Must contain accounts in following format, additional account fields are ignored. Can + /// Must contain accounts in following format, additional account fields are ignored. Must /// also contain { "root": \ } as first line. /// { /// "balance": "\", @@ -59,8 +59,8 @@ pub struct InitStateCommand { /// /// Allows init at a non-genesis block. Caution! Blocks must be manually imported up until /// and including the non-genesis block to init chain at. See 'import' command. 
- #[arg(long, value_name = "STATE_DUMP_FILE", verbatim_doc_comment, default_value = None)] - state: Option, + #[arg(value_name = "STATE_DUMP_FILE", verbatim_doc_comment)] + state: PathBuf, #[command(flatten)] db: DatabaseArgs, @@ -69,7 +69,7 @@ pub struct InitStateCommand { impl InitStateCommand { /// Execute the `init` command pub async fn execute(self) -> eyre::Result<()> { - info!(target: "reth::cli", "reth init starting"); + info!(target: "reth::cli", "Reth init-state starting"); // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); @@ -84,12 +84,9 @@ impl InitStateCommand { EtlConfig::default_file_size(), ); - info!(target: "reth::cli", "Writing genesis block"); + info!(target: "reth::cli", "Initiating state dump"); - let hash = match self.state { - Some(path) => init_at_state(path, provider_factory, etl_config)?, - None => init_genesis(provider_factory)?, - }; + let hash = init_at_state(self.state, provider_factory, etl_config)?; info!(target: "reth::cli", hash = ?hash, "Genesis block written"); Ok(()) From 5f82993c23164ce8ccdc7bf3ae5085205383a5c8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 6 May 2024 21:52:16 +0200 Subject: [PATCH 493/700] chore: replace beacon types (#8125) --- Cargo.lock | 168 +++--- Cargo.toml | 41 +- crates/rpc/rpc-types/Cargo.toml | 1 + crates/rpc/rpc-types/src/beacon/constants.rs | 17 - .../src/beacon/events/attestation.rs | 30 - .../beacon/events/light_client_finality.rs | 54 -- .../beacon/events/light_client_optimistic.rs | 24 - crates/rpc/rpc-types/src/beacon/events/mod.rs | 403 ------------- crates/rpc/rpc-types/src/beacon/header.rs | 125 ---- crates/rpc/rpc-types/src/beacon/mod.rs | 19 - crates/rpc/rpc-types/src/beacon/payload.rs | 569 ------------------ .../rpc/rpc-types/src/beacon/withdrawals.rs | 70 --- crates/rpc/rpc-types/src/lib.rs | 1 - crates/rpc/rpc-types/src/relay/mod.rs | 16 +- examples/beacon-api-sse/Cargo.toml | 1 + examples/beacon-api-sse/src/main.rs | 3 
+- 16 files changed, 119 insertions(+), 1423 deletions(-) delete mode 100644 crates/rpc/rpc-types/src/beacon/constants.rs delete mode 100644 crates/rpc/rpc-types/src/beacon/events/attestation.rs delete mode 100644 crates/rpc/rpc-types/src/beacon/events/light_client_finality.rs delete mode 100644 crates/rpc/rpc-types/src/beacon/events/light_client_optimistic.rs delete mode 100644 crates/rpc/rpc-types/src/beacon/events/mod.rs delete mode 100644 crates/rpc/rpc-types/src/beacon/header.rs delete mode 100644 crates/rpc/rpc-types/src/beacon/mod.rs delete mode 100644 crates/rpc/rpc-types/src/beacon/payload.rs delete mode 100644 crates/rpc/rpc-types/src/beacon/withdrawals.rs diff --git a/Cargo.lock b/Cargo.lock index 36fa163dde544..866a3816736e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -71,7 +71,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", - "getrandom 0.2.15", + "getrandom 0.2.14", "once_cell", "version_check", "zerocopy", @@ -133,12 +133,12 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "c-kzg", "serde", ] @@ -146,7 +146,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" +source = 
"git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-primitives", @@ -171,17 +171,17 @@ dependencies = [ "itoa", "serde", "serde_json", - "winnow 0.6.8", + "winnow 0.6.7", ] [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "arbitrary", "c-kzg", "derive_more", @@ -197,7 +197,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -211,10 +211,10 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "serde", "serde_json", ] @@ -222,7 +222,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -245,7 +245,7 @@ 
dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", "serde", @@ -257,13 +257,13 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-signer", "alloy-sol-types", "async-trait", @@ -274,9 +274,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "k256", "serde_json", @@ -300,7 +300,7 @@ dependencies = [ "derive_arbitrary", "derive_more", "ethereum_ssz", - "getrandom 0.2.15", + "getrandom 0.2.14", "hex-literal", "itoa", "k256", @@ -316,14 +316,14 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-rpc-types-trace", "alloy-transport", "alloy-transport-http", @@ -366,7 +366,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -386,14 +386,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-sol-types", 
"arbitrary", "itertools 0.12.1", @@ -408,7 +408,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -426,24 +426,36 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "serde", ] +[[package]] +name = "alloy-rpc-types-beacon" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +dependencies = [ + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-primitives", + "alloy-rpc-types-engine", + "serde", + "serde_with", +] + [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", - 
"alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", @@ -456,11 +468,11 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "serde", "serde_json", ] @@ -468,7 +480,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", "serde", @@ -478,7 +490,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "serde", @@ -488,7 +500,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", "async-trait", @@ -501,9 +513,9 @@ dependencies = 
[ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-network", "alloy-primitives", "alloy-signer", @@ -557,7 +569,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8e71ea68e780cc203919e03f69f59e7afe92d2696fb1dcb6662f61e4031b6" dependencies = [ - "winnow 0.6.8", + "winnow 0.6.7", ] [[package]] @@ -576,7 +588,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -594,7 +606,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -1087,6 +1099,7 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" name = "beacon-api-sse" version = "0.0.0" dependencies = [ + "alloy-rpc-types-beacon", "clap", "futures-util", "mev-share-sse", @@ -1546,9 +1559,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.97" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4" +checksum = 
"065a29261d53ba54260972629f9ca6bffa69bac13cd1fed61420f7fa68b9f8bd" dependencies = [ "jobserver", "libc", @@ -2964,7 +2977,7 @@ dependencies = [ name = "exex-rollup" version = "0.0.0" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-rlp", "alloy-sol-types", "eyre", @@ -3318,9 +3331,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if", "js-sys", @@ -4594,7 +4607,7 @@ dependencies = [ "either", "futures", "futures-timer", - "getrandom 0.2.15", + "getrandom 0.2.14", "instant", "libp2p-allow-block-list", "libp2p-connection-limits", @@ -6132,7 +6145,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.14", ] [[package]] @@ -6226,7 +6239,7 @@ version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.14", "libredox", "thiserror", ] @@ -6586,8 +6599,8 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.6" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "arbitrary", "bytes", @@ -6792,9 +6805,9 @@ dependencies = [ name = "reth-e2e-test-utils" 
version = "0.2.0-beta.6" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-network", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-signer", "alloy-signer-wallet", "eyre", @@ -7536,8 +7549,8 @@ name = "reth-primitives" version = "0.2.0-beta.6" dependencies = [ "alloy-chains", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "alloy-rlp", "alloy-trie", @@ -7804,8 +7817,9 @@ name = "reth-rpc-types" version = "0.2.0-beta.6" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-rpc-types-anvil", + "alloy-rpc-types-beacon", "alloy-rpc-types-engine", "alloy-rpc-types-trace", "arbitrary", @@ -7828,7 +7842,7 @@ name = "reth-rpc-types-compat" version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "reth-primitives", "reth-rpc-types", "serde_json", @@ -7934,7 +7948,7 @@ dependencies = [ name = "reth-testing-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "reth-primitives", "secp256k1", ] @@ -8068,10 +8082,10 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = 
"git+https://github.com/paradigmxyz/evm-inspectors?rev=7d810bc#7d810bc44c08fe8ec90ebef556883c2531ebf111" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=7168ac5#7168ac55682fb420da7a82ed94bfb0c30a034113" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", @@ -8173,7 +8187,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.15", + "getrandom 0.2.14", "libc", "spin 0.9.8", "untrusted 0.9.0", @@ -8575,11 +8589,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.11.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" +checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" dependencies = [ - "bitflags 2.5.0", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -8588,9 +8602,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" +checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" dependencies = [ "core-foundation-sys", "libc", @@ -9598,7 +9612,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.8", + "winnow 0.6.7", ] [[package]] @@ -10060,7 +10074,7 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.14", ] [[package]] @@ -10463,9 +10477,9 @@ dependencies = [ [[package]] name = "winnow" 
-version = "0.6.8" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c52e9c97a68071b23e836c9380edae937f17b9c4667bd021973efc689f618d" +checksum = "14b9415ee827af173ebb3f15f9083df5a122eb93572ec28741fb153356ea2578" dependencies = [ "memchr", ] diff --git a/Cargo.toml b/Cargo.toml index f8d3dcac2f631..e6edbe5b2c147 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -278,14 +278,9 @@ reth-node-events = { path = "crates/node/events" } reth-testing-utils = { path = "testing/testing-utils" } # revm -revm = { version = "8.0.0", features = [ - "std", - "secp256k1", -], default-features = false } -revm-primitives = { version = "3.1.0", features = [ - "std", -], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "7d810bc" } +revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } +revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "7168ac5" } # eth alloy-chains = "0.1.15" @@ -294,20 +289,21 @@ alloy-dyn-abi = "0.7.2" alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-rpc-types-anvil = { git = 
"https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "c3ea7bc" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "77c1240" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } # misc auto_impl = "1" @@ -382,10 +378,7 @@ secp256k1 = { version = "0.28", default-features = false, features = [ "recovery", ] } # TODO: Remove `k256` feature: https://github.com/sigp/enr/pull/74 -enr = { version = "0.12.0", default-features = false, features = [ - "k256", - "rust-secp256k1", -] } +enr = { version = "0.12.0", default-features = false, features = ["k256", "rust-secp256k1"] } # for eip-4844 c-kzg = "1.0.0" diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml index 
1426b50f868e3..83ad91f5c4586 100644 --- a/crates/rpc/rpc-types/Cargo.toml +++ b/crates/rpc/rpc-types/Cargo.toml @@ -18,6 +18,7 @@ alloy-primitives = { workspace = true, features = ["rand", "rlp", "serde"] } alloy-rpc-types = { workspace = true, features = ["jsonrpsee-types"] } alloy-rpc-types-anvil.workspace = true alloy-rpc-types-trace.workspace = true +alloy-rpc-types-beacon.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["jsonrpsee-types"] } ethereum_ssz_derive = { version = "0.5", optional = true } ethereum_ssz = { version = "0.5", optional = true } diff --git a/crates/rpc/rpc-types/src/beacon/constants.rs b/crates/rpc/rpc-types/src/beacon/constants.rs deleted file mode 100644 index 945a4ba20450d..0000000000000 --- a/crates/rpc/rpc-types/src/beacon/constants.rs +++ /dev/null @@ -1,17 +0,0 @@ -/// The Domain Separation Tag for hash_to_point in Ethereum beacon chain BLS12-381 signatures. -/// -/// This is also the name of the ciphersuite that defines beacon chain BLS signatures. -/// -/// See: -/// -/// -pub const BLS_DST_SIG: &[u8] = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_"; - -/// The number of bytes in a BLS12-381 public key. -pub const BLS_PUBLIC_KEY_BYTES_LEN: usize = 48; - -/// The number of bytes in a BLS12-381 secret key. -pub const BLS_SECRET_KEY_BYTES_LEN: usize = 32; - -/// The number of bytes in a BLS12-381 signature. 
-pub const BLS_SIGNATURE_BYTES_LEN: usize = 96; diff --git a/crates/rpc/rpc-types/src/beacon/events/attestation.rs b/crates/rpc/rpc-types/src/beacon/events/attestation.rs deleted file mode 100644 index c789a46713ad5..0000000000000 --- a/crates/rpc/rpc-types/src/beacon/events/attestation.rs +++ /dev/null @@ -1,30 +0,0 @@ -use alloy_primitives::B256; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DisplayFromStr}; - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct AttestationData { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - #[serde_as(as = "DisplayFromStr")] - pub index: u64, - pub beacon_block_root: B256, - pub source: Source, - pub target: Target, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Source { - #[serde_as(as = "DisplayFromStr")] - pub epoch: u64, - pub root: B256, -} -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Target { - #[serde_as(as = "DisplayFromStr")] - pub epoch: u64, - pub root: B256, -} diff --git a/crates/rpc/rpc-types/src/beacon/events/light_client_finality.rs b/crates/rpc/rpc-types/src/beacon/events/light_client_finality.rs deleted file mode 100644 index 10928c7a780c1..0000000000000 --- a/crates/rpc/rpc-types/src/beacon/events/light_client_finality.rs +++ /dev/null @@ -1,54 +0,0 @@ -use alloy_primitives::{Bytes, B256}; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DisplayFromStr}; - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct LightClientFinalityData { - pub attested_header: AttestedHeader, - pub finalized_header: FinalizedHeader, - pub finality_branch: Vec, - pub sync_aggregate: SyncAggregate, - #[serde_as(as = "DisplayFromStr")] - pub signature_slot: u64, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct AttestedHeader { - pub beacon: Beacon, -} - -#[serde_as] -#[derive(Debug, Clone, 
PartialEq, Eq, Serialize, Deserialize)] -pub struct Beacon { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - #[serde_as(as = "DisplayFromStr")] - pub proposer_index: u64, - pub parent_root: B256, - pub state_root: B256, - pub body_root: B256, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct FinalizedHeader { - pub beacon: Beacon2, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Beacon2 { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - #[serde_as(as = "DisplayFromStr")] - pub proposer_index: u64, - pub parent_root: B256, - pub state_root: B256, - pub body_root: B256, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct SyncAggregate { - pub sync_committee_bits: Bytes, - pub sync_committee_signature: Bytes, -} diff --git a/crates/rpc/rpc-types/src/beacon/events/light_client_optimistic.rs b/crates/rpc/rpc-types/src/beacon/events/light_client_optimistic.rs deleted file mode 100644 index af310f8cc80ab..0000000000000 --- a/crates/rpc/rpc-types/src/beacon/events/light_client_optimistic.rs +++ /dev/null @@ -1,24 +0,0 @@ -use crate::beacon::header::BeaconBlockHeader; -use alloy_primitives::Bytes; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DisplayFromStr}; - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct LightClientOptimisticData { - pub attested_header: AttestedHeader, - pub sync_aggregate: SyncAggregate, - #[serde_as(as = "DisplayFromStr")] - pub signature_slot: u64, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct AttestedHeader { - pub beacon: BeaconBlockHeader, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct SyncAggregate { - pub sync_committee_bits: Bytes, - pub sync_committee_signature: Bytes, -} diff --git a/crates/rpc/rpc-types/src/beacon/events/mod.rs b/crates/rpc/rpc-types/src/beacon/events/mod.rs deleted file mode 
100644 index 501494a91f0e5..0000000000000 --- a/crates/rpc/rpc-types/src/beacon/events/mod.rs +++ /dev/null @@ -1,403 +0,0 @@ -//! Support for the Beacon API events -//! -//! See also [ethereum-beacon-API eventstream](https://ethereum.github.io/beacon-APIs/#/Events/eventstream) - -use crate::engine::PayloadAttributes; -use alloy_primitives::{Address, Bytes, B256}; -use attestation::AttestationData; -use light_client_finality::LightClientFinalityData; -use light_client_optimistic::LightClientOptimisticData; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DisplayFromStr}; - -pub mod attestation; -pub mod light_client_finality; -pub mod light_client_optimistic; - -/// Topic variant for the eventstream API -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum BeaconNodeEventTopic { - PayloadAttributes, - Head, - Block, - Attestation, - VoluntaryExit, - BlsToExecutionChange, - FinalizedCheckpoint, - ChainReorg, - ContributionAndProof, - LightClientFinalityUpdate, - LightClientOptimisticUpdate, - BlobSidecar, -} - -impl BeaconNodeEventTopic { - /// Returns the identifier value for the eventstream query - pub fn query_value(&self) -> &'static str { - match self { - BeaconNodeEventTopic::PayloadAttributes => "payload_attributes", - BeaconNodeEventTopic::Head => "head", - BeaconNodeEventTopic::Block => "block", - BeaconNodeEventTopic::Attestation => "attestation", - BeaconNodeEventTopic::VoluntaryExit => "voluntary_exit", - BeaconNodeEventTopic::BlsToExecutionChange => "bls_to_execution_change", - BeaconNodeEventTopic::FinalizedCheckpoint => "finalized_checkpoint", - BeaconNodeEventTopic::ChainReorg => "chain_reorg", - BeaconNodeEventTopic::ContributionAndProof => "contribution_and_proof", - BeaconNodeEventTopic::LightClientFinalityUpdate => "light_client_finality_update", - BeaconNodeEventTopic::LightClientOptimisticUpdate => "light_client_optimistic_update", - BeaconNodeEventTopic::BlobSidecar => "blob_sidecar", - } - } -} - -/// Event for the 
`payload_attributes` topic of the beacon API node event stream. -/// -/// This event gives block builders and relays sufficient information to construct or verify a block -/// at `proposal_slot`. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct PayloadAttributesEvent { - /// the identifier of the beacon hard fork at `proposal_slot`, e.g `"bellatrix"`, `"capella"`. - pub version: String, - /// Wrapped data of the event. - pub data: PayloadAttributesData, -} - -/// Event for the `Head` topic of the beacon API node event stream. -/// -/// The node has finished processing, resulting in a new head. previous_duty_dependent_root is -/// \`get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch - 1) - 1)\` and -/// current_duty_dependent_root is \`get_block_root_at_slot(state, -/// compute_start_slot_at_epoch(epoch) -/// - 1)\`. Both dependent roots use the genesis block root in the case of underflow. -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct HeadEvent { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - pub block: B256, - pub state: B256, - pub epoch_transition: bool, - pub previous_duty_dependent_root: B256, - pub current_duty_dependent_root: B256, - pub execution_optimistic: bool, -} - -/// Event for the `Block` topic of the beacon API node event stream. -/// -/// The node has received a valid block (from P2P or API) -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct BlockEvent { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - pub block: B256, - pub execution_optimistic: bool, -} - -/// Event for the `Attestation` topic of the beacon API node event stream. 
-/// -/// The node has received a valid attestation (from P2P or API) -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct AttestationEvent { - pub aggregation_bits: Bytes, - pub signature: Bytes, - pub data: AttestationData, -} - -/// Event for the `VoluntaryExit` topic of the beacon API node event stream. -/// -/// The node has received a valid voluntary exit (from P2P or API) -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct VoluntaryExitEvent { - pub message: VoluntaryExitMessage, - pub signature: Bytes, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct VoluntaryExitMessage { - #[serde_as(as = "DisplayFromStr")] - pub epoch: u64, - #[serde_as(as = "DisplayFromStr")] - pub validator_index: u64, -} - -/// Event for the `BlsToExecutionChange` topic of the beacon API node event stream. -/// -/// The node has received a BLS to execution change (from P2P or API) -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct BlsToExecutionChangeEvent { - pub message: BlsToExecutionChangeMessage, - pub signature: Bytes, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct BlsToExecutionChangeMessage { - #[serde_as(as = "DisplayFromStr")] - pub validator_index: u64, - pub from_bls_pubkey: String, - pub to_execution_address: Address, -} - -/// Event for the `Deposit` topic of the beacon API node event stream. -/// -/// Finalized checkpoint has been updated -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct FinalizedCheckpointEvent { - pub block: B256, - pub state: B256, - #[serde_as(as = "DisplayFromStr")] - pub epoch: u64, - pub execution_optimistic: bool, -} - -/// Event for the `ChainReorg` topic of the beacon API node event stream. 
-/// -/// The node has reorganized its chain -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ChainReorgEvent { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - #[serde_as(as = "DisplayFromStr")] - pub depth: u64, - pub old_head_block: B256, - pub new_head_block: B256, - pub old_head_state: B256, - pub new_head_state: B256, - #[serde_as(as = "DisplayFromStr")] - pub epoch: u64, - pub execution_optimistic: bool, -} - -/// Event for the `ContributionAndProof` topic of the beacon API node event stream. -/// -/// The node has received a valid sync committee SignedContributionAndProof (from P2P or API) -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ContributionAndProofEvent { - pub message: ContributionAndProofMessage, - pub signature: Bytes, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ContributionAndProofMessage { - #[serde_as(as = "DisplayFromStr")] - pub aggregator_index: u64, - pub contribution: Contribution, - pub selection_proof: Bytes, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Contribution { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - pub beacon_block_root: B256, - #[serde_as(as = "DisplayFromStr")] - pub subcommittee_index: u64, - pub aggregation_bits: Bytes, - pub signature: Bytes, -} - -/// Event for the `LightClientFinalityUpdate` topic of the beacon API node event stream. -/// -/// The node's latest known `LightClientFinalityUpdate` has been updated -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct LightClientFinalityUpdateEvent { - pub version: String, - pub data: LightClientFinalityData, -} - -/// Event for the `LightClientOptimisticUpdate` topic of the beacon API node event stream. 
-/// -/// The node's latest known `LightClientOptimisticUpdate` has been updated -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct LightClientOptimisticUpdateEvent { - pub version: String, - pub data: LightClientOptimisticData, -} - -/// Event for the `BlobSidecar` topic of the beacon API node event stream. -/// -/// The node has received a BlobSidecar (from P2P or API) that passes all gossip validations on the -/// blob_sidecar_{subnet_id} topic -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct BlobSidecarEvent { - pub block_root: B256, - #[serde_as(as = "DisplayFromStr")] - pub index: u64, - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - pub kzg_commitment: Bytes, - pub versioned_hash: B256, -} - -impl PayloadAttributesEvent { - /// Returns the payload attributes - pub fn attributes(&self) -> &PayloadAttributes { - &self.data.payload_attributes - } -} - -/// Data of the event that contains the payload attributes -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct PayloadAttributesData { - /// The slot at which a block using these payload attributes may be built - #[serde_as(as = "DisplayFromStr")] - pub proposal_slot: u64, - /// the beacon block root of the parent block to be built upon. - pub parent_block_root: B256, - /// the execution block number of the parent block. - #[serde_as(as = "DisplayFromStr")] - pub parent_block_number: u64, - /// the execution block hash of the parent block. - pub parent_block_hash: B256, - /// The execution block number of the parent block. - /// the validator index of the proposer at `proposal_slot` on the chain identified by - /// `parent_block_root`. 
- #[serde_as(as = "DisplayFromStr")] - pub proposer_index: u64, - /// Beacon API encoding of `PayloadAttributesV` as defined by the `execution-apis` - /// specification - /// - /// Note: this uses the beacon API format which uses snake-case and quoted decimals rather than - /// big-endian hex. - #[serde(with = "crate::beacon::payload::beacon_api_payload_attributes")] - pub payload_attributes: PayloadAttributes, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn serde_payload_attributes_event() { - let s = r#"{"version":"capella","data":{"proposal_slot":"173332","proposer_index":"649112","parent_block_root":"0x5a49069647f6bf8f25d76b55ce920947654ade4ba1c6ab826d16712dd62b42bf","parent_block_number":"161093","parent_block_hash":"0x608b3d140ecb5bbcd0019711ac3704ece7be8e6d100816a55db440c1bcbb0251","payload_attributes":{"timestamp":"1697982384","prev_randao":"0x3142abd98055871ebf78f0f8e758fd3a04df3b6e34d12d09114f37a737f8f01e","suggested_fee_recipient":"0x0000000000000000000000000000000000000001","withdrawals":[{"index":"2461612","validator_index":"853570","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"45016211"},{"index":"2461613","validator_index":"853571","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5269785"},{"index":"2461614","validator_index":"853572","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5275106"},{"index":"2461615","validator_index":"853573","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5235962"},{"index":"2461616","validator_index":"853574","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5252171"},{"index":"2461617","validator_index":"853575","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5221319"},{"index":"2461618","validator_index":"853576","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5260879"},{"index":"2461619","validator_index":"853577","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5285244"
},{"index":"2461620","validator_index":"853578","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5266681"},{"index":"2461621","validator_index":"853579","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5271322"},{"index":"2461622","validator_index":"853580","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5231327"},{"index":"2461623","validator_index":"853581","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5276761"},{"index":"2461624","validator_index":"853582","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5246244"},{"index":"2461625","validator_index":"853583","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5261011"},{"index":"2461626","validator_index":"853584","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5276477"},{"index":"2461627","validator_index":"853585","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5275319"}]}}}"#; - - let event: PayloadAttributesEvent = - serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - #[test] - fn serde_head_event() { - let s = r#"{"slot":"10", "block":"0x9a2fefd2fdb57f74993c7780ea5b9030d2897b615b89f808011ca5aebed54eaf", "state":"0x600e852a08c1200654ddf11025f1ceacb3c2e74bdd5c630cde0838b2591b69f9", "epoch_transition":false, "previous_duty_dependent_root":"0x5e0043f107cb57913498fbf2f99ff55e730bf1e151f02f221e977c91a90a0e91", "current_duty_dependent_root":"0x5e0043f107cb57913498fbf2f99ff55e730bf1e151f02f221e977c91a90a0e91", "execution_optimistic": false}"#; - - let event: HeadEvent = serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_block_event() { - let s = r#"{"slot":"10", 
"block":"0x9a2fefd2fdb57f74993c7780ea5b9030d2897b615b89f808011ca5aebed54eaf", "execution_optimistic": false}"#; - - let event: BlockEvent = serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - #[test] - fn serde_attestation_event() { - let s = r#"{"aggregation_bits":"0x01", "signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505", "data":{"slot":"1", "index":"1", "beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "source":{"epoch":"1", "root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}, "target":{"epoch":"1", "root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}}"#; - - let event: AttestationEvent = serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_voluntary_exit_event() { - let s = r#"{"message":{"epoch":"1", "validator_index":"1"}, "signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}"#; - - let event: VoluntaryExitEvent = serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_bls_to_execution_change_event() { - let s = r#"{"message":{"validator_index":"1", "from_bls_pubkey":"0x933ad9491b62059dd065b560d256d8957a8c402cc6e8d8ee7290ae11e8f7329267a8811c397529dac52ae1342ba58c95", "to_execution_address":"0x9be8d619c56699667c1fedcd15f6b14d8b067f72"}, 
"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}"#; - - let event: BlsToExecutionChangeEvent = - serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_finalize_checkpoint_event() { - let s = r#"{"block":"0x9a2fefd2fdb57f74993c7780ea5b9030d2897b615b89f808011ca5aebed54eaf", "state":"0x600e852a08c1200654ddf11025f1ceacb3c2e74bdd5c630cde0838b2591b69f9", "epoch":"2", "execution_optimistic": false }"#; - - let event: FinalizedCheckpointEvent = - serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_chain_reorg_event() { - let s = r#"{"slot":"200", "depth":"50", "old_head_block":"0x9a2fefd2fdb57f74993c7780ea5b9030d2897b615b89f808011ca5aebed54eaf", "new_head_block":"0x76262e91970d375a19bfe8a867288d7b9cde43c8635f598d93d39d041706fc76", "old_head_state":"0x9a2fefd2fdb57f74993c7780ea5b9030d2897b615b89f808011ca5aebed54eaf", "new_head_state":"0x600e852a08c1200654ddf11025f1ceacb3c2e74bdd5c630cde0838b2591b69f9", "epoch":"2", "execution_optimistic": false}"#; - - let event: ChainReorgEvent = serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_contribution_and_proof_event() { - let s = r#"{"message": {"aggregator_index": "997", "contribution": {"slot": "168097", "beacon_block_root": "0x56f1fd4262c08fa81e27621c370e187e621a67fc80fe42340b07519f84b42ea1", "subcommittee_index": "0", "aggregation_bits": "0xffffffffffffffffffffffffffffffff", "signature": 
"0x85ab9018e14963026476fdf784cc674da144b3dbdb47516185438768774f077d882087b90ad642469902e782a8b43eed0cfc1b862aa9a473b54c98d860424a702297b4b648f3f30bdaae8a8b7627d10d04cb96a2cc8376af3e54a9aa0c8145e3"}, "selection_proof": "0x87c305f04bfe5db27c2b19fc23e00d7ac496ec7d3e759cbfdd1035cb8cf6caaa17a36a95a08ba78c282725e7b66a76820ca4eb333822bd399ceeb9807a0f2926c67ce67cfe06a0b0006838203b493505a8457eb79913ce1a3bcd1cc8e4ef30ed"}, "signature": "0xac118511474a94f857300b315c50585c32a713e4452e26a6bb98cdb619936370f126ed3b6bb64469259ee92e69791d9e12d324ce6fd90081680ce72f39d85d50b0ff977260a8667465e613362c6d6e6e745e1f9323ec1d6f16041c4e358839ac"}"#; - - let event: ContributionAndProofEvent = - serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_light_client_finality_update_event() { - let s = r#"{"version":"phase0", "data": {"attested_header": {"beacon": {"slot":"1", "proposer_index":"1", "parent_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "body_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}, "finalized_header": {"beacon": {"slot":"1", "proposer_index":"1", "parent_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "body_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}, "finality_branch": ["0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", 
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"], "sync_aggregate": {"sync_committee_bits":"0x01", "sync_committee_signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}, "signature_slot":"1"}}"#; - - let event: LightClientFinalityUpdateEvent = - serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - #[test] - fn serde_light_client_optimistic_update_event() { - let s = r#"{"version":"phase0", "data": {"attested_header": {"beacon": {"slot":"1", "proposer_index":"1", "parent_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "body_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}, "sync_aggregate": {"sync_committee_bits":"0x01", "sync_committee_signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}, "signature_slot":"1"}}"#; - - let event: LightClientOptimisticUpdateEvent = - serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_blob_sidecar_event() { - let s = r#"{"block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "index": "1", "slot": "1", "kzg_commitment": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505", "versioned_hash": 
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}"#; - - let event: BlobSidecarEvent = serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } -} diff --git a/crates/rpc/rpc-types/src/beacon/header.rs b/crates/rpc/rpc-types/src/beacon/header.rs deleted file mode 100644 index 9843d33512117..0000000000000 --- a/crates/rpc/rpc-types/src/beacon/header.rs +++ /dev/null @@ -1,125 +0,0 @@ -//! Beacon block header types. -//! -//! See also - -use alloy_primitives::{Bytes, B256}; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DisplayFromStr}; - -/// The response to a request for beacon block headers: `getBlockHeaders` -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct HeadersResponse { - /// True if the response references an unverified execution payload. Optimistic information may - /// be invalidated at a later time. If the field is not present, assume the False value. - pub execution_optimistic: bool, - /// True if the response references the finalized history of the chain, as determined by fork - /// choice. If the field is not present, additional calls are necessary to compare the epoch of - /// the requested information with the finalized checkpoint. - pub finalized: bool, - /// Container for the header data. - pub data: Vec, -} - -/// The response to a request for a __single__ beacon block header: `headers/{id}` -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct HeaderResponse { - /// True if the response references an unverified execution payload. Optimistic information may - /// be invalidated at a later time. If the field is not present, assume the False value. - pub execution_optimistic: bool, - /// True if the response references the finalized history of the chain, as determined by fork - /// choice. 
If the field is not present, additional calls are necessary to compare the epoch of - /// the requested information with the finalized checkpoint. - pub finalized: bool, - /// Container for the header data. - pub data: HeaderData, -} - -/// Container type for a beacon block header. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct HeaderData { - /// root hash of the block - pub root: B256, - /// Whether the block is part of the canonical chain - pub canonical: bool, - /// The `SignedBeaconBlockHeader` object envelope from the CL spec. - pub header: Header, -} - -/// [BeaconBlockHeader] with a signature. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Header { - /// The `BeaconBlockHeader` object from the CL spec. - pub message: BeaconBlockHeader, - pub signature: Bytes, -} - -/// The header of a beacon block. -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct BeaconBlockHeader { - /// The slot to which this block corresponds. - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - /// Index of validator in validator registry. - #[serde_as(as = "DisplayFromStr")] - pub proposer_index: u64, - /// The signing merkle root of the parent BeaconBlock. - pub parent_root: B256, - /// The tree hash merkle root of the BeaconState for the BeaconBlock. 
- pub state_root: B256, - /// The tree hash merkle root of the BeaconBlockBody for the BeaconBlock - pub body_root: B256, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn serde_headers_response() { - let s = r#"{ - "execution_optimistic": false, - "finalized": false, - "data": [ - { - "root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", - "canonical": true, - "header": { - "message": { - "slot": "1", - "proposer_index": "1", - "parent_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", - "state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", - "body_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2" - }, - "signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" - } - } - ] -}"#; - let _header_response: HeadersResponse = serde_json::from_str(s).unwrap(); - } - - #[test] - fn serde_header_response() { - let s = r#"{ - "execution_optimistic": false, - "finalized": false, - "data": { - "root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", - "canonical": true, - "header": { - "message": { - "slot": "1", - "proposer_index": "1", - "parent_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", - "state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", - "body_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2" - }, - "signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" - } - } -}"#; - let _header_response: HeaderResponse = serde_json::from_str(s).unwrap(); - } -} diff --git a/crates/rpc/rpc-types/src/beacon/mod.rs b/crates/rpc/rpc-types/src/beacon/mod.rs 
deleted file mode 100644 index 1184d2e43b873..0000000000000 --- a/crates/rpc/rpc-types/src/beacon/mod.rs +++ /dev/null @@ -1,19 +0,0 @@ -//! Types for the Ethereum 2.0 RPC protocol (beacon chain). - -#![allow(missing_docs)] - -use alloy_primitives::FixedBytes; -use constants::{BLS_PUBLIC_KEY_BYTES_LEN, BLS_SIGNATURE_BYTES_LEN}; - -pub mod constants; -/// Beacon API events support. -pub mod events; -pub mod header; -pub mod payload; -pub mod withdrawals; - -/// BLS signature type -pub type BlsSignature = FixedBytes; - -/// BLS public key type -pub type BlsPublicKey = FixedBytes; diff --git a/crates/rpc/rpc-types/src/beacon/payload.rs b/crates/rpc/rpc-types/src/beacon/payload.rs deleted file mode 100644 index 2bc4cde781b64..0000000000000 --- a/crates/rpc/rpc-types/src/beacon/payload.rs +++ /dev/null @@ -1,569 +0,0 @@ -//! Payload support for the beacon API. -//! -//! Internal helper module to deserialize/serialize the payload attributes for the beacon API, which -//! uses snake case and quoted decimals. -//! -//! This is necessary because we don't want to allow a mixture of both formats, hence `serde` -//! aliases are not an option. -//! -//! 
See also - -#![allow(missing_docs)] - -use crate::{ - beacon::{withdrawals::BeaconWithdrawal, BlsPublicKey}, - engine::{ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3}, - Withdrawal, -}; -use alloy_primitives::{Address, Bloom, Bytes, B256, U256}; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use serde_with::{serde_as, DeserializeAs, DisplayFromStr, SerializeAs}; -use std::borrow::Cow; - -/// Response object of GET `/eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}` -/// -/// See also -#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct GetExecutionPayloadHeaderResponse { - pub version: String, - pub data: ExecutionPayloadHeaderData, -} - -#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ExecutionPayloadHeaderData { - pub message: ExecutionPayloadHeaderMessage, - pub signature: Bytes, -} - -#[serde_as] -#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ExecutionPayloadHeaderMessage { - pub header: ExecutionPayloadHeader, - #[serde_as(as = "DisplayFromStr")] - pub value: U256, - pub pubkey: BlsPublicKey, -} - -/// The header of the execution payload. 
-#[serde_as] -#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ExecutionPayloadHeader { - pub parent_hash: B256, - pub fee_recipient: Address, - pub state_root: B256, - pub receipts_root: B256, - pub logs_bloom: Bloom, - pub prev_randao: B256, - #[serde_as(as = "DisplayFromStr")] - pub block_number: String, - #[serde_as(as = "DisplayFromStr")] - pub gas_limit: u64, - #[serde_as(as = "DisplayFromStr")] - pub gas_used: u64, - #[serde_as(as = "DisplayFromStr")] - pub timestamp: u64, - pub extra_data: Bytes, - #[serde_as(as = "DisplayFromStr")] - pub base_fee_per_gas: U256, - pub block_hash: B256, - pub transactions_root: B256, -} - -#[serde_as] -#[derive(Serialize, Deserialize)] -struct BeaconPayloadAttributes { - #[serde_as(as = "DisplayFromStr")] - timestamp: u64, - prev_randao: B256, - suggested_fee_recipient: Address, - #[serde(skip_serializing_if = "Option::is_none")] - #[serde_as(as = "Option>")] - withdrawals: Option>, - #[serde(skip_serializing_if = "Option::is_none")] - parent_beacon_block_root: Option, -} - -/// Optimism Payload Attributes -#[serde_as] -#[derive(Serialize, Deserialize)] -struct BeaconOptimismPayloadAttributes { - #[serde(flatten)] - payload_attributes: BeaconPayloadAttributes, - #[serde(default, skip_serializing_if = "Option::is_none")] - transactions: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - no_tx_pool: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - #[serde_as(as = "Option")] - gas_limit: Option, -} - -/// A helper module for serializing and deserializing optimism payload attributes for the beacon -/// API. -/// -/// See docs for [beacon_api_payload_attributes]. -pub mod beacon_api_payload_attributes_optimism { - use super::*; - use crate::engine::{OptimismPayloadAttributes, PayloadAttributes}; - - /// Serialize the payload attributes for the beacon API. 
- pub fn serialize( - payload_attributes: &OptimismPayloadAttributes, - serializer: S, - ) -> Result - where - S: Serializer, - { - let beacon_api_payload_attributes = BeaconPayloadAttributes { - timestamp: payload_attributes.payload_attributes.timestamp, - prev_randao: payload_attributes.payload_attributes.prev_randao, - suggested_fee_recipient: payload_attributes.payload_attributes.suggested_fee_recipient, - withdrawals: payload_attributes.payload_attributes.withdrawals.clone(), - parent_beacon_block_root: payload_attributes - .payload_attributes - .parent_beacon_block_root, - }; - - let op_beacon_api_payload_attributes = BeaconOptimismPayloadAttributes { - payload_attributes: beacon_api_payload_attributes, - transactions: payload_attributes.transactions.clone(), - no_tx_pool: payload_attributes.no_tx_pool, - gas_limit: payload_attributes.gas_limit, - }; - - op_beacon_api_payload_attributes.serialize(serializer) - } - - /// Deserialize the payload attributes for the beacon API. - pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let beacon_api_payload_attributes = - BeaconOptimismPayloadAttributes::deserialize(deserializer)?; - Ok(OptimismPayloadAttributes { - payload_attributes: PayloadAttributes { - timestamp: beacon_api_payload_attributes.payload_attributes.timestamp, - prev_randao: beacon_api_payload_attributes.payload_attributes.prev_randao, - suggested_fee_recipient: beacon_api_payload_attributes - .payload_attributes - .suggested_fee_recipient, - withdrawals: beacon_api_payload_attributes.payload_attributes.withdrawals, - parent_beacon_block_root: beacon_api_payload_attributes - .payload_attributes - .parent_beacon_block_root, - }, - transactions: beacon_api_payload_attributes.transactions, - no_tx_pool: beacon_api_payload_attributes.no_tx_pool, - gas_limit: beacon_api_payload_attributes.gas_limit, - }) - } -} - -/// A helper module for serializing and deserializing the payload attributes for the beacon API. 
-/// -/// The beacon API encoded object has equivalent fields to the -/// [PayloadAttributes](crate::engine::PayloadAttributes) with two differences: -/// 1) `snake_case` identifiers must be used rather than `camelCase`; -/// 2) integers must be encoded as quoted decimals rather than big-endian hex. -pub mod beacon_api_payload_attributes { - use super::*; - use crate::engine::PayloadAttributes; - - /// Serialize the payload attributes for the beacon API. - pub fn serialize( - payload_attributes: &PayloadAttributes, - serializer: S, - ) -> Result - where - S: Serializer, - { - let beacon_api_payload_attributes = BeaconPayloadAttributes { - timestamp: payload_attributes.timestamp, - prev_randao: payload_attributes.prev_randao, - suggested_fee_recipient: payload_attributes.suggested_fee_recipient, - withdrawals: payload_attributes.withdrawals.clone(), - parent_beacon_block_root: payload_attributes.parent_beacon_block_root, - }; - beacon_api_payload_attributes.serialize(serializer) - } - - /// Deserialize the payload attributes for the beacon API. 
- pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let beacon_api_payload_attributes = BeaconPayloadAttributes::deserialize(deserializer)?; - Ok(PayloadAttributes { - timestamp: beacon_api_payload_attributes.timestamp, - prev_randao: beacon_api_payload_attributes.prev_randao, - suggested_fee_recipient: beacon_api_payload_attributes.suggested_fee_recipient, - withdrawals: beacon_api_payload_attributes.withdrawals, - parent_beacon_block_root: beacon_api_payload_attributes.parent_beacon_block_root, - }) - } -} - -#[serde_as] -#[derive(Debug, Serialize, Deserialize)] -struct BeaconExecutionPayloadV1<'a> { - parent_hash: Cow<'a, B256>, - fee_recipient: Cow<'a, Address>, - state_root: Cow<'a, B256>, - receipts_root: Cow<'a, B256>, - logs_bloom: Cow<'a, Bloom>, - prev_randao: Cow<'a, B256>, - #[serde_as(as = "DisplayFromStr")] - block_number: u64, - #[serde_as(as = "DisplayFromStr")] - gas_limit: u64, - #[serde_as(as = "DisplayFromStr")] - gas_used: u64, - #[serde_as(as = "DisplayFromStr")] - timestamp: u64, - extra_data: Cow<'a, Bytes>, - #[serde_as(as = "DisplayFromStr")] - base_fee_per_gas: U256, - block_hash: Cow<'a, B256>, - transactions: Cow<'a, Vec>, -} - -impl<'a> From> for ExecutionPayloadV1 { - fn from(payload: BeaconExecutionPayloadV1<'a>) -> Self { - let BeaconExecutionPayloadV1 { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions, - } = payload; - ExecutionPayloadV1 { - parent_hash: parent_hash.into_owned(), - fee_recipient: fee_recipient.into_owned(), - state_root: state_root.into_owned(), - receipts_root: receipts_root.into_owned(), - logs_bloom: logs_bloom.into_owned(), - prev_randao: prev_randao.into_owned(), - block_number, - gas_limit, - gas_used, - timestamp, - extra_data: extra_data.into_owned(), - base_fee_per_gas, - block_hash: block_hash.into_owned(), 
- transactions: transactions.into_owned(), - } - } -} - -impl<'a> From<&'a ExecutionPayloadV1> for BeaconExecutionPayloadV1<'a> { - fn from(value: &'a ExecutionPayloadV1) -> Self { - let ExecutionPayloadV1 { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions, - } = value; - - BeaconExecutionPayloadV1 { - parent_hash: Cow::Borrowed(parent_hash), - fee_recipient: Cow::Borrowed(fee_recipient), - state_root: Cow::Borrowed(state_root), - receipts_root: Cow::Borrowed(receipts_root), - logs_bloom: Cow::Borrowed(logs_bloom), - prev_randao: Cow::Borrowed(prev_randao), - block_number: *block_number, - gas_limit: *gas_limit, - gas_used: *gas_used, - timestamp: *timestamp, - extra_data: Cow::Borrowed(extra_data), - base_fee_per_gas: *base_fee_per_gas, - block_hash: Cow::Borrowed(block_hash), - transactions: Cow::Borrowed(transactions), - } - } -} - -/// A helper serde module to convert from/to the Beacon API which uses quoted decimals rather than -/// big-endian hex. -pub mod beacon_payload_v1 { - use super::*; - - /// Serialize the payload attributes for the beacon API. - pub fn serialize( - payload_attributes: &ExecutionPayloadV1, - serializer: S, - ) -> Result - where - S: Serializer, - { - BeaconExecutionPayloadV1::from(payload_attributes).serialize(serializer) - } - - /// Deserialize the payload attributes for the beacon API. 
- pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - BeaconExecutionPayloadV1::deserialize(deserializer).map(Into::into) - } -} - -#[serde_as] -#[derive(Debug, Serialize, Deserialize)] -struct BeaconExecutionPayloadV2<'a> { - /// Inner V1 payload - #[serde(flatten)] - payload_inner: BeaconExecutionPayloadV1<'a>, - /// Array of [`Withdrawal`] enabled with V2 - /// See - #[serde_as(as = "Vec")] - withdrawals: Vec, -} - -impl<'a> From> for ExecutionPayloadV2 { - fn from(payload: BeaconExecutionPayloadV2<'a>) -> Self { - let BeaconExecutionPayloadV2 { payload_inner, withdrawals } = payload; - ExecutionPayloadV2 { payload_inner: payload_inner.into(), withdrawals } - } -} - -impl<'a> From<&'a ExecutionPayloadV2> for BeaconExecutionPayloadV2<'a> { - fn from(value: &'a ExecutionPayloadV2) -> Self { - let ExecutionPayloadV2 { payload_inner, withdrawals } = value; - BeaconExecutionPayloadV2 { - payload_inner: payload_inner.into(), - withdrawals: withdrawals.clone(), - } - } -} - -/// A helper serde module to convert from/to the Beacon API which uses quoted decimals rather than -/// big-endian hex. -pub mod beacon_payload_v2 { - use super::*; - - /// Serialize the payload attributes for the beacon API. - pub fn serialize( - payload_attributes: &ExecutionPayloadV2, - serializer: S, - ) -> Result - where - S: Serializer, - { - BeaconExecutionPayloadV2::from(payload_attributes).serialize(serializer) - } - - /// Deserialize the payload attributes for the beacon API. 
- pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - BeaconExecutionPayloadV2::deserialize(deserializer).map(Into::into) - } -} - -#[serde_as] -#[derive(Debug, Serialize, Deserialize)] -struct BeaconExecutionPayloadV3<'a> { - /// Inner V1 payload - #[serde(flatten)] - payload_inner: BeaconExecutionPayloadV2<'a>, - #[serde_as(as = "DisplayFromStr")] - blob_gas_used: u64, - #[serde_as(as = "DisplayFromStr")] - excess_blob_gas: u64, -} - -impl<'a> From> for ExecutionPayloadV3 { - fn from(payload: BeaconExecutionPayloadV3<'a>) -> Self { - let BeaconExecutionPayloadV3 { payload_inner, blob_gas_used, excess_blob_gas } = payload; - ExecutionPayloadV3 { payload_inner: payload_inner.into(), blob_gas_used, excess_blob_gas } - } -} - -impl<'a> From<&'a ExecutionPayloadV3> for BeaconExecutionPayloadV3<'a> { - fn from(value: &'a ExecutionPayloadV3) -> Self { - let ExecutionPayloadV3 { payload_inner, blob_gas_used, excess_blob_gas } = value; - BeaconExecutionPayloadV3 { - payload_inner: payload_inner.into(), - blob_gas_used: *blob_gas_used, - excess_blob_gas: *excess_blob_gas, - } - } -} - -/// A helper serde module to convert from/to the Beacon API which uses quoted decimals rather than -/// big-endian hex. -pub mod beacon_payload_v3 { - use super::*; - - /// Serialize the payload attributes for the beacon API. - pub fn serialize( - payload_attributes: &ExecutionPayloadV3, - serializer: S, - ) -> Result - where - S: Serializer, - { - BeaconExecutionPayloadV3::from(payload_attributes).serialize(serializer) - } - - /// Deserialize the payload attributes for the beacon API. - pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - BeaconExecutionPayloadV3::deserialize(deserializer).map(Into::into) - } -} - -/// Represents all possible payload versions. 
-#[derive(Debug, Serialize)] -#[serde(untagged)] -enum BeaconExecutionPayload<'a> { - /// V1 payload - V1(BeaconExecutionPayloadV1<'a>), - /// V2 payload - V2(BeaconExecutionPayloadV2<'a>), - /// V3 payload - V3(BeaconExecutionPayloadV3<'a>), -} - -// Deserializes untagged ExecutionPayload by trying each variant in falling order -impl<'de> Deserialize<'de> for BeaconExecutionPayload<'de> { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - #[derive(Deserialize)] - #[serde(untagged)] - enum BeaconExecutionPayloadDesc<'a> { - V3(BeaconExecutionPayloadV3<'a>), - V2(BeaconExecutionPayloadV2<'a>), - V1(BeaconExecutionPayloadV1<'a>), - } - match BeaconExecutionPayloadDesc::deserialize(deserializer)? { - BeaconExecutionPayloadDesc::V3(payload) => Ok(Self::V3(payload)), - BeaconExecutionPayloadDesc::V2(payload) => Ok(Self::V2(payload)), - BeaconExecutionPayloadDesc::V1(payload) => Ok(Self::V1(payload)), - } - } -} - -impl<'a> From> for ExecutionPayload { - fn from(payload: BeaconExecutionPayload<'a>) -> Self { - match payload { - BeaconExecutionPayload::V1(payload) => { - ExecutionPayload::V1(ExecutionPayloadV1::from(payload)) - } - BeaconExecutionPayload::V2(payload) => { - ExecutionPayload::V2(ExecutionPayloadV2::from(payload)) - } - BeaconExecutionPayload::V3(payload) => { - ExecutionPayload::V3(ExecutionPayloadV3::from(payload)) - } - } - } -} - -impl<'a> From<&'a ExecutionPayload> for BeaconExecutionPayload<'a> { - fn from(value: &'a ExecutionPayload) -> Self { - match value { - ExecutionPayload::V1(payload) => { - BeaconExecutionPayload::V1(BeaconExecutionPayloadV1::from(payload)) - } - ExecutionPayload::V2(payload) => { - BeaconExecutionPayload::V2(BeaconExecutionPayloadV2::from(payload)) - } - ExecutionPayload::V3(payload) => { - BeaconExecutionPayload::V3(BeaconExecutionPayloadV3::from(payload)) - } - ExecutionPayload::V4(_payload) => { - // TODO(onbjerg): Implement `ExecutionPayloadV4` support - todo!() - } - } - } -} - -impl<'a> 
SerializeAs for BeaconExecutionPayload<'a> { - fn serialize_as(source: &ExecutionPayload, serializer: S) -> Result - where - S: Serializer, - { - beacon_payload::serialize(source, serializer) - } -} - -impl<'de> DeserializeAs<'de, ExecutionPayload> for BeaconExecutionPayload<'de> { - fn deserialize_as(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - beacon_payload::deserialize(deserializer) - } -} - -pub mod beacon_payload { - use super::*; - - /// Serialize the payload attributes for the beacon API. - pub fn serialize( - payload_attributes: &ExecutionPayload, - serializer: S, - ) -> Result - where - S: Serializer, - { - BeaconExecutionPayload::from(payload_attributes).serialize(serializer) - } - - /// Deserialize the payload attributes for the beacon API. - pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - BeaconExecutionPayload::deserialize(deserializer).map(Into::into) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn serde_get_payload_header_response() { - let s = 
r#"{"version":"bellatrix","data":{"message":{"header":{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"1","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"value":"1","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}}"#; - let resp: GetExecutionPayloadHeaderResponse = serde_json::from_str(s).unwrap(); - let json: serde_json::Value = serde_json::from_str(s).unwrap(); - assert_eq!(json, serde_json::to_value(resp).unwrap()); - } - - #[test] - fn serde_payload_header() { - let s = 
r#"{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"1","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}"#; - let header: ExecutionPayloadHeader = serde_json::from_str(s).unwrap(); - let json: serde_json::Value = serde_json::from_str(s).unwrap(); - assert_eq!(json, serde_json::to_value(header).unwrap()); - } -} diff --git a/crates/rpc/rpc-types/src/beacon/withdrawals.rs b/crates/rpc/rpc-types/src/beacon/withdrawals.rs deleted file mode 100644 index ea2930c5fc4dc..0000000000000 --- a/crates/rpc/rpc-types/src/beacon/withdrawals.rs +++ /dev/null @@ -1,70 +0,0 @@ -use crate::Withdrawal; -use alloy_primitives::Address; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use serde_with::{serde_as, DeserializeAs, DisplayFromStr, SerializeAs}; - -/// Same as [Withdrawal] but respects the Beacon API format which uses 
snake-case and quoted -/// decimals. -#[serde_as] -#[derive(Serialize, Deserialize, Clone)] -pub(crate) struct BeaconWithdrawal { - #[serde_as(as = "DisplayFromStr")] - index: u64, - #[serde_as(as = "DisplayFromStr")] - validator_index: u64, - address: Address, - #[serde_as(as = "DisplayFromStr")] - amount: u64, -} - -impl SerializeAs for BeaconWithdrawal { - fn serialize_as(source: &Withdrawal, serializer: S) -> Result - where - S: Serializer, - { - beacon_withdrawals::serialize(source, serializer) - } -} - -impl<'de> DeserializeAs<'de, Withdrawal> for BeaconWithdrawal { - fn deserialize_as(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - beacon_withdrawals::deserialize(deserializer) - } -} - -/// A helper serde module to convert from/to the Beacon API which uses quoted decimals rather than -/// big-endian hex. -pub mod beacon_withdrawals { - use super::*; - - /// Serialize the payload attributes for the beacon API. - pub fn serialize(payload_attributes: &Withdrawal, serializer: S) -> Result - where - S: Serializer, - { - let withdrawal = BeaconWithdrawal { - index: payload_attributes.index, - validator_index: payload_attributes.validator_index, - address: payload_attributes.address, - amount: payload_attributes.amount, - }; - withdrawal.serialize(serializer) - } - - /// Deserialize the payload attributes for the beacon API. 
- pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let withdrawal = BeaconWithdrawal::deserialize(deserializer)?; - Ok(Withdrawal { - index: withdrawal.index, - validator_index: withdrawal.validator_index, - address: withdrawal.address, - amount: withdrawal.amount, - }) - } -} diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs index 01ed0f911097f..5966a9b72c6ea 100644 --- a/crates/rpc/rpc-types/src/lib.rs +++ b/crates/rpc/rpc-types/src/lib.rs @@ -10,7 +10,6 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -pub mod beacon; mod eth; mod mev; mod net; diff --git a/crates/rpc/rpc-types/src/relay/mod.rs b/crates/rpc/rpc-types/src/relay/mod.rs index 8fed94b79bfeb..35daa1b79b092 100644 --- a/crates/rpc/rpc-types/src/relay/mod.rs +++ b/crates/rpc/rpc-types/src/relay/mod.rs @@ -1,12 +1,10 @@ //! Relay API bindings: -use crate::{ - beacon::{BlsPublicKey, BlsSignature}, - engine::{ - BlobsBundleV1, ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, - }, +use crate::engine::{ + BlobsBundleV1, ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, }; use alloy_primitives::{Address, B256, U256}; +use alloy_rpc_types_beacon::beacon::{BlsPublicKey, BlsSignature}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, DisplayFromStr}; @@ -104,7 +102,7 @@ pub struct SignedBidSubmissionV1 { /// The BidTrace message associated with the submission. pub message: BidTrace, /// The execution payload for the submission. - #[serde(with = "crate::beacon::payload::beacon_payload_v1")] + #[serde(with = "alloy_rpc_types_beacon::beacon::payload::beacon_payload_v1")] pub execution_payload: ExecutionPayloadV1, /// The signature associated with the submission. pub signature: BlsSignature, @@ -118,7 +116,7 @@ pub struct SignedBidSubmissionV2 { /// The BidTrace message associated with the submission. 
pub message: BidTrace, /// The execution payload for the submission. - #[serde(with = "crate::beacon::payload::beacon_payload_v2")] + #[serde(with = "alloy_rpc_types_beacon::beacon::payload::beacon_payload_v2")] pub execution_payload: ExecutionPayloadV2, /// The signature associated with the submission. pub signature: BlsSignature, @@ -132,7 +130,7 @@ pub struct SignedBidSubmissionV3 { /// The BidTrace message associated with the submission. pub message: BidTrace, /// The execution payload for the submission. - #[serde(with = "crate::beacon::payload::beacon_payload_v3")] + #[serde(with = "alloy_rpc_types_beacon::beacon::payload::beacon_payload_v3")] pub execution_payload: ExecutionPayloadV3, /// The Deneb block bundle for this bid. pub blobs_bundle: BlobsBundleV1, @@ -146,7 +144,7 @@ pub struct SubmitBlockRequest { /// The BidTrace message associated with the block submission. pub message: BidTrace, /// The execution payload for the block submission. - #[serde(with = "crate::beacon::payload::beacon_payload")] + #[serde(with = "alloy_rpc_types_beacon::beacon::payload::beacon_payload")] pub execution_payload: ExecutionPayload, /// The signature associated with the block submission. 
pub signature: BlsSignature, diff --git a/examples/beacon-api-sse/Cargo.toml b/examples/beacon-api-sse/Cargo.toml index 87a882c6cc2aa..4582f2598cae8 100644 --- a/examples/beacon-api-sse/Cargo.toml +++ b/examples/beacon-api-sse/Cargo.toml @@ -8,6 +8,7 @@ license.workspace = true [dependencies] reth.workspace = true reth-node-ethereum.workspace = true +alloy-rpc-types-beacon.workspace = true clap.workspace = true tracing.workspace = true diff --git a/examples/beacon-api-sse/src/main.rs b/examples/beacon-api-sse/src/main.rs index 38dada132e432..0cd4d4e78dd78 100644 --- a/examples/beacon-api-sse/src/main.rs +++ b/examples/beacon-api-sse/src/main.rs @@ -17,10 +17,11 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use alloy_rpc_types_beacon::beacon::events::PayloadAttributesEvent; use clap::Parser; use futures_util::stream::StreamExt; use mev_share_sse::{client::EventStream, EventClient}; -use reth::{cli::Cli, rpc::types::beacon::events::PayloadAttributesEvent}; +use reth::cli::Cli; use reth_node_ethereum::EthereumNode; use std::net::{IpAddr, Ipv4Addr}; use tracing::{info, warn}; From 5b4e10cbdcb66dd4b6a20175c31f9b93659774f0 Mon Sep 17 00:00:00 2001 From: guha-rahul <52607971+guha-rahul@users.noreply.github.com> Date: Tue, 7 May 2024 15:56:59 +0530 Subject: [PATCH 494/700] Add helpful text for tx pool flags in reth --help (#8134) --- crates/node-core/src/args/network.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/node-core/src/args/network.rs b/crates/node-core/src/args/network.rs index 7b1b9d0d586f7..9ff93c5a9d233 100644 --- a/crates/node-core/src/args/network.rs +++ b/crates/node-core/src/args/network.rs @@ -93,7 +93,7 @@ pub struct NetworkArgs { /// `GetPooledTransactions` request. Spec'd at 2 MiB. /// /// . - #[arg(long = "pooled-tx-response-soft-limit", value_name = "BYTES", default_value_t = SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, help = "Sets the soft limit for the byte size of pooled transactions response. 
Specified at 2 MiB by default. This is a spec'd value that should only be set for experimental purposes on a testnet.")] + #[arg(long = "pooled-tx-response-soft-limit", value_name = "BYTES", default_value_t = SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, help = "Sets the soft limit for the byte size of pooled transactions response. Specified at 2 MiB by default. This is a spec'd value that should only be set for experimental purposes on a testnet.",long_help = None)] pub soft_limit_byte_size_pooled_transactions_response: usize, /// Default soft limit for the byte size of a `PooledTransactions` response on assembling a @@ -101,7 +101,7 @@ pub struct NetworkArgs { /// than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when /// assembling a `PooledTransactions` response. Default /// is 128 KiB. - #[arg(long = "pooled-tx-pack-soft-limit", value_name = "BYTES", default_value_t = DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ)] + #[arg(long = "pooled-tx-pack-soft-limit", value_name = "BYTES", default_value_t = DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ,help = "Sets the soft limit for the byte size of a single pooled transactions response when packing multiple responses into a single packet for a `GetPooledTransactions` request. 
Specified at 128 Kib by default.",long_help = None)] pub soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize, } From cbc6f268c0316cacc68b407d18d7e697399b700e Mon Sep 17 00:00:00 2001 From: guha-rahul <52607971+guha-rahul@users.noreply.github.com> Date: Tue, 7 May 2024 18:08:23 +0530 Subject: [PATCH 495/700] replace reth BlobTransactionSidecar with alloy's (#8135) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 - .../src/commands/debug_cmd/build_block.rs | 2 +- crates/primitives/Cargo.toml | 3 +- crates/primitives/benches/validate_blob_tx.rs | 14 +- crates/primitives/src/eip4844.rs | 16 +- crates/primitives/src/lib.rs | 9 +- crates/primitives/src/transaction/eip4844.rs | 57 +--- crates/primitives/src/transaction/mod.rs | 7 +- crates/primitives/src/transaction/pooled.rs | 2 - crates/primitives/src/transaction/sidecar.rs | 272 ++---------------- crates/transaction-pool/src/blobstore/disk.rs | 15 +- .../transaction-pool/src/test_utils/mock.rs | 7 +- examples/exex/rollup/src/execution.rs | 7 +- 13 files changed, 60 insertions(+), 352 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 866a3816736e8..2ee45d88ac80d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7583,7 +7583,6 @@ dependencies = [ "secp256k1", "serde", "serde_json", - "sha2 0.10.8", "strum", "sucds", "tempfile", diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 22361aada5612..dd0bfa09209a3 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -213,7 +213,7 @@ impl Command { ))?; let sidecar: BlobTransactionSidecar = - blobs_bundle.pop_sidecar(blob_versioned_hashes.len()).into(); + blobs_bundle.pop_sidecar(blob_versioned_hashes.len()); // first construct the tx, calculating the length of the tx with sidecar before // insertion diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 675c7167f6e0c..f4be57f9c65aa 100644 --- 
a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -45,7 +45,6 @@ once_cell.workspace = true rayon.workspace = true serde.workspace = true serde_json.workspace = true -sha2 = { version = "0.10.7", optional = true } tempfile = { workspace = true, optional = true } thiserror.workspace = true zstd = { version = "0.13", features = ["experimental"], optional = true } @@ -105,7 +104,7 @@ arbitrary = [ "dep:proptest-derive", "zstd-codec", ] -c-kzg = ["dep:c-kzg", "revm/c-kzg", "revm-primitives/c-kzg", "dep:sha2", "dep:tempfile"] +c-kzg = ["dep:c-kzg", "revm/c-kzg", "revm-primitives/c-kzg", "dep:tempfile", "alloy-eips/kzg"] zstd-codec = ["dep:zstd"] clap = ["dep:clap"] optimism = [ diff --git a/crates/primitives/benches/validate_blob_tx.rs b/crates/primitives/benches/validate_blob_tx.rs index 0bc2f04c6913c..ec62353fb688d 100644 --- a/crates/primitives/benches/validate_blob_tx.rs +++ b/crates/primitives/benches/validate_blob_tx.rs @@ -1,6 +1,7 @@ #![allow(missing_docs)] + use alloy_primitives::hex; -use c_kzg::{KzgCommitment, KzgSettings}; +use c_kzg::KzgSettings; use criterion::{ criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, }; @@ -10,8 +11,7 @@ use proptest::{ test_runner::{RngAlgorithm, TestRng, TestRunner}, }; use reth_primitives::{ - constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, eip4844::kzg_to_versioned_hash, - BlobTransactionSidecar, TxEip4844, + constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, BlobTransactionSidecar, TxEip4844, }; use revm_primitives::MAX_BLOB_NUMBER_PER_BLOCK; use std::sync::Arc; @@ -62,13 +62,7 @@ fn validate_blob_tx( } } - tx.blob_versioned_hashes = blob_sidecar - .commitments - .iter() - .map(|commitment| { - kzg_to_versioned_hash(KzgCommitment::from_bytes(&commitment.into_inner()).unwrap()) - }) - .collect(); + tx.blob_versioned_hashes = blob_sidecar.versioned_hashes().collect(); (tx, blob_sidecar) }; diff --git a/crates/primitives/src/eip4844.rs b/crates/primitives/src/eip4844.rs index 
4f65cc7ee0632..0d228528f78b3 100644 --- a/crates/primitives/src/eip4844.rs +++ b/crates/primitives/src/eip4844.rs @@ -1,21 +1,9 @@ //! Helpers for working with EIP-4844 blob fee. -#[cfg(feature = "c-kzg")] -use crate::{constants::eip4844::VERSIONED_HASH_VERSION_KZG, B256}; -#[cfg(feature = "c-kzg")] -use sha2::{Digest, Sha256}; - // re-exports from revm for calculating blob fee pub use crate::revm_primitives::{ calc_blob_gasprice, calc_excess_blob_gas as calculate_excess_blob_gas, }; -/// Calculates the versioned hash for a KzgCommitment -/// -/// Specified in [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#header-extension) -#[cfg(feature = "c-kzg")] -pub fn kzg_to_versioned_hash(commitment: c_kzg::KzgCommitment) -> B256 { - let mut res = Sha256::digest(commitment.as_slice()); - res[0] = VERSIONED_HASH_VERSION_KZG; - B256::new(res.into()) -} +#[doc(inline)] +pub use alloy_eips::eip4844::kzg_to_versioned_hash; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 3473ef82e0e3f..2cd71ae20b29d 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -85,13 +85,14 @@ pub use receipt::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts}; pub use static_file::StaticFileSegment; pub use storage::StorageEntry; -#[cfg(feature = "c-kzg")] pub use transaction::{ - BlobTransaction, BlobTransactionSidecar, BlobTransactionValidationError, - FromRecoveredPooledTransaction, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, + BlobTransaction, BlobTransactionSidecar, FromRecoveredPooledTransaction, + PooledTransactionsElement, PooledTransactionsElementEcRecovered, }; +#[cfg(feature = "c-kzg")] +pub use transaction::BlobTransactionValidationError; + pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, AccessList, AccessListItem, IntoRecoveredTransaction, InvalidTransactionError, Signature, diff --git a/crates/primitives/src/transaction/eip4844.rs 
b/crates/primitives/src/transaction/eip4844.rs index 8356d678833c5..f2130ce50eb9f 100644 --- a/crates/primitives/src/transaction/eip4844.rs +++ b/crates/primitives/src/transaction/eip4844.rs @@ -8,13 +8,7 @@ use reth_codecs::{main_codec, Compact}; use std::mem; #[cfg(feature = "c-kzg")] -use crate::eip4844::kzg_to_versioned_hash; -#[cfg(feature = "c-kzg")] -use crate::kzg::{self, KzgCommitment, KzgProof, KzgSettings}; -#[cfg(feature = "c-kzg")] -use crate::transaction::sidecar::*; -#[cfg(feature = "c-kzg")] -use std::ops::Deref; +use crate::kzg::KzgSettings; /// [EIP-4844 Blob Transaction](https://eips.ethereum.org/EIPS/eip-4844#blob-transaction) /// @@ -112,57 +106,16 @@ impl TxEip4844 { /// commitments, and proofs. Each blob data element is verified against its commitment and /// proof. /// - /// Returns [BlobTransactionValidationError::InvalidProof] if any blob KZG proof in the response + /// Returns `InvalidProof` if any blob KZG proof in the response /// fails to verify, or if the versioned hashes in the transaction do not match the actual /// commitment versioned hashes. 
#[cfg(feature = "c-kzg")] pub fn validate_blob( &self, - sidecar: &BlobTransactionSidecar, + sidecar: &crate::BlobTransactionSidecar, proof_settings: &KzgSettings, - ) -> Result<(), BlobTransactionValidationError> { - // Ensure the versioned hashes and commitments have the same length - if self.blob_versioned_hashes.len() != sidecar.commitments.len() { - return Err(kzg::Error::MismatchLength(format!( - "There are {} versioned commitment hashes and {} commitments", - self.blob_versioned_hashes.len(), - sidecar.commitments.len() - )) - .into()) - } - - // zip and iterate, calculating versioned hashes - for (versioned_hash, commitment) in - self.blob_versioned_hashes.iter().zip(sidecar.commitments.iter()) - { - // convert to KzgCommitment - let commitment = KzgCommitment::from(*commitment.deref()); - - // calculate & verify the versioned hash - // https://eips.ethereum.org/EIPS/eip-4844#execution-layer-validation - let calculated_versioned_hash = kzg_to_versioned_hash(commitment); - if *versioned_hash != calculated_versioned_hash { - return Err(BlobTransactionValidationError::WrongVersionedHash { - have: *versioned_hash, - expected: calculated_versioned_hash, - }) - } - } - - // Verify as a batch - let res = KzgProof::verify_blob_kzg_proof_batch( - sidecar.blobs.as_slice(), - sidecar.commitments.as_slice(), - sidecar.proofs.as_slice(), - proof_settings, - ) - .map_err(BlobTransactionValidationError::KZGError)?; - - if res { - Ok(()) - } else { - Err(BlobTransactionValidationError::InvalidProof) - } + ) -> Result<(), alloy_eips::eip4844::BlobTransactionValidationError> { + sidecar.validate(&self.blob_versioned_hashes, proof_settings) } /// Returns the total gas for all blobs in this transaction. 
diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 7b79a85a21a96..95407537b271c 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -25,12 +25,12 @@ pub use error::{ }; pub use legacy::TxLegacy; pub use meta::TransactionMeta; -#[cfg(feature = "c-kzg")] pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; #[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))] pub use sidecar::generate_blob_sidecar; #[cfg(feature = "c-kzg")] -pub use sidecar::{BlobTransaction, BlobTransactionSidecar, BlobTransactionValidationError}; +pub use sidecar::BlobTransactionValidationError; +pub use sidecar::{BlobTransaction, BlobTransactionSidecar}; pub use signature::{extract_chain_id, Signature}; pub use tx_type::{ @@ -45,9 +45,7 @@ mod eip4844; mod error; mod legacy; mod meta; -#[cfg(feature = "c-kzg")] mod pooled; -#[cfg(feature = "c-kzg")] mod sidecar; mod signature; mod tx_type; @@ -1698,7 +1696,6 @@ impl TryFromRecoveredTransaction for TransactionSignedEcRecovered { /// /// This is a conversion trait that'll ensure transactions received via P2P can be converted to the /// transaction type that the transaction pool uses. -#[cfg(feature = "c-kzg")] pub trait FromRecoveredPooledTransaction { /// Converts to this type from the given [`PooledTransactionsElementEcRecovered`]. fn from_recovered_pooled_transaction(tx: PooledTransactionsElementEcRecovered) -> Self; diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 5588d45a78fba..8323de4705b5f 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -1,8 +1,6 @@ //! Defines the types for blob transactions, legacy, and other EIP-2718 transactions included in a //! response to `GetPooledTransactions`. 
-#![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] - use super::error::TransactionConversionError; use crate::{ Address, BlobTransaction, BlobTransactionSidecar, Bytes, Signature, Transaction, diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index 4c2751a86aa36..b4c82b35a83d5 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -1,48 +1,16 @@ #![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] -#[cfg(any(test, feature = "arbitrary"))] use crate::{ - constants::eip4844::{FIELD_ELEMENTS_PER_BLOB, MAINNET_KZG_TRUSTED_SETUP}, - kzg::{KzgCommitment, KzgProof, BYTES_PER_FIELD_ELEMENT}, -}; -use crate::{ - keccak256, - kzg::{ - self, Blob, Bytes48, KzgSettings, BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_PROOF, - }, - Signature, Transaction, TransactionSigned, TxEip4844, TxHash, B256, EIP4844_TX_TYPE_ID, + keccak256, Signature, Transaction, TransactionSigned, TxEip4844, TxHash, EIP4844_TX_TYPE_ID, }; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; -use bytes::BufMut; -#[cfg(any(test, feature = "arbitrary"))] -use proptest::{ - arbitrary::{any as proptest_any, ParamsFor}, - collection::vec as proptest_vec, - strategy::{BoxedStrategy, Strategy}, -}; use serde::{Deserialize, Serialize}; -/// An error that can occur when validating a [BlobTransaction]. -#[derive(Debug, thiserror::Error)] -pub enum BlobTransactionValidationError { - /// Proof validation failed. - #[error("invalid KZG proof")] - InvalidProof, - /// An error returned by [`kzg`]. - #[error("KZG error: {0:?}")] - KZGError(#[from] kzg::Error), - /// The inner transaction is not a blob transaction. - #[error("unable to verify proof for non blob transaction: {0}")] - NotBlobTransaction(u8), - /// The versioned hash is incorrect. 
- #[error("wrong versioned hash: have {have}, expected {expected}")] - WrongVersionedHash { - /// The versioned hash we got - have: B256, - /// The versioned hash we expected - expected: B256, - }, -} +#[doc(inline)] +pub use alloy_eips::eip4844::BlobTransactionSidecar; + +#[cfg(feature = "c-kzg")] +pub use alloy_eips::eip4844::BlobTransactionValidationError; /// A response to `GetPooledTransactions` that includes blob data, their commitments, and their /// corresponding proofs. @@ -83,9 +51,10 @@ impl BlobTransaction { /// Verifies that the transaction's blob data, commitments, and proofs are all valid. /// /// See also [TxEip4844::validate_blob] + #[cfg(feature = "c-kzg")] pub fn validate( &self, - proof_settings: &KzgSettings, + proof_settings: &c_kzg::KzgSettings, ) -> Result<(), BlobTransactionValidationError> { self.transaction.validate_blob(&self.sidecar, proof_settings) } @@ -168,7 +137,7 @@ impl BlobTransaction { self.signature.encode(out); // Encode the blobs, commitments, and proofs - self.sidecar.encode_inner(out); + self.sidecar.encode(out); } /// Outputs the length of the RLP encoding of the blob transaction, including the tx type byte, @@ -274,7 +243,7 @@ impl BlobTransaction { } // All that's left are the blobs, commitments, and proofs - let sidecar = BlobTransactionSidecar::decode_inner(data)?; + let sidecar = BlobTransactionSidecar::decode(data)?; // # Calculating the hash // @@ -306,204 +275,21 @@ impl BlobTransaction { } } -/// This represents a set of blobs, and its corresponding commitments and proofs. -#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] -#[repr(C)] -pub struct BlobTransactionSidecar { - /// The blob data. - pub blobs: Vec, - /// The blob commitments. - pub commitments: Vec, - /// The blob proofs. - pub proofs: Vec, -} - -impl BlobTransactionSidecar { - /// Creates a new [BlobTransactionSidecar] using the given blobs, commitments, and proofs. 
- pub fn new(blobs: Vec, commitments: Vec, proofs: Vec) -> Self { - Self { blobs, commitments, proofs } - } - - /// Encodes the inner [BlobTransactionSidecar] fields as RLP bytes, without a RLP header. - /// - /// This encodes the fields in the following order: - /// - `blobs` - /// - `commitments` - /// - `proofs` - #[inline] - pub(crate) fn encode_inner(&self, out: &mut dyn bytes::BufMut) { - BlobTransactionSidecarRlp::wrap_ref(self).encode(out); - } - - /// Outputs the RLP length of the [BlobTransactionSidecar] fields, without a RLP header. - pub fn fields_len(&self) -> usize { - BlobTransactionSidecarRlp::wrap_ref(self).fields_len() - } - - /// Decodes the inner [BlobTransactionSidecar] fields from RLP bytes, without a RLP header. - /// - /// This decodes the fields in the following order: - /// - `blobs` - /// - `commitments` - /// - `proofs` - #[inline] - pub(crate) fn decode_inner(buf: &mut &[u8]) -> alloy_rlp::Result { - Ok(BlobTransactionSidecarRlp::decode(buf)?.unwrap()) - } - - /// Calculates a size heuristic for the in-memory size of the [BlobTransactionSidecar]. - #[inline] - pub fn size(&self) -> usize { - self.blobs.len() * BYTES_PER_BLOB + // blobs - self.commitments.len() * BYTES_PER_COMMITMENT + // commitments - self.proofs.len() * BYTES_PER_PROOF // proofs - } -} - -impl From for BlobTransactionSidecar { - fn from(value: reth_rpc_types::BlobTransactionSidecar) -> Self { - // SAFETY: Same repr and size - unsafe { std::mem::transmute(value) } - } -} - -impl From for reth_rpc_types::BlobTransactionSidecar { - fn from(value: BlobTransactionSidecar) -> Self { - // SAFETY: Same repr and size - unsafe { std::mem::transmute(value) } - } -} - -impl Encodable for BlobTransactionSidecar { - /// Encodes the inner [BlobTransactionSidecar] fields as RLP bytes, without a RLP header. 
- fn encode(&self, out: &mut dyn BufMut) { - self.encode_inner(out) - } - - fn length(&self) -> usize { - self.fields_len() - } -} - -impl Decodable for BlobTransactionSidecar { - /// Decodes the inner [BlobTransactionSidecar] fields from RLP bytes, without a RLP header. - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - Self::decode_inner(buf) - } -} - -// Wrapper for c-kzg rlp -#[repr(C)] -struct BlobTransactionSidecarRlp { - blobs: Vec<[u8; BYTES_PER_BLOB]>, - commitments: Vec<[u8; BYTES_PER_COMMITMENT]>, - proofs: Vec<[u8; BYTES_PER_PROOF]>, -} - -const _: [(); std::mem::size_of::()] = - [(); std::mem::size_of::()]; - -const _: [(); std::mem::size_of::()] = - [(); std::mem::size_of::()]; - -impl BlobTransactionSidecarRlp { - fn wrap_ref(other: &BlobTransactionSidecar) -> &Self { - // SAFETY: Same repr and size - unsafe { &*(other as *const BlobTransactionSidecar).cast::() } - } - - fn unwrap(self) -> BlobTransactionSidecar { - // SAFETY: Same repr and size - unsafe { std::mem::transmute(self) } - } - - fn encode(&self, out: &mut dyn bytes::BufMut) { - // Encode the blobs, commitments, and proofs - self.blobs.encode(out); - self.commitments.encode(out); - self.proofs.encode(out); - } - - fn fields_len(&self) -> usize { - self.blobs.length() + self.commitments.length() + self.proofs.length() - } - - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - Ok(Self { - blobs: Decodable::decode(buf)?, - commitments: Decodable::decode(buf)?, - proofs: Decodable::decode(buf)?, - }) - } -} - -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for BlobTransactionSidecar { - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let mut arr = [0u8; BYTES_PER_BLOB]; - - // Note: the "fix" for this is kinda pointless. - #[allow(clippy::large_stack_frames)] - let blobs: Vec = (0..u.int_in_range(1..=16)?) 
- .map(|_| { - arr = arbitrary::Arbitrary::arbitrary(u).unwrap(); - - // Ensure that each blob is canonical by ensuring each field element contained in - // the blob is < BLS_MODULUS - for i in 0..(FIELD_ELEMENTS_PER_BLOB as usize) { - arr[i * BYTES_PER_FIELD_ELEMENT] = 0; - } - - Blob::from(arr) - }) - .collect(); - - Ok(generate_blob_sidecar(blobs)) - } -} - -#[cfg(any(test, feature = "arbitrary"))] -impl proptest::arbitrary::Arbitrary for BlobTransactionSidecar { - type Parameters = ParamsFor; - fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { - proptest_vec(proptest_vec(proptest_any::(), BYTES_PER_BLOB), 1..=5) - .prop_map(move |blobs| { - let blobs = blobs - .into_iter() - .map(|mut blob| { - let mut arr = [0u8; BYTES_PER_BLOB]; - - // Ensure that each blob is canonical by ensuring each field element - // contained in the blob is < BLS_MODULUS - for i in 0..(FIELD_ELEMENTS_PER_BLOB as usize) { - blob[i * BYTES_PER_FIELD_ELEMENT] = 0; - } - - arr.copy_from_slice(blob.as_slice()); - arr.into() - }) - .collect(); - - generate_blob_sidecar(blobs) - }) - .boxed() - } - - type Strategy = BoxedStrategy; -} - /// Generates a [`BlobTransactionSidecar`] structure containing blobs, commitments, and proofs. 
-#[cfg(any(test, feature = "arbitrary"))] -pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar { +#[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))] +pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar { + use crate::constants::eip4844::MAINNET_KZG_TRUSTED_SETUP; + use c_kzg::{KzgCommitment, KzgProof}; + let kzg_settings = MAINNET_KZG_TRUSTED_SETUP.clone(); - let commitments: Vec = blobs + let commitments: Vec = blobs .iter() .map(|blob| KzgCommitment::blob_to_kzg_commitment(&blob.clone(), &kzg_settings).unwrap()) .map(|commitment| commitment.to_bytes()) .collect(); - let proofs: Vec = blobs + let proofs: Vec = blobs .iter() .zip(commitments.iter()) .map(|(blob, commitment)| { @@ -512,18 +298,15 @@ pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar { .map(|proof| proof.to_bytes()) .collect(); - BlobTransactionSidecar { blobs, commitments, proofs } + BlobTransactionSidecar::from_kzg(blobs, commitments, proofs) } -#[cfg(test)] +#[cfg(all(test, feature = "c-kzg"))] mod tests { - use crate::{ - hex, - kzg::{Blob, Bytes48}, - transaction::sidecar::generate_blob_sidecar, - BlobTransactionSidecar, - }; - use std::{fs, path::PathBuf}; + use super::*; + use crate::{hex, kzg::Blob}; + use alloy_eips::eip4844::Bytes48; + use std::{fs, path::PathBuf, str::FromStr}; #[test] fn test_blob_transaction_sidecar_generation() { @@ -550,7 +333,7 @@ mod tests { assert_eq!( sidecar.commitments, vec![ - Bytes48::from_hex(json_value.get("commitment").unwrap().as_str().unwrap()).unwrap() + Bytes48::from_str(json_value.get("commitment").unwrap().as_str().unwrap()).unwrap() ] ); } @@ -624,7 +407,7 @@ mod tests { let mut encoded_rlp = Vec::new(); // Encode the inner data of the BlobTransactionSidecar into RLP - sidecar.encode_inner(&mut encoded_rlp); + sidecar.encode(&mut encoded_rlp); // Assert the equality between the expected RLP from the JSON and the encoded RLP assert_eq!(json_value.get("rlp").unwrap().as_str().unwrap(), 
hex::encode(&encoded_rlp)); @@ -655,11 +438,10 @@ mod tests { let mut encoded_rlp = Vec::new(); // Encode the inner data of the BlobTransactionSidecar into RLP - sidecar.encode_inner(&mut encoded_rlp); + sidecar.encode(&mut encoded_rlp); // Decode the RLP-encoded data back into a BlobTransactionSidecar - let decoded_sidecar = - BlobTransactionSidecar::decode_inner(&mut encoded_rlp.as_slice()).unwrap(); + let decoded_sidecar = BlobTransactionSidecar::decode(&mut encoded_rlp.as_slice()).unwrap(); // Assert the equality between the original BlobTransactionSidecar and the decoded one assert_eq!(sidecar, decoded_sidecar); diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index ae6ff97b28d86..5f44c87f5a3ad 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -445,7 +445,6 @@ pub enum OpenDiskFileBlobStore { #[cfg(test)] mod tests { use super::*; - use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; use std::sync::atomic::Ordering; fn tmp_store() -> (DiskFileBlobStore, tempfile::TempDir) { @@ -455,11 +454,15 @@ mod tests { } fn rng_blobs(num: usize) -> Vec<(TxHash, BlobTransactionSidecar)> { - let mut runner = TestRunner::new(Default::default()); - prop::collection::vec(any::<(TxHash, BlobTransactionSidecar)>(), num) - .new_tree(&mut runner) - .unwrap() - .current() + let mut rng = rand::thread_rng(); + (0..num) + .map(|_| { + let tx = TxHash::random_with(&mut rng); + let blob = + BlobTransactionSidecar { blobs: vec![], commitments: vec![], proofs: vec![] }; + (tx, blob) + }) + .collect() } #[test] diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 17ad1f7c340ca..948c47109a1a7 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -14,7 +14,6 @@ use rand::{ }; use reth_primitives::{ 
constants::{eip4844::DATA_GAS_PER_BLOB, MIN_PROTOCOL_BASE_FEE}, - eip4844::kzg_to_versioned_hash, transaction::TryFromRecoveredTransactionError, AccessList, Address, BlobTransactionSidecar, BlobTransactionValidationError, Bytes, ChainId, FromRecoveredPooledTransaction, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, @@ -987,11 +986,7 @@ impl From for Transaction { to, value, access_list, - blob_versioned_hashes: sidecar - .commitments - .into_iter() - .map(|commitment| kzg_to_versioned_hash((*commitment).into())) - .collect(), + blob_versioned_hashes: sidecar.versioned_hashes().collect(), max_fee_per_blob_gas, input, }), diff --git a/examples/exex/rollup/src/execution.rs b/examples/exex/rollup/src/execution.rs index f7a98382e07b9..1403833d30ec3 100644 --- a/examples/exex/rollup/src/execution.rs +++ b/examples/exex/rollup/src/execution.rs @@ -1,3 +1,4 @@ +use crate::{db::Database, RollupContract, CHAIN_ID, CHAIN_SPEC}; use alloy_consensus::{Blob, SidecarCoder, SimpleCoder}; use alloy_rlp::Decodable as _; use eyre::OptionExt; @@ -20,8 +21,6 @@ use reth_revm::{ }; use reth_tracing::tracing::debug; -use crate::{db::Database, RollupContract, CHAIN_ID, CHAIN_SPEC}; - /// Execute a rollup block and return (block with recovered senders)[BlockWithSenders], (bundle /// state)[BundleState] and list of (receipts)[Receipt]. 
pub async fn execute_block( @@ -154,7 +153,7 @@ async fn decode_transactions( let blobs = blobs .into_iter() // Convert blob KZG commitments to versioned hashes - .map(|(blob, commitment)| (blob, kzg_to_versioned_hash((*commitment).into()))) + .map(|(blob, commitment)| (blob, kzg_to_versioned_hash(commitment.as_slice()))) // Filter only blobs that are present in the block data .filter(|(_, hash)| blob_hashes.contains(hash)) .map(|(blob, _)| Blob::from(*blob)) @@ -461,7 +460,7 @@ mod tests { SidecarBuilder::::from_slice(&encoded_transactions).build()?; let blob_hashes = alloy_rlp::encode(sidecar.versioned_hashes().collect::>()); - let mut mock_transaction = MockTransaction::eip4844_with_sidecar(sidecar.into()); + let mut mock_transaction = MockTransaction::eip4844_with_sidecar(sidecar); let transaction = sign_tx_with_key_pair(key_pair, Transaction::from(mock_transaction.clone())); mock_transaction.set_hash(transaction.hash); From f281bbdccda2e08b1d5db651f25c7735ea1d63ed Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 7 May 2024 15:29:16 +0200 Subject: [PATCH 496/700] fix(op): stages checkpoints init-state (#8021) --- bin/reth/src/commands/import.rs | 15 ++------ crates/node-core/src/init.rs | 31 +++++++++-------- crates/primitives/src/stage/checkpoints.rs | 40 ++++++++++++++++++++++ crates/primitives/src/stage/id.rs | 11 ++++++ 4 files changed, 70 insertions(+), 27 deletions(-) diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 0d5b242751558..3496077aefac0 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -43,17 +43,6 @@ use std::{path::PathBuf, sync::Arc}; use tokio::sync::watch; use tracing::{debug, error, info}; -/// Stages that require state. 
-const STATE_STAGES: &[StageId] = &[ - StageId::Execution, - StageId::MerkleUnwind, - StageId::AccountHashing, - StageId::StorageHashing, - StageId::MerkleExecute, - StageId::IndexStorageHistory, - StageId::IndexAccountHistory, -]; - /// Syncs RLP encoded blocks from a file. #[derive(Debug, Parser)] pub struct ImportCommand { @@ -171,7 +160,7 @@ impl ImportCommand { provider_factory.static_file_provider(), PruneModes::default(), ), - true, + self.no_state, ) .await?; @@ -307,7 +296,7 @@ where config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(), ExExManagerHandle::empty(), )) - .disable_all_if(STATE_STAGES, || should_exec), + .disable_all_if(&StageId::STATE_REQUIRED, || should_exec), ) .build(provider_factory, static_file_producer); diff --git a/crates/node-core/src/init.rs b/crates/node-core/src/init.rs index 8a7751e4e9173..b09e29e53b938 100644 --- a/crates/node-core/src/init.rs +++ b/crates/node-core/src/init.rs @@ -2,23 +2,20 @@ use reth_codecs::Compact; use reth_config::config::EtlConfig; -use reth_db::{ - database::Database, - tables, - transaction::{DbTx, DbTxMut}, -}; +use reth_db::{database::Database, tables, transaction::DbTxMut}; use reth_etl::Collector; use reth_interfaces::{db::DatabaseError, provider::ProviderResult}; use reth_primitives::{ - stage::StageId, Account, Address, Bytecode, ChainSpec, GenesisAccount, Receipts, - StaticFileSegment, StorageEntry, B256, U256, + stage::{StageCheckpoint, StageId}, + Account, Address, Bytecode, ChainSpec, GenesisAccount, Receipts, StaticFileSegment, + StorageEntry, B256, U256, }; use reth_provider::{ bundle_state::{BundleStateInit, RevertsInit}, providers::{StaticFileProvider, StaticFileWriter}, BlockHashReader, BlockNumReader, BundleStateWithReceipts, ChainSpecProvider, DatabaseProviderRW, HashingWriter, HistoryWriter, OriginalValuesKnown, ProviderError, - ProviderFactory, StaticFileProviderFactory, + ProviderFactory, StageCheckpointWriter, StaticFileProviderFactory, }; use 
reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress}; use serde::{Deserialize, Serialize}; @@ -114,18 +111,18 @@ pub fn init_genesis(factory: ProviderFactory) -> Result(&tx, &static_file_provider, chain.clone())?; + insert_genesis_header::(tx, &static_file_provider, chain.clone())?; - insert_genesis_state::(&tx, alloc.len(), alloc.iter())?; + insert_genesis_state::(tx, alloc.len(), alloc.iter())?; // insert sync stage - for stage in StageId::ALL.iter() { - tx.put::(stage.to_string(), Default::default())?; + for stage in StageId::ALL { + provider_rw.save_stage_checkpoint(stage, Default::default())?; } - tx.commit()?; + provider_rw.commit()?; static_file_provider.commit()?; Ok(hash) @@ -343,6 +340,11 @@ pub fn init_from_state_dump( ); } + // insert sync stages for stages that require state + for stage in StageId::STATE_REQUIRED { + provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(block))?; + } + provider_rw.commit()?; Ok(hash) @@ -524,6 +526,7 @@ mod tests { cursor::DbCursorRO, models::{storage_sharded_key::StorageShardedKey, ShardedKey}, table::{Table, TableRow}, + transaction::DbTx, DatabaseEnv, }; use reth_primitives::{ diff --git a/crates/primitives/src/stage/checkpoints.rs b/crates/primitives/src/stage/checkpoints.rs index 461e15401e637..d9c10605c174d 100644 --- a/crates/primitives/src/stage/checkpoints.rs +++ b/crates/primitives/src/stage/checkpoints.rs @@ -6,6 +6,8 @@ use bytes::Buf; use reth_codecs::{main_codec, Compact}; use std::ops::RangeInclusive; +use super::StageId; + /// Saves the progress of Merkle stage. #[derive(Default, Debug, Clone, PartialEq)] pub struct MerkleCheckpoint { @@ -201,6 +203,25 @@ impl StageCheckpoint { self } + /// Sets the block range, if checkpoint uses block range. 
+ pub fn with_block_range(mut self, stage_id: &StageId, from: u64, to: u64) -> Self { + self.stage_checkpoint = Some(match stage_id { + StageId::Execution => StageUnitCheckpoint::Execution(ExecutionCheckpoint::default()), + StageId::AccountHashing => { + StageUnitCheckpoint::Account(AccountHashingCheckpoint::default()) + } + StageId::StorageHashing => { + StageUnitCheckpoint::Storage(StorageHashingCheckpoint::default()) + } + StageId::IndexStorageHistory | StageId::IndexAccountHistory => { + StageUnitCheckpoint::IndexHistory(IndexHistoryCheckpoint::default()) + } + _ => return self, + }); + _ = self.stage_checkpoint.map(|mut checkpoint| checkpoint.set_block_range(from, to)); + self + } + /// Get the underlying [`EntitiesCheckpoint`], if any, to determine the number of entities /// processed, and the number of total entities to process. pub fn entities(&self) -> Option { @@ -244,6 +265,25 @@ pub enum StageUnitCheckpoint { IndexHistory(IndexHistoryCheckpoint), } +impl StageUnitCheckpoint { + /// Sets the block range. Returns old block range, or `None` if checkpoint doesn't use block + /// range. + pub fn set_block_range(&mut self, from: u64, to: u64) -> Option { + match self { + Self::Account(AccountHashingCheckpoint { ref mut block_range, .. }) | + Self::Storage(StorageHashingCheckpoint { ref mut block_range, .. }) | + Self::Execution(ExecutionCheckpoint { ref mut block_range, .. }) | + Self::IndexHistory(IndexHistoryCheckpoint { ref mut block_range, .. }) => { + let old_range = *block_range; + *block_range = CheckpointBlockRange { from, to }; + + Some(old_range) + } + _ => None, + } + } +} + #[cfg(test)] impl Default for StageUnitCheckpoint { fn default() -> Self { diff --git a/crates/primitives/src/stage/id.rs b/crates/primitives/src/stage/id.rs index d4926fea15b5b..2779c260801c7 100644 --- a/crates/primitives/src/stage/id.rs +++ b/crates/primitives/src/stage/id.rs @@ -53,6 +53,17 @@ impl StageId { StageId::Finish, ]; + /// Stages that require state. 
+ pub const STATE_REQUIRED: [StageId; 7] = [ + StageId::Execution, + StageId::MerkleUnwind, + StageId::AccountHashing, + StageId::StorageHashing, + StageId::MerkleExecute, + StageId::IndexStorageHistory, + StageId::IndexAccountHistory, + ]; + /// Return stage id formatted as string. pub fn as_str(&self) -> &str { match self { From bcb0bff382c0fb4ce0b7761a974b9d5998a2c5a2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 7 May 2024 17:28:42 +0200 Subject: [PATCH 497/700] chore: rm redundant optimism feature (#8136) --- crates/consensus/common/Cargo.toml | 3 --- crates/node-core/Cargo.toml | 1 - crates/revm/Cargo.toml | 1 - 3 files changed, 5 deletions(-) diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index 5e5a6ef579146..af93788ee67d5 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -21,6 +21,3 @@ reth-consensus.workspace=true reth-interfaces = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } mockall = "0.12" - -[features] -optimism = ["reth-primitives/optimism"] diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index e19b4d242fa80..ef5d63b3fffad 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -107,7 +107,6 @@ optimism = [ "reth-rpc-engine-api/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", - "reth-consensus-common/optimism", "reth-beacon-consensus/optimism", ] diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 151d53a978f8b..ca52c7c90a5a4 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -37,7 +37,6 @@ optimism = [ "revm/optimism", "reth-primitives/optimism", "reth-provider/optimism", - "reth-consensus-common/optimism", "reth-interfaces/optimism", ] js-tracer = ["revm-inspectors/js-tracer"] From 05e434eae3c60208118ddc2e27d6b00b3d6e851e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 7 May 2024 17:28:53 
+0200 Subject: [PATCH 498/700] feat: rm txmeta associated type (#8138) --- crates/ethereum/evm/src/execute.rs | 12 ++---------- crates/ethereum/evm/src/lib.rs | 9 ++------- crates/evm/src/lib.rs | 17 +++++------------ crates/optimism/evm/src/execute.rs | 18 ++++-------------- crates/optimism/evm/src/lib.rs | 13 +++++-------- crates/revm/src/test_utils.rs | 19 ++++++++----------- examples/custom-evm/src/main.rs | 11 +++-------- 7 files changed, 29 insertions(+), 70 deletions(-) diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index db361f35d54c0..c80e476bcbdab 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -10,7 +10,7 @@ use reth_evm::{ BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, }, - ConfigureEvm, ConfigureEvmEnv, + ConfigureEvm, }; use reth_interfaces::{ executor::{BlockExecutionError, BlockValidationError}, @@ -62,7 +62,6 @@ impl EthExecutorProvider { impl EthExecutorProvider where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, { fn eth_executor(&self, db: DB) -> EthBlockExecutor where @@ -79,7 +78,6 @@ where impl BlockExecutorProvider for EthExecutorProvider where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, { type Executor> = EthBlockExecutor; @@ -117,7 +115,6 @@ struct EthEvmExecutor { impl EthEvmExecutor where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, { /// Executes the transactions in the block and returns the receipts. /// @@ -158,7 +155,7 @@ where .into()) } - EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender, ()); + EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender); // Execute transaction. 
let ResultAndState { result, state } = evm.transact().map_err(move |err| { @@ -238,8 +235,6 @@ impl EthBlockExecutor { impl EthBlockExecutor where EvmConfig: ConfigureEvm, - // TODO(mattsse): get rid of this - EvmConfig: ConfigureEvmEnv, DB: Database, { /// Configures a new evm configuration and block environment for the given block. @@ -353,7 +348,6 @@ where impl Executor for EthBlockExecutor where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, DB: Database, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; @@ -403,8 +397,6 @@ impl EthBatchExecutor { impl BatchExecutor for EthBatchExecutor where EvmConfig: ConfigureEvm, - // TODO(mattsse): get rid of this - EvmConfig: ConfigureEvmEnv, DB: Database, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 0c8506ff7cda5..7799cf4107eed 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -12,7 +12,7 @@ use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_primitives::{ revm::{config::revm_spec, env::fill_tx_env}, revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, - Address, ChainSpec, Head, Header, Transaction, U256, + Address, ChainSpec, Head, Header, TransactionSigned, U256, }; use reth_revm::{Database, EvmBuilder}; pub mod execute; @@ -27,12 +27,7 @@ pub mod dao_fork; pub struct EthEvmConfig; impl ConfigureEvmEnv for EthEvmConfig { - type TxMeta = (); - - fn fill_tx_env(tx_env: &mut TxEnv, transaction: T, sender: Address, _meta: ()) - where - T: AsRef, - { + fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { fill_tx_env(tx_env, transaction, sender) } diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index c69e33d652a67..94cac8bccd4eb 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -8,7 +8,9 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] 
-use reth_primitives::{revm::env::fill_block_env, Address, ChainSpec, Header, Transaction, U256}; +use reth_primitives::{ + revm::env::fill_block_env, Address, ChainSpec, Header, TransactionSigned, U256, +}; use revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, SpecId, TxEnv}; @@ -92,17 +94,8 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// This represents the set of methods used to configure the EVM's environment before block /// execution. pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { - /// The type of the transaction metadata that should be used to fill fields in the transaction - /// environment. - /// - /// On ethereum mainnet, this is `()`, and on optimism these are the L1 fee fields and - /// additional L1 block info. - type TxMeta; - - /// Fill transaction environment from a [Transaction] and the given sender address. - fn fill_tx_env(tx_env: &mut TxEnv, transaction: T, sender: Address, meta: Self::TxMeta) - where - T: AsRef; + /// Fill transaction environment from a [TransactionSigned] and the given sender address. 
+ fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address); /// Fill [CfgEnvWithHandlerCfg] fields according to the chain spec and given header fn fill_cfg_env( diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index c6bb5c7cf2338..f729ceda1c744 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -9,15 +9,15 @@ use reth_evm::{ BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, }, - ConfigureEvm, ConfigureEvmEnv, + ConfigureEvm, }; use reth_interfaces::{ executor::{BlockExecutionError, BlockValidationError}, provider::ProviderError, }; use reth_primitives::{ - BlockNumber, BlockWithSenders, Bytes, ChainSpec, GotExpected, Hardfork, Header, PruneModes, - Receipt, Receipts, TxType, Withdrawals, U256, + BlockNumber, BlockWithSenders, ChainSpec, GotExpected, Hardfork, Header, PruneModes, Receipt, + Receipts, TxType, Withdrawals, U256, }; use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, @@ -56,7 +56,6 @@ impl OpExecutorProvider { impl OpExecutorProvider where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, { fn op_executor(&self, db: DB) -> OpBlockExecutor where @@ -73,7 +72,6 @@ where impl BlockExecutorProvider for OpExecutorProvider where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, { type Executor> = OpBlockExecutor; @@ -110,7 +108,6 @@ struct OpEvmExecutor { impl OpEvmExecutor where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, { /// Executes the transactions in the block and returns the receipts. 
/// @@ -182,9 +179,7 @@ where .transpose() .map_err(|_| OptimismBlockExecutionError::AccountLoadFailed(*sender))?; - let mut buf = Vec::with_capacity(transaction.length_without_header()); - transaction.encode_enveloped(&mut buf); - EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender, buf.into()); + EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender); // Execute transaction. let ResultAndState { result, state } = evm.transact().map_err(move |err| { @@ -274,8 +269,6 @@ impl OpBlockExecutor { impl OpBlockExecutor where EvmConfig: ConfigureEvm, - // TODO(mattsse): get rid of this - EvmConfig: ConfigureEvmEnv, DB: Database, { /// Configures a new evm configuration and block environment for the given block. @@ -375,7 +368,6 @@ where impl Executor for OpBlockExecutor where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, DB: Database, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; @@ -428,8 +420,6 @@ impl OpBatchExecutor { impl BatchExecutor for OpBatchExecutor where EvmConfig: ConfigureEvm, - // TODO: get rid of this - EvmConfig: ConfigureEvmEnv, DB: Database, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 748eeab7b3726..31d39fcb6ac4b 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -13,7 +13,7 @@ use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_primitives::{ revm::{config::revm_spec, env::fill_op_tx_env}, revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, - Address, Bytes, ChainSpec, Head, Header, Transaction, U256, + Address, ChainSpec, Head, Header, TransactionSigned, U256, }; use reth_revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; @@ -32,13 +32,10 @@ pub use error::OptimismBlockExecutionError; pub struct OptimismEvmConfig; impl ConfigureEvmEnv for OptimismEvmConfig { - type TxMeta = Bytes; - - fn fill_tx_env(tx_env: &mut TxEnv, transaction: T, sender: 
Address, meta: Bytes) - where - T: AsRef, - { - fill_op_tx_env(tx_env, transaction, sender, meta); + fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { + let mut buf = Vec::with_capacity(transaction.length_without_header()); + transaction.encode_enveloped(&mut buf); + fill_op_tx_env(tx_env, transaction, sender, buf.into()); } fn fill_cfg_env( diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 73df4ea4b4dc5..48e6e7c4d01ea 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -2,7 +2,7 @@ use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_interfaces::provider::ProviderResult; use reth_primitives::{ keccak256, revm::config::revm_spec, trie::AccountProof, Account, Address, BlockNumber, - Bytecode, Bytes, ChainSpec, Head, Header, StorageKey, Transaction, B256, U256, + Bytecode, Bytes, ChainSpec, Head, Header, StorageKey, TransactionSigned, B256, U256, }; #[cfg(not(feature = "optimism"))] @@ -114,20 +114,17 @@ impl StateProvider for StateProviderTest { pub struct TestEvmConfig; impl ConfigureEvmEnv for TestEvmConfig { - #[cfg(not(feature = "optimism"))] - type TxMeta = (); - #[cfg(feature = "optimism")] - type TxMeta = Bytes; - #[allow(unused_variables)] - fn fill_tx_env(tx_env: &mut TxEnv, transaction: T, sender: Address, meta: Self::TxMeta) - where - T: AsRef, - { + fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { #[cfg(not(feature = "optimism"))] fill_tx_env(tx_env, transaction, sender); + #[cfg(feature = "optimism")] - fill_op_tx_env(tx_env, transaction, sender, meta); + { + let mut buf = Vec::with_capacity(transaction.length_without_header()); + transaction.encode_enveloped(&mut buf); + fill_op_tx_env(tx_env, transaction, sender, buf.into()); + } } fn fill_cfg_env( diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index d2c016add2f6c..9572e38be73f9 100644 --- a/examples/custom-evm/src/main.rs +++ 
b/examples/custom-evm/src/main.rs @@ -20,7 +20,7 @@ use reth::{ use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeTypes}; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider, EthereumNode}; -use reth_primitives::{Chain, ChainSpec, Genesis, Header, Transaction}; +use reth_primitives::{Chain, ChainSpec, Genesis, Header, TransactionSigned}; use reth_tracing::{RethTracer, Tracer}; use std::sync::Arc; @@ -61,13 +61,8 @@ impl MyEvmConfig { } impl ConfigureEvmEnv for MyEvmConfig { - type TxMeta = (); - - fn fill_tx_env(tx_env: &mut TxEnv, transaction: T, sender: Address, meta: Self::TxMeta) - where - T: AsRef, - { - EthEvmConfig::fill_tx_env(tx_env, transaction, sender, meta) + fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { + EthEvmConfig::fill_tx_env(tx_env, transaction, sender) } fn fill_cfg_env( From a2623e83642fe696b87189209b17b22d88a07a47 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 7 May 2024 16:46:11 +0100 Subject: [PATCH 499/700] fix: ensures that pruning data from static files only happens on calling `commit()` (#8101) --- crates/stages/src/stages/bodies.rs | 10 +- crates/stages/src/stages/execution.rs | 6 +- crates/stages/src/stages/merkle.rs | 1 + .../static-file/src/static_file_producer.rs | 11 +- .../src/providers/database/provider.rs | 5 +- .../src/providers/static_file/writer.rs | 106 +++++++++++++++--- 6 files changed, 114 insertions(+), 25 deletions(-) diff --git a/crates/stages/src/stages/bodies.rs b/crates/stages/src/stages/bodies.rs index 5080b9b9ee689..bce56880a9537 100644 --- a/crates/stages/src/stages/bodies.rs +++ b/crates/stages/src/stages/bodies.rs @@ -146,8 +146,13 @@ impl Stage for BodyStage { // If static files are ahead, then we didn't reach the database commit in a previous // stage run. 
So, our only solution is to unwind the static files and proceed from the // database expected height. - Ordering::Greater => static_file_producer - .prune_transactions(next_static_file_tx_num - next_tx_num, from_block - 1)?, + Ordering::Greater => { + static_file_producer + .prune_transactions(next_static_file_tx_num - next_tx_num, from_block - 1)?; + // Since this is a database <-> static file inconsistency, we commit the change + // straight away. + static_file_producer.commit()?; + } // If static files are behind, then there was some corruption or loss of files. This // error will trigger an unwind, that will bring the database to the same height as the // static files. @@ -576,6 +581,7 @@ mod tests { let mut static_file_producer = static_file_provider.latest_writer(StaticFileSegment::Transactions).unwrap(); static_file_producer.prune_transactions(1, checkpoint.block_number).unwrap(); + static_file_producer.commit().unwrap(); } // Unwind all of it let unwind_to = 1; diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 0db907211ddda..6d2eb2a5d210a 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -169,7 +169,11 @@ where let static_file_producer = if self.prune_modes.receipts.is_none() && self.prune_modes.receipts_log_filter.is_empty() { - Some(prepare_static_file_producer(provider, start_block)?) + let mut producer = prepare_static_file_producer(provider, start_block)?; + // Since there might be a database <-> static file inconsistency (read + // `prepare_static_file_producer` for context), we commit the change straight away. 
+ producer.commit()?; + Some(producer) } else { None }; diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index 77fcf2e15ba5b..cdf33b40f2f0a 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -582,6 +582,7 @@ mod tests { let hash = last_header.hash_slow(); writer.prune_headers(1).unwrap(); + writer.commit().unwrap(); writer.append_header(last_header, U256::ZERO, hash).unwrap(); writer.commit().unwrap(); diff --git a/crates/static-file/src/static_file_producer.rs b/crates/static-file/src/static_file_producer.rs index 0b0720e21044a..c7a365c9afab2 100644 --- a/crates/static-file/src/static_file_producer.rs +++ b/crates/static-file/src/static_file_producer.rs @@ -272,12 +272,13 @@ mod tests { db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks"); // Unwind headers from static_files and manually insert them into the database, so we're // able to check that static_file_producer works - db.factory - .static_file_provider() + let static_file_provider = db.factory.static_file_provider(); + let mut static_file_writer = static_file_provider .latest_writer(StaticFileSegment::Headers) - .expect("get static file writer for headers") - .prune_headers(blocks.len() as u64) - .expect("prune headers"); + .expect("get static file writer for headers"); + static_file_writer.prune_headers(blocks.len() as u64).unwrap(); + static_file_writer.commit().expect("prune headers"); + let tx = db.factory.db_ref().tx_mut().expect("init tx"); blocks.iter().for_each(|block| { TestStageDB::insert_header(None, &tx, &block.header, U256::ZERO) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 2cae000ce8720..428645f1ae8c6 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1112,7 +1112,10 @@ impl HeaderSyncGapProvider for 
DatabaseProvider { Ordering::Greater => { let mut static_file_producer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; - static_file_producer.prune_headers(next_static_file_block_num - next_block)? + static_file_producer.prune_headers(next_static_file_block_num - next_block)?; + // Since this is a database <-> static file inconsistency, we commit the change + // straight away. + static_file_producer.commit()? } Ordering::Less => { // There's either missing or corrupted files. diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index d1aa8560f24e1..3a0f2d03174d3 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -30,10 +30,17 @@ pub struct StaticFileProviderRW { /// stored in a [dashmap::DashMap] inside the parent [StaticFileProvider].which is an [Arc]. /// If we were to use an [Arc] here, we would create a reference cycle. reader: Weak, + /// A [`NippyJarWriter`] instance. writer: NippyJarWriter, + /// Path to opened file. data_path: PathBuf, + /// Reusable buffer for encoding appended data. buf: Vec, + /// Metrics. metrics: Option>, + /// On commit, does the instructed pruning: number of lines, and if it applies, the last block + /// it ends at. + prune_on_commit: Option<(u64, Option)>, } impl StaticFileProviderRW { @@ -45,7 +52,14 @@ impl StaticFileProviderRW { metrics: Option>, ) -> ProviderResult { let (writer, data_path) = Self::open(segment, block, reader.clone(), metrics.clone())?; - Ok(Self { writer, data_path, buf: Vec::with_capacity(100), reader, metrics }) + Ok(Self { + writer, + data_path, + buf: Vec::with_capacity(100), + reader, + metrics, + prune_on_commit: None, + }) } fn open( @@ -100,6 +114,18 @@ impl StaticFileProviderRW { pub fn commit(&mut self) -> ProviderResult<()> { let start = Instant::now(); + // Truncates the data file if instructed to. 
+ if let Some((to_delete, last_block_number)) = self.prune_on_commit.take() { + match self.writer.user_header().segment() { + StaticFileSegment::Headers => self.prune_header_data(to_delete)?, + StaticFileSegment::Transactions => self + .prune_transaction_data(to_delete, last_block_number.expect("should exist"))?, + StaticFileSegment::Receipts => { + self.prune_receipt_data(to_delete, last_block_number.expect("should exist"))? + } + } + } + // Commits offsets and new user_header to disk self.writer.commit().map_err(|e| ProviderError::NippyJar(e.to_string()))?; @@ -372,6 +398,7 @@ impl StaticFileProviderRW { hash: BlockHash, ) -> ProviderResult { let start = Instant::now(); + self.ensure_no_queued_prune()?; debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Headers); @@ -404,6 +431,7 @@ impl StaticFileProviderRW { tx: TransactionSignedNoHash, ) -> ProviderResult { let start = Instant::now(); + self.ensure_no_queued_prune()?; let result = self.append_with_tx_number(StaticFileSegment::Transactions, tx_num, tx)?; @@ -430,6 +458,7 @@ impl StaticFileProviderRW { receipt: Receipt, ) -> ProviderResult { let start = Instant::now(); + self.ensure_no_queued_prune()?; let result = self.append_with_tx_number(StaticFileSegment::Receipts, tx_num, receipt)?; @@ -444,13 +473,64 @@ impl StaticFileProviderRW { Ok(result) } - /// Removes the last `number` of transactions from static files. + /// Adds an instruction to prune `to_delete`transactions during commit. /// - /// # Note - /// Commits to the configuration file at the end. + /// Note: `last_block` refers to the block the unwinds ends at. pub fn prune_transactions( &mut self, - number: u64, + to_delete: u64, + last_block: BlockNumber, + ) -> ProviderResult<()> { + debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::Transactions); + self.queue_prune(to_delete, Some(last_block)) + } + + /// Adds an instruction to prune `to_delete` receipts during commit. 
+ /// + /// Note: `last_block` refers to the block the unwinds ends at. + pub fn prune_receipts( + &mut self, + to_delete: u64, + last_block: BlockNumber, + ) -> ProviderResult<()> { + debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::Receipts); + self.queue_prune(to_delete, Some(last_block)) + } + + /// Adds an instruction to prune `to_delete` headers during commit. + pub fn prune_headers(&mut self, to_delete: u64) -> ProviderResult<()> { + debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::Headers); + self.queue_prune(to_delete, None) + } + + /// Adds an instruction to prune `to_delete` elements during commit. + /// + /// Note: `last_block` refers to the block the unwinds ends at if dealing with transaction-based + /// data. + fn queue_prune( + &mut self, + to_delete: u64, + last_block: Option, + ) -> ProviderResult<()> { + self.ensure_no_queued_prune()?; + self.prune_on_commit = Some((to_delete, last_block)); + Ok(()) + } + + /// Returns Error if there is a pruning instruction that needs to be applied. + fn ensure_no_queued_prune(&self) -> ProviderResult<()> { + if self.prune_on_commit.is_some() { + return Err(ProviderError::NippyJar( + "Pruning should be comitted before appending or pruning more data".to_string(), + )); + } + Ok(()) + } + + /// Removes the last `to_delete` transactions from the data file. + fn prune_transaction_data( + &mut self, + to_delete: u64, last_block: BlockNumber, ) -> ProviderResult<()> { let start = Instant::now(); @@ -458,7 +538,7 @@ impl StaticFileProviderRW { let segment = StaticFileSegment::Transactions; debug_assert!(self.writer.user_header().segment() == segment); - self.truncate(segment, number, Some(last_block))?; + self.truncate(segment, to_delete, Some(last_block))?; if let Some(metrics) = &self.metrics { metrics.record_segment_operation( @@ -471,11 +551,8 @@ impl StaticFileProviderRW { Ok(()) } - /// Prunes `to_delete` number of receipts from static_files. 
- /// - /// # Note - /// Commits to the configuration file at the end. - pub fn prune_receipts( + /// Prunes the last `to_delete` receipts from the data file. + fn prune_receipt_data( &mut self, to_delete: u64, last_block: BlockNumber, @@ -498,11 +575,8 @@ impl StaticFileProviderRW { Ok(()) } - /// Prunes `to_delete` number of headers from static_files. - /// - /// # Note - /// Commits to the configuration file at the end. - pub fn prune_headers(&mut self, to_delete: u64) -> ProviderResult<()> { + /// Prunes the last `to_delete` headers from the data file. + fn prune_header_data(&mut self, to_delete: u64) -> ProviderResult<()> { let start = Instant::now(); let segment = StaticFileSegment::Headers; From 00f9acb94eeac76c4d204bb37011007797daa6ea Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 7 May 2024 18:55:46 +0200 Subject: [PATCH 500/700] chore: remove reth-revm optimism (#8141) --- Cargo.lock | 4 +- bin/reth/Cargo.toml | 1 - crates/blockchain-tree/Cargo.toml | 2 +- crates/consensus/beacon/Cargo.toml | 1 - crates/optimism/evm/Cargo.toml | 1 - crates/optimism/node/Cargo.toml | 1 - crates/payload/optimism/Cargo.toml | 1 - crates/revm/Cargo.toml | 12 +-- crates/revm/src/lib.rs | 1 - crates/revm/src/test_utils.rs | 98 +--------------------- crates/rpc/rpc/Cargo.toml | 4 +- crates/rpc/rpc/src/eth/api/call.rs | 3 +- crates/rpc/rpc/src/eth/api/transactions.rs | 6 +- crates/rpc/rpc/src/eth/error.rs | 2 +- crates/rpc/rpc/src/trace.rs | 10 +-- 15 files changed, 18 insertions(+), 129 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2ee45d88ac80d..76ab2dcccda60 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5387,7 +5387,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" dependencies = [ - "proc-macro-crate 3.1.0", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "syn 2.0.60", @@ -7653,13 +7653,11 @@ name = "reth-revm" version = "0.2.0-beta.6" 
dependencies = [ "reth-consensus-common", - "reth-evm", "reth-interfaces", "reth-primitives", "reth-provider", "reth-trie", "revm", - "revm-inspectors", "tracing", ] diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 3f5d788347b8b..37b26686f2a5d 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -133,7 +133,6 @@ min-trace-logs = ["tracing/release_max_level_trace"] optimism = [ "reth-primitives/optimism", - "reth-revm/optimism", "reth-interfaces/optimism", "reth-rpc/optimism", "reth-provider/optimism", diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 912f593dc4c95..70ce9a2901c26 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -50,4 +50,4 @@ assert_matches.workspace = true [features] test-utils = [] -optimism = ["reth-primitives/optimism", "reth-interfaces/optimism", "reth-provider/optimism", "reth-revm/optimism"] +optimism = ["reth-primitives/optimism", "reth-interfaces/optimism", "reth-provider/optimism"] diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 8fb9d3ec3b4e6..7b106b2d32cdc 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -71,6 +71,5 @@ optimism = [ "reth-provider/optimism", "reth-blockchain-tree/optimism", "reth-beacon-consensus-core/optimism", - "reth-revm/optimism", "reth-rpc/optimism" ] diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 4e5fd2f19ff59..a1c3a168bdab2 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -33,7 +33,6 @@ reth-revm = { workspace = true, features = ["test-utils"] } optimism = [ "reth-primitives/optimism", "reth-provider/optimism", - "reth-revm/optimism", "reth-interfaces/optimism", "revm-primitives/optimism", ] diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index be8791c7886bd..9432ce9edb5b5 100644 --- a/crates/optimism/node/Cargo.toml +++ 
b/crates/optimism/node/Cargo.toml @@ -64,7 +64,6 @@ optimism = [ "reth-provider/optimism", "reth-rpc-types-compat/optimism", "reth-rpc/optimism", - "reth-revm/optimism", "reth-evm-optimism/optimism", "reth-optimism-payload-builder/optimism", "reth-beacon-consensus/optimism", diff --git a/crates/payload/optimism/Cargo.toml b/crates/payload/optimism/Cargo.toml index c58d0ecb583e2..567c02833b7c0 100644 --- a/crates/payload/optimism/Cargo.toml +++ b/crates/payload/optimism/Cargo.toml @@ -37,7 +37,6 @@ sha2.workspace = true [features] optimism = [ "reth-primitives/optimism", - "reth-revm/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", "reth-evm-optimism/optimism", diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index ca52c7c90a5a4..2b621ed76ec06 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -17,26 +17,16 @@ reth-primitives.workspace = true reth-interfaces.workspace = true reth-provider.workspace = true reth-consensus-common.workspace = true -reth-evm = { workspace = true, optional = true } reth-trie = { workspace = true, optional = true } # revm revm.workspace = true -revm-inspectors.workspace = true # common tracing.workspace = true [dev-dependencies] -reth-evm.workspace = true reth-trie.workspace = true [features] -test-utils = ["dep:reth-trie", "dep:reth-evm"] -optimism = [ - "revm/optimism", - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-interfaces/optimism", -] -js-tracer = ["revm-inspectors/js-tracer"] +test-utils = ["dep:reth-trie"] diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index 7f950afb0f962..8e54195670106 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -22,4 +22,3 @@ pub mod test_utils; // Convenience re-exports. 
pub use revm::{self, *}; -pub use revm_inspectors::*; diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 48e6e7c4d01ea..8c4d1894c5d06 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -1,31 +1,13 @@ -use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_interfaces::provider::ProviderResult; use reth_primitives::{ - keccak256, revm::config::revm_spec, trie::AccountProof, Account, Address, BlockNumber, - Bytecode, Bytes, ChainSpec, Head, Header, StorageKey, TransactionSigned, B256, U256, + keccak256, trie::AccountProof, Account, Address, BlockNumber, Bytecode, Bytes, StorageKey, + B256, U256, }; - -#[cfg(not(feature = "optimism"))] -use reth_primitives::revm::env::fill_tx_env; use reth_provider::{AccountReader, BlockHashReader, StateProvider, StateRootProvider}; use reth_trie::updates::TrieUpdates; -use revm::{ - db::BundleState, - primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, -}; +use revm::db::BundleState; use std::collections::HashMap; -#[cfg(feature = "optimism")] -use { - reth_primitives::revm::env::fill_op_tx_env, - revm::{inspector_handle_register, GetInspector}, -}; - -use revm::{ - primitives::{HandlerCfg, SpecId}, - Database, Evm, EvmBuilder, -}; - /// Mock state for testing #[derive(Debug, Default, Clone, Eq, PartialEq)] pub struct StateProviderTest { @@ -107,77 +89,3 @@ impl StateProvider for StateProviderTest { unimplemented!("proof generation is not supported") } } - -/// Test EVM configuration. 
-#[derive(Debug, Default, Clone, Copy)] -#[non_exhaustive] -pub struct TestEvmConfig; - -impl ConfigureEvmEnv for TestEvmConfig { - #[allow(unused_variables)] - fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { - #[cfg(not(feature = "optimism"))] - fill_tx_env(tx_env, transaction, sender); - - #[cfg(feature = "optimism")] - { - let mut buf = Vec::with_capacity(transaction.length_without_header()); - transaction.encode_enveloped(&mut buf); - fill_op_tx_env(tx_env, transaction, sender, buf.into()); - } - } - - fn fill_cfg_env( - cfg_env: &mut CfgEnvWithHandlerCfg, - chain_spec: &ChainSpec, - header: &Header, - total_difficulty: U256, - ) { - let spec_id = revm_spec( - chain_spec, - Head { - number: header.number, - timestamp: header.timestamp, - difficulty: header.difficulty, - total_difficulty, - hash: Default::default(), - }, - ); - - cfg_env.chain_id = chain_spec.chain().id(); - cfg_env.perf_analyse_created_bytecodes = AnalysisKind::Analyse; - - cfg_env.handler_cfg.spec_id = spec_id; - #[cfg(feature = "optimism")] - { - cfg_env.handler_cfg.is_optimism = chain_spec.is_optimism(); - } - } -} - -impl ConfigureEvm for TestEvmConfig { - type DefaultExternalContext<'a> = (); - - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> { - #[cfg(feature = "optimism")] - let handler_cfg = HandlerCfg { spec_id: SpecId::LATEST, is_optimism: true }; - #[cfg(not(feature = "optimism"))] - let handler_cfg = HandlerCfg { spec_id: SpecId::LATEST }; - EvmBuilder::default().with_db(db).with_handler_cfg(handler_cfg).build() - } - - #[cfg(feature = "optimism")] - fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> - where - DB: Database + 'a, - I: GetInspector, - { - let handler_cfg = HandlerCfg { spec_id: SpecId::LATEST, is_optimism: true }; - EvmBuilder::default() - .with_db(db) - .with_external_context(inspector) - .with_handler_cfg(handler_cfg) - .append_handler_register(inspector_handle_register) - .build() 
- } -} diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 513c7da134e9b..224866be6e232 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -21,11 +21,11 @@ reth-provider = { workspace = true, features = ["test-utils"] } reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-network-api.workspace = true reth-rpc-engine-api.workspace = true -reth-revm = { workspace = true, features = ["js-tracer"] } +reth-revm.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-consensus-common.workspace = true reth-rpc-types-compat.workspace = true -revm-inspectors.workspace = true +revm-inspectors = { workspace = true, features = ["js-tracer"] } reth-evm.workspace = true reth-network-types.workspace = true diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index 8ef2af2f52acf..acd5c30e87348 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ b/crates/rpc/rpc/src/eth/api/call.rs @@ -18,7 +18,7 @@ use reth_primitives::{revm::env::tx_env_with_recovered, BlockId, Bytes, TxKind, use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProvider, StateProviderFactory, }; -use reth_revm::{access_list::AccessListInspector, database::StateProviderDatabase}; +use reth_revm::database::StateProviderDatabase; use reth_rpc_types::{ state::StateOverride, AccessListWithGasUsed, Bundle, EthCallResponse, StateContext, TransactionRequest, @@ -31,6 +31,7 @@ use revm::{ }, DatabaseCommit, }; +use revm_inspectors::access_list::AccessListInspector; use tracing::trace; // Gas per transaction not creating a contract. 
diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 75470e1fe8e78..721cef3db1f8a 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -24,10 +24,7 @@ use reth_primitives::{ use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderBox, StateProviderFactory, }; -use reth_revm::{ - database::StateProviderDatabase, - tracing::{TracingInspector, TracingInspectorConfig}, -}; +use reth_revm::database::StateProviderDatabase; use reth_rpc_types::{ transaction::{ EIP1559TransactionRequest, EIP2930TransactionRequest, EIP4844TransactionRequest, @@ -47,6 +44,7 @@ use revm::{ }, GetInspector, Inspector, }; +use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; use std::future::Future; use crate::eth::revm_utils::FillableTransaction; diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 203b5bbd70e5a..305536aab33d9 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -5,7 +5,6 @@ use alloy_sol_types::decode_revert_reason; use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObject}; use reth_interfaces::RethError; use reth_primitives::{revm_primitives::InvalidHeader, Address, Bytes, U256}; -use reth_revm::tracing::{js::JsInspectorError, MuxError}; use reth_rpc_types::{ error::EthRpcErrorCode, request::TransactionInputError, BlockError, ToRpcError, }; @@ -14,6 +13,7 @@ use reth_transaction_pool::error::{ PoolTransactionError, }; use revm::primitives::{EVMError, ExecutionResult, HaltReason, OutOfGasError}; +use revm_inspectors::tracing::{js::JsInspectorError, MuxError}; use std::time::Duration; /// Result alias diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 5ee089a91f589..7104409146f7c 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -11,10 +11,7 @@ use reth_primitives::{ 
revm::env::tx_env_with_recovered, BlockId, BlockNumberOrTag, Bytes, SealedHeader, B256, U256, }; use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; -use reth_revm::{ - database::StateProviderDatabase, - tracing::{parity::populate_state_diff, TracingInspector, TracingInspectorConfig}, -}; +use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; use reth_rpc_types::{ state::StateOverride, @@ -31,7 +28,10 @@ use revm::{ db::{CacheDB, DatabaseCommit}, primitives::EnvWithHandlerCfg, }; -use revm_inspectors::opcode::OpcodeGasInspector; +use revm_inspectors::{ + opcode::OpcodeGasInspector, + tracing::{parity::populate_state_diff, TracingInspector, TracingInspectorConfig}, +}; use std::{collections::HashSet, sync::Arc}; use tokio::sync::{AcquireError, OwnedSemaphorePermit}; From e172a8e38a81d43bbace5fe6f9bddf5d0b6d3d94 Mon Sep 17 00:00:00 2001 From: Rupam Dey <117000803+rupam-04@users.noreply.github.com> Date: Tue, 7 May 2024 23:11:03 +0530 Subject: [PATCH 501/700] Move and rename ```BeaconConsensus``` to ```EthBeaconConsensus``` (#8140) Co-authored-by: Matthias Seitz --- Cargo.lock | 20 +++++++++---------- Cargo.toml | 4 ++-- .../src/commands/debug_cmd/build_block.rs | 5 +++-- bin/reth/src/commands/debug_cmd/execution.rs | 5 +++-- bin/reth/src/commands/debug_cmd/merkle.rs | 5 +++-- .../src/commands/debug_cmd/replay_engine.rs | 5 +++-- bin/reth/src/commands/import.rs | 4 ++-- bin/reth/src/commands/import_op.rs | 4 ++-- bin/reth/src/commands/stage/run.rs | 4 ++-- bin/reth/src/commands/stage/unwind.rs | 4 ++-- crates/consensus/beacon/Cargo.toml | 4 ++-- crates/consensus/beacon/src/engine/sync.rs | 6 +++--- .../consensus/beacon/src/engine/test_utils.rs | 6 +++--- crates/consensus/beacon/src/lib.rs | 2 +- .../consensus}/Cargo.toml | 2 +- .../consensus}/src/lib.rs | 8 ++++---- crates/node/builder/src/launch/mod.rs | 4 ++-- 17 files changed, 48 insertions(+), 44 deletions(-) rename crates/{consensus/beacon-core => 
ethereum/consensus}/Cargo.toml (91%) rename crates/{consensus/beacon-core => ethereum/consensus}/src/lib.rs (96%) diff --git a/Cargo.lock b/Cargo.lock index 76ab2dcccda60..164865acbceb6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6516,13 +6516,13 @@ dependencies = [ "assert_matches", "futures", "metrics", - "reth-beacon-consensus-core", "reth-blockchain-tree", "reth-config", "reth-consensus", "reth-db", "reth-downloaders", "reth-engine-primitives", + "reth-ethereum-consensus", "reth-ethereum-engine-primitives", "reth-evm", "reth-evm-ethereum", @@ -6551,15 +6551,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "reth-beacon-consensus-core" -version = "0.2.0-beta.6" -dependencies = [ - "reth-consensus", - "reth-consensus-common", - "reth-primitives", -] - [[package]] name = "reth-blockchain-tree" version = "0.2.0-beta.6" @@ -6931,6 +6922,15 @@ dependencies = [ "tokio-util", ] +[[package]] +name = "reth-ethereum-consensus" +version = "0.2.0-beta.6" +dependencies = [ + "reth-consensus", + "reth-consensus-common", + "reth-primitives", +] + [[package]] name = "reth-ethereum-engine-primitives" version = "0.2.0-beta.6" diff --git a/Cargo.toml b/Cargo.toml index e6edbe5b2c147..dca4a1eb8530c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ members = [ "crates/config/", "crates/consensus/auto-seal/", "crates/consensus/beacon/", - "crates/consensus/beacon-core/", + "crates/ethereum/consensus/", "crates/consensus/common/", "crates/consensus/consensus/", "crates/ethereum-forks/", @@ -208,7 +208,7 @@ reth = { path = "bin/reth" } reth-auto-seal-consensus = { path = "crates/consensus/auto-seal" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-beacon-consensus = { path = "crates/consensus/beacon" } -reth-beacon-consensus-core = { path = "crates/consensus/beacon-core" } +reth-ethereum-consensus = { path = "crates/ethereum/consensus" } reth-blockchain-tree = { path = "crates/blockchain-tree" } reth-cli-runner = { path = "crates/cli/runner" } 
reth-codecs = { path = "crates/storage/codecs" } diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index dd0bfa09209a3..72cc9e1fa5328 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -14,7 +14,7 @@ use eyre::Context; use reth_basic_payload_builder::{ BuildArguments, BuildOutcome, Cancelled, PayloadBuilder, PayloadConfig, }; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; @@ -160,7 +160,8 @@ impl Command { data_dir.static_files(), )?; - let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); + let consensus: Arc = + Arc::new(EthBeaconConsensus::new(Arc::clone(&self.chain))); let executor = block_executor!(self.chain.clone()); diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 33b07368a48bf..50e93dfbca836 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -12,7 +12,7 @@ use crate::{ }; use clap::Parser; use futures::{stream::select as stream_select, StreamExt}; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use reth_cli_runner::CliContext; use reth_config::{config::EtlConfig, Config}; use reth_consensus::Consensus; @@ -220,7 +220,8 @@ impl Command { debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); init_genesis(provider_factory.clone())?; - let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); + let consensus: Arc = + Arc::new(EthBeaconConsensus::new(Arc::clone(&self.chain))); // Configure and build network let network_secret_path = diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs 
b/bin/reth/src/commands/debug_cmd/merkle.rs index c42cbdd4df667..3d94a3a43a512 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -12,7 +12,7 @@ use crate::{ }; use backon::{ConstantBuilder, Retryable}; use clap::Parser; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use reth_cli_runner::CliContext; use reth_config::Config; use reth_consensus::Consensus; @@ -156,7 +156,8 @@ impl Command { info!(target: "reth::cli", target_block_number=self.to, "Finished downloading tip of block range"); // build the full block client - let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); + let consensus: Arc = + Arc::new(EthBeaconConsensus::new(Arc::clone(&self.chain))); let block_range_client = FullBlockClient::new(fetch_client, consensus); // get best block number diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index da2e458be2a18..b86e707a86740 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -10,7 +10,7 @@ use crate::{ use clap::Parser; use eyre::Context; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; -use reth_beacon_consensus::{hooks::EngineHooks, BeaconConsensus, BeaconConsensusEngine}; +use reth_beacon_consensus::{hooks::EngineHooks, BeaconConsensusEngine, EthBeaconConsensus}; use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; @@ -122,7 +122,8 @@ impl Command { let provider_factory = ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; - let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); + let consensus: Arc = + Arc::new(EthBeaconConsensus::new(Arc::clone(&self.chain))); let executor = block_executor!(self.chain.clone()); diff --git 
a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 3496077aefac0..f73cf3c175f8a 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -12,7 +12,7 @@ use crate::{ use clap::Parser; use eyre::Context; use futures::{Stream, StreamExt}; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use reth_config::{config::EtlConfig, Config}; use reth_consensus::Consensus; use reth_db::{database::Database, init_db, tables, transaction::DbTx}; @@ -129,7 +129,7 @@ impl ImportCommand { init_genesis(provider_factory.clone())?; - let consensus = Arc::new(BeaconConsensus::new(self.chain.clone())); + let consensus = Arc::new(EthBeaconConsensus::new(self.chain.clone())); info!(target: "reth::cli", "Consensus engine initialized"); // open file diff --git a/bin/reth/src/commands/import_op.rs b/bin/reth/src/commands/import_op.rs index 1c5a74015d5a0..3147f9b1092f1 100644 --- a/bin/reth/src/commands/import_op.rs +++ b/bin/reth/src/commands/import_op.rs @@ -10,7 +10,7 @@ use crate::{ version::SHORT_VERSION, }; use clap::Parser; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use reth_config::{config::EtlConfig, Config}; use reth_db::{init_db, tables, transaction::DbTx}; @@ -107,7 +107,7 @@ impl ImportOpCommand { init_genesis(provider_factory.clone())?; - let consensus = Arc::new(BeaconConsensus::new(self.chain.clone())); + let consensus = Arc::new(EthBeaconConsensus::new(self.chain.clone())); info!(target: "reth::cli", "Consensus engine initialized"); // open file diff --git a/bin/reth/src/commands/stage/run.rs b/bin/reth/src/commands/stage/run.rs index 562b7e1b3e60e..59d26fc293069 100644 --- a/bin/reth/src/commands/stage/run.rs +++ b/bin/reth/src/commands/stage/run.rs @@ -14,7 +14,7 @@ use crate::{ version::SHORT_VERSION, }; use clap::Parser; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use 
reth_cli_runner::CliContext; use reth_config::{config::EtlConfig, Config}; use reth_db::init_db; @@ -169,7 +169,7 @@ impl Command { let (mut exec_stage, mut unwind_stage): (Box>, Option>>) = match self.stage { StageEnum::Bodies => { - let consensus = Arc::new(BeaconConsensus::new(self.chain.clone())); + let consensus = Arc::new(EthBeaconConsensus::new(self.chain.clone())); let mut config = config; config.peers.trusted_nodes_only = self.network.trusted_only; diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index c6dea1a0596e2..b7998d0875b99 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -1,7 +1,7 @@ //! Unwinding a certain block range use clap::{Parser, Subcommand}; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use reth_config::{Config, PruneConfig}; use reth_consensus::Consensus; use reth_db::{database::Database, open_db}; @@ -165,7 +165,7 @@ impl Command { .await?; let consensus: Arc = - Arc::new(BeaconConsensus::new(provider_factory.chain_spec())); + Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); // building network downloaders using the fetch client let fetch_client = network.fetch_client().await?; diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 7b106b2d32cdc..659ef02c175f0 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] # reth -reth-beacon-consensus-core.workspace = true +reth-ethereum-consensus.workspace = true reth-primitives.workspace = true reth-interfaces.workspace = true reth-stages-api.workspace = true @@ -70,6 +70,6 @@ optimism = [ "reth-interfaces/optimism", "reth-provider/optimism", "reth-blockchain-tree/optimism", - "reth-beacon-consensus-core/optimism", + "reth-ethereum-consensus/optimism", "reth-rpc/optimism" ] diff --git 
a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 9e206176a28f9..fd78f461a6e1b 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -1,8 +1,8 @@ //! Sync management for the engine implementation. use crate::{ - engine::metrics::EngineSyncMetrics, BeaconConsensus, BeaconConsensusEngineEvent, - ConsensusEngineLiveSyncProgress, + engine::metrics::EngineSyncMetrics, BeaconConsensusEngineEvent, + ConsensusEngineLiveSyncProgress, EthBeaconConsensus, }; use futures::FutureExt; use reth_db::database::Database; @@ -81,7 +81,7 @@ where Self { full_block_client: FullBlockClient::new( client, - Arc::new(BeaconConsensus::new(chain_spec)), + Arc::new(EthBeaconConsensus::new(chain_spec)), ), pipeline_task_spawner, pipeline_state: PipelineState::Idle(Some(pipeline)), diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 27fc6b44cfa51..6cad1b471842c 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -1,7 +1,7 @@ use crate::{ - engine::hooks::PruneHook, hooks::EngineHooks, BeaconConsensus, BeaconConsensusEngine, + engine::hooks::PruneHook, hooks::EngineHooks, BeaconConsensusEngine, BeaconConsensusEngineError, BeaconConsensusEngineHandle, BeaconForkChoiceUpdateError, - BeaconOnNewPayloadError, MIN_BLOCKS_FOR_PIPELINE_RUN, + BeaconOnNewPayloadError, EthBeaconConsensus, MIN_BLOCKS_FOR_PIPELINE_RUN, }; use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, @@ -322,7 +322,7 @@ where let consensus: Arc = match self.base_config.consensus { TestConsensusConfig::Real => { - Arc::new(BeaconConsensus::new(Arc::clone(&self.base_config.chain_spec))) + Arc::new(EthBeaconConsensus::new(Arc::clone(&self.base_config.chain_spec))) } TestConsensusConfig::Test => Arc::new(TestConsensus::default()), }; 
diff --git a/crates/consensus/beacon/src/lib.rs b/crates/consensus/beacon/src/lib.rs index 5a9e1da4ab9ca..f62a75f94d516 100644 --- a/crates/consensus/beacon/src/lib.rs +++ b/crates/consensus/beacon/src/lib.rs @@ -8,7 +8,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -pub use reth_beacon_consensus_core::BeaconConsensus; +pub use reth_ethereum_consensus::EthBeaconConsensus; mod engine; pub use engine::*; diff --git a/crates/consensus/beacon-core/Cargo.toml b/crates/ethereum/consensus/Cargo.toml similarity index 91% rename from crates/consensus/beacon-core/Cargo.toml rename to crates/ethereum/consensus/Cargo.toml index b5c778b05ec5b..f3ff5d4d36e64 100644 --- a/crates/consensus/beacon-core/Cargo.toml +++ b/crates/ethereum/consensus/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "reth-beacon-consensus-core" +name = "reth-ethereum-consensus" version.workspace = true edition.workspace = true rust-version.workspace = true diff --git a/crates/consensus/beacon-core/src/lib.rs b/crates/ethereum/consensus/src/lib.rs similarity index 96% rename from crates/consensus/beacon-core/src/lib.rs rename to crates/ethereum/consensus/src/lib.rs index 6ced95dbc41e0..ed283f0262a86 100644 --- a/crates/consensus/beacon-core/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -19,19 +19,19 @@ use std::{sync::Arc, time::SystemTime}; /// /// This consensus engine does basic checks as outlined in the execution specs. 
#[derive(Debug)] -pub struct BeaconConsensus { +pub struct EthBeaconConsensus { /// Configuration chain_spec: Arc, } -impl BeaconConsensus { - /// Create a new instance of [BeaconConsensus] +impl EthBeaconConsensus { + /// Create a new instance of [EthBeaconConsensus] pub fn new(chain_spec: Arc) -> Self { Self { chain_spec } } } -impl Consensus for BeaconConsensus { +impl Consensus for EthBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { validation::validate_header_standalone(header, &self.chain_spec)?; Ok(()) diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index e8c5b2967eb50..221434758187c 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -11,7 +11,7 @@ use futures::{future, future::Either, stream, stream_select, StreamExt}; use reth_auto_seal_consensus::AutoSealConsensus; use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, - BeaconConsensus, BeaconConsensusEngine, + BeaconConsensusEngine, EthBeaconConsensus, }; use reth_blockchain_tree::{ noop::NoopBlockchainTree, BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, @@ -116,7 +116,7 @@ where let consensus: Arc = if ctx.is_dev() { Arc::new(AutoSealConsensus::new(ctx.chain_spec())) } else { - Arc::new(BeaconConsensus::new(ctx.chain_spec())) + Arc::new(EthBeaconConsensus::new(ctx.chain_spec())) }; debug!(target: "reth::cli", "Spawning stages metrics listener task"); From 1188898dad00ab657bce829160dc4f8520478581 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 7 May 2024 20:05:56 +0200 Subject: [PATCH 502/700] fix(net): max inflight requests (#8139) --- .../net/network/src/transactions/fetcher.rs | 21 +++++++++---------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index f26b1abe2193b..7c60b54979980 100644 --- 
a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -1294,27 +1294,26 @@ pub enum VerificationOutcome { /// Tracks stats about the [`TransactionFetcher`]. #[derive(Debug)] pub struct TransactionFetcherInfo { - /// Currently active outgoing [`GetPooledTransactions`] requests. + /// Max inflight [`GetPooledTransactions`] requests. pub max_inflight_requests: usize, - /// Soft limit for the byte size of the expected - /// [`PooledTransactions`] response on packing a - /// [`GetPooledTransactions`] request with hashes. - pub(super) soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize, - /// Soft limit for the byte size of a [`PooledTransactions`] - /// response on assembling a [`GetPooledTransactions`] - /// request. Spec'd at 2 MiB. + /// Soft limit for the byte size of the expected [`PooledTransactions`] response, upon packing + /// a [`GetPooledTransactions`] request with hashes (by default less than 2 MiB worth of + /// transactions is requested). + pub soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize, + /// Soft limit for the byte size of a [`PooledTransactions`] response, upon assembling the + /// response. Spec'd at 2 MiB, but can be adjusted for research purpose. 
pub soft_limit_byte_size_pooled_transactions_response: usize, } impl TransactionFetcherInfo { /// Creates a new max pub fn new( - max_inflight_transaction_requests: usize, + max_inflight_requests: usize, soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize, soft_limit_byte_size_pooled_transactions_response: usize, ) -> Self { Self { - max_inflight_requests: max_inflight_transaction_requests, + max_inflight_requests, soft_limit_byte_size_pooled_transactions_response_on_pack_request, soft_limit_byte_size_pooled_transactions_response, } @@ -1324,7 +1323,7 @@ impl TransactionFetcherInfo { impl Default for TransactionFetcherInfo { fn default() -> Self { Self::new( - DEFAULT_MAX_COUNT_INFLIGHT_REQUESTS_ON_FETCH_PENDING_HASHES, + DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS as usize * DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER as usize, DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE ) From 7c4d37b27057105ff196e90558d9083405585eca Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 7 May 2024 20:08:10 +0200 Subject: [PATCH 503/700] perf(net): constraint algorithm fill request with hashes (#8142) --- .../net/network/src/transactions/constants.rs | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/crates/net/network/src/transactions/constants.rs b/crates/net/network/src/transactions/constants.rs index 9e37f0786f814..107d9758beff0 100644 --- a/crates/net/network/src/transactions/constants.rs +++ b/crates/net/network/src/transactions/constants.rs @@ -153,19 +153,20 @@ pub mod tx_fetcher { /// search is budget constrained. /// /// Default is a sixth of [`DEFAULT_MAX_COUNT_PENDING_FETCH`], which defaults to 12 800 hashes - /// (the breadth of the search), divided by [`DEFAULT_MAX_COUNT_FALLBACK_PEERS`], which - /// defaults to 3 peers (the depth of the search), so the 711 lru hashes in the pending hashes - /// cache. 
+ /// (the ideal max number of hashes pending fetch), divided by + /// [`DEFAULT_MAX_COUNT_FALLBACK_PEERS`], which defaults to 3 peers (the depth of the search), + /// so a search breadth of 711 lru hashes in the pending hashes cache. pub const DEFAULT_BUDGET_FIND_IDLE_FALLBACK_PEER: usize = DEFAULT_MAX_COUNT_PENDING_FETCH / 6 / DEFAULT_MAX_COUNT_FALLBACK_PEERS as usize; /// Default budget for finding hashes in the intersection of transactions announced by a peer /// and in the cache of hashes pending fetch, when said search is budget constrained. /// - /// Default is a sixth of [`DEFAULT_MAX_COUNT_PENDING_FETCH`], which defaults to 12 800 hashes - /// (the breadth of the search), so 2133 lru hashes in the pending hashes cache. + /// Default is an eight of [`DEFAULT_MAX_COUNT_PENDING_FETCH`], which defaults to 12 800 hashes + /// (the ideal max number of hashes pending fetch), so a search breadth of 1 600 lru hashes in + /// the pending hashes cache. pub const DEFAULT_BUDGET_FIND_INTERSECTION_ANNOUNCED_BY_PEER_AND_PENDING_FETCH: usize = - DEFAULT_MAX_COUNT_PENDING_FETCH / 6; + DEFAULT_MAX_COUNT_PENDING_FETCH / 8; /* ====== SCALARS FOR USE ON FETCH PENDING HASHES ====== */ @@ -209,8 +210,8 @@ pub mod tx_fetcher { /// for the intersection of hashes announced by a peer and hashes pending fetch. The max /// inflight requests is configured in [`TransactionFetcherInfo`]. /// - /// Default is 2 requests. - pub const DEFAULT_DIVISOR_MAX_COUNT_INFLIGHT_REQUESTS_ON_FIND_INTERSECTION: usize = 2; + /// Default is 3 requests. + pub const DEFAULT_DIVISOR_MAX_COUNT_INFLIGHT_REQUESTS_ON_FIND_INTERSECTION: usize = 3; // Default divisor to the max pending pool imports when calculating search breadth of the /// search for any idle peer to which to send a request filled with hashes pending fetch. @@ -225,8 +226,8 @@ pub mod tx_fetcher { /// The max pending pool imports is configured in /// [`PendingPoolImportsInfo`](crate::transactions::PendingPoolImportsInfo). 
/// - /// Default is 3 requests. - pub const DEFAULT_DIVISOR_MAX_COUNT_PENDING_POOL_IMPORTS_ON_FIND_INTERSECTION: usize = 3; + /// Default is 4 requests. + pub const DEFAULT_DIVISOR_MAX_COUNT_PENDING_POOL_IMPORTS_ON_FIND_INTERSECTION: usize = 4; /* ================== ROUGH MEASURES ================== */ From 9bd74fda9e347eb5d343e8ce9d234a4c8b976072 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 7 May 2024 21:16:04 +0100 Subject: [PATCH 504/700] fix: use `--syncmode=execution-layer` from `op-node` for optimistic pipeline sync (#7552) Co-authored-by: Matthias Seitz --- bin/reth/src/optimism.rs | 32 +------ crates/blockchain-tree/src/block_buffer.rs | 6 +- crates/blockchain-tree/src/blockchain_tree.rs | 59 +++++++++++- crates/blockchain-tree/src/noop.rs | 6 ++ crates/blockchain-tree/src/shareable.rs | 9 ++ .../beacon/src/engine/hooks/controller.rs | 12 ++- crates/consensus/beacon/src/engine/mod.rs | 85 +++++++++++++----- crates/consensus/beacon/src/engine/sync.rs | 18 ++-- crates/e2e-test-utils/src/engine_api.rs | 2 +- crates/e2e-test-utils/src/node.rs | 22 +++-- crates/e2e-test-utils/src/wallet.rs | 5 +- .../interfaces/src/blockchain_tree/error.rs | 15 +++- crates/interfaces/src/blockchain_tree/mod.rs | 7 ++ crates/optimism/node/tests/e2e/p2p.rs | 90 ++++++++++++++----- crates/optimism/node/tests/e2e/utils.rs | 29 ++++-- crates/primitives/src/stage/mod.rs | 44 +++++++++ crates/rpc/rpc/src/eth/error.rs | 7 +- crates/stages-api/src/pipeline/mod.rs | 24 +++-- crates/storage/provider/src/providers/mod.rs | 4 + 19 files changed, 365 insertions(+), 111 deletions(-) diff --git a/bin/reth/src/optimism.rs b/bin/reth/src/optimism.rs index a651314b8c5f8..58171879774a1 100644 --- a/bin/reth/src/optimism.rs +++ b/bin/reth/src/optimism.rs @@ -2,11 +2,7 @@ use clap::Parser; use reth::cli::Cli; -use reth_node_builder::NodeHandle; -use reth_node_optimism::{ - args::RollupArgs, rpc::SequencerClient, OptimismEngineTypes, OptimismNode, 
-}; -use reth_provider::BlockReaderIdExt; +use reth_node_optimism::{args::RollupArgs, rpc::SequencerClient, OptimismNode}; use std::sync::Arc; // We use jemalloc for performance reasons @@ -27,7 +23,7 @@ fn main() { } if let Err(err) = Cli::::parse().run(|builder, rollup_args| async move { - let NodeHandle { node, node_exit_future } = builder + let handle = builder .node(OptimismNode::new(rollup_args.clone())) .extend_rpc_modules(move |ctx| { // register sequencer tx forwarder @@ -42,29 +38,7 @@ fn main() { .launch() .await?; - // If `enable_genesis_walkback` is set to true, the rollup client will need to - // perform the derivation pipeline from genesis, validating the data dir. - // When set to false, set the finalized, safe, and unsafe head block hashes - // on the rollup client using a fork choice update. This prevents the rollup - // client from performing the derivation pipeline from genesis, and instead - // starts syncing from the current tip in the DB. - if node.chain_spec().is_optimism() && !rollup_args.enable_genesis_walkback { - let client = node.rpc_server_handles.auth.http_client(); - if let Ok(Some(head)) = node.provider.latest_header() { - reth_rpc_api::EngineApiClient::::fork_choice_updated_v2( - &client, - reth_rpc_types::engine::ForkchoiceState { - head_block_hash: head.hash(), - safe_block_hash: head.hash(), - finalized_block_hash: head.hash(), - }, - None, - ) - .await?; - } - } - - node_exit_future.await + handle.node_exit_future.await }) { eprintln!("Error: {err:?}"); std::process::exit(1); diff --git a/crates/blockchain-tree/src/block_buffer.rs b/crates/blockchain-tree/src/block_buffer.rs index 23c6ca6815e03..14e89633729ff 100644 --- a/crates/blockchain-tree/src/block_buffer.rs +++ b/crates/blockchain-tree/src/block_buffer.rs @@ -104,13 +104,13 @@ impl BlockBuffer { removed } - /// Discard all blocks that precede finalized block number from the buffer. 
- pub fn remove_old_blocks(&mut self, finalized_number: BlockNumber) { + /// Discard all blocks that precede block number from the buffer. + pub fn remove_old_blocks(&mut self, block_number: BlockNumber) { let mut block_hashes_to_remove = Vec::new(); // discard all blocks that are before the finalized number. while let Some(entry) = self.earliest_blocks.first_entry() { - if *entry.key() > finalized_number { + if *entry.key() > block_number { break } let block_hashes = entry.remove(); diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 64d311549dd01..689994471200c 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -19,13 +19,14 @@ use reth_interfaces::{ }; use reth_primitives::{ BlockHash, BlockNumHash, BlockNumber, ForkBlock, GotExpected, Hardfork, PruneModes, Receipt, - SealedBlock, SealedBlockWithSenders, SealedHeader, U256, + SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, B256, U256, }; use reth_provider::{ chain::{ChainSplit, ChainSplitTarget}, BlockExecutionWriter, BlockNumReader, BlockWriter, BundleStateWithReceipts, CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, Chain, ChainSpecProvider, DisplayBlocksChain, HeaderProvider, ProviderError, + StaticFileProviderFactory, }; use reth_stages_api::{MetricEvent, MetricEventsSender}; use std::{ @@ -783,6 +784,11 @@ where Ok(InsertPayloadOk::Inserted(status)) } + /// Discard all blocks that precede block number from the buffer. + pub fn remove_old_blocks(&mut self, block: BlockNumber) { + self.state.buffered_blocks.remove_old_blocks(block); + } + /// Finalize blocks up until and including `finalized_block`, and remove them from the tree. pub fn finalize_block(&mut self, finalized_block: BlockNumber) { // remove blocks @@ -797,7 +803,7 @@ where } } // clean block buffer. 
- self.state.buffered_blocks.remove_old_blocks(finalized_block); + self.remove_old_blocks(finalized_block); } /// Reads the last `N` canonical hashes from the database and updates the block indices of the @@ -817,6 +823,16 @@ where ) -> RethResult<()> { self.finalize_block(last_finalized_block); + let last_canonical_hashes = self.update_block_hashes()?; + + self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?; + + Ok(()) + } + + /// Update all block hashes. iterate over present and new list of canonical hashes and compare + /// them. Remove all mismatches, disconnect them and removes all chains. + pub fn update_block_hashes(&mut self) -> RethResult> { let last_canonical_hashes = self .externals .fetch_latest_canonical_hashes(self.config.num_of_canonical_hashes() as usize)?; @@ -831,9 +847,22 @@ where } } - self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?; + Ok(last_canonical_hashes) + } - Ok(()) + /// Update all block hashes. iterate over present and new list of canonical hashes and compare + /// them. Remove all mismatches, disconnect them, removes all chains and clears all buffered + /// blocks before the tip. + pub fn update_block_hashes_and_clear_buffered( + &mut self, + ) -> RethResult> { + let chain = self.update_block_hashes()?; + + if let Some((block, _)) = chain.last_key_value() { + self.remove_old_blocks(*block); + } + + Ok(chain) } /// Reads the last `N` canonical hashes from the database and updates the block indices of the @@ -1220,6 +1249,28 @@ where &self, revert_until: BlockNumber, ) -> Result, CanonicalError> { + // This should only happen when an optimistic sync target was re-orged. + // + // Static files generally contain finalized data. The blockchain tree only deals + // with unfinalized data. The only scenario where canonical reverts go past the highest + // static file is when an optimistic sync occured and unfinalized data was written to + // static files. 
+ if self + .externals + .provider_factory + .static_file_provider() + .get_highest_static_file_block(StaticFileSegment::Headers) + .unwrap_or_default() > + revert_until + { + trace!( + target: "blockchain_tree", + "Reverting optimistic canonical chain to block {}", + revert_until + ); + return Err(CanonicalError::OptimisticTargetRevert(revert_until)) + } + // read data that is needed for new sidechain let provider_rw = self.externals.provider_factory.provider_rw()?; diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index bb99f9b55b2b8..776a153250bca 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -68,6 +68,12 @@ impl BlockchainTreeEngine for NoopBlockchainTree { fn make_canonical(&self, block_hash: BlockHash) -> Result { Err(BlockchainTreeError::BlockHashNotFoundInChain { block_hash }.into()) } + + fn update_block_hashes_and_clear_buffered( + &self, + ) -> RethResult> { + Ok(BTreeMap::new()) + } } impl BlockchainTreeViewer for NoopBlockchainTree { diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 061b49f4c450c..77cc53c2d3096 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -83,6 +83,15 @@ where res } + fn update_block_hashes_and_clear_buffered( + &self, + ) -> RethResult> { + let mut tree = self.tree.write(); + let res = tree.update_block_hashes_and_clear_buffered(); + tree.update_chains_metrics(); + res + } + fn connect_buffered_blocks_to_canonical_hashes(&self) -> RethResult<()> { trace!(target: "blockchain_tree", "Connecting buffered blocks to canonical hashes"); let mut tree = self.tree.write(); diff --git a/crates/consensus/beacon/src/engine/hooks/controller.rs b/crates/consensus/beacon/src/engine/hooks/controller.rs index 47085be008768..7916928dbe7a6 100644 --- a/crates/consensus/beacon/src/engine/hooks/controller.rs +++ b/crates/consensus/beacon/src/engine/hooks/controller.rs 
@@ -130,10 +130,16 @@ impl EngineHooksController { args: EngineHookContext, db_write_active: bool, ) -> Poll> { - // Hook with DB write access level is not allowed to run due to already running hook with DB - // write access level or active DB write according to passed argument + // Hook with DB write access level is not allowed to run due to any of the following + // reasons: + // - An already running hook with DB write access level + // - Active DB write according to passed argument + // - Missing a finalized block number. We might be on an optimistic sync scenario where we + // cannot skip the FCU with the finalized hash, otherwise CL might misbehave. if hook.db_access_level().is_read_write() && - (self.active_db_write_hook.is_some() || db_write_active) + (self.active_db_write_hook.is_some() || + db_write_active || + args.finalized_block_number.is_none()) { return Poll::Pending } diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 4e3550cd3f64c..1057457c77985 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -15,8 +15,9 @@ use reth_interfaces::{ use reth_payload_builder::PayloadBuilderHandle; use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{ - constants::EPOCH_SLOTS, stage::StageId, BlockNumHash, BlockNumber, Head, Header, SealedBlock, - SealedHeader, B256, + constants::EPOCH_SLOTS, + stage::{PipelineTarget, StageId}, + BlockNumHash, BlockNumber, Head, Header, SealedBlock, SealedHeader, B256, }; use reth_provider::{ BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, @@ -316,7 +317,7 @@ where }; if let Some(target) = maybe_pipeline_target { - this.sync.set_pipeline_sync_target(target); + this.sync.set_pipeline_sync_target(target.into()); } Ok((this, handle)) @@ -668,6 +669,21 @@ where // threshold return Some(state.finalized_block_hash) } + + // OPTIMISTIC SYNCING + // + // It can 
happen when the node is doing an + // optimistic sync, where the CL has no knowledge of the finalized hash, + // but is expecting the EL to sync as high + // as possible before finalizing. + // + // This usually doesn't happen on ETH mainnet since CLs use the more + // secure checkpoint syncing. + // + // However, optimism chains will do this. The risk of a reorg is however + // low. + debug!(target: "consensus::engine", hash=?state.head_block_hash, "Setting head hash as an optimistic pipeline target."); + return Some(state.head_block_hash) } Ok(Some(_)) => { // we're fully synced to the finalized block @@ -981,6 +997,10 @@ where // so we should not warn the user, since this will result in us attempting to sync // to a new target and is considered normal operation during sync } + CanonicalError::OptimisticTargetRevert(block_number) => { + self.sync.set_pipeline_sync_target(PipelineTarget::Unwind(*block_number)); + return PayloadStatus::from_status(PayloadStatusEnum::Syncing) + } _ => { warn!(target: "consensus::engine", %error, ?state, "Failed to canonicalize the head hash"); // TODO(mattsse) better error handling before attempting to sync (FCU could be @@ -1011,7 +1031,7 @@ where if self.pipeline_run_threshold == 0 { // use the pipeline to sync to the target trace!(target: "consensus::engine", %target, "Triggering pipeline run to sync missing ancestors of the new head"); - self.sync.set_pipeline_sync_target(target); + self.sync.set_pipeline_sync_target(target.into()); } else { // trigger a full block download for missing hash, or the parent of its lowest buffered // ancestor @@ -1361,7 +1381,7 @@ where ) { // we don't have the block yet and the distance exceeds the allowed // threshold - self.sync.set_pipeline_sync_target(target); + self.sync.set_pipeline_sync_target(target.into()); // we can exit early here because the pipeline will take care of syncing return } @@ -1445,6 +1465,8 @@ where // TODO: do not ignore this let _ = 
self.blockchain.make_canonical(*target_hash.as_ref()); } + } else if let Some(block_number) = err.optimistic_revert_block_number() { + self.sync.set_pipeline_sync_target(PipelineTarget::Unwind(block_number)); } Err((target.head_block_hash, err)) @@ -1506,13 +1528,7 @@ where // update the canon chain if continuous is enabled if self.sync.run_pipeline_continuously() { - let max_block = ctrl.block_number().unwrap_or_default(); - let max_header = self.blockchain.sealed_header(max_block) - .inspect_err(|error| { - error!(target: "consensus::engine", %error, "Error getting canonical header for continuous sync"); - })? - .ok_or_else(|| ProviderError::HeaderNotFound(max_block.into()))?; - self.blockchain.set_canonical_head(max_header); + self.set_canonical_head(ctrl.block_number().unwrap_or_default())?; } let sync_target_state = match self.forkchoice_state_tracker.sync_target_state() { @@ -1525,6 +1541,14 @@ where } }; + if sync_target_state.finalized_block_hash.is_zero() { + self.set_canonical_head(ctrl.block_number().unwrap_or_default())?; + self.blockchain.update_block_hashes_and_clear_buffered()?; + self.blockchain.connect_buffered_blocks_to_canonical_hashes()?; + // We are on an optimistic syncing process, better to wait for the next FCU to handle + return Ok(()) + } + // Next, we check if we need to schedule another pipeline run or transition // to live sync via tree. // This can arise if we buffer the forkchoice head, and if the head is an @@ -1580,7 +1604,7 @@ where // the tree update from executing too many blocks and blocking. if let Some(target) = pipeline_target { // run the pipeline to the target since the distance is sufficient - self.sync.set_pipeline_sync_target(target); + self.sync.set_pipeline_sync_target(target.into()); } else if let Some(number) = self.blockchain.block_number(sync_target_state.finalized_block_hash)? 
{ @@ -1592,12 +1616,23 @@ where } else { // We don't have the finalized block in the database, so we need to // trigger another pipeline run. - self.sync.set_pipeline_sync_target(sync_target_state.finalized_block_hash); + self.sync.set_pipeline_sync_target(sync_target_state.finalized_block_hash.into()); } Ok(()) } + fn set_canonical_head(&self, max_block: BlockNumber) -> RethResult<()> { + let max_header = self.blockchain.sealed_header(max_block) + .inspect_err(|error| { + error!(target: "consensus::engine", %error, "Error getting canonical header for continuous sync"); + })? + .ok_or_else(|| ProviderError::HeaderNotFound(max_block.into()))?; + self.blockchain.set_canonical_head(max_header); + + Ok(()) + } + fn on_hook_result(&self, polled_hook: PolledHook) -> Result<(), BeaconConsensusEngineError> { if let EngineHookEvent::Finished(Err(error)) = &polled_hook.event { error!( @@ -1746,16 +1781,20 @@ where Err(BeaconOnNewPayloadError::Internal(Box::new(error.clone()))); let _ = tx.send(response); return Err(RethError::Canonical(error)) + } else if error.optimistic_revert_block_number().is_some() { + // engine already set the pipeline unwind target on + // `try_make_sync_target_canonical` + PayloadStatus::from_status(PayloadStatusEnum::Syncing) + } else { + // If we could not make the sync target block canonical, + // we should return the error as an invalid payload status. + PayloadStatus::new( + PayloadStatusEnum::Invalid { validation_error: error.to_string() }, + // TODO: return a proper latest valid hash + // See: + self.forkchoice_state_tracker.last_valid_head(), + ) } - - // If we could not make the sync target block canonical, - // we should return the error as an invalid payload status. 
- PayloadStatus::new( - PayloadStatusEnum::Invalid { validation_error: error.to_string() }, - // TODO: return a proper latest valid hash - // See: - self.forkchoice_state_tracker.last_valid_head(), - ) } }; diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index fd78f461a6e1b..261b6874fd084 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -11,7 +11,7 @@ use reth_interfaces::p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, headers::client::HeadersClient, }; -use reth_primitives::{BlockNumber, ChainSpec, SealedBlock, B256}; +use reth_primitives::{stage::PipelineTarget, BlockNumber, ChainSpec, SealedBlock, B256}; use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineWithResult}; use reth_tasks::TaskSpawner; use reth_tokio_util::EventListeners; @@ -44,7 +44,7 @@ where /// The pipeline is used for large ranges. pipeline_state: PipelineState, /// Pending target block for the pipeline to sync - pending_pipeline_target: Option, + pending_pipeline_target: Option, /// In-flight full block requests in progress. inflight_full_block_requests: Vec>, /// In-flight full block _range_ requests in progress. @@ -216,8 +216,12 @@ where /// Sets a new target to sync the pipeline to. /// /// But ensures the target is not the zero hash. - pub(crate) fn set_pipeline_sync_target(&mut self, target: B256) { - if target.is_zero() { + pub(crate) fn set_pipeline_sync_target(&mut self, target: PipelineTarget) { + if target.sync_target().is_some_and(|target| target.is_zero()) { + trace!( + target: "consensus::engine::sync", + "Pipeline target cannot be zero hash." + ); // precaution to never sync to the zero hash return } @@ -384,7 +388,7 @@ pub(crate) enum EngineSyncEvent { /// Pipeline started syncing /// /// This is none if the pipeline is triggered without a specific target. 
- PipelineStarted(Option), + PipelineStarted(Option), /// Pipeline finished /// /// If this is returned, the pipeline is idle. @@ -590,7 +594,7 @@ mod tests { .build(pipeline, chain_spec); let tip = client.highest_block().expect("there should be blocks here"); - sync_controller.set_pipeline_sync_target(tip.hash()); + sync_controller.set_pipeline_sync_target(tip.hash().into()); let sync_future = poll_fn(|cx| sync_controller.poll(cx)); let next_event = poll!(sync_future); @@ -598,7 +602,7 @@ mod tests { // can assert that the first event here is PipelineStarted because we set the sync target, // and we should get Ready because the pipeline should be spawned immediately assert_matches!(next_event, Poll::Ready(EngineSyncEvent::PipelineStarted(Some(target))) => { - assert_eq!(target, tip.hash()); + assert_eq!(target.sync_target().unwrap(), tip.hash()); }); // the next event should be the pipeline finishing in a good state diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index 13b735aea8bdb..fefd7d6ff6a91 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -63,7 +63,7 @@ impl EngineApiTestContext { ) .await?; - assert!(submission.status == expected_status); + assert_eq!(submission.status, expected_status); Ok(submission.latest_valid_hash.unwrap_or_default()) } diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 668af6034336f..0ae20664a7575 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -5,7 +5,6 @@ use crate::{ use alloy_rpc_types::BlockNumberOrTag; use eyre::Ok; - use futures_util::Future; use reth::{ api::{BuiltPayload, EngineTypes, FullNodeComponents, PayloadBuilderAttributes}, @@ -171,10 +170,7 @@ where if check { if let Some(latest_block) = self.inner.provider.block_by_number(number)? 
{ - if latest_block.hash_slow() != expected_block_hash { - // TODO: only if its awaiting a reorg - continue - } + assert_eq!(latest_block.hash_slow(), expected_block_hash); break } if wait_finish_checkpoint { @@ -185,8 +181,22 @@ where Ok(()) } + pub async fn wait_unwind(&self, number: BlockNumber) -> eyre::Result<()> { + loop { + tokio::time::sleep(std::time::Duration::from_millis(10)).await; + if let Some(checkpoint) = self.inner.provider.get_stage_checkpoint(StageId::Headers)? { + if checkpoint.block_number == number { + break + } + } + } + Ok(()) + } + /// Asserts that a new block has been added to the blockchain - /// and the tx has been included in the block + /// and the tx has been included in the block. + /// + /// Does NOT work for pipeline since there's no stream notification! pub async fn assert_new_block( &mut self, tip_tx_hash: B256, diff --git a/crates/e2e-test-utils/src/wallet.rs b/crates/e2e-test-utils/src/wallet.rs index d94dec2a08c3f..e841e7cd786c3 100644 --- a/crates/e2e-test-utils/src/wallet.rs +++ b/crates/e2e-test-utils/src/wallet.rs @@ -4,7 +4,8 @@ use alloy_signer_wallet::{coins_bip39::English, LocalWallet, MnemonicBuilder}; /// One of the accounts of the genesis allocations. 
pub struct Wallet { pub inner: LocalWallet, - chain_id: u64, + pub inner_nonce: u64, + pub chain_id: u64, amount: usize, derivation_path: Option, } @@ -13,7 +14,7 @@ impl Wallet { /// Creates a new account from one of the secret/pubkeys of the genesis allocations (test.json) pub fn new(amount: usize) -> Self { let inner = MnemonicBuilder::::default().phrase(TEST_MNEMONIC).build().unwrap(); - Self { inner, chain_id: 1, amount, derivation_path: None } + Self { inner, chain_id: 1, amount, derivation_path: None, inner_nonce: 0 } } /// Sets chain id diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index b805c6ee8e778..e9cdb8714d89d 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -67,6 +67,9 @@ pub enum CanonicalError { /// Error indicating a transaction failed to commit during execution. #[error("transaction error on commit: {0}")] CanonicalCommit(String), + /// Error indicating that a previous optimistic sync target was re-orged + #[error("transaction error on revert: {0}")] + OptimisticTargetRevert(BlockNumber), } impl CanonicalError { @@ -83,6 +86,15 @@ impl CanonicalError { CanonicalError::BlockchainTree(BlockchainTreeError::BlockHashNotFoundInChain { .. }) ) } + + /// Returns `Some(BlockNumber)` if the underlying error matches + /// [CanonicalError::OptimisticTargetRevert]. + pub fn optimistic_revert_block_number(&self) -> Option { + match self { + CanonicalError::OptimisticTargetRevert(block_number) => Some(*block_number), + _ => None, + } + } } /// Error thrown when inserting a block failed because the block is considered invalid. 
@@ -316,7 +328,8 @@ impl InsertBlockErrorKind { InsertBlockErrorKind::Canonical(err) => match err { CanonicalError::BlockchainTree(_) | CanonicalError::CanonicalCommit(_) | - CanonicalError::CanonicalRevert(_) => false, + CanonicalError::CanonicalRevert(_) | + CanonicalError::OptimisticTargetRevert(_) => false, CanonicalError::Validation(_) => true, CanonicalError::Provider(_) => false, }, diff --git a/crates/interfaces/src/blockchain_tree/mod.rs b/crates/interfaces/src/blockchain_tree/mod.rs index d8ad667fcbbf2..7d2b50e418e1c 100644 --- a/crates/interfaces/src/blockchain_tree/mod.rs +++ b/crates/interfaces/src/blockchain_tree/mod.rs @@ -78,6 +78,13 @@ pub trait BlockchainTreeEngine: BlockchainTreeViewer + Send + Sync { last_finalized_block: BlockNumber, ) -> RethResult<()>; + /// Update all block hashes. iterate over present and new list of canonical hashes and compare + /// them. Remove all mismatches, disconnect them, removes all chains and clears all buffered + /// blocks before the tip. + fn update_block_hashes_and_clear_buffered( + &self, + ) -> RethResult>; + /// Reads the last `N` canonical hashes from the database and updates the block indices of the /// tree by attempting to connect the buffered blocks to canonical hashes. 
/// diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index a38fadf678438..9e3741055ab67 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -1,41 +1,89 @@ use crate::utils::{advance_chain, setup}; -use reth::primitives::BASE_MAINNET; -use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet::Wallet}; -use reth_primitives::ChainId; +use reth_interfaces::blockchain_tree::error::BlockchainTreeError; +use reth_rpc_types::engine::PayloadStatusEnum; +use std::sync::Arc; +use tokio::sync::Mutex; #[tokio::test] async fn can_sync() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let chain_id: ChainId = BASE_MAINNET.chain.into(); + let (mut nodes, _tasks, wallet) = setup(3).await?; + let wallet = Arc::new(Mutex::new(wallet)); - let (mut nodes, _tasks, _wallet) = setup(2).await?; - - let second_node = nodes.pop().unwrap(); + let third_node = nodes.pop().unwrap(); + let mut second_node = nodes.pop().unwrap(); let mut first_node = nodes.pop().unwrap(); - let tip: usize = 300; + let tip: usize = 90; let tip_index: usize = tip - 1; + let reorg_depth = 2; - let wallet = Wallet::default(); - - // On first node, create a chain up to block number 300a - let canonical_payload_chain = advance_chain(tip, &mut first_node, |nonce: u64| { - let wallet = wallet.inner.clone(); - Box::pin(async move { - TransactionTestContext::optimism_l1_block_info_tx(chain_id, wallet, nonce).await - }) - }) - .await?; + // On first node, create a chain up to block number 90a + let canonical_payload_chain = advance_chain(tip, &mut first_node, wallet.clone()).await?; let canonical_chain = canonical_payload_chain.iter().map(|p| p.0.block().hash()).collect::>(); - // On second node, sync up to block number 300a + // On second node, sync optimistically up to block number 88a second_node .engine_api - .update_forkchoice(canonical_chain[tip_index], canonical_chain[tip_index]) + 
.update_optimistic_forkchoice(canonical_chain[tip_index - reorg_depth]) + .await?; + second_node + .wait_block((tip - reorg_depth) as u64, canonical_chain[tip_index - reorg_depth], true) .await?; - second_node.wait_block(tip as u64, canonical_chain[tip_index], true).await?; + + // On third node, sync optimistically up to block number 90a + third_node.engine_api.update_optimistic_forkchoice(canonical_chain[tip_index]).await?; + third_node.wait_block(tip as u64, canonical_chain[tip_index], true).await?; + + // On second node, create a side chain: 88a -> 89b -> 90b + wallet.lock().await.inner_nonce -= reorg_depth as u64; + second_node.payload.timestamp = first_node.payload.timestamp - reorg_depth as u64; // TODO: probably want to make it node agnostic + let side_payload_chain = advance_chain(reorg_depth, &mut second_node, wallet.clone()).await?; + let side_chain = side_payload_chain.iter().map(|p| p.0.block().hash()).collect::>(); + + // Creates fork chain by submitting 89b payload. + // By returning Valid here, op-node will finally return a finalized hash + let _ = third_node + .engine_api + .submit_payload( + side_payload_chain[0].0.clone(), + side_payload_chain[0].1.clone(), + PayloadStatusEnum::Valid, + Default::default(), + ) + .await; + + // It will issue a pipeline reorg to 88a, and then make 89b canonical AND finalized. + third_node.engine_api.update_forkchoice(side_chain[0], side_chain[0]).await?; + + // Make sure we have the updated block + third_node.wait_unwind((tip - reorg_depth) as u64).await?; + third_node + .wait_block( + side_payload_chain[0].0.block().number, + side_payload_chain[0].0.block().hash(), + true, + ) + .await?; + + // Make sure that trying to submit 89a again will result in an invalid payload status, since 89b + // has been set as finalized. 
+ let _ = third_node + .engine_api + .submit_payload( + canonical_payload_chain[tip_index - reorg_depth + 1].0.clone(), + canonical_payload_chain[tip_index - reorg_depth + 1].1.clone(), + PayloadStatusEnum::Invalid { + validation_error: BlockchainTreeError::PendingBlockIsFinalized { + last_finalized: (tip - reorg_depth) as u64 + 1, + } + .to_string(), + }, + Default::default(), + ) + .await; Ok(()) } diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs index e86a7c654142f..ad19086aeda64 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -1,9 +1,10 @@ -use reth::{primitives::Bytes, rpc::types::engine::PayloadAttributes, tasks::TaskManager}; -use reth_e2e_test_utils::{wallet::Wallet, NodeHelperType}; +use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; +use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet::Wallet, NodeHelperType}; use reth_node_optimism::{OptimismBuiltPayload, OptimismNode, OptimismPayloadBuilderAttributes}; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_primitives::{Address, ChainSpecBuilder, Genesis, B256, BASE_MAINNET}; -use std::{future::Future, pin::Pin, sync::Arc}; +use std::sync::Arc; +use tokio::sync::Mutex; /// Optimism Node Helper type pub(crate) type OpNode = NodeHelperType; @@ -24,12 +25,30 @@ pub(crate) async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskMa .await } +/// Advance the chain with sequential payloads returning them in the end. 
pub(crate) async fn advance_chain( length: usize, node: &mut OpNode, - tx_generator: impl Fn(u64) -> Pin>>, + wallet: Arc>, ) -> eyre::Result> { - node.advance(length as u64, tx_generator, optimism_payload_attributes).await + node.advance( + length as u64, + |_| { + let wallet = wallet.clone(); + Box::pin(async move { + let mut wallet = wallet.lock().await; + let tx_fut = TransactionTestContext::optimism_l1_block_info_tx( + wallet.chain_id, + wallet.inner.clone(), + wallet.inner_nonce, + ); + wallet.inner_nonce += 1; + tx_fut.await + }) + }, + optimism_payload_attributes, + ) + .await } /// Helper function to create a new eth payload attributes diff --git a/crates/primitives/src/stage/mod.rs b/crates/primitives/src/stage/mod.rs index ffe52554d3222..3c7c972bcf6fe 100644 --- a/crates/primitives/src/stage/mod.rs +++ b/crates/primitives/src/stage/mod.rs @@ -1,6 +1,7 @@ //! Staged sync primitives. mod id; +use crate::{BlockHash, BlockNumber}; pub use id::StageId; mod checkpoints; @@ -9,3 +10,46 @@ pub use checkpoints::{ HeadersCheckpoint, IndexHistoryCheckpoint, MerkleCheckpoint, StageCheckpoint, StageUnitCheckpoint, StorageHashingCheckpoint, }; + +/// Direction and target block for pipeline operations. +#[derive(Debug, Clone, Copy)] +pub enum PipelineTarget { + /// Target for forward synchronization, indicating a block hash to sync to. + Sync(BlockHash), + /// Target for backward unwinding, indicating a block number to unwind to. + Unwind(BlockNumber), +} + +impl PipelineTarget { + /// Returns the target block hash for forward synchronization, if applicable. + /// + /// # Returns + /// + /// - `Some(BlockHash)`: The target block hash for forward synchronization. + /// - `None`: If the target is for backward unwinding. + pub fn sync_target(self) -> Option { + match self { + PipelineTarget::Sync(hash) => Some(hash), + PipelineTarget::Unwind(_) => None, + } + } + + /// Returns the target block number for backward unwinding, if applicable. 
+ /// + /// # Returns + /// + /// - `Some(BlockNumber)`: The target block number for backward unwinding. + /// - `None`: If the target is for forward synchronization. + pub fn unwind_target(self) -> Option { + match self { + PipelineTarget::Sync(_) => None, + PipelineTarget::Unwind(number) => Some(number), + } + } +} + +impl From for PipelineTarget { + fn from(hash: BlockHash) -> Self { + Self::Sync(hash) + } +} diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 305536aab33d9..df2aef8006ee0 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -39,7 +39,12 @@ pub enum EthApiError { UnknownBlockNumber, /// Thrown when querying for `finalized` or `safe` block before the merge transition is /// finalized, - #[error("unknown block")] + /// + /// op-node uses case sensitive string comparison to parse this error: + /// + /// + /// TODO(#8045): Temporary, until a version of is pushed through that doesn't require this to figure out the EL sync status. + #[error("Unknown block")] UnknownSafeOrFinalizedBlock, /// Thrown when an unknown block or transaction index is encountered #[error("unknown block or tx index")] diff --git a/crates/stages-api/src/pipeline/mod.rs b/crates/stages-api/src/pipeline/mod.rs index 0cbd993c5f986..199cc41e64377 100644 --- a/crates/stages-api/src/pipeline/mod.rs +++ b/crates/stages-api/src/pipeline/mod.rs @@ -7,7 +7,7 @@ use reth_db::database::Database; use reth_interfaces::RethResult; use reth_primitives::{ constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH, - stage::{StageCheckpoint, StageId}, + stage::{PipelineTarget, StageCheckpoint, StageId}, static_file::HighestStaticFiles, BlockNumber, B256, }; @@ -130,17 +130,31 @@ where /// Consume the pipeline and run it until it reaches the provided tip, if set. Return the /// pipeline and its result as a future. 
#[track_caller] - pub fn run_as_fut(mut self, tip: Option) -> PipelineFut { + pub fn run_as_fut(mut self, target: Option) -> PipelineFut { // TODO: fix this in a follow up PR. ideally, consensus engine would be responsible for // updating metrics. let _ = self.register_metrics(); // ignore error Box::pin(async move { // NOTE: the tip should only be None if we are in continuous sync mode. - if let Some(tip) = tip { - self.set_tip(tip); + if let Some(target) = target { + match target { + PipelineTarget::Sync(tip) => self.set_tip(tip), + PipelineTarget::Unwind(target) => { + if let Err(err) = self.produce_static_files() { + return (self, Err(err.into())) + } + if let Err(err) = self.unwind(target, None) { + return (self, Err(err)) + } + self.progress.update(target); + + return (self, Ok(ControlFlow::Continue { block_number: target })) + } + } } + let result = self.run_loop().await; - trace!(target: "sync::pipeline", ?tip, ?result, "Pipeline finished"); + trace!(target: "sync::pipeline", ?target, ?result, "Pipeline finished"); (self, result) }) } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index b0f43ba9f7189..bf94e32cf4acf 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -669,6 +669,10 @@ where self.tree.finalize_block(finalized_block) } + fn update_block_hashes_and_clear_buffered(&self) -> RethResult> { + self.tree.update_block_hashes_and_clear_buffered() + } + fn connect_buffered_blocks_to_canonical_hashes_and_finalize( &self, last_finalized_block: BlockNumber, From 0ad9c7866b6eef75234a353c1dabcce5fb7cc8a9 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 7 May 2024 22:26:58 +0200 Subject: [PATCH 505/700] feat(op): import receipts (#7914) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- bin/reth/src/cli/mod.rs | 8 +- bin/reth/src/commands/import.rs | 2 +- bin/reth/src/commands/import_op.rs | 6 +- 
bin/reth/src/commands/import_receipts.rs | 165 +++++++++ bin/reth/src/commands/mod.rs | 1 + crates/net/downloaders/src/file_client.rs | 207 ++++++----- .../downloaders/src/file_codec_ovm_receipt.rs | 344 ++++++++++++++++++ crates/net/downloaders/src/lib.rs | 19 + .../downloaders/src/receipt_file_client.rs | 268 ++++++++++++++ crates/primitives/Cargo.toml | 3 - crates/storage/provider/src/lib.rs | 1 + 11 files changed, 930 insertions(+), 94 deletions(-) create mode 100644 bin/reth/src/commands/import_receipts.rs create mode 100644 crates/net/downloaders/src/file_codec_ovm_receipt.rs create mode 100644 crates/net/downloaders/src/receipt_file_client.rs diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 40e1f24be9c48..deece5b62e56f 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -8,7 +8,7 @@ use crate::{ LogArgs, }, commands::{ - config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, init_state, + config_cmd, db, debug_cmd, dump_genesis, import, import_receipts, init_cmd, init_state, node::{self, NoArgs}, p2p, recover, stage, test_vectors, }, @@ -150,6 +150,9 @@ impl Cli { Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::InitState(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Import(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::ImportReceipts(command) => { + runner.run_blocking_until_ctrl_c(command.execute()) + } #[cfg(feature = "optimism")] Commands::ImportOp(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), @@ -188,6 +191,9 @@ pub enum Commands { /// This syncs RLP encoded blocks from a file. #[command(name = "import")] Import(import::ImportCommand), + /// This imports RLP encoded receipts from a file. 
+ #[command(name = "import-receipts")] + ImportReceipts(import_receipts::ImportReceiptsCommand), /// This syncs RLP encoded OP blocks below Bedrock from a file, without executing. #[cfg(feature = "optimism")] #[command(name = "import-op")] diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index f73cf3c175f8a..354787f326c34 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -138,7 +138,7 @@ impl ImportCommand { let mut total_decoded_blocks = 0; let mut total_decoded_txns = 0; - while let Some(file_client) = reader.next_chunk().await? { + while let Some(file_client) = reader.next_chunk::().await? { // create a new FileClient from chunk read from file info!(target: "reth::cli", "Importing chain file chunk" diff --git a/bin/reth/src/commands/import_op.rs b/bin/reth/src/commands/import_op.rs index 3147f9b1092f1..8ca1baf5b91df 100644 --- a/bin/reth/src/commands/import_op.rs +++ b/bin/reth/src/commands/import_op.rs @@ -14,7 +14,9 @@ use reth_beacon_consensus::EthBeaconConsensus; use reth_config::{config::EtlConfig, Config}; use reth_db::{init_db, tables, transaction::DbTx}; -use reth_downloaders::file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}; +use reth_downloaders::file_client::{ + ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE, +}; use reth_node_core::init::init_genesis; @@ -117,7 +119,7 @@ impl ImportOpCommand { let mut total_decoded_txns = 0; let mut total_filtered_out_dup_txns = 0; - while let Some(mut file_client) = reader.next_chunk().await? { + while let Some(mut file_client) = reader.next_chunk::().await? { // create a new FileClient from chunk read from file info!(target: "reth::cli", "Importing chain file chunk" diff --git a/bin/reth/src/commands/import_receipts.rs b/bin/reth/src/commands/import_receipts.rs new file mode 100644 index 0000000000000..8e06c3c03cb61 --- /dev/null +++ b/bin/reth/src/commands/import_receipts.rs @@ -0,0 +1,165 @@ +//! 
Command that imports receipts from a file. + +use crate::{ + args::{ + utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, + DatabaseArgs, + }, + dirs::{DataDirPath, MaybePlatformPath}, +}; +use clap::Parser; +use reth_db::{database::Database, init_db, transaction::DbTx, DatabaseEnv}; +use reth_downloaders::{ + file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, + receipt_file_client::ReceiptFileClient, +}; +use reth_node_core::version::SHORT_VERSION; +use reth_primitives::{stage::StageId, ChainSpec, StaticFileSegment}; +use reth_provider::{ + BundleStateWithReceipts, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, + StaticFileProviderFactory, StaticFileWriter, +}; +use tracing::{debug, error, info}; + +use std::{path::PathBuf, sync::Arc}; + +/// Initializes the database with the genesis block. +#[derive(Debug, Parser)] +pub struct ImportReceiptsCommand { + /// The path to the data dir for all reth files and subdirectories. + /// + /// Defaults to the OS-specific data directory: + /// + /// - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + /// - Windows: `{FOLDERID_RoamingAppData}/reth/` + /// - macOS: `$HOME/Library/Application Support/reth/` + #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)] + datadir: MaybePlatformPath, + + /// The chain this node is running. + /// + /// Possible values are either a built-in chain or the path to a chain specification file. + #[arg( + long, + value_name = "CHAIN_OR_PATH", + long_help = chain_help(), + default_value = SUPPORTED_CHAINS[0], + value_parser = genesis_value_parser + )] + chain: Arc, + + /// Chunk byte length. + #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] + chunk_len: Option, + + #[command(flatten)] + db: DatabaseArgs, + + /// The path to a receipts file for import. 
+ #[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)] + path: PathBuf, +} + +impl ImportReceiptsCommand { + /// Execute `import` command + pub async fn execute(self) -> eyre::Result<()> { + info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); + + debug!(target: "reth::cli", + chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), + "Chunking receipts import" + ); + + // add network name to data dir + let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); + + let db_path = data_dir.db(); + info!(target: "reth::cli", path = ?db_path, "Opening database"); + + let db = Arc::new(init_db(db_path, self.db.database_args())?); + info!(target: "reth::cli", "Database opened"); + let provider_factory = + ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; + + let provider = provider_factory.provider_rw()?; + let static_file_provider = provider_factory.static_file_provider(); + + for stage in StageId::ALL { + let checkpoint = provider.get_stage_checkpoint(stage)?; + debug!(target: "reth::cli", + ?stage, + ?checkpoint, + "Read stage checkpoints from db" + ); + } + + // prepare the tx for `write_to_storage` + let tx = provider.into_tx(); + let mut total_decoded_receipts = 0; + + // open file + let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?; + + while let Some(file_client) = reader.next_chunk::().await? { + // create a new file client from chunk read from file + let ReceiptFileClient { receipts, first_block, total_receipts: total_receipts_chunk } = + file_client; + + // mark these as decoded + total_decoded_receipts += total_receipts_chunk; + + info!(target: "reth::cli", + first_receipts_block=?first_block, + total_receipts_chunk, + "Importing receipt file chunk" + ); + + // We're reusing receipt writing code internal to + // `BundleStateWithReceipts::write_to_storage`, so we just use a default empty + // `BundleState`. 
+ let bundled_state = + BundleStateWithReceipts::new(Default::default(), receipts, first_block); + + let static_file_producer = + static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)?; + + // finally, write the receipts + bundled_state.write_to_storage::<::TXMut>( + &tx, + Some(static_file_producer), + OriginalValuesKnown::Yes, + )?; + } + + tx.commit()?; + // as static files works in file ranges, internally it will be committing when creating the + // next file range already, so we only need to call explicitly at the end. + static_file_provider.commit()?; + + if total_decoded_receipts == 0 { + error!(target: "reth::cli", "No receipts were imported, ensure the receipt file is valid and not empty"); + return Ok(()) + } + + // compare the highest static file block to the number of receipts we decoded + // + // `HeaderNumbers` and `TransactionHashNumbers` tables serve as additional indexes, but + // nothing like this needs to exist for Receipts. So `tx.entries::` would + // return zero here. 
+ let total_imported_receipts = static_file_provider + .get_highest_static_file_block(StaticFileSegment::Receipts) + .expect("static files must exist after ensuring we decoded more than zero"); + + if total_imported_receipts != total_decoded_receipts as u64 { + error!(target: "reth::cli", + total_decoded_receipts, + total_imported_receipts, + "Receipts were partially imported" + ); + } + + info!(target: "reth::cli", total_imported_receipts, "Receipt file imported"); + + Ok(()) + } +} diff --git a/bin/reth/src/commands/mod.rs b/bin/reth/src/commands/mod.rs index a005d5e8b9425..9e6ff8f840247 100644 --- a/bin/reth/src/commands/mod.rs +++ b/bin/reth/src/commands/mod.rs @@ -6,6 +6,7 @@ pub mod debug_cmd; pub mod dump_genesis; pub mod import; pub mod import_op; +pub mod import_receipts; pub mod init_cmd; pub mod init_state; diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index ef72a891be7d3..85fac46428225 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -1,4 +1,5 @@ use super::file_codec::BlockFileCodec; +use futures::Future; use itertools::Either; use reth_interfaces::p2p::{ bodies::client::{BodiesClient, BodiesFut}, @@ -12,7 +13,7 @@ use reth_primitives::{ BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, BytesMut, Header, HeadersDirection, SealedHeader, B256, }; -use std::{collections::HashMap, path::Path}; +use std::{collections::HashMap, io, path::Path}; use thiserror::Error; use tokio::{fs::File, io::AsyncReadExt}; use tokio_stream::StreamExt; @@ -57,6 +58,16 @@ pub enum FileClientError { /// An error occurred when decoding blocks, headers, or rlp headers from the file. #[error("{0}")] Rlp(alloy_rlp::Error, Vec), + + /// Custom error message. 
+ #[error("{0}")] + Custom(&'static str), +} + +impl From<&'static str> for FileClientError { + fn from(value: &'static str) -> Self { + Self::Custom(value) + } } impl FileClient { @@ -78,82 +89,6 @@ impl FileClient { Ok(Self::from_reader(&reader[..], file_len).await?.0) } - /// Initialize the [`FileClient`] from bytes that have been read from file. - pub(crate) async fn from_reader( - reader: B, - num_bytes: u64, - ) -> Result<(Self, Vec), FileClientError> - where - B: AsyncReadExt + Unpin, - { - let mut headers = HashMap::new(); - let mut hash_to_number = HashMap::new(); - let mut bodies = HashMap::new(); - - // use with_capacity to make sure the internal buffer contains the entire chunk - let mut stream = FramedRead::with_capacity(reader, BlockFileCodec, num_bytes as usize); - - trace!(target: "downloaders::file", - target_num_bytes=num_bytes, - capacity=stream.read_buffer().capacity(), - "init decode stream" - ); - - let mut remaining_bytes = vec![]; - - let mut log_interval = 0; - let mut log_interval_start_block = 0; - - while let Some(block_res) = stream.next().await { - let block = match block_res { - Ok(block) => block, - Err(FileClientError::Rlp(err, bytes)) => { - trace!(target: "downloaders::file", - %err, - bytes_len=bytes.len(), - "partial block returned from decoding chunk" - ); - remaining_bytes = bytes; - break - } - Err(err) => return Err(err), - }; - let block_number = block.header.number; - let block_hash = block.header.hash_slow(); - - // add to the internal maps - headers.insert(block.header.number, block.header.clone()); - hash_to_number.insert(block_hash, block.header.number); - bodies.insert( - block_hash, - BlockBody { - transactions: block.body, - ommers: block.ommers, - withdrawals: block.withdrawals, - }, - ); - - if log_interval == 0 { - trace!(target: "downloaders::file", - block_number, - "read first block" - ); - log_interval_start_block = block_number; - } else if log_interval % 100_000 == 0 { - trace!(target: "downloaders::file", 
- blocks=?log_interval_start_block..=block_number, - "read blocks from file" - ); - log_interval_start_block = block_number + 1; - } - log_interval += 1; - } - - trace!(target: "downloaders::file", blocks = headers.len(), "Initialized file client"); - - Ok((Self { headers, hash_to_number, bodies }, remaining_bytes)) - } - /// Get the tip hash of the chain. pub fn tip(&self) -> Option { self.headers.get(&self.max_block()?).map(|h| h.hash_slow()) @@ -241,6 +176,88 @@ impl FileClient { } } +impl FromReader for FileClient { + type Error = FileClientError; + + /// Initialize the [`FileClient`] from bytes that have been read from file. + fn from_reader( + reader: B, + num_bytes: u64, + ) -> impl Future), Self::Error>> + where + B: AsyncReadExt + Unpin, + { + let mut headers = HashMap::new(); + let mut hash_to_number = HashMap::new(); + let mut bodies = HashMap::new(); + + // use with_capacity to make sure the internal buffer contains the entire chunk + let mut stream = FramedRead::with_capacity(reader, BlockFileCodec, num_bytes as usize); + + trace!(target: "downloaders::file", + target_num_bytes=num_bytes, + capacity=stream.read_buffer().capacity(), + "init decode stream" + ); + + let mut remaining_bytes = vec![]; + + let mut log_interval = 0; + let mut log_interval_start_block = 0; + + async move { + while let Some(block_res) = stream.next().await { + let block = match block_res { + Ok(block) => block, + Err(FileClientError::Rlp(err, bytes)) => { + trace!(target: "downloaders::file", + %err, + bytes_len=bytes.len(), + "partial block returned from decoding chunk" + ); + remaining_bytes = bytes; + break + } + Err(err) => return Err(err), + }; + let block_number = block.header.number; + let block_hash = block.header.hash_slow(); + + // add to the internal maps + headers.insert(block.header.number, block.header.clone()); + hash_to_number.insert(block_hash, block.header.number); + bodies.insert( + block_hash, + BlockBody { + transactions: block.body, + ommers: block.ommers, 
+ withdrawals: block.withdrawals, + }, + ); + + if log_interval == 0 { + trace!(target: "downloaders::file", + block_number, + "read first block" + ); + log_interval_start_block = block_number; + } else if log_interval % 100_000 == 0 { + trace!(target: "downloaders::file", + blocks=?log_interval_start_block..=block_number, + "read blocks from file" + ); + log_interval_start_block = block_number + 1; + } + log_interval += 1; + } + + trace!(target: "downloaders::file", blocks = headers.len(), "Initialized file client"); + + Ok((Self { headers, hash_to_number, bodies }, remaining_bytes)) + } + } +} + impl HeadersClient for FileClient { type Output = HeadersFut; @@ -341,6 +358,11 @@ pub struct ChunkedFileReader { } impl ChunkedFileReader { + /// Returns the remaining file length. + pub fn file_len(&self) -> u64 { + self.file_byte_len + } + /// Opens the file to import from given path. Returns a new instance. If no chunk byte length /// is passed, chunks have [`DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE`] (one static file). pub async fn new>( @@ -377,7 +399,10 @@ impl ChunkedFileReader { } /// Read next chunk from file. Returns [`FileClient`] containing decoded chunk. - pub async fn next_chunk(&mut self) -> Result, FileClientError> { + pub async fn next_chunk(&mut self) -> Result, T::Error> + where + T: FromReader, + { if self.file_byte_len == 0 && self.chunk.is_empty() { // eof return Ok(None) @@ -391,6 +416,7 @@ impl ChunkedFileReader { // read new bytes from file let mut reader = BytesMut::zeroed(new_read_bytes_target_len as usize); + // actual bytes that have been read let new_read_bytes_len = self.file.read_exact(&mut reader).await? 
as u64; @@ -416,14 +442,7 @@ impl ChunkedFileReader { // make new file client from chunk let (file_client, bytes) = - FileClient::from_reader(&self.chunk[..], next_chunk_byte_len as u64).await?; - - debug!(target: "downloaders::file", - headers_len=file_client.headers.len(), - bodies_len=file_client.bodies.len(), - remaining_bytes_len=bytes.len(), - "parsed blocks that were read from file" - ); + T::from_reader(&self.chunk[..], next_chunk_byte_len as u64).await?; // save left over bytes self.chunk = bytes; @@ -432,6 +451,20 @@ impl ChunkedFileReader { } } +/// Constructs a file client from a reader. +pub trait FromReader { + /// Error returned by file client type. + type Error: From; + /// Returns a file client + fn from_reader( + reader: B, + num_bytes: u64, + ) -> impl Future), Self::Error>> + where + Self: Sized, + B: AsyncReadExt + Unpin; +} + #[cfg(test)] mod tests { use super::*; @@ -594,7 +627,7 @@ mod tests { // test - while let Some(client) = reader.next_chunk().await.unwrap() { + while let Some(client) = reader.next_chunk::().await.unwrap() { let sync_target = client.tip_header().unwrap(); let sync_target_hash = sync_target.hash(); diff --git a/crates/net/downloaders/src/file_codec_ovm_receipt.rs b/crates/net/downloaders/src/file_codec_ovm_receipt.rs new file mode 100644 index 0000000000000..5b3c81a9233ae --- /dev/null +++ b/crates/net/downloaders/src/file_codec_ovm_receipt.rs @@ -0,0 +1,344 @@ +//! Codec for reading raw receipts from a file. + +use alloy_rlp::{Decodable, RlpDecodable}; +use reth_primitives::{ + bytes::{Buf, BytesMut}, + Address, Bloom, Bytes, Log, Receipt, TxType, B256, +}; +use tokio_util::codec::Decoder; + +use crate::{file_client::FileClientError, receipt_file_client::ReceiptWithBlockNumber}; + +/// Codec for reading raw receipts from a file. +/// +/// If using with [`FramedRead`](tokio_util::codec::FramedRead), the user should make sure the +/// framed reader has capacity for the entire receipts file. 
Otherwise, the decoder will return +/// [`InputTooShort`](alloy_rlp::Error::InputTooShort), because RLP receipts can only be +/// decoded if the internal buffer is large enough to contain the entire receipt. +/// +/// Without ensuring the framed reader has capacity for the entire file, a receipt is likely to +/// fall across two read buffers, the decoder will not be able to decode the receipt, which will +/// cause it to fail. +/// +/// It's recommended to use [`with_capacity`](tokio_util::codec::FramedRead::with_capacity) to set +/// the capacity of the framed reader to the size of the file. +#[derive(Debug)] +pub struct HackReceiptFileCodec; + +impl Decoder for HackReceiptFileCodec { + type Item = Option; + type Error = FileClientError; + + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + if src.is_empty() { + return Ok(None) + } + + let buf_slice = &mut src.as_ref(); + let receipt = HackReceiptContainer::decode(buf_slice) + .map_err(|err| Self::Error::Rlp(err, src.to_vec()))? + .0; + src.advance(src.len() - buf_slice.len()); + + Ok(Some( + receipt.map(|receipt| receipt.try_into().map_err(FileClientError::from)).transpose()?, + )) + } +} + +/// See +#[derive(Debug, PartialEq, Eq, RlpDecodable)] +pub struct HackReceipt { + tx_type: u8, + post_state: Bytes, + status: u64, + cumulative_gas_used: u64, + bloom: Bloom, + /// + logs: Vec, + tx_hash: B256, + contract_address: Address, + gas_used: u64, + block_hash: B256, + block_number: u64, + transaction_index: u32, + l1_gas_price: u64, + l1_gas_used: u64, + l1_fee: u64, + fee_scalar: String, +} + +#[derive(Debug, PartialEq, Eq, RlpDecodable)] +#[rlp(trailing)] +struct HackReceiptContainer(Option); + +impl TryFrom for ReceiptWithBlockNumber { + type Error = &'static str; + fn try_from(exported_receipt: HackReceipt) -> Result { + let HackReceipt { + tx_type, status, cumulative_gas_used, logs, block_number: number, .. 
+ } = exported_receipt; + + #[allow(clippy::needless_update)] + let receipt = Receipt { + tx_type: TxType::try_from(tx_type.to_be_bytes()[0])?, + success: status != 0, + cumulative_gas_used, + logs, + ..Default::default() + }; + + Ok(Self { receipt, number }) + } +} + +#[cfg(test)] +pub(super) mod test { + use reth_primitives::{alloy_primitives::LogData, hex}; + + use super::*; + + pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_1: &[u8] = &hex!("f9030ff9030c8080018303183db9010000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000400000000000100000000000000200000000002000000000000001000000000000000000004000000000000000000000000000040000400000100400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000001000000000000000000000100000000000000000000000000000000000000000000000000000000000000088000000080000000000010000000000000000000000000000800008000120000000000000000000000000000000002000f90197f89b948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff863a00109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271a00000000000000000000000000000000000000000000000000000000000014218a000000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2da000000000000000000000000000000000000000000000000000000000618d8837f89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d0e3ebf0a00000000000000000000000000000000000000000000000000000000000014218a000000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d80f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007edc6ca0bb6834800080a05e77a04531c7c107af1882d76cbff9486d0a9aa53701c30888509d4f5f2b003a9400000000000000000000000000000000000000008303183da0bee7192e575af30420cae0c7776304ac196077ee72b048970549e4f08e8754530180018
212c2821c2383312e35"); + + pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_2: &[u8] = &hex!("f90271f9026e8080018301c60db9010000080000000200000000000000000008000000000000000000000100008000000000000000000000000000000000000000000000000000000000400000000000100000000000000000000000020000000000000000000000000000000000004000000000000000000000000000000000400000000400000000000000100000000000000000000000000000020000000000000000000000000000000000000000100000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000008400000000000000000010000000000000000020000000020000000000000000000000000000000000000000000002000f8faf89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d0ea0e40a00000000000000000000000000000000000000000000000000000000000014218a0000000000000000000000000e5e7492282fd1e3bfac337a0beccd29b15b7b24080f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007eda7867e0c7d4800080a0af6ed8a6864d44989adc47c84f6fe0aeb1819817505c42cde6cbbcd5e14dd3179400000000000000000000000000000000000000008301c60da045fd6ce41bb8ebb2bccdaa92dd1619e287704cb07722039901a7eba63dea1d130280018212c2821c2383312e35"); + + pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_3: &[u8] = 
&hex!("f90271f9026e8080018301c60db9010000000000000000000000000000000000000000400000000000000000008000000000000000000000000000000000004000000000000000000000400004000000100000000000000000000000000000000000000000000000000000000000004000000000000000000000040000000000400080000400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000008100000000000000000000000000000000000004000000000000000000000000008000000000000000000010000000000000000000000000000400000000000000001000000000000000000000000002000f8faf89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d101e54ba00000000000000000000000000000000000000000000000000000000000014218a0000000000000000000000000fa011d8d6c26f13abe2cefed38226e401b2b8a9980f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007ed8842f062774800080a08fab01dcec1da547e90a77597999e9153ff788fa6451d1cc942064427bd995019400000000000000000000000000000000000000008301c60da0da4509fe0ca03202ddbe4f68692c132d689ee098433691040ece18c3a45d44c50380018212c2821c2383312e35"); + + fn hack_receipt_1() -> HackReceipt { + let receipt = receipt_block_1(); + + HackReceipt { + tx_type: receipt.receipt.tx_type as u8, + post_state: Bytes::default(), + status: receipt.receipt.success as u64, + cumulative_gas_used: receipt.receipt.cumulative_gas_used, + bloom: 
Bloom::from(hex!("00000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000400000000000100000000000000200000000002000000000000001000000000000000000004000000000000000000000000000040000400000100400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000001000000000000000000000100000000000000000000000000000000000000000000000000000000000000088000000080000000000010000000000000000000000000000800008000120000000000000000000000000000000002000")), + logs: receipt.receipt.logs, + tx_hash: B256::from(hex!("5e77a04531c7c107af1882d76cbff9486d0a9aa53701c30888509d4f5f2b003a")), contract_address: Address::from(hex!("0000000000000000000000000000000000000000")), gas_used: 202813, + block_hash: B256::from(hex!("bee7192e575af30420cae0c7776304ac196077ee72b048970549e4f08e875453")), + block_number: receipt.number, + transaction_index: 0, + l1_gas_price: 1, + l1_gas_used: 4802, + l1_fee: 7203, + fee_scalar: String::from("1.5") + } + } + + pub(crate) fn receipt_block_1() -> ReceiptWithBlockNumber { + let log_1 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271" + )), + B256::from(hex!( + "0000000000000000000000000000000000000000000000000000000000014218" + )), + B256::from(hex!( + "00000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d" + )), + ], + Bytes::from(hex!( + "00000000000000000000000000000000000000000000000000000000618d8837" + )), + ) + .unwrap(), + }; + + let log_2 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c" + )), + B256::from(hex!( + "00000000000000000000000000000000000000000000000000000000d0e3ebf0" + )), + B256::from(hex!( + 
"0000000000000000000000000000000000000000000000000000000000014218" + )), + B256::from(hex!( + "00000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d" + )), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let log_3 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f" + )), + B256::from(hex!( + "00000000000000000000000000000000000000000000007edc6ca0bb68348000" + )), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let mut receipt = Receipt { + tx_type: TxType::Legacy, + success: true, + cumulative_gas_used: 202813, + ..Default::default() + }; + // #[allow(clippy::needless_update)] not recognised, ..Default::default() needed so optimism + // feature must not be brought into scope + receipt.logs = vec![log_1, log_2, log_3]; + + ReceiptWithBlockNumber { receipt, number: 1 } + } + + pub(crate) fn receipt_block_2() -> ReceiptWithBlockNumber { + let log_1 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c" + )), + B256::from(hex!( + "00000000000000000000000000000000000000000000000000000000d0ea0e40" + )), + B256::from(hex!( + "0000000000000000000000000000000000000000000000000000000000014218" + )), + B256::from(hex!( + "000000000000000000000000e5e7492282fd1e3bfac337a0beccd29b15b7b240" + )), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let log_2 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f" + )), + B256::from(hex!( + "00000000000000000000000000000000000000000000007eda7867e0c7d48000" + )), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let mut receipt = Receipt { + tx_type: TxType::Legacy, + success: 
true, + cumulative_gas_used: 116237, + ..Default::default() + }; + // #[allow(clippy::needless_update)] not recognised, ..Default::default() needed so optimism + // feature must not be brought into scope + receipt.logs = vec![log_1, log_2]; + + ReceiptWithBlockNumber { receipt, number: 2 } + } + + pub(crate) fn receipt_block_3() -> ReceiptWithBlockNumber { + let log_1 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c" + )), + B256::from(hex!( + "00000000000000000000000000000000000000000000000000000000d101e54b" + )), + B256::from(hex!( + "0000000000000000000000000000000000000000000000000000000000014218" + )), + B256::from(hex!( + "000000000000000000000000fa011d8d6c26f13abe2cefed38226e401b2b8a99" + )), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let log_2 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f" + )), + B256::from(hex!( + "00000000000000000000000000000000000000000000007ed8842f0627748000" + )), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let mut receipt = Receipt { + tx_type: TxType::Legacy, + success: true, + cumulative_gas_used: 116237, + ..Default::default() + }; + // #[allow(clippy::needless_update)] not recognised, ..Default::default() needed so optimism + // feature must not be brought into scope + receipt.logs = vec![log_1, log_2]; + + ReceiptWithBlockNumber { receipt, number: 3 } + } + + #[test] + fn decode_hack_receipt() { + let receipt = hack_receipt_1(); + + let decoded = HackReceiptContainer::decode(&mut &HACK_RECEIPT_ENCODED_BLOCK_1[..]) + .unwrap() + .0 + .unwrap(); + + assert_eq!(receipt, decoded); + } + + #[test] + #[allow(clippy::needless_update)] + fn receipts_codec() { + // rig + + let mut receipt_1_to_3 = 
HACK_RECEIPT_ENCODED_BLOCK_1.to_vec(); + receipt_1_to_3.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_2); + receipt_1_to_3.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_3); + + let encoded = &mut BytesMut::from(&receipt_1_to_3[..]); + + let mut codec = HackReceiptFileCodec; + + // test + + let first_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap(); + + assert_eq!(receipt_block_1(), first_decoded_receipt); + + let second_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap(); + + assert_eq!(receipt_block_2(), second_decoded_receipt); + + let third_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap(); + + assert_eq!(receipt_block_3(), third_decoded_receipt); + } +} diff --git a/crates/net/downloaders/src/lib.rs b/crates/net/downloaders/src/lib.rs index 37c4a95e3fd19..81e669d881681 100644 --- a/crates/net/downloaders/src/lib.rs +++ b/crates/net/downloaders/src/lib.rs @@ -27,10 +27,29 @@ pub mod metrics; /// efficiently buffering headers and bodies for retrieval. pub mod file_client; +/// Module managing file-based data retrieval and buffering of receipts. +/// +/// Contains [ReceiptFileClient](receipt_file_client::ReceiptFileClient) to read receipt data from +/// files, efficiently buffering receipts for retrieval. +/// +/// Currently configured to use codec [`HackReceipt`](file_codec_ovm_receipt::HackReceipt) based on +/// export of below Bedrock data using . Codec can +/// be replaced with regular encoding of receipts for export. +/// +/// NOTE: receipts can be exported using regular op-geth encoding for `Receipt` type, to fit +/// reth's needs for importing. However, this would require patching the diff in to export the `Receipt` and not `HackReceipt` type (originally +/// made for op-erigon's import needs). +pub mod receipt_file_client; + /// Module with a codec for reading and encoding block bodies in files. /// /// Enables decoding and encoding `Block` types within file contexts. 
pub mod file_codec; +/// Module with a codec for reading and encoding receipts in files. +/// +/// Enables decoding and encoding `HackReceipt` type. See . +pub mod file_codec_ovm_receipt; + #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; diff --git a/crates/net/downloaders/src/receipt_file_client.rs b/crates/net/downloaders/src/receipt_file_client.rs new file mode 100644 index 0000000000000..b6291d0a3dc48 --- /dev/null +++ b/crates/net/downloaders/src/receipt_file_client.rs @@ -0,0 +1,268 @@ +use futures::Future; +use reth_primitives::{Receipt, Receipts}; +use tokio::io::AsyncReadExt; +use tokio_stream::StreamExt; +use tokio_util::codec::FramedRead; +use tracing::trace; + +use crate::{ + file_client::{FileClientError, FromReader}, + file_codec_ovm_receipt::HackReceiptFileCodec, +}; + +/// File client for reading RLP encoded receipts from file. Receipts in file must be in sequential +/// order w.r.t. block number. +#[derive(Debug)] +pub struct ReceiptFileClient { + /// The buffered receipts, read from file, as nested lists. One list per block number. + pub receipts: Receipts, + /// First (lowest) block number read from file. + pub first_block: u64, + /// Total number of receipts. Count of elements in [`Receipts`] flattened. + pub total_receipts: usize, +} + +impl FromReader for ReceiptFileClient { + type Error = FileClientError; + + /// Initialize the [`ReceiptFileClient`] from bytes that have been read from file. Caution! If + /// first block has no transactions, it's assumed to be the genesis block. 
+ fn from_reader( + reader: B, + num_bytes: u64, + ) -> impl Future), Self::Error>> + where + B: AsyncReadExt + Unpin, + { + let mut receipts = Receipts::new(); + + // use with_capacity to make sure the internal buffer contains the entire chunk + let mut stream = + FramedRead::with_capacity(reader, HackReceiptFileCodec, num_bytes as usize); + + trace!(target: "downloaders::file", + target_num_bytes=num_bytes, + capacity=stream.read_buffer().capacity(), + coded=?HackReceiptFileCodec, + "init decode stream" + ); + + let mut remaining_bytes = vec![]; + + let mut log_interval = 0; + let mut log_interval_start_block = 0; + + let mut block_number = 0; + let mut total_receipts = 0; + let mut receipts_for_block = vec![]; + let mut first_block = None; + + async move { + while let Some(receipt_res) = stream.next().await { + let receipt = match receipt_res { + Ok(receipt) => receipt, + Err(FileClientError::Rlp(err, bytes)) => { + trace!(target: "downloaders::file", + %err, + bytes_len=bytes.len(), + "partial receipt returned from decoding chunk" + ); + + remaining_bytes = bytes; + + break + } + Err(err) => return Err(err), + }; + + total_receipts += 1; + + match receipt { + Some(ReceiptWithBlockNumber { receipt, number }) => { + if first_block.is_none() { + first_block = Some(number); + block_number = number; + } + + if block_number == number { + receipts_for_block.push(Some(receipt)); + } else { + receipts.push(receipts_for_block); + + // next block + block_number = number; + receipts_for_block = vec![Some(receipt)]; + } + } + None => { + match first_block { + Some(num) => { + // if there was a block number before this, push receipts for that + // block + receipts.push(receipts_for_block); + // block with no txns + block_number = num + receipts.len() as u64; + } + None => { + // this is the first block and it's empty, assume it's the genesis + // block + first_block = Some(0); + block_number = 0; + } + } + + receipts_for_block = vec![]; + } + } + + if log_interval == 0 { + 
trace!(target: "downloaders::file", + block_number, + total_receipts, + "read first receipt" + ); + log_interval_start_block = block_number; + } else if log_interval % 100_000 == 0 { + trace!(target: "downloaders::file", + blocks=?log_interval_start_block..=block_number, + total_receipts, + "read receipts from file" + ); + log_interval_start_block = block_number + 1; + } + log_interval += 1; + } + + trace!(target: "downloaders::file", + blocks=?log_interval_start_block..=block_number, + total_receipts, + "read receipts from file" + ); + + // we need to push the last receipts + receipts.push(receipts_for_block); + + trace!(target: "downloaders::file", + blocks = receipts.len(), + total_receipts, + "Initialized receipt file client" + ); + + Ok(( + Self { receipts, first_block: first_block.unwrap_or_default(), total_receipts }, + remaining_bytes, + )) + } + } +} + +/// [`Receipt`] with block number. +#[derive(Debug, PartialEq, Eq)] +pub struct ReceiptWithBlockNumber { + /// Receipt. + pub receipt: Receipt, + /// Block number. 
+ pub number: u64, +} + +#[cfg(test)] +mod test { + use reth_primitives::hex; + use reth_tracing::init_test_tracing; + + use crate::file_codec_ovm_receipt::test::{ + receipt_block_1 as op_mainnet_receipt_block_1, + receipt_block_2 as op_mainnet_receipt_block_2, + receipt_block_3 as op_mainnet_receipt_block_3, + HACK_RECEIPT_ENCODED_BLOCK_1 as HACK_RECEIPT_ENCODED_BLOCK_1_OP_MAINNET, + HACK_RECEIPT_ENCODED_BLOCK_2 as HACK_RECEIPT_ENCODED_BLOCK_2_OP_MAINNET, + HACK_RECEIPT_ENCODED_BLOCK_3 as HACK_RECEIPT_ENCODED_BLOCK_3_OP_MAINNET, + }; + + use super::*; + + /// No receipts for genesis block + const HACK_RECEIPT_BLOCK_NO_TRANSACTIONS: &[u8] = &hex!("c0"); + + #[tokio::test] + async fn receipt_file_client_ovm_codec() { + init_test_tracing(); + + // genesis block has no hack receipts + let mut encoded_receipts = HACK_RECEIPT_BLOCK_NO_TRANSACTIONS.to_vec(); + // one receipt each for block 1 and 2 + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_1_OP_MAINNET); + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_2_OP_MAINNET); + // no receipt for block 4 + encoded_receipts.extend_from_slice(HACK_RECEIPT_BLOCK_NO_TRANSACTIONS); + + let encoded_byte_len = encoded_receipts.len() as u64; + let reader = &mut &encoded_receipts[..]; + + let (ReceiptFileClient { receipts, first_block, total_receipts }, _remaining_bytes) = + ReceiptFileClient::from_reader(reader, encoded_byte_len).await.unwrap(); + + assert_eq!(4, total_receipts); + assert_eq!(0, first_block); + assert!(receipts[0].is_empty()); + assert_eq!(op_mainnet_receipt_block_1().receipt, receipts[1][0].clone().unwrap()); + assert_eq!(op_mainnet_receipt_block_2().receipt, receipts[2][0].clone().unwrap()); + assert!(receipts[3].is_empty()); + } + + #[tokio::test] + async fn no_receipts_middle_block() { + init_test_tracing(); + + // genesis block has no hack receipts + let mut encoded_receipts = HACK_RECEIPT_BLOCK_NO_TRANSACTIONS.to_vec(); + // one receipt each for block 1 + 
encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_1_OP_MAINNET); + // no receipt for block 2 + encoded_receipts.extend_from_slice(HACK_RECEIPT_BLOCK_NO_TRANSACTIONS); + // one receipt for block 3 + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_3_OP_MAINNET); + + let encoded_byte_len = encoded_receipts.len() as u64; + let reader = &mut &encoded_receipts[..]; + + let (ReceiptFileClient { receipts, first_block, total_receipts }, _remaining_bytes) = + ReceiptFileClient::from_reader(reader, encoded_byte_len).await.unwrap(); + + assert_eq!(4, total_receipts); + assert_eq!(0, first_block); + assert!(receipts[0].is_empty()); + assert_eq!(op_mainnet_receipt_block_1().receipt, receipts[1][0].clone().unwrap()); + assert!(receipts[2].is_empty()); + assert_eq!(op_mainnet_receipt_block_3().receipt, receipts[3][0].clone().unwrap()); + } + + #[tokio::test] + async fn two_receipts_same_block() { + init_test_tracing(); + + // genesis block has no hack receipts + let mut encoded_receipts = HACK_RECEIPT_BLOCK_NO_TRANSACTIONS.to_vec(); + // one receipt each for block 1 + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_1_OP_MAINNET); + // two receipts for block 2 + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_2_OP_MAINNET); + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_2_OP_MAINNET); + // one receipt for block 3 + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_3_OP_MAINNET); + + let encoded_byte_len = encoded_receipts.len() as u64; + let reader = &mut &encoded_receipts[..]; + + let (ReceiptFileClient { receipts, first_block, total_receipts }, _remaining_bytes) = + ReceiptFileClient::from_reader(reader, encoded_byte_len).await.unwrap(); + + assert_eq!(5, total_receipts); + assert_eq!(0, first_block); + assert!(receipts[0].is_empty()); + assert_eq!(op_mainnet_receipt_block_1().receipt, receipts[1][0].clone().unwrap()); + assert_eq!(op_mainnet_receipt_block_2().receipt, 
receipts[2][0].clone().unwrap()); + assert_eq!(op_mainnet_receipt_block_2().receipt, receipts[2][1].clone().unwrap()); + assert_eq!(op_mainnet_receipt_block_3().receipt, receipts[3][0].clone().unwrap()); + } +} diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index f4be57f9c65aa..8d31358d9019e 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -66,8 +66,6 @@ revm-primitives = { workspace = true, features = ["arbitrary"] } nybbles = { workspace = true, features = ["arbitrary"] } alloy-trie = { workspace = true, features = ["arbitrary"] } alloy-eips = { workspace = true, features = ["arbitrary"] } - -arbitrary = { workspace = true, features = ["derive"] } assert_matches.workspace = true proptest.workspace = true proptest-derive.workspace = true @@ -109,7 +107,6 @@ zstd-codec = ["dep:zstd"] clap = ["dep:clap"] optimism = [ "reth-codecs/optimism", - "revm-primitives/optimism", "reth-ethereum-forks/optimism", "revm/optimism", ] diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 838edd620c783..2b146245efb82 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -21,6 +21,7 @@ pub mod providers; pub use providers::{ DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW, HistoricalStateProvider, HistoricalStateProviderRef, LatestStateProvider, LatestStateProviderRef, ProviderFactory, + StaticFileWriter, }; #[cfg(any(test, feature = "test-utils"))] From a7d8da5a27b8cfba128236afb3cf949012315cc1 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 8 May 2024 12:04:56 +0200 Subject: [PATCH 506/700] feat(grafana): State & History panel (#8144) --- etc/grafana/dashboards/reth-state-growth.json | 1735 +++++++++++++++++ 1 file changed, 1735 insertions(+) create mode 100644 etc/grafana/dashboards/reth-state-growth.json diff --git a/etc/grafana/dashboards/reth-state-growth.json b/etc/grafana/dashboards/reth-state-growth.json new file mode 100644 index 
0000000000000..f6a7bbf844b01 --- /dev/null +++ b/etc/grafana/dashboards/reth-state-growth.json @@ -0,0 +1,1735 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.1.0" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Ethereum state growth", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 0 + }, + "id": 22, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": 
false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{version}}", + "range": false, + "refId": "A" + } + ], + "title": "Version", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 3, + "y": 0 + }, + "id": 192, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{build_timestamp}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Timestamp", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 0 + }, + "id": 193, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": 
true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{git_sha}}", + "range": false, + "refId": "A" + } + ], + "title": "Git SHA", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 12, + "y": 0 + }, + "id": 195, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{build_profile}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Profile", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 14, + "y": 0 + }, + "id": 196, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + 
"calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{target_triple}}", + "range": false, + "refId": "A" + } + ], + "title": "Target Triple", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 19, + "y": 0 + }, + "id": 197, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{cargo_features}}", + "range": false, + "refId": "A" + } + ], + "title": "Cargo Features", + "transparent": true, + "type": "stat" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 7, + "panels": [], + "title": "State", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + 
"axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 4 + }, + "id": 6, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"PlainAccountState\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "Account", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"PlainStorageState\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Storage", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Bytecodes\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": 
"Bytecodes", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"PlainAccountState\"}[$interval])) + avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"PlainStorageState\"}[$interval])) + avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Bytecodes\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Total", + "range": true, + "refId": "D" + } + ], + "title": "State Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 4 + }, + "id": 13, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": 
"sum(reth_db_table_size{instance=~\"$instance\", table=\"PlainAccountState\"})", + "instant": false, + "interval": "$interval", + "legendFormat": "Account", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=\"PlainStorageState\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Storage", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=\"Bytecodes\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Bytecodes", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=~\"PlainAccountState|PlainStorageState|Bytecodes\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Total", + "range": true, + "refId": "D" + } + ], + "title": "State Size", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + 
"thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 14 + }, + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"PlainAccountState\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Account State Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 14 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + 
"showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"PlainStorageState\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Storage State Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 24 + }, + "id": 9, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Bytecodes\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": 
"__auto", + "range": true, + "refId": "A" + } + ], + "title": "Bytecodes Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 34 + }, + "id": 8, + "panels": [], + "title": "History", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 35 + }, + "id": 12, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Headers\"}[$interval])) + avg(delta(reth_static_files_segment_size{instance=~\"$instance\", segment=\"headers\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "Headers", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": 
"${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Receipts\"}[$interval])) + avg(delta(reth_static_files_segment_size{instance=~\"$instance\", segment=\"receipts\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Receipts", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Transactions\"}[$interval])) + avg(delta(reth_static_files_segment_size{instance=~\"$instance\", segment=\"transactions\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Transactions", + "range": true, + "refId": "C" + } + ], + "title": "History Growth (interval = ${interval})", + "transformations": [ + { + "id": "calculateField", + "options": { + "binary": { + "left": "Headers", + "reducer": "sum", + "right": "Receipts" + }, + "mode": "reduceRow", + "reduce": { + "include": [ + "Headers", + "Receipts", + "Transactions" + ], + "reducer": "sum" + } + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + 
"steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 35 + }, + "id": 14, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=\"Headers\"}) + sum(reth_static_files_segment_size{instance=~\"$instance\", segment=\"headers\"})", + "instant": false, + "interval": "$interval", + "legendFormat": "Headers", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=\"Receipts\"}) + sum(reth_static_files_segment_size{instance=~\"$instance\", segment=\"receipts\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Receipts", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=\"Transactions\"}) + sum(reth_static_files_segment_size{instance=~\"$instance\", segment=\"transactions\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Transactions", + "range": true, + "refId": "C" + } + ], + "title": "History Size", + "transformations": [ + { + "id": "calculateField", + "options": { + "mode": "reduceRow", + "reduce": { + "include": [ + "Headers", + "Receipts", + "Transactions" + ], + "reducer": "sum" + } + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + 
"defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 45 + }, + "id": 3, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Headers\"}[$interval])) + avg(delta(reth_static_files_segment_size{instance=~\"$instance\", segment=\"headers\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Headers Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + 
"legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 45 + }, + "id": 5, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Receipts\"}[$interval])) + avg(delta(reth_static_files_segment_size{instance=~\"$instance\", segment=\"receipts\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Receipts Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + 
"mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 55 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Transactions\"}[$interval])) + avg(delta(reth_static_files_segment_size{instance=~\"$instance\", segment=\"transactions\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Transactions Growth (interval = ${interval})", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 38, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": true, + "text": "10m", + "value": "10m" + }, + "hide": 0, + "includeAll": false, + "label": "Interval", + "multi": false, + "name": "interval", + "options": [ + { + "selected": false, + "text": "5m", + "value": "5m" + }, + { + "selected": true, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + }, + { + "selected": false, + "text": "7d", + "value": "7d" + }, + { + "selected": false, + "text": "14d", + "value": "14d" + }, + { + "selected": false, + "text": "30d", + "value": "30d" + } + ], + 
"query": "5m,10m,30m,1h,6h,12h,1d,7d,14d,30d", + "queryValue": "", + "skipUrlSync": false, + "type": "custom" + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Reth - State & History", + "uid": "cab0fcc6-1c33-478c-9675-38bc1af5de82", + "version": 1, + "weekStart": "" + } \ No newline at end of file From 8954ffb42333a0966d509ae470558a5d96396dbc Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 8 May 2024 12:37:17 +0200 Subject: [PATCH 507/700] chore(grafana): dashboard names like public (#8148) --- etc/grafana/dashboards/overview.json | 2 +- etc/grafana/dashboards/reth-discovery.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 40c120e0fbcbf..eacc3a25c8e54 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -8511,7 +8511,7 @@ }, "timepicker": {}, "timezone": "", - "title": "reth", + "title": "Reth", "uid": "2k8BXz24x", "version": 1, "weekStart": "" diff --git a/etc/grafana/dashboards/reth-discovery.json b/etc/grafana/dashboards/reth-discovery.json index 787913e651003..037d6b3bf4970 100644 --- a/etc/grafana/dashboards/reth-discovery.json +++ b/etc/grafana/dashboards/reth-discovery.json @@ -1124,7 +1124,7 @@ }, "timepicker": {}, "timezone": "", - "title": "reth - discovery", + "title": "Reth - Peer Discovery", "uid": "de6e87b2-7630-40b2-b2c4-a500476e799d", "version": 1, "weekStart": "" From 6a6fe4cec229f646ddbf2e1b1c0a46ec5aa02d84 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 8 May 2024 13:46:56 +0200 Subject: [PATCH 508/700] fix(discv5): update metrics wrt unverifiable enrs (#8149) Co-authored-by: Alexey Shekhirin --- crates/net/discv5/src/lib.rs | 2 + crates/net/discv5/src/metrics.rs | 38 +++++--- etc/grafana/dashboards/reth-discovery.json | 91 ++++++++++++++++--- etc/grafana/dashboards/reth-state-growth.json | 22 +++++ 4 files changed, 127 
insertions(+), 26 deletions(-) diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 826556fb07f40..ffa3c9caf4807 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -273,6 +273,8 @@ impl Discv5 { "discovered unverifiable enr, source socket doesn't match socket advertised in ENR" ); + self.metrics.discovered_peers.increment_unverifiable_enrs_raw_total(1); + self.on_discovered_peer(&enr, socket) } _ => None diff --git a/crates/net/discv5/src/metrics.rs b/crates/net/discv5/src/metrics.rs index 7bd3572f7aed5..d58ed66e08fc7 100644 --- a/crates/net/discv5/src/metrics.rs +++ b/crates/net/discv5/src/metrics.rs @@ -21,7 +21,7 @@ pub struct DiscoveredPeersMetrics { // Kbuckets //////////////////////////////////////////////////////////////////////////////////////////////// /// Total peers currently in [`discv5::Discv5`]'s kbuckets. - total_kbucket_peers_raw: Gauge, + kbucket_peers_raw_total: Gauge, /// Total discovered peers that are inserted into [`discv5::Discv5`]'s kbuckets. /// /// This is a subset of the total established sessions, in which all peers advertise a udp @@ -29,58 +29,72 @@ pub struct DiscoveredPeersMetrics { /// it into [`discv5::Discv5`]'s kbuckets and will hence be included in queries. /// /// Note: the definition of 'discovered' is not exactly synonymous in `reth_discv4::Discv4`. - total_inserted_kbucket_peers_raw: Counter, + inserted_kbucket_peers_raw_total: Counter, //////////////////////////////////////////////////////////////////////////////////////////////// // Sessions //////////////////////////////////////////////////////////////////////////////////////////////// /// Total peers currently connected to [`discv5::Discv5`]. - total_sessions_raw: Gauge, + sessions_raw_total: Gauge, /// Total number of sessions established by [`discv5::Discv5`]. 
- total_established_sessions_raw: Counter, + established_sessions_raw_total: Counter, /// Total number of sessions established by [`discv5::Discv5`], with peers that don't advertise /// a socket which is reachable from the local node in their node record. /// /// These peers can't make it into [`discv5::Discv5`]'s kbuckets, and hence won't be part of /// queries (neither shared with peers in NODES responses, nor queried for peers with FINDNODE /// requests). - total_established_sessions_unreachable_enr: Counter, + established_sessions_unreachable_enr_total: Counter, /// Total number of sessions established by [`discv5::Discv5`], that pass configured /// [`filter`](crate::filter) rules. - total_established_sessions_custom_filtered: Counter, + established_sessions_custom_filtered_total: Counter, + /// Total number of unverifiable ENRs discovered by [`discv5::Discv5`]. + /// + /// These are peers that fail [`discv5::Discv5`] session establishment, because the UDP socket + /// they're making a connection from doesn't match the UDP socket advertised in their ENR. + /// These peers will be denied a session (and hence can't make it into kbuckets) until they + /// have update their ENR, to reflect their actual UDP socket. + unverifiable_enrs_raw_total: Counter, } impl DiscoveredPeersMetrics { /// Sets current total number of peers in [`discv5::Discv5`]'s kbuckets. pub fn set_total_kbucket_peers(&self, num: usize) { - self.total_kbucket_peers_raw.set(num as f64) + self.kbucket_peers_raw_total.set(num as f64) } /// Increments the number of kbucket insertions in [`discv5::Discv5`]. pub fn increment_kbucket_insertions(&self, num: u64) { - self.total_inserted_kbucket_peers_raw.increment(num) + self.inserted_kbucket_peers_raw_total.increment(num) } /// Sets current total number of peers connected to [`discv5::Discv5`]. 
pub fn set_total_sessions(&self, num: usize) { - self.total_sessions_raw.set(num as f64) + self.sessions_raw_total.set(num as f64) } /// Increments number of sessions established by [`discv5::Discv5`]. pub fn increment_established_sessions_raw(&self, num: u64) { - self.total_established_sessions_raw.increment(num) + self.established_sessions_raw_total.increment(num) } /// Increments number of sessions established by [`discv5::Discv5`], with peers that don't have /// a reachable node record. pub fn increment_established_sessions_unreachable_enr(&self, num: u64) { - self.total_established_sessions_unreachable_enr.increment(num) + self.established_sessions_unreachable_enr_total.increment(num) } /// Increments number of sessions established by [`discv5::Discv5`], that pass configured /// [`filter`](crate::filter) rules. pub fn increment_established_sessions_filtered(&self, num: u64) { - self.total_established_sessions_custom_filtered.increment(num) + self.established_sessions_custom_filtered_total.increment(num) + } + + /// Increments number of unverifiable ENRs discovered by [`discv5::Discv5`]. These are peers + /// that fail session establishment because their advertised UDP socket doesn't match the + /// socket they are making the connection from. 
+ pub fn increment_unverifiable_enrs_raw_total(&self, num: u64) { + self.unverifiable_enrs_raw_total.increment(num) } } diff --git a/etc/grafana/dashboards/reth-discovery.json b/etc/grafana/dashboards/reth-discovery.json index 037d6b3bf4970..4a1ef344c18ef 100644 --- a/etc/grafana/dashboards/reth-discovery.json +++ b/etc/grafana/dashboards/reth-discovery.json @@ -52,6 +52,7 @@ } ] }, + "description": "Devp2p peer discovery protocols", "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, @@ -578,7 +579,7 @@ { "id": "color", "value": { - "fixedColor": "purple", + "fixedColor": "#9b73d6", "mode": "fixed" } } @@ -628,7 +629,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_discv5_total_kbucket_peers_raw{instance=\"$instance\"}", + "expr": "reth_discv5_kbucket_peers_raw_total{instance=\"$instance\"}", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -644,7 +645,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_discv5_total_sessions_raw{instance=\"$instance\"}", + "expr": "reth_discv5_sessions_raw_total{instance=\"$instance\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -663,7 +664,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "Frequency of session establishment and kbuckets insertions.\n\nSince discv5 favours long-lived connections, kbuckets insertions are expected to be less frequent the longer the node stays online.\n\nSome incoming connections may be from peers with unreachable ENRs, ENRs that don't advertise a UDP socket. These peers are not useful for the discv5 node, nor for RLPx.\n\nDiscovered peers are filtered w.r.t. what they advertise in their ENR. By default peers advertising 'eth2' are filtered out. Unreachable ENRs are also filtered out. Only peers that pass the filter are useful. 
These peers get passed up the node, to attempt an RLPx connection.\n\n", + "description": "Frequency of session establishment and kbuckets insertions.\n\nSince discv5 favours long-lived connections, kbuckets insertions are expected to be less frequent the longer the node stays online.\n\nSome incoming connections may be from peers with unreachable ENRs, ENRs that don't advertise a UDP socket. These peers are not useful for the discv5 node, nor for RLPx.\n\nDiscovered peers are filtered w.r.t. what they advertise in their ENR. By default peers advertising 'eth2' are filtered out. Unreachable ENRs are also filtered out. Only peers that pass the filter are useful. These peers get passed up the node, to attempt an RLPx connection.\n\nSessions will succeed to peers that advertise no UDP socket in their ENR. This allows peers to discover their reachable socket. On the other hand, for DoS protection, peers that advertise a different socket than the socket from which they make the connection, are denied a sigp/discv5 session. These peers have an unverifiable ENR. The peers are passed to RLPx nonetheless (some EL implementations of discv5 are more lax about ENR and source socket matching). 
", "fieldConfig": { "defaults": { "color": { @@ -728,7 +729,7 @@ { "id": "color", "value": { - "fixedColor": "purple", + "fixedColor": "light-green", "mode": "fixed" } } @@ -743,7 +744,7 @@ { "id": "color", "value": { - "fixedColor": "super-light-red", + "fixedColor": "#9958f4", "mode": "fixed" } } @@ -778,6 +779,36 @@ } } ] + }, + { + "matcher": { + "id": "byName", + "options": "Session Establishment Failed (unverifiable ENR)" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#8ab8ff", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Failed Session Establishments (unverifiable ENR)" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#8ab8ff", + "mode": "fixed" + } + } + ] } ] }, @@ -808,7 +839,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_discv5_total_inserted_kbucket_peers_raw{instance=\"$instance\"}[$__rate_interval])", + "expr": "rate(reth_discv5_inserted_kbucket_peers_raw_total{instance=\"$instance\"}[$__rate_interval])", "fullMetaSearch": false, "includeNullMetadata": false, "instant": false, @@ -824,7 +855,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval])", + "expr": "rate(reth_discv5_established_sessions_raw_total{instance=\"$instance\"}[$__rate_interval])", "fullMetaSearch": false, "hide": false, "includeNullMetadata": false, @@ -841,7 +872,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_discv5_total_established_sessions_unreachable_enr{instance=\"$instance\"}[$__rate_interval])", + "expr": "rate(reth_discv5_established_sessions_unreachable_enr_total{instance=\"$instance\"}[$__rate_interval])", "fullMetaSearch": false, "hide": false, "includeNullMetadata": false, @@ -858,7 +889,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": 
"rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval]) - rate(reth_discv5_total_established_sessions_custom_filtered{instance=\"$instance\"}[$__rate_interval])", + "expr": "rate(reth_discv5_established_sessions_raw_total{instance=\"$instance\"}[$__rate_interval]) - rate(reth_discv5_established_sessions_custom_filtered_total{instance=\"$instance\"}[$__rate_interval])", "fullMetaSearch": false, "hide": false, "includeNullMetadata": false, @@ -867,6 +898,23 @@ "range": true, "refId": "D", "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_unverifiable_enrs_raw_total{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Failed Session Establishments (unverifiable ENR)", + "range": true, + "refId": "E", + "useBackend": false } ], "title": "Peer Churn", @@ -942,7 +990,7 @@ { "id": "color", "value": { - "fixedColor": "purple", + "fixedColor": "#b677d9", "mode": "fixed" } } @@ -957,7 +1005,7 @@ { "id": "color", "value": { - "fixedColor": "super-light-red", + "fixedColor": "light-green", "mode": "fixed" } } @@ -977,6 +1025,21 @@ } } ] + }, + { + "matcher": { + "id": "byName", + "options": "OP EL" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#8AB8FF", + "mode": "fixed" + } + } + ] } ] }, @@ -1074,7 +1137,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval]) - (rate(reth_discv5_eth{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_eth2{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_opstack{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_opel{instance=\"$instance\"}[$__rate_interval]))", + "expr": 
"(rate(reth_discv5_established_sessions_raw_total{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_unverifiable_enrs_raw_total{instance=\"$instance\"}[$__rate_interval])) - (rate(reth_discv5_eth{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_eth2{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_opstack{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_opel{instance=\"$instance\"}[$__rate_interval]))", "fullMetaSearch": false, "hide": false, "includeNullMetadata": false, @@ -1125,7 +1188,7 @@ "timepicker": {}, "timezone": "", "title": "Reth - Peer Discovery", - "uid": "de6e87b2-7630-40b2-b2c4-a500476e799d", + "uid": "fd2d69b5-ca32-45d0-946e-c00ddcd7052c", "version": 1, "weekStart": "" } \ No newline at end of file diff --git a/etc/grafana/dashboards/reth-state-growth.json b/etc/grafana/dashboards/reth-state-growth.json index f6a7bbf844b01..35077706e69ca 100644 --- a/etc/grafana/dashboards/reth-state-growth.json +++ b/etc/grafana/dashboards/reth-state-growth.json @@ -1652,6 +1652,28 @@ "tags": [], "templating": { "list": [ + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "query_result(reth_info)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "instance", + "options": [], + "query": { + "query": "query_result(reth_info)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "/.*instance=\\\"([^\\\"]*).*/", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, { "current": { "selected": true, From ad00e83e6240334e90f9f0fb84a8dca0c6d5be09 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 May 2024 13:56:28 +0200 Subject: [PATCH 509/700] chore: move dashmap to workspace dep (#8153) --- Cargo.toml | 1 + crates/storage/libmdbx-rs/Cargo.toml | 2 +- crates/storage/provider/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 
dca4a1eb8530c..b4500979534bf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -311,6 +311,7 @@ aquamarine = "0.5" bytes = "1.5" bitflags = "2.4" clap = "4" +dashmap = "5.5" derive_more = "0.99.17" fdlimit = "0.3.0" eyre = "0.6" diff --git a/crates/storage/libmdbx-rs/Cargo.toml b/crates/storage/libmdbx-rs/Cargo.toml index 2330b6f79e474..2042cd896cc51 100644 --- a/crates/storage/libmdbx-rs/Cargo.toml +++ b/crates/storage/libmdbx-rs/Cargo.toml @@ -22,7 +22,7 @@ indexmap = "2" libc = "0.2" parking_lot.workspace = true thiserror.workspace = true -dashmap = { version = "5.5.3", features = ["inline"], optional = true } +dashmap = { workspace = true, features = ["inline"], optional = true } tracing.workspace = true ffi = { package = "reth-mdbx-sys", path = "./mdbx-sys" } diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 526cbdaa5a93f..672f6a7fc4894 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -40,7 +40,7 @@ auto_impl.workspace = true itertools.workspace = true pin-project.workspace = true parking_lot.workspace = true -dashmap = { version = "5.5", features = ["inline"] } +dashmap = { workspace = true, features = ["inline"] } strum.workspace = true # test-utils From 04d2c10c46ecccb870752b56238dfe7cf310f0fd Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 May 2024 15:18:40 +0200 Subject: [PATCH 510/700] chore: rm outdated executor types (#8157) --- crates/consensus/beacon/src/engine/sync.rs | 7 +- .../provider/src/test_utils/executor.rs | 71 ------------------- crates/storage/provider/src/test_utils/mod.rs | 2 - .../storage/provider/src/traits/executor.rs | 68 ------------------ crates/storage/provider/src/traits/mod.rs | 3 - 5 files changed, 2 insertions(+), 149 deletions(-) delete mode 100644 crates/storage/provider/src/test_utils/executor.rs delete mode 100644 crates/storage/provider/src/traits/executor.rs diff --git a/crates/consensus/beacon/src/engine/sync.rs 
b/crates/consensus/beacon/src/engine/sync.rs index 261b6874fd084..f73c4b54edb04 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -440,8 +440,8 @@ mod tests { Header, PruneModes, SealedHeader, MAINNET, }; use reth_provider::{ - test_utils::{create_test_provider_factory_with_chain_spec, TestExecutorFactory}, - BundleStateWithReceipts, StaticFileProviderFactory, + test_utils::create_test_provider_factory_with_chain_spec, BundleStateWithReceipts, + StaticFileProviderFactory, }; use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; use reth_static_file::StaticFileProducer; @@ -492,9 +492,6 @@ mod tests { fn build(self, chain_spec: Arc) -> Pipeline>> { reth_tracing::init_test_tracing(); - let executor_factory = TestExecutorFactory::default(); - executor_factory.extend(self.executor_results); - // Setup pipeline let (tip_tx, _tip_rx) = watch::channel(B256::default()); let mut pipeline = Pipeline::builder() diff --git a/crates/storage/provider/src/test_utils/executor.rs b/crates/storage/provider/src/test_utils/executor.rs deleted file mode 100644 index 8ac963e937f0b..0000000000000 --- a/crates/storage/provider/src/test_utils/executor.rs +++ /dev/null @@ -1,71 +0,0 @@ -use crate::{ - bundle_state::BundleStateWithReceipts, BlockExecutor, ExecutorFactory, PrunableBlockExecutor, - StateProvider, -}; -use parking_lot::Mutex; -use reth_interfaces::executor::BlockExecutionError; -use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt, U256}; -use std::sync::Arc; -/// Test executor with mocked result. 
-#[derive(Debug)] -pub struct TestExecutor(pub Option); - -impl BlockExecutor for TestExecutor { - type Error = BlockExecutionError; - - fn execute_and_verify_receipt( - &mut self, - _block: &BlockWithSenders, - _total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { - if self.0.is_none() { - return Err(BlockExecutionError::UnavailableForTest) - } - Ok(()) - } - - fn execute_transactions( - &mut self, - _block: &BlockWithSenders, - _total_difficulty: U256, - ) -> Result<(Vec, u64), BlockExecutionError> { - Err(BlockExecutionError::UnavailableForTest) - } - - fn take_output_state(&mut self) -> BundleStateWithReceipts { - self.0.clone().unwrap_or_default() - } - - fn size_hint(&self) -> Option { - None - } -} - -impl PrunableBlockExecutor for TestExecutor { - fn set_tip(&mut self, _tip: BlockNumber) {} - - fn set_prune_modes(&mut self, _prune_modes: PruneModes) {} -} - -/// Executor factory with pre-set execution results. -#[derive(Clone, Debug, Default)] -pub struct TestExecutorFactory { - exec_results: Arc>>, -} - -impl TestExecutorFactory { - /// Extend the mocked execution results - pub fn extend(&self, results: Vec) { - self.exec_results.lock().extend(results); - } -} - -impl ExecutorFactory for TestExecutorFactory { - fn with_state<'a, SP: StateProvider + 'a>( - &'a self, - _sp: SP, - ) -> Box::Error> + 'a> { - let exec_res = self.exec_results.lock().pop(); - Box::new(TestExecutor(exec_res)) - } -} diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index f4a5626f6e9bc..2f5462309442f 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -8,12 +8,10 @@ use std::sync::Arc; pub mod blocks; mod events; -mod executor; mod mock; mod noop; pub use events::TestCanonStateSubscriptions; -pub use executor::{TestExecutor, TestExecutorFactory}; pub use mock::{ExtendedAccount, MockEthProvider}; pub use noop::NoopProvider; diff --git 
a/crates/storage/provider/src/traits/executor.rs b/crates/storage/provider/src/traits/executor.rs deleted file mode 100644 index f12d6416949f9..0000000000000 --- a/crates/storage/provider/src/traits/executor.rs +++ /dev/null @@ -1,68 +0,0 @@ -//! Executor Factory - -use crate::{bundle_state::BundleStateWithReceipts, StateProvider}; -use reth_interfaces::executor::BlockExecutionError; -use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt, U256}; - -/// A factory capable of creating an executor with the given state provider. -pub trait ExecutorFactory: Send + Sync + 'static { - /// Executor with [`StateProvider`] - fn with_state<'a, SP: StateProvider + 'a>( - &'a self, - sp: SP, - ) -> Box + 'a>; -} - -/// An executor capable of executing a block. -/// -/// This type is capable of executing (multiple) blocks by applying the state changes made by each -/// block. The final state of the executor can extracted using -/// [`Self::take_output_state`]. -pub trait BlockExecutor { - /// The error type returned by the executor. - type Error; - - /// Executes the entire block and verifies: - /// - receipts (receipts root) - /// - /// This will update the state of the executor with the changes made by the block. - fn execute_and_verify_receipt( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), Self::Error>; - - /// Runs the provided transactions and commits their state to the run-time database. - /// - /// The returned [BundleStateWithReceipts] can be used to persist the changes to disk, and - /// contains the changes made by each transaction. - /// - /// The changes in [BundleStateWithReceipts] have a transition ID associated with them: there is - /// one transition ID for each transaction (with the first executed tx having transition ID - /// 0, and so on). - /// - /// The second returned value represents the total gas used by this block of transactions. 
- /// - /// See [execute_and_verify_receipt](BlockExecutor::execute_and_verify_receipt) for more - /// details. - fn execute_transactions( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec, u64), Self::Error>; - - /// Return bundle state. This is output of executed blocks. - fn take_output_state(&mut self) -> BundleStateWithReceipts; - - /// Returns the size hint of current in-memory changes. - fn size_hint(&self) -> Option; -} - -/// A [BlockExecutor] capable of in-memory pruning of the data that will be written to the database. -pub trait PrunableBlockExecutor: BlockExecutor { - /// Set tip - highest known block number. - fn set_tip(&mut self, tip: BlockNumber); - - /// Set prune modes. - fn set_prune_modes(&mut self, prune_modes: PruneModes); -} diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 6d78cf5834855..9aa96bccf0a25 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -48,9 +48,6 @@ pub use transactions::{TransactionsProvider, TransactionsProviderExt}; mod withdrawals; pub use withdrawals::WithdrawalsProvider; -mod executor; -pub use executor::{BlockExecutor, ExecutorFactory, PrunableBlockExecutor}; - mod chain; pub use chain::{ CanonStateNotification, CanonStateNotificationSender, CanonStateNotificationStream, From 72e5122e73ef981a1cd95d97aac1f1cf7de1f691 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 8 May 2024 15:21:16 +0200 Subject: [PATCH 511/700] fix: prevents potential arithmetic underflow (#8156) --- crates/blockchain-tree/src/chain.rs | 3 ++- crates/interfaces/src/blockchain_tree/error.rs | 6 +++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 637ea52e7e890..db4b4627abe7e 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -119,7 +119,8 @@ impl 
AppendableChain { DB: Database + Clone, E: BlockExecutorProvider, { - let parent_number = block.number - 1; + let parent_number = + block.number.checked_sub(1).ok_or(BlockchainTreeError::GenesisBlockHasNoParent)?; let parent = self.blocks().get(&parent_number).ok_or( BlockchainTreeError::BlockNumberNotFoundInChain { block_number: parent_number }, )?; diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index e9cdb8714d89d..a98d765014bae 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -47,6 +47,9 @@ pub enum BlockchainTreeError { /// The block hash of the block that failed to buffer. block_hash: BlockHash, }, + /// Thrown when trying to access genesis parent. + #[error("genesis block has no parent")] + GenesisBlockHasNoParent, } /// Canonical Errors @@ -318,7 +321,8 @@ impl InsertBlockErrorKind { BlockchainTreeError::CanonicalChain { .. } | BlockchainTreeError::BlockNumberNotFoundInChain { .. } | BlockchainTreeError::BlockHashNotFoundInChain { .. } | - BlockchainTreeError::BlockBufferingFailed { .. } => false, + BlockchainTreeError::BlockBufferingFailed { .. 
} | + BlockchainTreeError::GenesisBlockHasNoParent => false, } } InsertBlockErrorKind::Provider(_) | InsertBlockErrorKind::Internal(_) => { From 18b7edb1910f211cf09dcbe9ad8a687ba69b3da5 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 8 May 2024 14:42:14 +0100 Subject: [PATCH 512/700] fix(examples): ExEx rollup reverts (#8151) --- examples/exex/rollup/src/db.rs | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/examples/exex/rollup/src/db.rs b/examples/exex/rollup/src/db.rs index 39c2b418b81e9..201272a0000b6 100644 --- a/examples/exex/rollup/src/db.rs +++ b/examples/exex/rollup/src/db.rs @@ -114,13 +114,13 @@ impl Database { if reverts.accounts.len() > 1 { eyre::bail!("too many blocks in account reverts"); } - for (address, account) in - reverts.accounts.first().ok_or(eyre::eyre!("no account reverts"))? - { - tx.execute( - "INSERT INTO account_revert (block_number, address, data) VALUES (?, ?, ?) ON CONFLICT(block_number, address) DO UPDATE SET data = excluded.data", - (block.header.number.to_string(), address.to_string(), serde_json::to_string(account)?), - )?; + if let Some(account_reverts) = reverts.accounts.into_iter().next() { + for (address, account) in account_reverts { + tx.execute( + "INSERT INTO account_revert (block_number, address, data) VALUES (?, ?, ?) ON CONFLICT(block_number, address) DO UPDATE SET data = excluded.data", + (block.header.number.to_string(), address.to_string(), serde_json::to_string(&account)?), + )?; + } } for PlainStorageChangeset { address, wipe_storage, storage } in changeset.storage { @@ -139,19 +139,19 @@ impl Database { if reverts.storage.len() > 1 { eyre::bail!("too many blocks in storage reverts"); } - for PlainStorageRevert { address, wiped, storage_revert } in - reverts.storage.into_iter().next().ok_or(eyre::eyre!("no storage reverts"))? 
- { - let storage = storage_revert - .into_iter() - .map(|(k, v)| (B256::new(k.to_be_bytes()), v)) - .collect::>(); - let wiped_storage = if wiped { get_storages(&tx, address)? } else { Vec::new() }; - for (key, data) in StorageRevertsIter::new(storage, wiped_storage) { - tx.execute( + if let Some(storage_reverts) = reverts.storage.into_iter().next() { + for PlainStorageRevert { address, wiped, storage_revert } in storage_reverts { + let storage = storage_revert + .into_iter() + .map(|(k, v)| (B256::new(k.to_be_bytes()), v)) + .collect::>(); + let wiped_storage = if wiped { get_storages(&tx, address)? } else { Vec::new() }; + for (key, data) in StorageRevertsIter::new(storage, wiped_storage) { + tx.execute( "INSERT INTO storage_revert (block_number, address, key, data) VALUES (?, ?, ?, ?) ON CONFLICT(block_number, address, key) DO UPDATE SET data = excluded.data", (block.header.number.to_string(), address.to_string(), key.to_string(), data.to_string()), )?; + } } } From bdb8238d79c383033a68cbeef8809c37a481e303 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 May 2024 15:45:47 +0200 Subject: [PATCH 513/700] chore: remove type aliases (#8155) --- crates/revm/src/database.rs | 11 ++--------- crates/rpc/rpc/src/debug.rs | 4 ++-- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index 36a7ec96f27ef..93a22a06834d5 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -1,19 +1,12 @@ -use reth_interfaces::RethError; use reth_primitives::{Address, B256, KECCAK_EMPTY, U256}; use reth_provider::{ProviderError, StateProvider}; use revm::{ - db::{CacheDB, DatabaseRef}, + db::DatabaseRef, primitives::{AccountInfo, Bytecode}, - Database, StateDBBox, + Database, }; use std::ops::{Deref, DerefMut}; -/// SubState of database. Uses revm internal cache with binding to reth StateProvider trait. -pub type SubState = CacheDB>; - -/// State boxed database with reth Error. 
-pub type RethStateDBBox<'a> = StateDBBox<'a, RethError>; - /// Wrapper around StateProvider that implements revm database trait #[derive(Debug, Clone)] pub struct StateProviderDatabase(pub DB); diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index b21adf5205c98..ebc52877d848f 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -17,7 +17,7 @@ use reth_primitives::{ use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProviderBox, TransactionVariant, }; -use reth_revm::database::{StateProviderDatabase, SubState}; +use reth_revm::database::StateProviderDatabase; use reth_rpc_api::DebugApiServer; use reth_rpc_types::{ trace::geth::{ @@ -517,7 +517,7 @@ where &self, opts: GethDebugTracingOptions, env: EnvWithHandlerCfg, - db: &mut SubState, + db: &mut CacheDB>, transaction_context: Option, ) -> EthResult<(GethTrace, revm_primitives::State)> { let GethDebugTracingOptions { config, tracer, tracer_config, .. } = opts; From 331d1a0c6a7c9e768649ee051ae56963b946fa09 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 8 May 2024 10:26:48 -0400 Subject: [PATCH 514/700] feat(ci): add op-reth to release workflow (#8132) --- .github/workflows/docker.yml | 12 ++++++++-- .github/workflows/release.yml | 20 +++++++++------- DockerfileOp.cross | 15 ++++++++++++ Makefile | 44 +++++++++++++++++++++++++++++++++++ 4 files changed, 80 insertions(+), 11 deletions(-) create mode 100644 DockerfileOp.cross diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 1fb89f7c1698b..20ae6644b909c 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -11,8 +11,10 @@ on: env: REPO_NAME: ${{ github.repository_owner }}/reth IMAGE_NAME: ${{ github.repository_owner }}/reth + OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth CARGO_TERM_COLOR: always DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/reth + OP_DOCKER_IMAGE_NAME: 
ghcr.io/${{ github.repository_owner }}/op-reth DOCKER_USERNAME: ${{ github.actor }} jobs: @@ -36,9 +38,15 @@ jobs: run: | docker run --privileged --rm tonistiigi/binfmt --install arm64,amd64 docker buildx create --use --name cross-builder - - name: Build and push image, tag as "latest" + - name: Build and push reth image, tag as "latest" if: ${{ contains(github.event.ref, 'beta') }} run: make PROFILE=maxperf docker-build-push-latest - - name: Build and push image + - name: Build and push reth image if: ${{ ! contains(github.event.ref, 'beta') }} run: make PROFILE=maxperf docker-build-push + - name: Build and push op-reth image, tag as "latest" + if: ${{ contains(github.event.ref, 'beta') }} + run: make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push-latest + - name: Build and push op-reth image + if: ${{ ! contains(github.event.ref, 'beta') }} + run: make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 55ce0843fb828..8562da807a715 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,6 +10,7 @@ on: env: REPO_NAME: ${{ github.repository_owner }}/reth + OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth IMAGE_NAME: ${{ github.repository_owner }}/reth CARGO_TERM_COLOR: always @@ -30,6 +31,7 @@ jobs: needs: extract-version strategy: matrix: + build: [{command: build, binary: reth}, {command: build-op, binary: op-reth}] include: - target: x86_64-unknown-linux-gnu os: ubuntu-20.04 @@ -63,12 +65,12 @@ jobs: echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-os-version)" >> $GITHUB_ENV - name: Build Reth - run: make PROFILE=${{ matrix.profile }} build-${{ matrix.target }} + run: make PROFILE=${{ matrix.profile }} ${{ matrix.build.command }}-${{ matrix.target }} - name: Move binary run: | mkdir artifacts [[ "${{ matrix.target }}" == 
*windows* ]] && ext=".exe" - mv "target/${{ matrix.target }}/${{ matrix.profile }}/reth${ext}" ./artifacts + mv "target/${{ matrix.target }}/${{ matrix.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts - name: Configure GPG and create artifacts env: @@ -78,22 +80,22 @@ jobs: export GPG_TTY=$(tty) echo -n "$GPG_SIGNING_KEY" | base64 --decode | gpg --batch --import cd artifacts - tar -czf reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz reth* - echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz + tar -czf ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz ${{ matrix.build.binary }}* + echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz mv *tar.gz* .. shell: bash - name: Upload artifact uses: actions/upload-artifact@v4 with: - name: reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz - path: reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz + name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz + path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz - name: Upload signature uses: actions/upload-artifact@v4 with: - name: reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz.asc - path: reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz.asc + name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz.asc + path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz.asc draft-release: name: draft release @@ -184,7 +186,7 @@ jobs: ENDBODY ) assets=() - 
for asset in ./reth-*.tar.gz*; do + for asset in ./*reth-*.tar.gz*; do assets+=("$asset/$asset") done tag_name="${{ env.VERSION }}" diff --git a/DockerfileOp.cross b/DockerfileOp.cross new file mode 100644 index 0000000000000..47606a828305d --- /dev/null +++ b/DockerfileOp.cross @@ -0,0 +1,15 @@ +# This image is meant to enable cross-architecture builds. +# It assumes the reth binary has already been compiled for `$TARGETPLATFORM` and is +# locatable in `./dist/bin/$TARGETARCH` +FROM --platform=$TARGETPLATFORM ubuntu:22.04 + +LABEL org.opencontainers.image.source=https://github.com/paradigmxyz/reth +LABEL org.opencontainers.image.licenses="MIT OR Apache-2.0" + +# Filled by docker buildx +ARG TARGETARCH + +COPY ./dist/bin/$TARGETARCH/op-reth /usr/local/bin/op-reth + +EXPOSE 30303 30303/udp 9001 8545 8546 +ENTRYPOINT ["/usr/local/bin/op-reth"] diff --git a/Makefile b/Makefile index 5ac3bb4682759..f19a3cd8cd58d 100644 --- a/Makefile +++ b/Makefile @@ -227,6 +227,50 @@ define docker_build_push --push endef +##@ Optimism docker + +# Note: This requires a buildx builder with emulation support. For example: +# +# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` +# `docker buildx create --use --driver docker-container --name cross-builder` +.PHONY: op-docker-build-push +op-docker-build-push: ## Build and push a cross-arch Docker image tagged with the latest git tag. + $(call op_docker_build_push,$(GIT_TAG),$(GIT_TAG)) + +# Note: This requires a buildx builder with emulation support. For example: +# +# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` +# `docker buildx create --use --driver docker-container --name cross-builder` +.PHONY: op-docker-build-push-latest +op-docker-build-push-latest: ## Build and push a cross-arch Docker image tagged with the latest git tag and `latest`. + $(call op_docker_build_push,$(GIT_TAG),latest) + +# Note: This requires a buildx builder with emulation support. 
For example: +# +# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` +# `docker buildx create --use --name cross-builder` +.PHONY: op-docker-build-push-nightly +op-docker-build-push-nightly: ## Build and push cross-arch Docker image tagged with the latest git tag with a `-nightly` suffix, and `latest-nightly`. + $(call op_docker_build_push,$(GIT_TAG)-nightly,latest-nightly) + +# Create a cross-arch Docker image with the given tags and push it +define op_docker_build_push + $(MAKE) op-build-x86_64-unknown-linux-gnu + mkdir -p $(BIN_DIR)/amd64 + cp $(BUILD_PATH)/x86_64-unknown-linux-gnu/$(PROFILE)/op-reth $(BIN_DIR)/amd64/op-reth + + $(MAKE) op-build-aarch64-unknown-linux-gnu + mkdir -p $(BIN_DIR)/arm64 + cp $(BUILD_PATH)/aarch64-unknown-linux-gnu/$(PROFILE)/op-reth $(BIN_DIR)/arm64/op-reth + + docker buildx build --file ./DockerfileOp.cross . \ + --platform linux/amd64,linux/arm64 \ + --tag $(DOCKER_IMAGE_NAME):$(1) \ + --tag $(DOCKER_IMAGE_NAME):$(2) \ + --provenance=false \ + --push +endef + ##@ Other .PHONY: clean From d852f7f012c1811a3d1197fd9c5fc63be5ec8233 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 8 May 2024 16:37:45 +0200 Subject: [PATCH 515/700] chore(op): simplify op cli args (#8146) --- bin/reth/src/commands/import_op.rs | 48 ++++++------------------ bin/reth/src/commands/import_receipts.rs | 5 ++- 2 files changed, 15 insertions(+), 38 deletions(-) diff --git a/bin/reth/src/commands/import_op.rs b/bin/reth/src/commands/import_op.rs index 8ca1baf5b91df..5362b45b08b9d 100644 --- a/bin/reth/src/commands/import_op.rs +++ b/bin/reth/src/commands/import_op.rs @@ -1,8 +1,9 @@ -//! Command that initializes the node by importing a chain from a file. +//! Command that initializes the node by importing OP Mainnet chain segment below Bedrock, from a +//! file. 
use crate::{ args::{ - utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, + utils::{genesis_value_parser, SUPPORTED_CHAINS}, DatabaseArgs, }, commands::import::{build_import_pipeline, load_config}, @@ -20,7 +21,7 @@ use reth_downloaders::file_client::{ use reth_node_core::init::init_genesis; -use reth_primitives::{hex, stage::StageId, ChainSpec, PruneModes, TxHash}; +use reth_primitives::{hex, stage::StageId, PruneModes, TxHash}; use reth_provider::{ProviderFactory, StageCheckpointReader, StaticFileProviderFactory}; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; @@ -44,18 +45,6 @@ pub struct ImportOpCommand { #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)] datadir: MaybePlatformPath, - /// The chain this node is running. - /// - /// Possible values are either a built-in chain or the path to a chain specification file. - #[arg( - long, - value_name = "CHAIN_OR_PATH", - long_help = chain_help(), - default_value = SUPPORTED_CHAINS[0], - value_parser = genesis_value_parser - )] - chain: Arc, - /// Chunk byte length. 
#[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] chunk_len: Option, @@ -85,8 +74,10 @@ impl ImportOpCommand { "Chunking chain import" ); + let chain_spec = genesis_value_parser(SUPPORTED_CHAINS[0])?; + // add network name to data dir - let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); + let data_dir = self.datadir.unwrap_or_chain_default(chain_spec.chain); let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); let mut config: Config = load_config(config_path.clone())?; @@ -101,15 +92,16 @@ impl ImportOpCommand { info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(db_path, self.db.database_args())?); + info!(target: "reth::cli", "Database opened"); let provider_factory = - ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; + ProviderFactory::new(db.clone(), chain_spec.clone(), data_dir.static_files())?; - debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); + debug!(target: "reth::cli", chain=%chain_spec.chain, genesis=?chain_spec.genesis_hash(), "Initializing genesis"); init_genesis(provider_factory.clone())?; - let consensus = Arc::new(EthBeaconConsensus::new(self.chain.clone())); + let consensus = Arc::new(EthBeaconConsensus::new(chain_spec.clone())); info!(target: "reth::cli", "Consensus engine initialized"); // open file @@ -256,21 +248,3 @@ pub fn is_duplicate(tx_hash: TxHash, block_number: u64) -> bool { } false } - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn parse_common_import_command_chain_args() { - for chain in SUPPORTED_CHAINS { - let args: ImportOpCommand = - ImportOpCommand::parse_from(["reth", "--chain", chain, "."]); - assert_eq!( - Ok(args.chain.chain), - chain.parse::(), - "failed to parse chain {chain}" - ); - } - } -} diff --git a/bin/reth/src/commands/import_receipts.rs b/bin/reth/src/commands/import_receipts.rs index 8e06c3c03cb61..2686bcf4e53c7 
100644 --- a/bin/reth/src/commands/import_receipts.rs +++ b/bin/reth/src/commands/import_receipts.rs @@ -55,7 +55,10 @@ pub struct ImportReceiptsCommand { #[command(flatten)] db: DatabaseArgs, - /// The path to a receipts file for import. + /// The path to a receipts file for import. File must use `HackReceiptCodec` (used for + /// exporting OP chain segment below Bedrock block via testinprod/op-geth). + /// + /// #[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)] path: PathBuf, } From db868208f300d1f9d794950d69965bcb0621117e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 May 2024 21:34:59 +0200 Subject: [PATCH 516/700] feat: introduce statewriter trait (#8160) --- .../commands/debug_cmd/in_memory_merkle.rs | 2 +- bin/reth/src/commands/debug_cmd/merkle.rs | 2 +- bin/reth/src/commands/import_receipts.rs | 2 +- crates/node-core/src/init.rs | 2 +- crates/stages/src/stages/execution.rs | 3 ++- .../bundle_state_with_receipts.rs | 12 ++++------- .../src/providers/database/provider.rs | 4 ++-- crates/storage/provider/src/traits/mod.rs | 2 +- crates/storage/provider/src/traits/state.rs | 21 ++++++++++++++++++- 9 files changed, 33 insertions(+), 17 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 008530c53100b..f51426015f0ea 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -23,7 +23,7 @@ use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, ChainSpec, Receipts use reth_provider::{ AccountExtReader, BundleStateWithReceipts, HashingWriter, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, - StaticFileProviderFactory, StorageReader, + StateWriter, StaticFileProviderFactory, StorageReader, }; use reth_revm::database::StateProviderDatabase; use reth_tasks::TaskExecutor; diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs 
b/bin/reth/src/commands/debug_cmd/merkle.rs index 3d94a3a43a512..f452e2e52200e 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -24,7 +24,7 @@ use reth_network_api::NetworkInfo; use reth_primitives::{fs, stage::StageCheckpoint, BlockHashOrNumber, ChainSpec, PruneModes}; use reth_provider::{ BlockNumReader, BlockWriter, BundleStateWithReceipts, HeaderProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderError, ProviderFactory, + OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter, }; use reth_revm::database::StateProviderDatabase; use reth_stages::{ diff --git a/bin/reth/src/commands/import_receipts.rs b/bin/reth/src/commands/import_receipts.rs index 2686bcf4e53c7..e6aae327af58b 100644 --- a/bin/reth/src/commands/import_receipts.rs +++ b/bin/reth/src/commands/import_receipts.rs @@ -17,7 +17,7 @@ use reth_node_core::version::SHORT_VERSION; use reth_primitives::{stage::StageId, ChainSpec, StaticFileSegment}; use reth_provider::{ BundleStateWithReceipts, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, - StaticFileProviderFactory, StaticFileWriter, + StateWriter, StaticFileProviderFactory, StaticFileWriter, }; use tracing::{debug, error, info}; diff --git a/crates/node-core/src/init.rs b/crates/node-core/src/init.rs index b09e29e53b938..6d924b6b1a47e 100644 --- a/crates/node-core/src/init.rs +++ b/crates/node-core/src/init.rs @@ -15,7 +15,7 @@ use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, BlockHashReader, BlockNumReader, BundleStateWithReceipts, ChainSpecProvider, DatabaseProviderRW, HashingWriter, HistoryWriter, OriginalValuesKnown, ProviderError, - ProviderFactory, StageCheckpointWriter, StaticFileProviderFactory, + ProviderFactory, StageCheckpointWriter, StateWriter, StaticFileProviderFactory, }; use reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress}; use serde::{Deserialize, Serialize}; diff --git 
a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 6d2eb2a5d210a..9d8cf6ac66387 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -14,7 +14,8 @@ use reth_primitives::{ use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, BlockReader, BundleStateWithReceipts, Chain, DatabaseProviderRW, HeaderProvider, - LatestStateProviderRef, OriginalValuesKnown, ProviderError, StatsReader, TransactionVariant, + LatestStateProviderRef, OriginalValuesKnown, ProviderError, StateWriter, StatsReader, + TransactionVariant, }; use reth_revm::database::StateProviderDatabase; use reth_stages_api::{ diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index a57f18f114ed7..5f6d4af3f843b 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -1,4 +1,4 @@ -use crate::{providers::StaticFileProviderRWRefMut, StateChanges, StateReverts}; +use crate::{providers::StaticFileProviderRWRefMut, StateChanges, StateReverts, StateWriter}; use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, tables, @@ -309,14 +309,10 @@ impl BundleStateWithReceipts { // swap bundles std::mem::swap(&mut self.bundle, &mut other) } +} - /// Write the [BundleStateWithReceipts] to database and receipts to either database or static - /// files if `static_file_producer` is `Some`. It should be none if there is any kind of - /// pruning/filtering over the receipts. - /// - /// `omit_changed_check` should be set to true if bundle has some of its data detached. This - /// would make some original values not known. 
- pub fn write_to_storage( +impl StateWriter for BundleStateWithReceipts { + fn write_to_storage( self, tx: &TX, mut static_file_producer: Option>, diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 428645f1ae8c6..6e07b7c46a1b7 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -9,8 +9,8 @@ use crate::{ Chain, EvmEnvProvider, HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HeaderSyncMode, HistoricalStateProvider, HistoryWriter, LatestStateProvider, OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, - StageCheckpointReader, StateProviderBox, StatsReader, StorageReader, TransactionVariant, - TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, + StageCheckpointReader, StateProviderBox, StateWriter, StatsReader, StorageReader, + TransactionVariant, TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, }; use itertools::{izip, Itertools}; use reth_db::{ diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 9aa96bccf0a25..c966cd9efa09d 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -36,7 +36,7 @@ pub use receipts::{ReceiptProvider, ReceiptProviderIdExt}; mod state; pub use state::{ BlockchainTreePendingStateProvider, BundleStateDataProvider, StateProvider, StateProviderBox, - StateProviderFactory, + StateProviderFactory, StateWriter, }; mod trie; diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index b5251ca75cc63..4cb74dec61cbf 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -1,11 +1,16 @@ use super::AccountReader; -use crate::{BlockHashReader, BlockIdReader, BundleStateWithReceipts, 
StateRootProvider}; +use crate::{ + providers::StaticFileProviderRWRefMut, BlockHashReader, BlockIdReader, BundleStateWithReceipts, + StateRootProvider, +}; use auto_impl::auto_impl; +use reth_db::transaction::{DbTx, DbTxMut}; use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ trie::AccountProof, Address, BlockHash, BlockId, BlockNumHash, BlockNumber, BlockNumberOrTag, Bytecode, StorageKey, StorageValue, B256, KECCAK_EMPTY, U256, }; +use revm::db::OriginalValuesKnown; /// Type alias of boxed [StateProvider]. pub type StateProviderBox = Box; @@ -226,3 +231,17 @@ pub trait BundleStateDataProvider: Send + Sync { /// Needed to create state provider. fn canonical_fork(&self) -> BlockNumHash; } + +/// A helper trait for [BundleStateWithReceipts] to write state and receipts to storage. +pub trait StateWriter { + /// Write the data and receipts to the database or static files if `static_file_producer` is + /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. 
+ fn write_to_storage( + self, + tx: &TX, + static_file_producer: Option>, + is_value_known: OriginalValuesKnown, + ) -> ProviderResult<()> + where + TX: DbTxMut + DbTx; +} From dd7c0214702829c2b0e243286f5d0c2ca05d0874 Mon Sep 17 00:00:00 2001 From: Andrzej Sulkowski <111314156+andrzejSulkowski@users.noreply.github.com> Date: Wed, 8 May 2024 22:45:12 +0200 Subject: [PATCH 517/700] feat: feature gate rpc-types import for alloy conversions (#7963) Co-authored-by: Matthias Seitz --- Cargo.lock | 236 +++++++++++------------ crates/primitives/Cargo.toml | 10 +- crates/primitives/src/block.rs | 24 +-- crates/primitives/src/header.rs | 10 +- crates/primitives/src/lib.rs | 2 +- crates/primitives/src/net.rs | 11 +- crates/primitives/src/serde_helper.rs | 3 - crates/primitives/src/transaction/mod.rs | 20 +- crates/primitives/src/withdrawal.rs | 5 +- crates/rpc/rpc-api/src/eth.rs | 11 +- crates/rpc/rpc/src/eth/api/server.rs | 15 +- crates/rpc/rpc/src/eth/api/state.rs | 6 +- 12 files changed, 170 insertions(+), 183 deletions(-) delete mode 100644 crates/primitives/src/serde_helper.rs diff --git a/Cargo.lock b/Cargo.lock index 164865acbceb6..3e99592edff1e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,7 +146,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-primitives", @@ -197,7 +197,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -222,7 +222,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" dependencies = [ "alloy-primitives", "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -408,7 +408,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" dependencies = [ "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -490,7 +490,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" dependencies = [ "alloy-primitives", "serde", @@ -591,7 +591,7 @@ version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-json-rpc", - "base64 0.22.1", + "base64 0.22.0", "futures-util", "futures-utils-wasm", "serde", @@ -628,7 +628,7 @@ dependencies = [ "arbitrary", "derive_arbitrary", "derive_more", - "hashbrown 0.14.5", + "hashbrown 0.14.3", "nybbles", "proptest", "proptest-derive", @@ -660,48 +660,47 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.14" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" +checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", - "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version 
= "1.0.7" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" [[package]] name = "anstyle-parse" -version = "0.2.4" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" +checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.3" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" +checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.3" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" +checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -1028,9 +1027,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.3.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" [[package]] name = "backon" @@ -1085,9 +1084,9 @@ checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64" -version = "0.22.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +checksum = 
"9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" [[package]] name = "base64ct" @@ -1300,7 +1299,7 @@ dependencies = [ "cfg-if", "dashmap", "fast-float", - "hashbrown 0.14.5", + "hashbrown 0.14.3", "icu_normalizer", "indexmap 2.2.6", "intrusive-collections", @@ -1335,7 +1334,7 @@ checksum = "c055ef3cd87ea7db014779195bc90c6adfc35de4902e3b2fe587adecbd384578" dependencies = [ "boa_macros", "boa_profiler", - "hashbrown 0.14.5", + "hashbrown 0.14.3", "thin-vec", ] @@ -1347,7 +1346,7 @@ checksum = "0cacc9caf022d92195c827a3e5bf83f96089d4bfaff834b359ac7b6be46e9187" dependencies = [ "boa_gc", "boa_macros", - "hashbrown 0.14.5", + "hashbrown 0.14.3", "indexmap 2.2.6", "once_cell", "phf", @@ -1559,9 +1558,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.96" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "065a29261d53ba54260972629f9ca6bffa69bac13cd1fed61420f7fa68b9f8bd" +checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" dependencies = [ "jobserver", "libc", @@ -1749,9 +1748,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "comfy-table" @@ -2328,7 +2327,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if", - "hashbrown 0.14.5", + "hashbrown 0.14.3", "lock_api", "once_cell", "parking_lot_core 0.9.10", @@ -2336,15 +2335,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.6.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" +checksum = 
"7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" [[package]] name = "data-encoding-macro" -version = "0.1.15" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1559b6cba622276d6d63706db152618eeb15b89b3e4041446b05876e352e639" +checksum = "20c01c06f5f429efdf2bae21eb67c28b3df3cf85b7dd2d8ef09c0838dac5d33e" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -2352,9 +2351,9 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.13" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332d754c0af53bc87c108fed664d121ecf59207ec4196041f04d6ab9002ad33f" +checksum = "0047d07f2c89b17dd631c80450d69841a6b5d7fb17278cbc43d7e4cfcf2576f3" dependencies = [ "data-encoding", "syn 1.0.109", @@ -3109,9 +3108,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.30" +version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" +checksum = "4556222738635b7a3417ae6130d8f52201e45a0c4d1a907f0826383adb5f85e7" dependencies = [ "crc32fast", "miniz_oxide", @@ -3473,9 +3472,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.5" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ "ahash", "allocator-api2", @@ -3488,7 +3487,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.14.3", ] [[package]] @@ -3497,7 +3496,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.14.3", ] [[package]] @@ -3735,7 +3734,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.7", + "socket2 0.5.6", "tokio", "tower-service", "tracing", @@ -3821,7 +3820,7 @@ dependencies = [ "http-body 1.0.0", "hyper 1.3.1", "pin-project-lite", - "socket2 0.5.7", + "socket2 0.5.6", "tokio", "tower", "tower-service", @@ -4141,7 +4140,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.14.3", "serde", ] @@ -4236,7 +4235,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.7", + "socket2 0.5.6", "widestring", "windows-sys 0.48.0", "winreg 0.50.0", @@ -4269,12 +4268,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "is_terminal_polyfill" -version = "1.70.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" - [[package]] name = "itertools" version = "0.10.5" @@ -4319,9 +4312,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.22.5" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfdb12a2381ea5b2e68c3469ec604a007b367778cdb14d09612c8069ebd616ad" +checksum = "c4b0e68d9af1f066c06d6e2397583795b912d78537d7d907c561e82c13d69fa1" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4337,9 +4330,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.22.5" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4978087a58c3ab02efc5b07c5e5e2803024536106fd5506f558db172c889b3aa" 
+checksum = "92f254f56af1ae84815b9b1325094743dcf05b92abb5e94da2e81a35cff0cada" dependencies = [ "futures-channel", "futures-util", @@ -4361,9 +4354,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.22.5" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4b257e1ec385e07b0255dde0b933f948b5c8b8c28d42afda9587c3a967b896d" +checksum = "274d68152c24aa78977243bb56f28d7946e6aa309945b37d33174a3f92d89a3a" dependencies = [ "anyhow", "async-trait", @@ -4387,9 +4380,9 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.22.5" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ccf93fc4a0bfe05d851d37d7c32b7f370fe94336b52a2f0efc5f1981895c2e5" +checksum = "ac13bc1e44cd00448a5ff485824a128629c945f02077804cb659c07a0ba41395" dependencies = [ "async-trait", "hyper 0.14.28", @@ -4407,9 +4400,9 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.22.5" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d0bb047e79a143b32ea03974a6bf59b62c2a4c5f5d42a381c907a8bbb3f75c0" +checksum = "2c326f9e95aeff7d707b2ffde72c22a52acc975ba1c48587776c02b90c4747a6" dependencies = [ "heck 0.4.1", "proc-macro-crate 3.1.0", @@ -4420,9 +4413,9 @@ dependencies = [ [[package]] name = "jsonrpsee-server" -version = "0.22.5" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12d8b6a9674422a8572e0b0abb12feeb3f2aeda86528c80d0350c2bd0923ab41" +checksum = "3b5bfbda5f8fb63f997102fd18f73e35e34c84c6dcdbdbbe72c6e48f6d2c959b" dependencies = [ "futures-util", "http 0.2.12", @@ -4444,9 +4437,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.22.5" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "150d6168405890a7a3231a3c74843f58b8959471f6df76078db2619ddee1d07d" +checksum = 
"3dc828e537868d6b12bbb07ec20324909a22ced6efca0057c825c3e1126b2c6d" dependencies = [ "anyhow", "beef", @@ -4457,9 +4450,9 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.22.5" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f448d8eacd945cc17b6c0b42c361531ca36a962ee186342a97cdb8fca679cd77" +checksum = "7cf8dcee48f383e24957e238240f997ec317ba358b4e6d2e8be3f745bcdabdb5" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4468,9 +4461,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.22.5" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58b9db2dfd5bb1194b0ce921504df9ceae210a345bc2f6c5a61432089bbab070" +checksum = "32f00abe918bf34b785f87459b9205790e5361a3f7437adb50e928dc243f27eb" dependencies = [ "http 0.2.12", "jsonrpsee-client-transport", @@ -4558,9 +4551,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.154" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libffi" @@ -4694,9 +4687,9 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.44.2" +version = "0.44.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80cae6cb75f89dbca53862f9ebe0b9f463aa7b302762fcfaafb9e51dcc9b0f7e" +checksum = "e92532fc3c4fb292ae30c371815c9b10103718777726ea5497abc268a4761866" dependencies = [ "either", "fnv", @@ -4705,7 +4698,6 @@ dependencies = [ "instant", "libp2p-core", "libp2p-identity", - "lru", "multistream-select", "once_cell", "rand 0.8.5", @@ -4843,7 +4835,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.14.3", ] [[package]] @@ -5331,9 +5323,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.45" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" dependencies = [ "autocfg", "num-integer", @@ -5354,9 +5346,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.19" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", "libm", @@ -5387,7 +5379,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" dependencies = [ - "proc-macro-crate 2.0.0", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", "syn 2.0.60", @@ -5592,7 +5584,7 @@ version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.22.1", + "base64 0.22.0", "serde", ] @@ -5604,9 +5596,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.10" +version = "2.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" +checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" dependencies = [ "memchr", "thiserror", @@ -6294,7 +6286,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0eae2a1ebfecc58aff952ef8ccd364329abe627762f5bf09ff42eb9d98522479" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.14.3", "memchr", ] @@ -6342,7 +6334,7 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10" dependencies = [ - "base64 0.22.1", + "base64 0.22.0", "bytes", "futures-core", "futures-util", @@ -7553,6 +7545,7 @@ dependencies = [ "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "alloy-rlp", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-trie", "arbitrary", "assert_matches", @@ -7576,7 +7569,6 @@ dependencies = [ "reth-codecs", "reth-ethereum-forks", "reth-network-types", - "reth-rpc-types", "revm", "revm-primitives", "roaring", @@ -8136,7 +8128,7 @@ dependencies = [ "derive_more", "dyn-clone", "enumn", - "hashbrown 0.14.5", + "hashbrown 0.14.3", "hex", "once_cell", "serde", @@ -8221,9 +8213,9 @@ dependencies = [ [[package]] name = "roaring" -version = "0.10.4" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b26f4c25a604fcb3a1bcd96dd6ba37c93840de95de8198d94c0d571a74a804d1" +checksum = "a1c77081a55300e016cb86f2864415b7518741879db925b8d488a0ee0d2da6bf" dependencies = [ "bytemuck", "byteorder", @@ -8418,7 +8410,7 @@ version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ - "base64 0.22.1", + "base64 0.22.0", "rustls-pki-types", ] @@ -8501,9 +8493,9 @@ dependencies = [ [[package]] name = "scc" -version = "2.1.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76ad2bbb0ae5100a07b7a6f2ed7ab5fd0045551a4c507989b7a620046ea3efdc" +checksum = "ec96560eea317a9cc4e0bb1f6a2c93c09a19b8c4fc5cb3fcc0ec1c094cd783e2" dependencies = [ "sdd", ] @@ -8642,9 +8634,9 
@@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.200" +version = "1.0.199" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc6f9cc94d67c0e21aaf7eda3a010fd3af78ebf6e096aa6e2e13c79749cce4f" +checksum = "0c9f6e76df036c77cd94996771fb40db98187f096dd0b9af39c6c6e452ba966a" dependencies = [ "serde_derive", ] @@ -8660,9 +8652,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.200" +version = "1.0.199" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "856f046b9400cee3c8c94ed572ecdb752444c24528c035cd35882aad6f492bcb" +checksum = "11bd257a6541e141e42ca6d24ae26f7714887b47e89aa739099104c7e4d3b7fc" dependencies = [ "proc-macro2", "quote", @@ -8715,11 +8707,11 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.8.1" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ad483d2ab0149d5a5ebcd9972a3852711e0153d863bf5a5d0391d28883c4a20" +checksum = "2c85f8e96d1d6857f13768fcbd895fcb06225510022a2774ed8b5150581847b0" dependencies = [ - "base64 0.22.1", + "base64 0.22.0", "chrono", "hex", "indexmap 1.9.3", @@ -8733,9 +8725,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.8.1" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65569b702f41443e8bc8bbb1c5779bd0450bbe723b56198980e80ec45780bce2" +checksum = "c8b3a576c4eb2924262d5951a3b737ccaf16c931e39a2810c36f9a7e25575557" dependencies = [ "darling 0.20.8", "proc-macro2", @@ -8981,9 +8973,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.7" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", "windows-sys 0.52.0", @@ -9269,9 +9261,9 @@ 
checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test-fuzz" -version = "5.1.0" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8224048089fb4c76b0569e76e00bf6cdaf06790eb5290e9582a0c485094e0a8" +checksum = "b139530208017f9d5a113784ed09cf1b8b22dee95eb99d51d89af1a3c2d6594e" dependencies = [ "serde", "test-fuzz-internal", @@ -9281,9 +9273,9 @@ dependencies = [ [[package]] name = "test-fuzz-internal" -version = "5.1.0" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43cd6c1a291bd5f843f5dfb813c2fd7ad8e38de06722a14eeb54636c983485cc" +checksum = "16e78ed8148311b6a02578dee5fd77600bf8805b77b2cb8382a9435348080985" dependencies = [ "bincode", "cargo_metadata", @@ -9292,9 +9284,9 @@ dependencies = [ [[package]] name = "test-fuzz-macro" -version = "5.1.0" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffbe4466c9f941baa7dd177856ebda245d08b2aa2e3b6890d6dd8c54d6ceebe" +checksum = "17f9bc8c69f276df24e4d1c082e52ea057544495916c4aa0708b82e47f55f364" dependencies = [ "darling 0.20.8", "itertools 0.12.1", @@ -9307,9 +9299,9 @@ dependencies = [ [[package]] name = "test-fuzz-runtime" -version = "5.1.0" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc507e8ea4887c091e1a57b65458c57b3a8fce1b6ed53afee77a174cfe41c17" +checksum = "2b657ccc932fde05dbac5c460bffa40809937adaa5558863fe8174526e1b3bc9" dependencies = [ "hex", "num-traits", @@ -9492,7 +9484,7 @@ dependencies = [ "parking_lot 0.12.2", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2 0.5.6", "tokio-macros", "windows-sys 0.48.0", ] @@ -9543,9 +9535,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", @@ -9554,6 +9546,7 @@ dependencies = [ "pin-project-lite", "slab", "tokio", + "tracing", ] [[package]] @@ -9906,11 +9899,12 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.93" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0e5d82932dfbf36df38de5df0cfe846d13430b3ae3fdc48b2e91ed692c8df7" +checksum = "8ad7eb6319ebadebca3dacf1f85a93bc54b73dd81b9036795f73de7ddfe27d5a" dependencies = [ "glob", + "once_cell", "serde", "serde_derive", "serde_json", @@ -10572,18 +10566,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.33" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "087eca3c1eaf8c47b94d02790dd086cd594b912d2043d4de4bfdd466b3befb7c" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.33" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f4b6c273f496d8fd4eaf18853e6b448760225dc030ff2c485a786859aea6393" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 8d31358d9019e..04b25c7cafc7f 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -16,7 +16,6 @@ workspace = true reth-codecs.workspace = true reth-ethereum-forks.workspace = true reth-network-types.workspace = true -reth-rpc-types.workspace = true revm.workspace = true revm-primitives = { workspace = true, features = ["serde"] } @@ -25,9 +24,10 @@ alloy-chains = { workspace = true, features = ["serde", "rlp"] 
} alloy-primitives = { workspace = true, features = ["rand", "rlp"] } alloy-rlp = { workspace = true, features = ["arrayvec"] } alloy-trie = { workspace = true, features = ["serde"] } -nybbles = { workspace = true, features = ["serde", "rlp"] } +alloy-rpc-types = { workspace = true, optional = true } alloy-genesis.workspace = true -alloy-eips.workspace = true +alloy-eips = { workspace = true, features = ["serde"] } +nybbles = { workspace = true, features = ["serde", "rlp"] } # crypto secp256k1 = { workspace = true, features = ["global-context", "recovery", "rand"] } @@ -66,7 +66,9 @@ revm-primitives = { workspace = true, features = ["arbitrary"] } nybbles = { workspace = true, features = ["arbitrary"] } alloy-trie = { workspace = true, features = ["arbitrary"] } alloy-eips = { workspace = true, features = ["arbitrary"] } + assert_matches.workspace = true +arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-derive.workspace = true rand.workspace = true @@ -91,7 +93,6 @@ default = ["c-kzg", "zstd-codec"] asm-keccak = ["alloy-primitives/asm-keccak"] arbitrary = [ "revm-primitives/arbitrary", - "reth-rpc-types/arbitrary", "reth-ethereum-forks/arbitrary", "nybbles/arbitrary", "alloy-trie/arbitrary", @@ -110,6 +111,7 @@ optimism = [ "reth-ethereum-forks/optimism", "revm/optimism", ] +alloy-compat = ["alloy-rpc-types"] test-utils = ["dep:plain_hasher", "dep:hash-db"] [[bench]] diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 4b4831b9c1ed8..8a029dc05233f 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,16 +1,15 @@ use crate::{ - Address, Bytes, GotExpected, Header, SealedHeader, Signature, TransactionSigned, + Address, Bytes, GotExpected, Header, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, Withdrawals, B256, }; use alloy_rlp::{RlpDecodable, RlpEncodable}; #[cfg(any(test, feature = "arbitrary"))] use proptest::prelude::{any, prop_compose}; use 
reth_codecs::derive_arbitrary; -use reth_rpc_types::ConversionError; use serde::{Deserialize, Serialize}; use std::ops::Deref; -pub use reth_rpc_types::{ +pub use alloy_eips::eip1898::{ BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, ForkBlock, RpcBlockHash, }; @@ -148,33 +147,36 @@ impl Deref for Block { } } -impl TryFrom for Block { - type Error = ConversionError; +#[cfg(feature = "alloy-compat")] +impl TryFrom for Block { + type Error = alloy_rpc_types::ConversionError; + + fn try_from(block: alloy_rpc_types::Block) -> Result { + use alloy_rpc_types::ConversionError; - fn try_from(block: reth_rpc_types::Block) -> Result { let body = { let transactions: Result, ConversionError> = match block .transactions { - reth_rpc_types::BlockTransactions::Full(transactions) => transactions + alloy_rpc_types::BlockTransactions::Full(transactions) => transactions .into_iter() .map(|tx| { let signature = tx.signature.ok_or(ConversionError::MissingSignature)?; Ok(TransactionSigned::from_transaction_and_signature( tx.try_into()?, - Signature { + crate::Signature { r: signature.r, s: signature.s, odd_y_parity: signature .y_parity - .unwrap_or(reth_rpc_types::Parity(false)) + .unwrap_or(alloy_rpc_types::Parity(false)) .0, }, )) }) .collect(), - reth_rpc_types::BlockTransactions::Hashes(_) | - reth_rpc_types::BlockTransactions::Uncle => { + alloy_rpc_types::BlockTransactions::Hashes(_) | + alloy_rpc_types::BlockTransactions::Uncle => { return Err(ConversionError::MissingFullTransactions) } }; diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index a06be26256ba1..d0bd5baf865f6 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -16,7 +16,6 @@ use bytes::BufMut; #[cfg(any(test, feature = "arbitrary"))] use proptest::prelude::*; use reth_codecs::{add_arbitrary_tests, derive_arbitrary, main_codec, Compact}; -use reth_rpc_types::ConversionError; use serde::{Deserialize, Serialize}; use std::{mem, ops::Deref}; @@ 
-486,10 +485,13 @@ impl Decodable for Header { } } -impl TryFrom for Header { - type Error = ConversionError; +#[cfg(feature = "alloy-compat")] +impl TryFrom for Header { + type Error = alloy_rpc_types::ConversionError; + + fn try_from(header: alloy_rpc_types::Header) -> Result { + use alloy_rpc_types::ConversionError; - fn try_from(header: reth_rpc_types::Header) -> Result { Ok(Self { base_fee_per_gas: header .base_fee_per_gas diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 2cd71ae20b29d..27c66e69e786f 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -4,6 +4,7 @@ //! //! ## Feature Flags //! +//! - `alloy-compat`: Adds compatibility conversions for certain alloy types. //! - `arbitrary`: Adds `proptest` and `arbitrary` support for primitive types. //! - `test-utils`: Export utilities for testing @@ -38,7 +39,6 @@ mod prune; mod receipt; /// Helpers for working with revm pub mod revm; -pub mod serde_helper; pub mod stage; pub mod static_file; mod storage; diff --git a/crates/primitives/src/net.rs b/crates/primitives/src/net.rs index 778e2658bc435..dcb10545f7f2d 100644 --- a/crates/primitives/src/net.rs +++ b/crates/primitives/src/net.rs @@ -120,15 +120,10 @@ pub fn parse_nodes(nodes: impl IntoIterator>) -> Vec(&node).expect("couldn't serialize"); assert_eq!(ser, "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"") @@ -246,7 +241,7 @@ mod tests { address: IpAddr::V4([10, 3, 58, 6].into()), tcp_port: 30303u16, udp_port: 30301u16, - id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(), + id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), }) } } diff --git 
a/crates/primitives/src/serde_helper.rs b/crates/primitives/src/serde_helper.rs deleted file mode 100644 index b0d041fdcd025..0000000000000 --- a/crates/primitives/src/serde_helper.rs +++ /dev/null @@ -1,3 +0,0 @@ -//! [serde] utilities. - -pub use reth_rpc_types::serde_helpers::*; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 95407537b271c..eda139ffdf973 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -2,7 +2,6 @@ use crate::compression::{TRANSACTION_COMPRESSOR, TRANSACTION_DECOMPRESSOR}; use crate::{keccak256, Address, BlockHashOrNumber, Bytes, TxHash, TxKind, B256, U256}; -use alloy_eips::eip2718::Eip2718Error; use alloy_rlp::{ Decodable, Encodable, Error as RlpError, Header, EMPTY_LIST_CODE, EMPTY_STRING_CODE, }; @@ -11,7 +10,6 @@ use derive_more::{AsRef, Deref}; use once_cell::sync::Lazy; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use reth_codecs::{add_arbitrary_tests, derive_arbitrary, Compact}; -use reth_rpc_types::ConversionError; use serde::{Deserialize, Serialize}; use std::mem; @@ -614,10 +612,14 @@ impl From for Transaction { } } -impl TryFrom for Transaction { - type Error = ConversionError; +#[cfg(feature = "alloy-compat")] +impl TryFrom for Transaction { + type Error = alloy_rpc_types::ConversionError; + + fn try_from(tx: alloy_rpc_types::Transaction) -> Result { + use alloy_eips::eip2718::Eip2718Error; + use alloy_rpc_types::ConversionError; - fn try_from(tx: reth_rpc_types::Transaction) -> Result { match tx.transaction_type.map(TryInto::try_into).transpose().map_err(|_| { ConversionError::Eip2718Error(Eip2718Error::UnexpectedType( tx.transaction_type.unwrap(), @@ -1717,10 +1719,12 @@ impl IntoRecoveredTransaction for TransactionSignedEcRecovered { } } -impl TryFrom for TransactionSignedEcRecovered { - type Error = ConversionError; +#[cfg(feature = "alloy-compat")] +impl TryFrom for TransactionSignedEcRecovered { + type 
Error = alloy_rpc_types::ConversionError; - fn try_from(tx: reth_rpc_types::Transaction) -> Result { + fn try_from(tx: alloy_rpc_types::Transaction) -> Result { + use alloy_rpc_types::ConversionError; let signature = tx.signature.ok_or(ConversionError::MissingSignature)?; let transaction: Transaction = tx.try_into()?; diff --git a/crates/primitives/src/withdrawal.rs b/crates/primitives/src/withdrawal.rs index e47b2816a80b9..e4d1b37c0523e 100644 --- a/crates/primitives/src/withdrawal.rs +++ b/crates/primitives/src/withdrawal.rs @@ -85,7 +85,7 @@ impl From> for Withdrawals { #[cfg(test)] mod tests { use super::*; - use crate::{serde_helper::u64_via_ruint, Address}; + use crate::Address; use alloy_rlp::{RlpDecodable, RlpEncodable}; use proptest::proptest; @@ -95,15 +95,12 @@ mod tests { #[derive(Debug, Clone, PartialEq, Eq, Default, Hash, RlpEncodable, RlpDecodable)] struct RethWithdrawal { /// Monotonically increasing identifier issued by consensus layer. - #[serde(with = "u64_via_ruint")] index: u64, /// Index of validator associated with withdrawal. - #[serde(with = "u64_via_ruint", rename = "validatorIndex")] validator_index: u64, /// Target address for withdrawn ether. address: Address, /// Value of the withdrawal in gwei. 
- #[serde(with = "u64_via_ruint")] amount: u64, } diff --git a/crates/rpc/rpc-api/src/eth.rs b/crates/rpc/rpc-api/src/eth.rs index 8811ef87dd1d0..b6c2993bb9332 100644 --- a/crates/rpc/rpc-api/src/eth.rs +++ b/crates/rpc/rpc-api/src/eth.rs @@ -1,11 +1,10 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{ - serde_helper::JsonStorageKey, Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64, -}; +use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64}; use reth_rpc_types::{ - state::StateOverride, AccessListWithGasUsed, AnyTransactionReceipt, BlockOverrides, Bundle, - EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Header, Index, RichBlock, - StateContext, SyncStatus, Transaction, TransactionRequest, Work, + serde_helpers::JsonStorageKey, state::StateOverride, AccessListWithGasUsed, + AnyTransactionReceipt, BlockOverrides, Bundle, EIP1186AccountProofResponse, EthCallResponse, + FeeHistory, Header, Index, RichBlock, StateContext, SyncStatus, Transaction, + TransactionRequest, Work, }; /// Eth rpc interface: diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index 7ba1539b812f4..a1796a71dd49e 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -2,25 +2,22 @@ //! Handles RPC requests for the `eth_` namespace. 
use jsonrpsee::core::RpcResult as Result; -use serde_json::Value; -use tracing::trace; - use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_primitives::{ - serde_helper::JsonStorageKey, Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64, -}; +use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64}; use reth_provider::{ BlockIdReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, HeaderProvider, StateProviderFactory, }; use reth_rpc_api::EthApiServer; use reth_rpc_types::{ - state::StateOverride, AccessListWithGasUsed, AnyTransactionReceipt, BlockOverrides, Bundle, - EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Header, Index, RichBlock, - StateContext, SyncStatus, TransactionRequest, Work, + serde_helpers::JsonStorageKey, state::StateOverride, AccessListWithGasUsed, + AnyTransactionReceipt, BlockOverrides, Bundle, EIP1186AccountProofResponse, EthCallResponse, + FeeHistory, Header, Index, RichBlock, StateContext, SyncStatus, TransactionRequest, Work, }; use reth_transaction_pool::TransactionPool; +use serde_json::Value; +use tracing::trace; use crate::{ eth::{ diff --git a/crates/rpc/rpc/src/eth/api/state.rs b/crates/rpc/rpc/src/eth/api/state.rs index 7f0bdd4e2f701..144b1504f719b 100644 --- a/crates/rpc/rpc/src/eth/api/state.rs +++ b/crates/rpc/rpc/src/eth/api/state.rs @@ -5,13 +5,11 @@ use crate::{ EthApi, }; use reth_evm::ConfigureEvm; -use reth_primitives::{ - serde_helper::JsonStorageKey, Address, BlockId, BlockNumberOrTag, Bytes, B256, U256, -}; +use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256, U256}; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProvider, StateProviderFactory, }; -use reth_rpc_types::EIP1186AccountProofResponse; +use reth_rpc_types::{serde_helpers::JsonStorageKey, EIP1186AccountProofResponse}; use reth_rpc_types_compat::proof::from_primitive_account_proof; use 
reth_transaction_pool::{PoolTransaction, TransactionPool}; From d46774411fee0802f4390e4f04b7184cdcdb3ea2 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 8 May 2024 17:28:53 -0400 Subject: [PATCH 518/700] release: v0.2.0-beta.7 (#8164) --- Cargo.lock | 148 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 75 insertions(+), 75 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3e99592edff1e..621a84ed232b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2678,7 +2678,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "rayon", @@ -6382,7 +6382,7 @@ dependencies = [ [[package]] name = "reth" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "ahash", "alloy-rlp", @@ -6459,7 +6459,7 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "futures-util", "reth-beacon-consensus", @@ -6481,7 +6481,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "futures-core", @@ -6503,7 +6503,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "assert_matches", "futures", @@ -6545,7 +6545,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "aquamarine", "assert_matches", @@ -6571,7 +6571,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-tasks", "tokio", @@ -6580,7 +6580,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-genesis 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=77c1240)", @@ -6598,7 +6598,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "convert_case 0.6.0", "proc-macro2", @@ -6609,7 +6609,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "confy", "humantime-serde", @@ -6625,7 +6625,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "auto_impl", "reth-primitives", @@ -6634,7 +6634,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "mockall", "reth-consensus", @@ -6645,7 +6645,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "arbitrary", "assert_matches", @@ -6682,7 +6682,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "assert_matches", @@ -6706,7 +6706,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "derive_more", @@ -6730,7 +6730,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "data-encoding", @@ -6755,7 +6755,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "assert_matches", @@ -6786,7 +6786,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-network", @@ -6816,7 +6816,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" 
dependencies = [ "aes 0.8.4", "alloy-rlp", @@ -6848,7 +6848,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-primitives", "reth-rpc-types", @@ -6858,7 +6858,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "arbitrary", @@ -6893,7 +6893,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "arbitrary", @@ -6916,7 +6916,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-consensus", "reth-consensus-common", @@ -6925,7 +6925,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "reth-engine-primitives", @@ -6940,7 +6940,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-chains", "alloy-primitives", @@ -6955,7 +6955,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-basic-payload-builder", "reth-payload-builder", @@ -6969,7 +6969,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "rayon", "reth-db", @@ -6979,7 +6979,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "futures-util", "parking_lot 0.12.2", @@ -6991,7 +6991,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-evm", "reth-interfaces", @@ -7003,7 +7003,7 @@ dependencies = [ [[package]] name = "reth-evm-optimism" -version = "0.2.0-beta.6" +version = 
"0.2.0-beta.7" dependencies = [ "reth-evm", "reth-interfaces", @@ -7018,7 +7018,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "eyre", "metrics", @@ -7038,7 +7038,7 @@ dependencies = [ [[package]] name = "reth-interfaces" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "auto_impl", "clap", @@ -7058,7 +7058,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "async-trait", "bytes", @@ -7080,7 +7080,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "bitflags 2.5.0", "byteorder", @@ -7102,7 +7102,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "bindgen", "cc", @@ -7111,7 +7111,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "futures", "metrics", @@ -7122,7 +7122,7 @@ dependencies = [ [[package]] name = "reth-metrics-derive" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "metrics", "once_cell", @@ -7136,7 +7136,7 @@ dependencies = [ [[package]] name = "reth-net-common" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "pin-project", "reth-network-types", @@ -7145,7 +7145,7 @@ dependencies = [ [[package]] name = "reth-net-nat" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "igd-next", "pin-project-lite", @@ -7159,7 +7159,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-node-bindings", "alloy-provider", @@ -7216,7 +7216,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "enr", "reth-discv4", @@ -7231,7 +7231,7 @@ dependencies = [ [[package]] name = 
"reth-network-types" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7246,7 +7246,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "anyhow", "bincode", @@ -7267,7 +7267,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-db", "reth-engine-primitives", @@ -7281,7 +7281,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "aquamarine", "confy", @@ -7321,7 +7321,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "assert_matches", "clap", @@ -7385,7 +7385,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "eyre", "futures", @@ -7413,7 +7413,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "futures", "humantime", @@ -7434,7 +7434,7 @@ dependencies = [ [[package]] name = "reth-node-optimism" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-primitives", "async-trait", @@ -7476,7 +7476,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-consensus", "reth-consensus-common", @@ -7485,7 +7485,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "reth-basic-payload-builder", @@ -7507,7 +7507,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "futures-util", "metrics", @@ -7529,7 +7529,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = 
"0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-primitives", "reth-rpc-types", @@ -7538,7 +7538,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-chains", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", @@ -7587,7 +7587,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "assert_matches", @@ -7618,7 +7618,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "assert_matches", "derive_more", @@ -7642,7 +7642,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-consensus-common", "reth-interfaces", @@ -7655,7 +7655,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-dyn-abi", "alloy-primitives", @@ -7710,7 +7710,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "jsonrpsee", "reth-engine-primitives", @@ -7723,7 +7723,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "futures", "jsonrpsee", @@ -7737,7 +7737,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "hyper 0.14.28", "jsonrpsee", @@ -7775,7 +7775,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "assert_matches", @@ -7803,7 +7803,7 @@ dependencies = [ [[package]] name = "reth-rpc-types" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-primitives", "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", @@ 
-7828,7 +7828,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", @@ -7839,7 +7839,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "assert_matches", @@ -7877,7 +7877,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "aquamarine", "assert_matches", @@ -7900,7 +7900,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "assert_matches", "parking_lot 0.12.2", @@ -7919,7 +7919,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "dyn-clone", "futures-util", @@ -7935,7 +7935,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "reth-primitives", @@ -7944,7 +7944,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "tokio", "tokio-stream", @@ -7952,7 +7952,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "clap", "eyre", @@ -7966,7 +7966,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "aquamarine", @@ -8005,7 +8005,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "auto_impl", @@ -8031,7 +8031,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies 
= [ "alloy-rlp", "criterion", diff --git a/Cargo.toml b/Cargo.toml index b4500979534bf..fe219f51bc747 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -165,7 +165,7 @@ unnecessary_struct_initialization = "allow" use_self = "allow" [workspace.package] -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" edition = "2021" rust-version = "1.76" license = "MIT OR Apache-2.0" From aa07257d3ba071188b7b7e372856b50be094e8ba Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 8 May 2024 18:22:35 -0400 Subject: [PATCH 519/700] fix: use op-build instead of build-op in release workflow (#8167) --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8562da807a715..2b546a6fc4c10 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -31,7 +31,7 @@ jobs: needs: extract-version strategy: matrix: - build: [{command: build, binary: reth}, {command: build-op, binary: op-reth}] + build: [{command: build, binary: reth}, {command: op-build, binary: op-reth}] include: - target: x86_64-unknown-linux-gnu os: ubuntu-20.04 From 1184e8c45b5676e1516844bba18996cdf1562654 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 9 May 2024 13:04:14 +0100 Subject: [PATCH 520/700] chore: use `NoopBodiesDownloader` & `NoopHeaderDownloader` on `stage unwind` command (#8165) --- bin/reth/src/commands/stage/unwind.rs | 46 +++------------------- crates/net/downloaders/src/bodies/mod.rs | 3 ++ crates/net/downloaders/src/bodies/noop.rs | 29 ++++++++++++++ crates/net/downloaders/src/headers/mod.rs | 3 ++ crates/net/downloaders/src/headers/noop.rs | 30 ++++++++++++++ 5 files changed, 70 insertions(+), 41 deletions(-) create mode 100644 crates/net/downloaders/src/bodies/noop.rs create mode 100644 crates/net/downloaders/src/headers/noop.rs diff --git a/bin/reth/src/commands/stage/unwind.rs 
b/bin/reth/src/commands/stage/unwind.rs index b7998d0875b99..3a65974995a7f 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -5,15 +5,9 @@ use reth_beacon_consensus::EthBeaconConsensus; use reth_config::{Config, PruneConfig}; use reth_consensus::Consensus; use reth_db::{database::Database, open_db}; -use reth_downloaders::{ - bodies::bodies::BodiesDownloaderBuilder, - headers::reverse_headers::ReverseHeadersDownloaderBuilder, -}; +use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_exex::ExExManagerHandle; -use reth_node_core::{ - args::{get_secret_key, NetworkArgs}, - dirs::ChainPath, -}; +use reth_node_core::args::NetworkArgs; use reth_primitives::{BlockHashOrNumber, ChainSpec, PruneModes, B256}; use reth_provider::{ BlockExecutionWriter, BlockNumReader, ChainSpecProvider, HeaderSyncMode, ProviderFactory, @@ -110,8 +104,7 @@ impl Command { .filter(|highest_static_file_block| highest_static_file_block >= range.start()) { info!(target: "reth::cli", ?range, ?highest_static_block, "Executing a pipeline unwind."); - let mut pipeline = - self.build_pipeline(data_dir, config, provider_factory.clone()).await?; + let mut pipeline = self.build_pipeline(config, provider_factory.clone()).await?; // Move all applicable data from database to static files. 
pipeline.produce_static_files()?; @@ -142,40 +135,11 @@ impl Command { async fn build_pipeline( self, - data_dir: ChainPath, config: Config, provider_factory: ProviderFactory>, ) -> Result>, eyre::Error> { - // Even though we are not planning to download anything, we need to initialize Body and - // Header stage with a network client - let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); - let p2p_secret_key = get_secret_key(&network_secret_path)?; - let default_peers_path = data_dir.known_peers(); - let network = self - .network - .network_config( - &config, - provider_factory.chain_spec(), - p2p_secret_key, - default_peers_path, - ) - .build(provider_factory.clone()) - .start_network() - .await?; - let consensus: Arc = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); - - // building network downloaders using the fetch client - let fetch_client = network.fetch_client().await?; - let header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) - .build(fetch_client.clone(), Arc::clone(&consensus)); - let body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies).build( - fetch_client, - Arc::clone(&consensus), - provider_factory.clone(), - ); let stage_conf = &config.stages; let (tip_tx, tip_rx) = watch::channel(B256::ZERO); @@ -189,8 +153,8 @@ impl Command { provider_factory.clone(), header_mode, Arc::clone(&consensus), - header_downloader, - body_downloader, + NoopHeaderDownloader::default(), + NoopBodiesDownloader::default(), executor.clone(), stage_conf.etl.clone(), ) diff --git a/crates/net/downloaders/src/bodies/mod.rs b/crates/net/downloaders/src/bodies/mod.rs index f8931ea81a423..d4f613413fb39 100644 --- a/crates/net/downloaders/src/bodies/mod.rs +++ b/crates/net/downloaders/src/bodies/mod.rs @@ -2,6 +2,9 @@ #[allow(clippy::module_inception)] pub mod bodies; +/// A body downloader that does nothing. Useful to build unwind-only pipelines. 
+pub mod noop; + /// A downloader implementation that spawns a downloader to a task pub mod task; diff --git a/crates/net/downloaders/src/bodies/noop.rs b/crates/net/downloaders/src/bodies/noop.rs new file mode 100644 index 0000000000000..5885a17c11d2f --- /dev/null +++ b/crates/net/downloaders/src/bodies/noop.rs @@ -0,0 +1,29 @@ +use futures::Stream; +use reth_interfaces::p2p::{ + bodies::{downloader::BodyDownloader, response::BlockResponse}, + error::{DownloadError, DownloadResult}, +}; +use reth_primitives::BlockNumber; +use std::ops::RangeInclusive; + +/// A [BodyDownloader] implementation that does nothing. +#[derive(Debug, Default)] +#[non_exhaustive] +pub struct NoopBodiesDownloader; + +impl BodyDownloader for NoopBodiesDownloader { + fn set_download_range(&mut self, _: RangeInclusive) -> DownloadResult<()> { + Ok(()) + } +} + +impl Stream for NoopBodiesDownloader { + type Item = Result, DownloadError>; + + fn poll_next( + self: std::pin::Pin<&mut Self>, + _: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + panic!("NoopBodiesDownloader shouldn't be polled.") + } +} diff --git a/crates/net/downloaders/src/headers/mod.rs b/crates/net/downloaders/src/headers/mod.rs index 4321ef52bead8..a261f5579313d 100644 --- a/crates/net/downloaders/src/headers/mod.rs +++ b/crates/net/downloaders/src/headers/mod.rs @@ -1,6 +1,9 @@ /// A Linear downloader implementation. pub mod reverse_headers; +/// A header downloader that does nothing. Useful to build unwind-only pipelines. 
+pub mod noop; + /// A downloader implementation that spawns a downloader to a task pub mod task; diff --git a/crates/net/downloaders/src/headers/noop.rs b/crates/net/downloaders/src/headers/noop.rs new file mode 100644 index 0000000000000..8127cc2324304 --- /dev/null +++ b/crates/net/downloaders/src/headers/noop.rs @@ -0,0 +1,30 @@ +use futures::Stream; +use reth_interfaces::p2p::headers::{ + downloader::{HeaderDownloader, SyncTarget}, + error::HeadersDownloaderError, +}; +use reth_primitives::SealedHeader; + +/// A [HeaderDownloader] implementation that does nothing. +#[derive(Debug, Default)] +#[non_exhaustive] +pub struct NoopHeaderDownloader; + +impl HeaderDownloader for NoopHeaderDownloader { + fn update_local_head(&mut self, _: SealedHeader) {} + + fn update_sync_target(&mut self, _: SyncTarget) {} + + fn set_batch_size(&mut self, _: usize) {} +} + +impl Stream for NoopHeaderDownloader { + type Item = Result, HeadersDownloaderError>; + + fn poll_next( + self: std::pin::Pin<&mut Self>, + _: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + panic!("NoopHeaderDownloader shouldn't be polled.") + } +} From 539c70256145f0a126fde406ef14d50fbd8f9589 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 9 May 2024 12:08:32 -0400 Subject: [PATCH 521/700] fix: use configs object list in matrix to release all platforms (#8179) --- .github/workflows/release.yml | 67 ++++++++++++++++++++--------------- 1 file changed, 39 insertions(+), 28 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2b546a6fc4c10..9bb9f4d935d2b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -27,50 +27,61 @@ jobs: build: name: build release - runs-on: ${{ matrix.os }} + runs-on: ${{ matrix.configs.os }} needs: extract-version strategy: matrix: + configs: [ + { + target: x86_64-unknown-linux-gnu + os: ubuntu-20.04 + profile: maxperf + }, + { + target: aarch64-unknown-linux-gnu 
+ os: ubuntu-20.04 + profile: maxperf + }, + { + target: x86_64-apple-darwin + os: macos-13 + profile: maxperf + }, + { + target: aarch64-apple-darwin + os: macos-14 + profile: maxperf + }, + { + target: x86_64-pc-windows-gnu + os: ubuntu-20.04 + profile: maxperf + }, + ] build: [{command: build, binary: reth}, {command: op-build, binary: op-reth}] - include: - - target: x86_64-unknown-linux-gnu - os: ubuntu-20.04 - profile: maxperf - - target: aarch64-unknown-linux-gnu - os: ubuntu-20.04 - profile: maxperf - - target: x86_64-apple-darwin - os: macos-13 - profile: maxperf - - target: aarch64-apple-darwin - os: macos-14 - profile: maxperf - - target: x86_64-pc-windows-gnu - os: ubuntu-20.04 - profile: maxperf steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable with: - target: ${{ matrix.target }} + target: ${{ matrix.configs.target }} - uses: taiki-e/install-action@cross - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - name: Apple M1 setup - if: matrix.target == 'aarch64-apple-darwin' + if: matrix.configs.target == 'aarch64-apple-darwin' run: | echo "SDKROOT=$(xcrun -sdk macosx --show-sdk-path)" >> $GITHUB_ENV echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-os-version)" >> $GITHUB_ENV - name: Build Reth - run: make PROFILE=${{ matrix.profile }} ${{ matrix.build.command }}-${{ matrix.target }} + run: make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }} - name: Move binary run: | mkdir artifacts - [[ "${{ matrix.target }}" == *windows* ]] && ext=".exe" - mv "target/${{ matrix.target }}/${{ matrix.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts + [[ "${{ matrix.configs.target }}" == *windows* ]] && ext=".exe" + mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts - name: Configure GPG and create artifacts env: @@ -80,22 +91,22 @@ jobs: export GPG_TTY=$(tty) echo -n "$GPG_SIGNING_KEY" | base64 --decode | 
gpg --batch --import cd artifacts - tar -czf ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz ${{ matrix.build.binary }}* - echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz + tar -czf ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz ${{ matrix.build.binary }}* + echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz mv *tar.gz* .. shell: bash - name: Upload artifact uses: actions/upload-artifact@v4 with: - name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz - path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz + name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz + path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz - name: Upload signature uses: actions/upload-artifact@v4 with: - name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz.asc - path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz.asc + name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz.asc + path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz.asc draft-release: name: draft release From 4bbc8509d6af7a412dbb117cae47a37e634c7ede Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 9 May 2024 17:43:26 +0100 Subject: [PATCH 522/700] 
chore(pruner): set default timeout to `None` on `PrunerBuilder` (#8181) --- crates/prune/src/builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/prune/src/builder.rs b/crates/prune/src/builder.rs index 8a14ccf4aaeaa..879bd9fb914ad 100644 --- a/crates/prune/src/builder.rs +++ b/crates/prune/src/builder.rs @@ -102,7 +102,7 @@ impl Default for PrunerBuilder { segments: PruneModes::none(), max_reorg_depth: 64, prune_delete_limit: MAINNET.prune_delete_limit, - timeout: Some(Self::DEFAULT_TIMEOUT), + timeout: None, finished_exex_height: watch::channel(FinishedExExHeight::NoExExs).1, } } From 87fee5e8be1983f0baf1647437dd2b098e279ace Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 9 May 2024 13:00:42 -0400 Subject: [PATCH 523/700] fix: use yaml lists instead of objects in release.yml (#8182) --- .github/workflows/release.yml | 49 +++++++++++++++-------------------- 1 file changed, 21 insertions(+), 28 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 9bb9f4d935d2b..470b918b2951d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -31,34 +31,27 @@ jobs: needs: extract-version strategy: matrix: - configs: [ - { - target: x86_64-unknown-linux-gnu - os: ubuntu-20.04 - profile: maxperf - }, - { - target: aarch64-unknown-linux-gnu - os: ubuntu-20.04 - profile: maxperf - }, - { - target: x86_64-apple-darwin - os: macos-13 - profile: maxperf - }, - { - target: aarch64-apple-darwin - os: macos-14 - profile: maxperf - }, - { - target: x86_64-pc-windows-gnu - os: ubuntu-20.04 - profile: maxperf - }, - ] - build: [{command: build, binary: reth}, {command: op-build, binary: op-reth}] + configs: + - target: x86_64-unknown-linux-gnu + os: ubuntu-20.04 + profile: maxperf + - target: aarch64-unknown-linux-gnu + os: ubuntu-20.04 + profile: maxperf + - target: x86_64-apple-darwin + os: macos-13 + profile: maxperf + - target: 
aarch64-apple-darwin + os: macos-14 + profile: maxperf + - target: x86_64-pc-windows-gnu + os: ubuntu-20.04 + profile: maxperf + build: + - command: build + binary: reth + - command: op-build + binary: op-reth steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable From ee3c93916dfe8cf49d5d9f2be0e838279dd249d0 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Thu, 9 May 2024 19:29:42 +0200 Subject: [PATCH 524/700] chore(deps): remove outdated dev dep and comment (#8184) --- crates/primitives/Cargo.toml | 3 --- 1 file changed, 3 deletions(-) diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 04b25c7cafc7f..d9d6c592e79d1 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -82,11 +82,8 @@ plain_hasher = "0.2" sucds = "0.8.1" -# necessary so we don't hit a "undeclared 'std'": -# https://github.com/paradigmxyz/reth/pull/177#discussion_r1021172198 criterion.workspace = true pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] } -secp256k1.workspace = true [features] default = ["c-kzg", "zstd-codec"] From ad54af8406886367fec407750b678c421d39be96 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 9 May 2024 19:24:57 +0100 Subject: [PATCH 525/700] fix: copy and prune data from database with `move_to_static_files`, before a pipeline run/unwind (#8127) --- Cargo.lock | 2 +- bin/reth/Cargo.toml | 1 - bin/reth/src/commands/stage/unwind.rs | 12 +--- crates/primitives/src/prune/mode.rs | 1 + crates/prune/src/builder.rs | 3 +- crates/prune/src/error.rs | 13 +++++ crates/stages-api/Cargo.toml | 1 + crates/stages-api/src/pipeline/mod.rs | 56 ++++++++++++------- .../provider/src/providers/database/mod.rs | 19 +++++-- 9 files changed, 70 insertions(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 621a84ed232b6..6d9252332237e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6434,7 
+6434,6 @@ dependencies = [ "reth-payload-validator", "reth-primitives", "reth-provider", - "reth-prune", "reth-revm", "reth-rpc", "reth-rpc-api", @@ -7890,6 +7889,7 @@ dependencies = [ "reth-metrics", "reth-primitives", "reth-provider", + "reth-prune", "reth-static-file", "reth-tokio-util", "thiserror", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 37b26686f2a5d..c1ed8981a4ca3 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -43,7 +43,6 @@ reth-payload-builder.workspace = true reth-payload-validator.workspace = true reth-basic-payload-builder.workspace = true reth-discv4.workspace = true -reth-prune.workspace = true reth-static-file = { workspace = true } reth-trie = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index 3a65974995a7f..1f0c7fc4569fe 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -2,7 +2,7 @@ use clap::{Parser, Subcommand}; use reth_beacon_consensus::EthBeaconConsensus; -use reth_config::{Config, PruneConfig}; +use reth_config::Config; use reth_consensus::Consensus; use reth_db::{database::Database, open_db}; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; @@ -13,7 +13,6 @@ use reth_provider::{ BlockExecutionWriter, BlockNumReader, ChainSpecProvider, HeaderSyncMode, ProviderFactory, StaticFileProviderFactory, }; -use reth_prune::PrunerBuilder; use reth_stages::{ sets::DefaultStages, stages::{ @@ -107,14 +106,7 @@ impl Command { let mut pipeline = self.build_pipeline(config, provider_factory.clone()).await?; // Move all applicable data from database to static files. - pipeline.produce_static_files()?; - - // Run the pruner so we don't potentially end up with higher height in the database vs - // static files. 
- let mut pruner = PrunerBuilder::new(PruneConfig::default()) - .prune_delete_limit(usize::MAX) - .build(provider_factory); - pruner.run(*range.end())?; + pipeline.move_to_static_files()?; pipeline.unwind((*range.start()).saturating_sub(1), None)?; } else { diff --git a/crates/primitives/src/prune/mode.rs b/crates/primitives/src/prune/mode.rs index c32f66d35d1e4..3454573b9469e 100644 --- a/crates/primitives/src/prune/mode.rs +++ b/crates/primitives/src/prune/mode.rs @@ -36,6 +36,7 @@ impl PruneMode { PruneMode::Distance(distance) if *distance >= segment.min_blocks(purpose) => { Some((tip - distance, *self)) } + PruneMode::Before(n) if *n == tip + 1 && purpose.is_static_file() => Some((tip, *self)), PruneMode::Before(n) if *n > tip => None, // Nothing to prune yet PruneMode::Before(n) if tip - n >= segment.min_blocks(purpose) => Some((n - 1, *self)), _ => return Err(PruneSegmentError::Configuration(segment)), diff --git a/crates/prune/src/builder.rs b/crates/prune/src/builder.rs index 879bd9fb914ad..4e0ffd21a7960 100644 --- a/crates/prune/src/builder.rs +++ b/crates/prune/src/builder.rs @@ -1,10 +1,9 @@ -use std::time::Duration; - use crate::{segments::SegmentSet, Pruner}; use reth_config::PruneConfig; use reth_db::database::Database; use reth_primitives::{FinishedExExHeight, PruneModes, MAINNET}; use reth_provider::ProviderFactory; +use std::time::Duration; use tokio::sync::watch; /// Contains the information required to build a pruner diff --git a/crates/prune/src/error.rs b/crates/prune/src/error.rs index e12320bc8fdb5..bdf5bacc1cdf7 100644 --- a/crates/prune/src/error.rs +++ b/crates/prune/src/error.rs @@ -21,3 +21,16 @@ pub enum PrunerError { #[error(transparent)] Provider(#[from] ProviderError), } + +impl From for RethError { + fn from(err: PrunerError) -> Self { + match err { + PrunerError::PruneSegment(_) | PrunerError::InconsistentData(_) => { + RethError::Custom(err.to_string()) + } + PrunerError::Interface(err) => err, + PrunerError::Database(err) => 
RethError::Database(err), + PrunerError::Provider(err) => RethError::Provider(err), + } + } +} diff --git a/crates/stages-api/Cargo.toml b/crates/stages-api/Cargo.toml index d1e31ba7828fb..2101961fd2d83 100644 --- a/crates/stages-api/Cargo.toml +++ b/crates/stages-api/Cargo.toml @@ -19,6 +19,7 @@ reth-interfaces.workspace = true reth-static-file.workspace = true reth-tokio-util.workspace = true reth-consensus.workspace = true +reth-prune.workspace = true # metrics reth-metrics.workspace = true diff --git a/crates/stages-api/src/pipeline/mod.rs b/crates/stages-api/src/pipeline/mod.rs index 199cc41e64377..5aceb515b791a 100644 --- a/crates/stages-api/src/pipeline/mod.rs +++ b/crates/stages-api/src/pipeline/mod.rs @@ -15,6 +15,7 @@ use reth_provider::{ providers::StaticFileWriter, ProviderFactory, StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory, }; +use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; use reth_tokio_util::EventListeners; use std::pin::Pin; @@ -140,7 +141,7 @@ where match target { PipelineTarget::Sync(tip) => self.set_tip(tip), PipelineTarget::Unwind(target) => { - if let Err(err) = self.produce_static_files() { + if let Err(err) = self.move_to_static_files() { return (self, Err(err.into())) } if let Err(err) = self.unwind(target, None) { @@ -199,7 +200,7 @@ where /// pipeline (for example the `Finish` stage). Or [ControlFlow::Unwind] of the stage that caused /// the unwind. 
pub async fn run_loop(&mut self) -> Result { - self.produce_static_files()?; + self.move_to_static_files()?; let mut previous_stage = None; for stage_index in 0..self.stages.len() { @@ -236,9 +237,10 @@ where Ok(self.progress.next_ctrl()) } - /// Run [static file producer](StaticFileProducer) and move all data from the database to static - /// files for corresponding [segments](reth_primitives::static_file::StaticFileSegment), - /// according to their [stage checkpoints](StageCheckpoint): + /// Run [static file producer](StaticFileProducer) and [pruner](reth_prune::Pruner) to **move** + /// all data from the database to static files for corresponding + /// [segments](reth_primitives::static_file::StaticFileSegment), according to their [stage + /// checkpoints](StageCheckpoint): /// - [StaticFileSegment::Headers](reth_primitives::static_file::StaticFileSegment::Headers) -> /// [StageId::Headers] /// - [StaticFileSegment::Receipts](reth_primitives::static_file::StaticFileSegment::Receipts) @@ -248,22 +250,38 @@ where /// /// CAUTION: This method locks the static file producer Mutex, hence can block the thread if the /// lock is occupied. - pub fn produce_static_files(&self) -> RethResult<()> { + pub fn move_to_static_files(&self) -> RethResult<()> { let mut static_file_producer = self.static_file_producer.lock(); - let provider = self.provider_factory.provider()?; - let targets = static_file_producer.get_static_file_targets(HighestStaticFiles { - headers: provider - .get_stage_checkpoint(StageId::Headers)? - .map(|checkpoint| checkpoint.block_number), - receipts: provider - .get_stage_checkpoint(StageId::Execution)? - .map(|checkpoint| checkpoint.block_number), - transactions: provider - .get_stage_checkpoint(StageId::Bodies)? 
- .map(|checkpoint| checkpoint.block_number), - })?; - static_file_producer.run(targets)?; + // Copies data from database to static files + let lowest_static_file_height = { + let provider = self.provider_factory.provider()?; + let stages_checkpoints = [StageId::Headers, StageId::Execution, StageId::Bodies] + .into_iter() + .map(|stage| { + provider.get_stage_checkpoint(stage).map(|c| c.map(|c| c.block_number)) + }) + .collect::, _>>()?; + + let targets = static_file_producer.get_static_file_targets(HighestStaticFiles { + headers: stages_checkpoints[0], + receipts: stages_checkpoints[1], + transactions: stages_checkpoints[2], + })?; + static_file_producer.run(targets)?; + stages_checkpoints.into_iter().min().expect("exists") + }; + + // Deletes data which has been copied to static files. + if let Some(prune_tip) = lowest_static_file_height { + // Run the pruner so we don't potentially end up with higher height in the database vs + // static files during a pipeline unwind + let mut pruner = PrunerBuilder::new(Default::default()) + .prune_delete_limit(usize::MAX) + .build(self.provider_factory.clone()); + + pruner.run(prune_tip)?; + } Ok(()) } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 1e2f73cbc9c64..c84e9d8cec239 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -34,10 +34,10 @@ use reth_db::mdbx::DatabaseArguments; /// A common provider that fetches data from a database or static file. /// /// This provider implements most provider or provider factory traits. 
-#[derive(Debug, Clone)] +#[derive(Debug)] pub struct ProviderFactory { /// Database - db: DB, + db: Arc, /// Chain spec chain_spec: Arc, /// Static File Provider @@ -52,7 +52,7 @@ impl ProviderFactory { static_files_path: PathBuf, ) -> RethResult> { Ok(Self { - db, + db: Arc::new(db), chain_spec, static_file_provider: StaticFileProvider::new(static_files_path)?, }) @@ -71,7 +71,7 @@ impl ProviderFactory { #[cfg(any(test, feature = "test-utils"))] /// Consumes Self and returns DB - pub fn into_db(self) -> DB { + pub fn into_db(self) -> Arc { self.db } } @@ -86,7 +86,7 @@ impl ProviderFactory { static_files_path: PathBuf, ) -> RethResult { Ok(ProviderFactory:: { - db: init_db(path, args).map_err(|e| RethError::Custom(e.to_string()))?, + db: Arc::new(init_db(path, args).map_err(|e| RethError::Custom(e.to_string()))?), chain_spec, static_file_provider: StaticFileProvider::new(static_files_path)?, }) @@ -558,6 +558,15 @@ impl PruneCheckpointReader for ProviderFactory { } } +impl Clone for ProviderFactory { + fn clone(&self) -> Self { + ProviderFactory { + db: Arc::clone(&self.db), + chain_spec: self.chain_spec.clone(), + static_file_provider: self.static_file_provider.clone(), + } + } +} #[cfg(test)] mod tests { use super::ProviderFactory; From e6fe864e70727ff376f490f1b56a01c25ef7ea5c Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 9 May 2024 14:26:25 -0400 Subject: [PATCH 526/700] fix: use --show-sdk-platform-version instead of os-version (#8185) --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 470b918b2951d..91f65d2bcee71 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -66,7 +66,7 @@ jobs: if: matrix.configs.target == 'aarch64-apple-darwin' run: | echo "SDKROOT=$(xcrun -sdk macosx --show-sdk-path)" >> $GITHUB_ENV - echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx 
--show-sdk-os-version)" >> $GITHUB_ENV + echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-platform-version)" >> $GITHUB_ENV - name: Build Reth run: make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }} From a44e0857373cee447c3823608194ccac53509140 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 9 May 2024 16:40:42 -0400 Subject: [PATCH 527/700] fix: do not use cross for x86_64 apple darwin (#8189) --- Makefile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Makefile b/Makefile index f19a3cd8cd58d..ada2149b81d8c 100644 --- a/Makefile +++ b/Makefile @@ -95,6 +95,7 @@ op-build-aarch64-unknown-linux-gnu: export JEMALLOC_SYS_WITH_LG_PAGE=16 # No jemalloc on Windows build-x86_64-pc-windows-gnu: FEATURES := $(filter-out jemalloc jemalloc-prof,$(FEATURES)) +op-build-x86_64-pc-windows-gnu: FEATURES := $(filter-out jemalloc jemalloc-prof,$(FEATURES)) # Note: The additional rustc compiler flags are for intrinsics needed by MDBX. # See: https://github.com/cross-rs/cross/wiki/FAQ#undefined-reference-with-build-std @@ -116,6 +117,10 @@ build-x86_64-apple-darwin: $(MAKE) build-native-x86_64-apple-darwin build-aarch64-apple-darwin: $(MAKE) build-native-aarch64-apple-darwin +op-build-x86_64-apple-darwin: + $(MAKE) op-build-native-x86_64-apple-darwin +op-build-aarch64-apple-darwin: + $(MAKE) op-build-native-aarch64-apple-darwin # Create a `.tar.gz` containing a binary for a specific target. 
define tarball_release_binary From b86c6a5911491fac3258a994d8da837fe0869e9b Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 10 May 2024 12:18:24 +0200 Subject: [PATCH 528/700] avoid double function call in `validate_one` (#8194) --- crates/transaction-pool/src/validate/eth.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index b31a3af489e9f..abee8a5850901 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -195,11 +195,14 @@ where }; // Reject transactions over defined size to prevent DOS attacks - if transaction.size() > self.max_tx_input_bytes { - let size = transaction.size(); + let transaction_size = transaction.size(); + if transaction_size > self.max_tx_input_bytes { return TransactionValidationOutcome::Invalid( transaction, - InvalidPoolTransactionError::OversizedData(size, self.max_tx_input_bytes), + InvalidPoolTransactionError::OversizedData( + transaction_size, + self.max_tx_input_bytes, + ), ) } @@ -211,11 +214,14 @@ where } // Checks for gas limit - if transaction.gas_limit() > self.block_gas_limit { - let gas_limit = transaction.gas_limit(); + let transaction_gas_limit = transaction.gas_limit(); + if transaction_gas_limit > self.block_gas_limit { return TransactionValidationOutcome::Invalid( transaction, - InvalidPoolTransactionError::ExceedsGasLimit(gas_limit, self.block_gas_limit), + InvalidPoolTransactionError::ExceedsGasLimit( + transaction_gas_limit, + self.block_gas_limit, + ), ) } From a8bbab2470c08140f10d327989a1ac3122ca2211 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 10 May 2024 12:36:45 +0200 Subject: [PATCH 529/700] chore: implement traits directly for futures::Either (#8172) --- crates/consensus/beacon/src/engine/sync.rs | 8 ++-- .../consensus/beacon/src/engine/test_utils.rs | 10 ++--- 
crates/interfaces/src/p2p/either.rs | 43 ++++++------------- crates/node/builder/src/launch/mod.rs | 5 +-- 4 files changed, 25 insertions(+), 41 deletions(-) diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index f73c4b54edb04..09c6d208b6e55 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -434,7 +434,7 @@ mod tests { use assert_matches::assert_matches; use futures::poll; use reth_db::{mdbx::DatabaseEnv, test_utils::TempDatabase}; - use reth_interfaces::{p2p::either::EitherDownloader, test_utils::TestFullBlockClient}; + use reth_interfaces::{p2p::either::Either, test_utils::TestFullBlockClient}; use reth_primitives::{ constants::ETHEREUM_BLOCK_GAS_LIMIT, stage::StageCheckpoint, BlockBody, ChainSpecBuilder, Header, PruneModes, SealedHeader, MAINNET, @@ -543,15 +543,15 @@ mod tests { self, pipeline: Pipeline, chain_spec: Arc, - ) -> EngineSyncController> + ) -> EngineSyncController> where DB: Database + 'static, Client: HeadersClient + BodiesClient + Clone + Unpin + 'static, { let client = self .client - .map(EitherDownloader::Left) - .unwrap_or_else(|| EitherDownloader::Right(TestFullBlockClient::default())); + .map(Either::Left) + .unwrap_or_else(|| Either::Right(TestFullBlockClient::default())); EngineSyncController::new( pipeline, diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 6cad1b471842c..d9fd67c1308ab 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -17,7 +17,7 @@ use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm::{either::Either, test_utils::MockExecutorProvider}; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_interfaces::{ - p2p::{bodies::client::BodiesClient, either::EitherDownloader, headers::client::HeadersClient}, + p2p::{bodies::client::BodiesClient, 
headers::client::HeadersClient}, sync::NoopSyncStateUpdater, test_utils::NoopFullBlockClient, }; @@ -42,7 +42,7 @@ type DatabaseEnv = TempDatabase; type TestBeaconConsensusEngine = BeaconConsensusEngine< Arc, BlockchainProvider>, - Arc>, + Arc>, EthEngineTypes, >; @@ -111,7 +111,7 @@ impl TestEnv { } // TODO: add with_consensus in case we want to use the TestConsensus purposeful failure - this -// would require similar patterns to how we use with_client and the EitherDownloader +// would require similar patterns to how we use with_client and the downloader /// Represents either a real consensus engine, or a test consensus engine. #[derive(Debug, Default)] enum TestConsensusConfig { @@ -331,8 +331,8 @@ where // use either noop client or a user provided client (for example TestFullBlockClient) let client = Arc::new( self.client - .map(EitherDownloader::Left) - .unwrap_or_else(|| EitherDownloader::Right(NoopFullBlockClient::default())), + .map(Either::Left) + .unwrap_or_else(|| Either::Right(NoopFullBlockClient::default())), ); // use either test executor or real executor diff --git a/crates/interfaces/src/p2p/either.rs b/crates/interfaces/src/p2p/either.rs index af7f150189941..ed9d50c736f13 100644 --- a/crates/interfaces/src/p2p/either.rs +++ b/crates/interfaces/src/p2p/either.rs @@ -1,42 +1,35 @@ +//! Support for different download types. + use crate::p2p::{ bodies::client::BodiesClient, download::DownloadClient, headers::client::{HeadersClient, HeadersRequest}, priority::Priority, }; -use futures::future::Either; use reth_primitives::B256; -/// A downloader that combines two different downloaders/client implementations that have the same -/// associated types. 
-#[derive(Debug, Clone)] -pub enum EitherDownloader { - /// The first downloader variant - Left(A), - /// The second downloader variant - Right(B), -} +pub use futures::future::Either; -impl DownloadClient for EitherDownloader +impl DownloadClient for Either where A: DownloadClient, B: DownloadClient, { fn report_bad_message(&self, peer_id: reth_network_types::PeerId) { match self { - EitherDownloader::Left(a) => a.report_bad_message(peer_id), - EitherDownloader::Right(b) => b.report_bad_message(peer_id), + Either::Left(a) => a.report_bad_message(peer_id), + Either::Right(b) => b.report_bad_message(peer_id), } } fn num_connected_peers(&self) -> usize { match self { - EitherDownloader::Left(a) => a.num_connected_peers(), - EitherDownloader::Right(b) => b.num_connected_peers(), + Either::Left(a) => a.num_connected_peers(), + Either::Right(b) => b.num_connected_peers(), } } } -impl BodiesClient for EitherDownloader +impl BodiesClient for Either where A: BodiesClient, B: BodiesClient, @@ -49,17 +42,13 @@ where priority: Priority, ) -> Self::Output { match self { - EitherDownloader::Left(a) => { - Either::Left(a.get_block_bodies_with_priority(hashes, priority)) - } - EitherDownloader::Right(b) => { - Either::Right(b.get_block_bodies_with_priority(hashes, priority)) - } + Either::Left(a) => Either::Left(a.get_block_bodies_with_priority(hashes, priority)), + Either::Right(b) => Either::Right(b.get_block_bodies_with_priority(hashes, priority)), } } } -impl HeadersClient for EitherDownloader +impl HeadersClient for Either where A: HeadersClient, B: HeadersClient, @@ -72,12 +61,8 @@ where priority: Priority, ) -> Self::Output { match self { - EitherDownloader::Left(a) => { - Either::Left(a.get_headers_with_priority(request, priority)) - } - EitherDownloader::Right(b) => { - Either::Right(b.get_headers_with_priority(request, priority)) - } + Either::Left(a) => Either::Left(a.get_headers_with_priority(request, priority)), + Either::Right(b) => 
Either::Right(b.get_headers_with_priority(request, priority)), } } } diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 221434758187c..ece149e31dc2f 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -19,7 +19,6 @@ use reth_blockchain_tree::{ }; use reth_consensus::Consensus; use reth_exex::{ExExContext, ExExHandle, ExExManager, ExExManagerHandle}; -use reth_interfaces::p2p::either::EitherDownloader; use reth_network::NetworkEvents; use reth_node_api::{FullNodeComponents, FullNodeTypes}; use reth_node_core::{ @@ -327,7 +326,7 @@ where debug!(target: "reth::cli", "Spawning auto mine task"); ctx.task_executor().spawn(Box::pin(task)); - (pipeline, EitherDownloader::Left(client)) + (pipeline, Either::Left(client)) } else { let pipeline = crate::setup::build_networked_pipeline( ctx.node_config(), @@ -345,7 +344,7 @@ where ) .await?; - (pipeline, EitherDownloader::Right(network_client.clone())) + (pipeline, Either::Right(network_client.clone())) }; let pipeline_events = pipeline.events(); From efa5d41bf55922e696d69f8afd859093739e7b7b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 10 May 2024 13:23:54 +0200 Subject: [PATCH 530/700] chore: bump alloy 899fc51 (#8195) --- Cargo.lock | 108 +++++++++++++------------- Cargo.toml | 28 +++---- crates/rpc/rpc-types/src/relay/mod.rs | 10 +-- examples/beacon-api-sse/src/main.rs | 2 +- 4 files changed, 74 insertions(+), 74 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6d9252332237e..caacf7c636149 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -133,12 +133,12 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + 
"alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "c-kzg", "serde", ] @@ -177,11 +177,11 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "arbitrary", "c-kzg", "derive_more", @@ -211,10 +211,10 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "serde", "serde_json", ] @@ -245,7 +245,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ "alloy-primitives", "serde", @@ -257,13 +257,13 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ - "alloy-consensus 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=77c1240)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-signer", "alloy-sol-types", "async-trait", @@ -274,9 +274,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-primitives", "k256", "serde_json", @@ -316,14 +316,14 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-rpc-types-trace", "alloy-transport", "alloy-transport-http", @@ -366,7 +366,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -386,14 +386,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-sol-types", "arbitrary", "itertools 0.12.1", @@ -426,19 +426,19 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "serde", ] [[package]] name = "alloy-rpc-types-beacon" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +source = 
"git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-primitives", "alloy-rpc-types-engine", "serde", @@ -448,14 +448,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", @@ -468,11 +468,11 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", 
"serde", "serde_json", ] @@ -480,7 +480,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ "alloy-primitives", "serde", @@ -500,7 +500,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ "alloy-primitives", "async-trait", @@ -513,9 +513,9 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-network", "alloy-primitives", "alloy-signer", @@ -588,7 +588,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ "alloy-json-rpc", "base64 0.22.0", @@ -606,7 +606,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -2976,7 +2976,7 @@ dependencies = [ name = 
"exex-rollup" version = "0.0.0" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-rlp", "alloy-sol-types", "eyre", @@ -6581,8 +6581,8 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.7" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-primitives", "arbitrary", "bytes", @@ -6787,9 +6787,9 @@ dependencies = [ name = "reth-e2e-test-utils" version = "0.2.0-beta.7" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-network", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-signer", "alloy-signer-wallet", "eyre", @@ -7540,11 +7540,11 @@ name = "reth-primitives" version = "0.2.0-beta.7" dependencies = [ "alloy-chains", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-trie", "arbitrary", "assert_matches", @@ -7805,7 +7805,7 @@ name = "reth-rpc-types" version = "0.2.0-beta.7" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-rpc-types-anvil", "alloy-rpc-types-beacon", "alloy-rpc-types-engine", @@ -7830,7 +7830,7 @@ name = "reth-rpc-types-compat" version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "reth-primitives", "reth-rpc-types", "serde_json", @@ -7937,7 +7937,7 @@ dependencies = [ name = "reth-testing-utils" version = "0.2.0-beta.7" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "reth-primitives", "secp256k1", ] @@ -8071,10 +8071,10 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=7168ac5#7168ac55682fb420da7a82ed94bfb0c30a034113" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=c1b5dd0#c1b5dd0d85dd46ef5ec5258aebd24adc041d103a" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", diff --git a/Cargo.toml b/Cargo.toml index fe219f51bc747..469fc4f7edd5a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -280,7 +280,7 @@ reth-testing-utils = { path = "testing/testing-utils" } # revm revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "7168ac5" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "c1b5dd0" } # eth alloy-chains = "0.1.15" @@ -289,21 
+289,21 @@ alloy-dyn-abi = "0.7.2" alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } -alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } +alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "77c1240" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } 
+alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "899fc51" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } # misc auto_impl = "1" diff --git a/crates/rpc/rpc-types/src/relay/mod.rs b/crates/rpc/rpc-types/src/relay/mod.rs index 35daa1b79b092..2a46d7ffbe45a 100644 --- a/crates/rpc/rpc-types/src/relay/mod.rs +++ b/crates/rpc/rpc-types/src/relay/mod.rs @@ -4,7 +4,7 @@ use crate::engine::{ BlobsBundleV1, ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, }; use alloy_primitives::{Address, B256, U256}; -use alloy_rpc_types_beacon::beacon::{BlsPublicKey, BlsSignature}; +use alloy_rpc_types_beacon::{BlsPublicKey, BlsSignature}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, DisplayFromStr}; @@ -102,7 +102,7 @@ pub struct SignedBidSubmissionV1 { /// The BidTrace message associated with the submission. pub message: BidTrace, /// The execution payload for the submission. - #[serde(with = "alloy_rpc_types_beacon::beacon::payload::beacon_payload_v1")] + #[serde(with = "alloy_rpc_types_beacon::payload::beacon_payload_v1")] pub execution_payload: ExecutionPayloadV1, /// The signature associated with the submission. pub signature: BlsSignature, @@ -116,7 +116,7 @@ pub struct SignedBidSubmissionV2 { /// The BidTrace message associated with the submission. pub message: BidTrace, /// The execution payload for the submission. - #[serde(with = "alloy_rpc_types_beacon::beacon::payload::beacon_payload_v2")] + #[serde(with = "alloy_rpc_types_beacon::payload::beacon_payload_v2")] pub execution_payload: ExecutionPayloadV2, /// The signature associated with the submission. 
pub signature: BlsSignature, @@ -130,7 +130,7 @@ pub struct SignedBidSubmissionV3 { /// The BidTrace message associated with the submission. pub message: BidTrace, /// The execution payload for the submission. - #[serde(with = "alloy_rpc_types_beacon::beacon::payload::beacon_payload_v3")] + #[serde(with = "alloy_rpc_types_beacon::payload::beacon_payload_v3")] pub execution_payload: ExecutionPayloadV3, /// The Deneb block bundle for this bid. pub blobs_bundle: BlobsBundleV1, @@ -144,7 +144,7 @@ pub struct SubmitBlockRequest { /// The BidTrace message associated with the block submission. pub message: BidTrace, /// The execution payload for the block submission. - #[serde(with = "alloy_rpc_types_beacon::beacon::payload::beacon_payload")] + #[serde(with = "alloy_rpc_types_beacon::payload::beacon_payload")] pub execution_payload: ExecutionPayload, /// The signature associated with the block submission. pub signature: BlsSignature, diff --git a/examples/beacon-api-sse/src/main.rs b/examples/beacon-api-sse/src/main.rs index 0cd4d4e78dd78..a2d74a77c21c4 100644 --- a/examples/beacon-api-sse/src/main.rs +++ b/examples/beacon-api-sse/src/main.rs @@ -17,7 +17,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] -use alloy_rpc_types_beacon::beacon::events::PayloadAttributesEvent; +use alloy_rpc_types_beacon::events::PayloadAttributesEvent; use clap::Parser; use futures_util::stream::StreamExt; use mev_share_sse::{client::EventStream, EventClient}; From 12cce00ec238a43d62b534e5485fd53c942f9651 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 10 May 2024 13:26:06 +0200 Subject: [PATCH 531/700] chore: move optimism payload crate to optimism folder (#8169) --- Cargo.toml | 6 +++--- crates/{payload/optimism => optimism/payload}/Cargo.toml | 0 .../{payload/optimism => optimism/payload}/src/builder.rs | 0 crates/{payload/optimism => optimism/payload}/src/error.rs | 0 crates/{payload/optimism => optimism/payload}/src/lib.rs | 0 .../{payload/optimism => 
optimism/payload}/src/payload.rs | 0 6 files changed, 3 insertions(+), 3 deletions(-) rename crates/{payload/optimism => optimism/payload}/Cargo.toml (100%) rename crates/{payload/optimism => optimism/payload}/src/builder.rs (100%) rename crates/{payload/optimism => optimism/payload}/src/error.rs (100%) rename crates/{payload/optimism => optimism/payload}/src/lib.rs (100%) rename crates/{payload/optimism => optimism/payload}/src/payload.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index 469fc4f7edd5a..bdde970162e50 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,7 +33,6 @@ members = [ "crates/payload/basic/", "crates/payload/builder/", "crates/payload/ethereum/", - "crates/payload/optimism/", "crates/payload/validator/", "crates/primitives/", "crates/prune/", @@ -52,8 +51,9 @@ members = [ "crates/ethereum/node", "crates/node/builder/", "crates/optimism/consensus", - "crates/optimism/node/", "crates/optimism/evm/", + "crates/optimism/node/", + "crates/optimism/payload/", "crates/node-core/", "crates/node/api/", "crates/stages/", @@ -238,7 +238,7 @@ reth-etl = { path = "crates/etl" } reth-evm = { path = "crates/evm" } reth-evm-ethereum = { path = "crates/ethereum/evm" } reth-exex = { path = "crates/exex" } -reth-optimism-payload-builder = { path = "crates/payload/optimism" } +reth-optimism-payload-builder = { path = "crates/optimism/payload" } reth-interfaces = { path = "crates/interfaces" } reth-ipc = { path = "crates/rpc/ipc" } reth-libmdbx = { path = "crates/storage/libmdbx-rs" } diff --git a/crates/payload/optimism/Cargo.toml b/crates/optimism/payload/Cargo.toml similarity index 100% rename from crates/payload/optimism/Cargo.toml rename to crates/optimism/payload/Cargo.toml diff --git a/crates/payload/optimism/src/builder.rs b/crates/optimism/payload/src/builder.rs similarity index 100% rename from crates/payload/optimism/src/builder.rs rename to crates/optimism/payload/src/builder.rs diff --git a/crates/payload/optimism/src/error.rs 
b/crates/optimism/payload/src/error.rs similarity index 100% rename from crates/payload/optimism/src/error.rs rename to crates/optimism/payload/src/error.rs diff --git a/crates/payload/optimism/src/lib.rs b/crates/optimism/payload/src/lib.rs similarity index 100% rename from crates/payload/optimism/src/lib.rs rename to crates/optimism/payload/src/lib.rs diff --git a/crates/payload/optimism/src/payload.rs b/crates/optimism/payload/src/payload.rs similarity index 100% rename from crates/payload/optimism/src/payload.rs rename to crates/optimism/payload/src/payload.rs From ef01d502387310462e2c312c39a3548d2f149cf3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 10 May 2024 13:26:44 +0200 Subject: [PATCH 532/700] chore: use engine types crate for types (#8170) --- Cargo.lock | 2 +- crates/storage/provider/Cargo.toml | 3 ++- crates/storage/provider/src/providers/mod.rs | 2 +- crates/storage/provider/src/traits/chain_info.rs | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index caacf7c636149..f9c1af70f1427 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7589,6 +7589,7 @@ name = "reth-provider" version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", + "alloy-rpc-types-engine", "assert_matches", "auto_impl", "dashmap", @@ -7605,7 +7606,6 @@ dependencies = [ "reth-metrics", "reth-nippy-jar", "reth-primitives", - "reth-rpc-types", "reth-trie", "revm", "strum", diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 672f6a7fc4894..1272a824c34b3 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -15,13 +15,14 @@ workspace = true # reth reth-primitives.workspace = true reth-interfaces.workspace = true -reth-rpc-types.workspace = true reth-db.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true reth-codecs.workspace = true reth-evm.workspace = true +# ethereum +alloy-rpc-types-engine.workspace = true 
revm.workspace = true # async diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index bf94e32cf4acf..8a06f0c0d2044 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -60,8 +60,8 @@ mod chain_info; use chain_info::ChainInfoTracker; mod consistent_view; +use alloy_rpc_types_engine::ForkchoiceState; pub use consistent_view::{ConsistentDbView, ConsistentViewError}; -use reth_rpc_types::engine::ForkchoiceState; /// The main type for interacting with the blockchain. /// diff --git a/crates/storage/provider/src/traits/chain_info.rs b/crates/storage/provider/src/traits/chain_info.rs index 5e6379f01c479..76eb7fd3fbe5e 100644 --- a/crates/storage/provider/src/traits/chain_info.rs +++ b/crates/storage/provider/src/traits/chain_info.rs @@ -1,5 +1,5 @@ +use alloy_rpc_types_engine::ForkchoiceState; use reth_primitives::SealedHeader; -use reth_rpc_types::engine::ForkchoiceState; use std::time::Instant; /// A type that can track updates related to fork choice updates. 
From bab96bedbc7c7720e73f172831697c462c8f5944 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 10 May 2024 19:57:23 +0200 Subject: [PATCH 533/700] fix: skip s value check in trace_filter recovery (#8200) --- crates/rpc/rpc/src/trace.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 7104409146f7c..0a1494e0e97ef 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -266,7 +266,7 @@ where let mut transaction_indices = HashSet::new(); let mut highest_matching_index = 0; for (tx_idx, tx) in block.body.iter().enumerate() { - let from = tx.recover_signer().ok_or(BlockError::InvalidSignature)?; + let from = tx.recover_signer_unchecked().ok_or(BlockError::InvalidSignature)?; let to = tx.to(); if matcher.matches(from, to) { let idx = tx_idx as u64; From e20cb918273f79072b9b85678abb3a144bed4289 Mon Sep 17 00:00:00 2001 From: Rupam Dey <117000803+rupam-04@users.noreply.github.com> Date: Sat, 11 May 2024 19:11:41 +0530 Subject: [PATCH 534/700] Extract layers module from rpc crate (#8163) Co-authored-by: Emilia Hane --- Cargo.lock | 529 +++++++++--------- Cargo.toml | 2 + crates/e2e-test-utils/Cargo.toml | 1 + crates/e2e-test-utils/src/engine_api.rs | 2 +- crates/node-core/Cargo.toml | 1 + crates/node-core/src/args/rpc_server.rs | 6 +- crates/node-core/src/cli/config.rs | 6 +- crates/node-core/src/utils.rs | 2 +- crates/node/builder/Cargo.toml | 1 + crates/node/builder/src/launch/common.rs | 2 +- crates/node/builder/src/lib.rs | 2 + crates/node/builder/src/rpc.rs | 2 +- crates/rpc/rpc-builder/Cargo.toml | 1 + crates/rpc/rpc-builder/src/auth.rs | 7 +- crates/rpc/rpc-builder/src/lib.rs | 8 +- crates/rpc/rpc-builder/tests/it/auth.rs | 2 +- crates/rpc/rpc-builder/tests/it/utils.rs | 2 +- crates/rpc/rpc-layer/Cargo.toml | 29 + .../src}/auth_client_layer.rs | 0 .../layers => rpc-layer/src}/auth_layer.rs | 4 +- .../layers => rpc-layer/src}/jwt_secret.rs | 2 +- .../layers 
=> rpc-layer/src}/jwt_validator.rs | 4 +- .../layers/mod.rs => rpc-layer/src/lib.rs} | 14 + crates/rpc/rpc/src/lib.rs | 12 +- 24 files changed, 352 insertions(+), 289 deletions(-) create mode 100644 crates/rpc/rpc-layer/Cargo.toml rename crates/rpc/{rpc/src/layers => rpc-layer/src}/auth_client_layer.rs (100%) rename crates/rpc/{rpc/src/layers => rpc-layer/src}/auth_layer.rs (98%) rename crates/rpc/{rpc/src/layers => rpc-layer/src}/jwt_secret.rs (99%) rename crates/rpc/{rpc/src/layers => rpc-layer/src}/jwt_validator.rs (96%) rename crates/rpc/{rpc/src/layers/mod.rs => rpc-layer/src/lib.rs} (62%) diff --git a/Cargo.lock b/Cargo.lock index f9c1af70f1427..f1a197b371128 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -71,7 +71,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", - "getrandom 0.2.14", + "getrandom 0.2.15", "once_cell", "version_check", "zerocopy", @@ -146,7 +146,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" +source = "git+https://github.com/alloy-rs/alloy#6c58a0a2f1b2325aeba6a359e5ea5f4faa08bda0" dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-primitives", @@ -171,7 +171,7 @@ dependencies = [ "itoa", "serde", "serde_json", - "winnow 0.6.7", + "winnow 0.6.8", ] [[package]] @@ -197,7 +197,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" +source = "git+https://github.com/alloy-rs/alloy#6c58a0a2f1b2325aeba6a359e5ea5f4faa08bda0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -222,7 +222,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" +source = 
"git+https://github.com/alloy-rs/alloy#6c58a0a2f1b2325aeba6a359e5ea5f4faa08bda0" dependencies = [ "alloy-primitives", "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -300,7 +300,7 @@ dependencies = [ "derive_arbitrary", "derive_more", "ethereum_ssz", - "getrandom 0.2.14", + "getrandom 0.2.15", "hex-literal", "itoa", "k256", @@ -360,7 +360,7 @@ checksum = "1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -408,7 +408,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" +source = "git+https://github.com/alloy-rs/alloy#6c58a0a2f1b2325aeba6a359e5ea5f4faa08bda0" dependencies = [ "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -490,7 +490,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" +source = "git+https://github.com/alloy-rs/alloy#6c58a0a2f1b2325aeba6a359e5ea5f4faa08bda0" dependencies = [ "alloy-primitives", "serde", @@ -541,7 +541,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", "syn-solidity", "tiny-keccak", ] @@ -559,7 +559,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.60", + "syn 2.0.61", "syn-solidity", ] @@ -569,7 +569,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8e71ea68e780cc203919e03f69f59e7afe92d2696fb1dcb6662f61e4031b6" dependencies = [ - "winnow 0.6.7", + "winnow 0.6.8", ] [[package]] @@ -591,7 +591,7 @@ version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" dependencies = [ "alloy-json-rpc", - "base64 0.22.0", + "base64 0.22.1", 
"futures-util", "futures-utils-wasm", "serde", @@ -628,7 +628,7 @@ dependencies = [ "arbitrary", "derive_arbitrary", "derive_more", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "nybbles", "proptest", "proptest-derive", @@ -660,47 +660,48 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.13" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" +checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" [[package]] name = "anstyle-parse" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" 
dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -708,9 +709,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.82" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" +checksum = "25bdb32cbbdce2b519a9cd7df3a678443100e265d5e25ca763b7572a5104f5f3" [[package]] name = "aquamarine" @@ -723,7 +724,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -909,9 +910,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e9eabd7a98fe442131a17c316bd9349c43695e49e730c3c8e12cfb5f4da2693" +checksum = "9c90a406b4495d129f00461241616194cb8a032c8d1c53c657f0961d5f8e0498" dependencies = [ "brotli", "flate2", @@ -967,7 +968,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -984,7 +985,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -1022,14 +1023,14 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] name = "autocfg" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "backon" @@ -1084,9 +1085,9 @@ checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64" -version = "0.22.0" +version = "0.22.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" @@ -1151,7 +1152,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.60", + "syn 2.0.61", "which", ] @@ -1299,7 +1300,7 @@ dependencies = [ "cfg-if", "dashmap", "fast-float", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "icu_normalizer", "indexmap 2.2.6", "intrusive-collections", @@ -1334,7 +1335,7 @@ checksum = "c055ef3cd87ea7db014779195bc90c6adfc35de4902e3b2fe587adecbd384578" dependencies = [ "boa_macros", "boa_profiler", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "thin-vec", ] @@ -1346,7 +1347,7 @@ checksum = "0cacc9caf022d92195c827a3e5bf83f96089d4bfaff834b359ac7b6be46e9187" dependencies = [ "boa_gc", "boa_macros", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "indexmap 2.2.6", "once_cell", "phf", @@ -1362,7 +1363,7 @@ checksum = "6be9c93793b60dac381af475b98634d4b451e28336e72218cad9a20176218dbc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", "synstructure", ] @@ -1402,9 +1403,9 @@ dependencies = [ [[package]] name = "brotli" -version = "5.0.0" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19483b140a7ac7174d34b5a581b406c64f84da5409d3e09cf4fff604f9270e67" +checksum = "74f7971dbd9326d58187408ab83117d8ac1bb9c17b085fdacd1cf2f598719b6b" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1471,7 +1472,7 @@ checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -1491,9 +1492,9 @@ dependencies = [ [[package]] name = "c-kzg" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3130f3d8717cc02e668a896af24984d5d5d4e8bf12e278e982e0f1bd88a0f9af" +checksum = 
"cdf100c4cea8f207e883ff91ca886d621d8a166cb04971dfaa9bb8fd99ed95df" dependencies = [ "blst", "cc", @@ -1529,7 +1530,7 @@ checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" dependencies = [ "camino", "cargo-platform", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "thiserror", @@ -1558,9 +1559,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.95" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" +checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4" dependencies = [ "jobserver", "libc", @@ -1685,7 +1686,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -1748,9 +1749,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" [[package]] name = "comfy-table" @@ -2127,7 +2128,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -2284,7 +2285,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -2317,7 +2318,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core 0.20.8", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -2327,7 +2328,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "lock_api", "once_cell", "parking_lot_core 0.9.10", @@ -2335,15 +2336,15 @@ 
dependencies = [ [[package]] name = "data-encoding" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] name = "data-encoding-macro" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20c01c06f5f429efdf2bae21eb67c28b3df3cf85b7dd2d8ef09c0838dac5d33e" +checksum = "f1559b6cba622276d6d63706db152618eeb15b89b3e4041446b05876e352e639" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -2351,9 +2352,9 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0047d07f2c89b17dd631c80450d69841a6b5d7fb17278cbc43d7e4cfcf2576f3" +checksum = "332d754c0af53bc87c108fed664d121ecf59207ec4196041f04d6ab9002ad33f" dependencies = [ "data-encoding", "syn 1.0.109", @@ -2434,7 +2435,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -2586,7 +2587,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -2783,7 +2784,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -2796,7 +2797,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -2807,7 +2808,7 @@ checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -2818,9 +2819,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" 
[[package]] name = "errno" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", "windows-sys 0.52.0", @@ -3108,9 +3109,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.29" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4556222738635b7a3417ae6130d8f52201e45a0c4d1a907f0826383adb5f85e7" +checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" dependencies = [ "crc32fast", "miniz_oxide", @@ -3240,7 +3241,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -3330,9 +3331,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "js-sys", @@ -3472,9 +3473,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", @@ -3487,7 +3488,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -3496,7 +3497,7 @@ version = "0.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -3734,7 +3735,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -3820,7 +3821,7 @@ dependencies = [ "http-body 1.0.0", "hyper 1.3.1", "pin-project-lite", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tower", "tower-service", @@ -3850,7 +3851,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -4000,7 +4001,7 @@ checksum = "d2abdd3a62551e8337af119c5899e600ca0c88ec8f23a46c60ba216c803dcf1a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -4140,7 +4141,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "serde", ] @@ -4235,7 +4236,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.6", + "socket2 0.5.7", "widestring", "windows-sys 0.48.0", "winreg 0.50.0", @@ -4268,6 +4269,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "is_terminal_polyfill" +version = "1.70.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" + [[package]] name = "itertools" version = "0.10.5" @@ -4312,9 +4319,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4b0e68d9af1f066c06d6e2397583795b912d78537d7d907c561e82c13d69fa1" +checksum = 
"cfdb12a2381ea5b2e68c3469ec604a007b367778cdb14d09612c8069ebd616ad" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4330,9 +4337,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92f254f56af1ae84815b9b1325094743dcf05b92abb5e94da2e81a35cff0cada" +checksum = "4978087a58c3ab02efc5b07c5e5e2803024536106fd5506f558db172c889b3aa" dependencies = [ "futures-channel", "futures-util", @@ -4354,9 +4361,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "274d68152c24aa78977243bb56f28d7946e6aa309945b37d33174a3f92d89a3a" +checksum = "b4b257e1ec385e07b0255dde0b933f948b5c8b8c28d42afda9587c3a967b896d" dependencies = [ "anyhow", "async-trait", @@ -4380,9 +4387,9 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac13bc1e44cd00448a5ff485824a128629c945f02077804cb659c07a0ba41395" +checksum = "1ccf93fc4a0bfe05d851d37d7c32b7f370fe94336b52a2f0efc5f1981895c2e5" dependencies = [ "async-trait", "hyper 0.14.28", @@ -4400,22 +4407,22 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c326f9e95aeff7d707b2ffde72c22a52acc975ba1c48587776c02b90c4747a6" +checksum = "7d0bb047e79a143b32ea03974a6bf59b62c2a4c5f5d42a381c907a8bbb3f75c0" dependencies = [ "heck 0.4.1", - "proc-macro-crate 3.1.0", + "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] name = "jsonrpsee-server" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3b5bfbda5f8fb63f997102fd18f73e35e34c84c6dcdbdbbe72c6e48f6d2c959b" +checksum = "12d8b6a9674422a8572e0b0abb12feeb3f2aeda86528c80d0350c2bd0923ab41" dependencies = [ "futures-util", "http 0.2.12", @@ -4437,9 +4444,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dc828e537868d6b12bbb07ec20324909a22ced6efca0057c825c3e1126b2c6d" +checksum = "150d6168405890a7a3231a3c74843f58b8959471f6df76078db2619ddee1d07d" dependencies = [ "anyhow", "beef", @@ -4450,9 +4457,9 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cf8dcee48f383e24957e238240f997ec317ba358b4e6d2e8be3f745bcdabdb5" +checksum = "f448d8eacd945cc17b6c0b42c361531ca36a962ee186342a97cdb8fca679cd77" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4461,9 +4468,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f00abe918bf34b785f87459b9205790e5361a3f7437adb50e928dc243f27eb" +checksum = "58b9db2dfd5bb1194b0ce921504df9ceae210a345bc2f6c5a61432089bbab070" dependencies = [ "http 0.2.12", "jsonrpsee-client-transport", @@ -4526,9 +4533,9 @@ dependencies = [ [[package]] name = "keccak-asm" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb8515fff80ed850aea4a1595f2e519c003e2a00a82fe168ebf5269196caf444" +checksum = "47a3633291834c4fbebf8673acbc1b04ec9d151418ff9b8e26dcd79129928758" dependencies = [ "digest 0.10.7", "sha3-asm", @@ -4551,9 +4558,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.153" +version = "0.2.154" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" [[package]] name = "libffi" @@ -4600,7 +4607,7 @@ dependencies = [ "either", "futures", "futures-timer", - "getrandom 0.2.14", + "getrandom 0.2.15", "instant", "libp2p-allow-block-list", "libp2p-connection-limits", @@ -4687,9 +4694,9 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.44.1" +version = "0.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e92532fc3c4fb292ae30c371815c9b10103718777726ea5497abc268a4761866" +checksum = "80cae6cb75f89dbca53862f9ebe0b9f463aa7b302762fcfaafb9e51dcc9b0f7e" dependencies = [ "either", "fnv", @@ -4698,6 +4705,7 @@ dependencies = [ "instant", "libp2p-core", "libp2p-identity", + "lru", "multistream-select", "once_cell", "rand 0.8.5", @@ -4835,7 +4843,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -4970,7 +4978,7 @@ checksum = "38b4faf00617defe497754acde3024865bc143d44a86799b24e191ecff91354f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -5094,7 +5102,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -5263,9 +5271,9 @@ dependencies = [ [[package]] name = "num" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3135b08af27d103b0a51f2ae0f8632117b7b185ccf931445affa8df530576a41" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" dependencies = [ "num-bigint", "num-complex", @@ -5277,11 +5285,10 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" dependencies = [ - "autocfg", "num-integer", "num-traits", "serde", @@ -5289,9 +5296,9 @@ dependencies = [ [[package]] name = "num-complex" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23c6602fda94a57c990fe0df199a035d83576b496aa29f4e634a8ac6004e68a6" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" dependencies = [ "num-traits", ] @@ -5323,9 +5330,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", "num-integer", @@ -5334,11 +5341,10 @@ dependencies = [ [[package]] name = "num-rational" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" dependencies = [ - "autocfg", "num-bigint", "num-integer", "num-traits", @@ -5346,9 +5352,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", @@ -5379,10 +5385,10 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" dependencies = [ - "proc-macro-crate 3.1.0", + "proc-macro-crate", 
"proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -5474,9 +5480,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.9" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ "arrayvec", "bitvec", @@ -5489,11 +5495,11 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.9" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ - "proc-macro-crate 2.0.0", + "proc-macro-crate", "proc-macro2", "quote", "syn 1.0.109", @@ -5555,9 +5561,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pbkdf2" @@ -5584,7 +5590,7 @@ version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "serde", ] @@ -5596,9 +5602,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" +checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" dependencies = [ "memchr", "thiserror", @@ -5648,7 +5654,7 @@ dependencies = [ "phf_shared", 
"proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -5677,7 +5683,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -5863,12 +5869,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ac2cf0f2e4f42b49f5ffd07dae8d746508ef7526c13940e5f524012ae6c6550" +checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -5884,15 +5890,6 @@ dependencies = [ "uint", ] -[[package]] -name = "proc-macro-crate" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" -dependencies = [ - "toml_edit 0.20.7", -] - [[package]] name = "proc-macro-crate" version = "3.1.0" @@ -5928,9 +5925,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.81" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" +checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" dependencies = [ "unicode-ident", ] @@ -6137,7 +6134,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.14", + "getrandom 0.2.15", ] [[package]] @@ -6231,7 +6228,7 @@ version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ - "getrandom 0.2.14", + "getrandom 0.2.15", "libredox", "thiserror", ] @@ -6286,7 +6283,7 @@ version = "0.9.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0eae2a1ebfecc58aff952ef8ccd364329abe627762f5bf09ff42eb9d98522479" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", "memchr", ] @@ -6334,7 +6331,7 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "bytes", "futures-core", "futures-util", @@ -6603,7 +6600,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -6805,6 +6802,7 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-rpc", + "reth-rpc-layer", "reth-tracing", "secp256k1", "serde_json", @@ -7129,7 +7127,7 @@ dependencies = [ "quote", "regex", "serial_test", - "syn 2.0.60", + "syn 2.0.61", "trybuild", ] @@ -7308,6 +7306,7 @@ dependencies = [ "reth-prune", "reth-rpc", "reth-rpc-engine-api", + "reth-rpc-layer", "reth-stages", "reth-static-file", "reth-tasks", @@ -7363,6 +7362,7 @@ dependencies = [ "reth-rpc-api", "reth-rpc-builder", "reth-rpc-engine-api", + "reth-rpc-layer", "reth-rpc-types", "reth-rpc-types-compat", "reth-tasks", @@ -7757,6 +7757,7 @@ dependencies = [ "reth-rpc", "reth-rpc-api", "reth-rpc-engine-api", + "reth-rpc-layer", "reth-rpc-types", "reth-rpc-types-compat", "reth-tasks", @@ -7800,6 +7801,27 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-rpc-layer" +version = "0.2.0-beta.7" +dependencies = [ + "assert_matches", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.28", + "jsonrpsee", + "jsonwebtoken 8.3.0", + "pin-project", + "rand 0.8.5", + "reth-primitives", + "serde", + "tempfile", + "thiserror", + "tokio", + "tower", + "tracing", +] + [[package]] name = "reth-rpc-types" version = "0.2.0-beta.7" @@ -8128,7 +8150,7 @@ dependencies = [ "derive_more", "dyn-clone", "enumn", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "hex", "once_cell", "serde", @@ -8176,7 +8198,7 @@ 
checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.14", + "getrandom 0.2.15", "libc", "spin 0.9.8", "untrusted 0.9.0", @@ -8213,9 +8235,9 @@ dependencies = [ [[package]] name = "roaring" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1c77081a55300e016cb86f2864415b7518741879db925b8d488a0ee0d2da6bf" +checksum = "b26f4c25a604fcb3a1bcd96dd6ba37c93840de95de8198d94c0d571a74a804d1" dependencies = [ "bytemuck", "byteorder", @@ -8297,9 +8319,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" @@ -8328,7 +8350,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.22", + "semver 1.0.23", ] [[package]] @@ -8410,15 +8432,15 @@ version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.5.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "beb461507cee2c2ff151784c52762cf4d9ff6a61f3e80968600ed24fa837fa54" +checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" [[package]] name = "rustls-webpki" @@ -8443,9 +8465,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.15" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" +checksum = "092474d1a01ea8278f69e6a358998405fae5b8b963ddaeb2b0b04a128bf1dfb0" [[package]] name = "rusty-fork" @@ -8472,9 +8494,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "ryu-js" @@ -8493,9 +8515,9 @@ dependencies = [ [[package]] name = "scc" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec96560eea317a9cc4e0bb1f6a2c93c09a19b8c4fc5cb3fcc0ec1c094cd783e2" +checksum = "76ad2bbb0ae5100a07b7a6f2ed7ab5fd0045551a4c507989b7a620046ea3efdc" dependencies = [ "sdd", ] @@ -8511,9 +8533,9 @@ dependencies = [ [[package]] name = "schnellru" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "772575a524feeb803e5b0fcbc6dd9f367e579488197c94c6e4023aad2305774d" +checksum = "6b0cf7da6fc4477944d5529807234f66802fcb618fc62b9c05bedca7f9be6c43" dependencies = [ "ahash", "cfg-if", @@ -8578,11 +8600,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", "core-foundation", "core-foundation-sys", "libc", @@ -8591,9 +8613,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" +checksum = 
"317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -8610,9 +8632,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" dependencies = [ "serde", ] @@ -8634,9 +8656,9 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.199" +version = "1.0.201" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9f6e76df036c77cd94996771fb40db98187f096dd0b9af39c6c6e452ba966a" +checksum = "780f1cebed1629e4753a1a38a3c72d30b97ec044f0aef68cb26650a3c5cf363c" dependencies = [ "serde_derive", ] @@ -8652,20 +8674,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.199" +version = "1.0.201" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11bd257a6541e141e42ca6d24ae26f7714887b47e89aa739099104c7e4d3b7fc" +checksum = "c5e405930b9796f1c00bee880d03fc7e0bb4b9a11afc776885ffe84320da2865" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] name = "serde_json" -version = "1.0.116" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" +checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" dependencies = [ "indexmap 2.2.6", "itoa", @@ -8707,11 +8729,11 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c85f8e96d1d6857f13768fcbd895fcb06225510022a2774ed8b5150581847b0" +checksum = "0ad483d2ab0149d5a5ebcd9972a3852711e0153d863bf5a5d0391d28883c4a20" 
dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", @@ -8725,14 +8747,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8b3a576c4eb2924262d5951a3b737ccaf16c931e39a2810c36f9a7e25575557" +checksum = "65569b702f41443e8bc8bbb1c5779bd0450bbe723b56198980e80ec45780bce2" dependencies = [ "darling 0.20.8", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -8757,7 +8779,7 @@ checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -8820,9 +8842,9 @@ dependencies = [ [[package]] name = "sha3-asm" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bac61da6b35ad76b195eb4771210f947734321a8d81d7738e1580d953bc7a15e" +checksum = "a9b57fd861253bff08bb1919e995f90ba8f4889de2726091c8876f3a4e823b40" dependencies = [ "cc", "cfg-if", @@ -8973,9 +8995,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", @@ -9041,7 +9063,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ff9eaf853dec4c8802325d8b6d3dffa86cc707fd7a1a4cdbf416e13b061787a" dependencies = [ "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -9099,7 +9121,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -9167,9 +9189,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.60" +version = "2.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" +checksum = "c993ed8ccba56ae856363b1845da7266a7cb78e1d146c8a32d54b45a8b831fc9" dependencies = [ "proc-macro2", "quote", @@ -9185,7 +9207,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -9202,7 +9224,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -9261,9 +9283,9 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test-fuzz" -version = "5.0.0" +version = "5.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b139530208017f9d5a113784ed09cf1b8b22dee95eb99d51d89af1a3c2d6594e" +checksum = "a9082b62b0966dea2bf7bb254db102c3773d4f0c214a063b37a64f5497304cdc" dependencies = [ "serde", "test-fuzz-internal", @@ -9273,9 +9295,9 @@ dependencies = [ [[package]] name = "test-fuzz-internal" -version = "5.0.0" +version = "5.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e78ed8148311b6a02578dee5fd77600bf8805b77b2cb8382a9435348080985" +checksum = "292a371bafdc4ea286b6bc259c3f8dc82d3ef5cd64878fe4fffecbaa85666710" dependencies = [ "bincode", "cargo_metadata", @@ -9284,9 +9306,9 @@ dependencies = [ [[package]] name = "test-fuzz-macro" -version = "5.0.0" +version = "5.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f9bc8c69f276df24e4d1c082e52ea057544495916c4aa0708b82e47f55f364" +checksum = "abe1689311f7edc6bab4033a259a3c37510b41063e4b01e57970105c0c764428" dependencies = [ "darling 0.20.8", "itertools 0.12.1", @@ -9294,14 +9316,14 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] name = "test-fuzz-runtime" -version = "5.0.0" +version = "5.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2b657ccc932fde05dbac5c460bffa40809937adaa5558863fe8174526e1b3bc9" +checksum = "2282e4b5879b5408b03064f54b1841ce2ea52710da3f2fea33ce05ce63edf455" dependencies = [ "hex", "num-traits", @@ -9318,22 +9340,22 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.59" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" +checksum = "579e9083ca58dd9dcf91a9923bb9054071b9ebbd800b342194c9feb0ee89fc18" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.59" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" +checksum = "e2470041c06ec3ac1ab38d0356a6119054dedaea53e12fbefc0de730a1c08524" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -9484,7 +9506,7 @@ dependencies = [ "parking_lot 0.12.2", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.6", + "socket2 0.5.7", "tokio-macros", "windows-sys 0.48.0", ] @@ -9497,7 +9519,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -9535,9 +9557,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", @@ -9546,7 +9568,6 @@ dependencies = [ "pin-project-lite", "slab", "tokio", - "tracing", ] [[package]] @@ -9570,17 +9591,6 @@ dependencies = [ "serde", ] -[[package]] -name = "toml_edit" -version = "0.20.7" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" -dependencies = [ - "indexmap 2.2.6", - "toml_datetime", - "winnow 0.5.40", -] - [[package]] name = "toml_edit" version = "0.21.1" @@ -9602,7 +9612,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.7", + "winnow 0.6.8", ] [[package]] @@ -9700,7 +9710,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -9899,12 +9909,11 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.91" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad7eb6319ebadebca3dacf1f85a93bc54b73dd81b9036795f73de7ddfe27d5a" +checksum = "4ddb747392ea12569d501a5bbca08852e4c8cd88b92566074b2243b8846f09e6" dependencies = [ "glob", - "once_cell", "serde", "serde_derive", "serde_json", @@ -10065,7 +10074,7 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ - "getrandom 0.2.14", + "getrandom 0.2.15", ] [[package]] @@ -10173,7 +10182,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", "wasm-bindgen-shared", ] @@ -10207,7 +10216,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -10468,9 +10477,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b9415ee827af173ebb3f15f9083df5a122eb93572ec28741fb153356ea2578" +checksum = 
"c3c52e9c97a68071b23e836c9380edae937f17b9c4667bd021973efc689f618d" dependencies = [ "memchr", ] @@ -10560,28 +10569,28 @@ checksum = "9e6936f0cce458098a201c245a11bef556c6a0181129c7034d10d76d1ec3a2b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", "synstructure", ] [[package]] name = "zerocopy" -version = "0.7.32" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -10601,7 +10610,7 @@ checksum = "e6a647510471d372f2e6c2e6b7219e44d8c574d24fdc11c610a61455782f18c3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", "synstructure", ] @@ -10622,7 +10631,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -10644,7 +10653,7 @@ checksum = "7b4e5997cbf58990550ef1f0e5124a05e47e1ebd33a84af25739be6031a62c20" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index bdde970162e50..0d531f5fc1d88 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,6 +46,7 @@ members = [ "crates/rpc/rpc-testing-util/", "crates/rpc/rpc-types/", "crates/rpc/rpc-types-compat/", + "crates/rpc/rpc-layer", "crates/engine-primitives/", "crates/ethereum/engine-primitives/", "crates/ethereum/node", @@ -264,6 +265,7 @@ reth-rpc-builder = { path = 
"crates/rpc/rpc-builder" } reth-rpc-engine-api = { path = "crates/rpc/rpc-engine-api" } reth-rpc-types = { path = "crates/rpc/rpc-types" } reth-rpc-types-compat = { path = "crates/rpc/rpc-types-compat" } +reth-rpc-layer = { path = "crates/rpc/rpc-layer" } reth-stages = { path = "crates/stages" } reth-stages-api = { path = "crates/stages-api" } reth-static-file = { path = "crates/static-file" } diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 03e0edb91c386..59424cac98fda 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -16,6 +16,7 @@ reth-node-ethereum.workspace = true reth-tracing.workspace = true reth-db.workspace = true reth-rpc.workspace = true +reth-rpc-layer.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-provider.workspace = true reth-node-builder.workspace = true diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index fefd7d6ff6a91..66e8900323be4 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -13,7 +13,7 @@ use reth::{ }; use reth_payload_builder::PayloadId; use reth_primitives::B256; -use reth_rpc::AuthClientService; +use reth_rpc_layer::AuthClientService; use std::marker::PhantomData; /// Helper for engine api operations diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index ef5d63b3fffad..7637b3b2dcb1c 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -23,6 +23,7 @@ reth-rpc.workspace = true reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true reth-rpc-api = { workspace = true, features = ["client"] } +reth-rpc-layer.workspace = true reth-transaction-pool.workspace = true reth-tracing.workspace = true reth-config.workspace = true diff --git a/crates/node-core/src/args/rpc_server.rs b/crates/node-core/src/args/rpc_server.rs index e19a88737435d..c464463171779 100644 --- 
a/crates/node-core/src/args/rpc_server.rs +++ b/crates/node-core/src/args/rpc_server.rs @@ -20,9 +20,8 @@ use reth_provider::{ AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, StateProviderFactory, }; -use reth_rpc::{ - eth::{cache::EthStateCacheConfig, gas_oracle::GasPriceOracleConfig, RPC_DEFAULT_GAS_CAP}, - JwtError, JwtSecret, +use reth_rpc::eth::{ + cache::EthStateCacheConfig, gas_oracle::GasPriceOracleConfig, RPC_DEFAULT_GAS_CAP, }; use reth_rpc_builder::{ auth::{AuthServerConfig, AuthServerHandle}, @@ -32,6 +31,7 @@ use reth_rpc_builder::{ RpcServerConfig, RpcServerHandle, ServerBuilder, TransportRpcModuleConfig, }; use reth_rpc_engine_api::EngineApi; +use reth_rpc_layer::{JwtError, JwtSecret}; use reth_tasks::TaskSpawner; use reth_transaction_pool::TransactionPool; use std::{ diff --git a/crates/node-core/src/cli/config.rs b/crates/node-core/src/cli/config.rs index 1bce398ef2db1..4583832012834 100644 --- a/crates/node-core/src/cli/config.rs +++ b/crates/node-core/src/cli/config.rs @@ -2,14 +2,12 @@ use reth_network::protocol::IntoRlpxSubProtocol; use reth_primitives::Bytes; -use reth_rpc::{ - eth::{cache::EthStateCacheConfig, gas_oracle::GasPriceOracleConfig}, - JwtError, JwtSecret, -}; +use reth_rpc::eth::{cache::EthStateCacheConfig, gas_oracle::GasPriceOracleConfig}; use reth_rpc_builder::{ auth::AuthServerConfig, error::RpcError, EthConfig, Identity, IpcServerBuilder, RpcServerConfig, ServerBuilder, TransportRpcModuleConfig, }; +use reth_rpc_layer::{JwtError, JwtSecret}; use reth_transaction_pool::PoolConfig; use std::{borrow::Cow, path::PathBuf, time::Duration}; diff --git a/crates/node-core/src/utils.rs b/crates/node-core/src/utils.rs index fc1467ce30eac..32dc509fa140a 100644 --- a/crates/node-core/src/utils.rs +++ b/crates/node-core/src/utils.rs @@ -13,7 +13,7 @@ use reth_primitives::{ fs, BlockHashOrNumber, ChainSpec, HeadersDirection, SealedBlock, SealedHeader, }; use 
reth_provider::BlockReader; -use reth_rpc::{JwtError, JwtSecret}; +use reth_rpc_layer::{JwtError, JwtSecret}; use std::{ env::VarError, path::{Path, PathBuf}, diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 26635e536deb9..e36ac2e2c396a 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -22,6 +22,7 @@ reth-provider.workspace = true reth-db.workspace = true reth-rpc-engine-api.workspace = true reth-rpc.workspace = true +reth-rpc-layer.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true reth-network.workspace = true diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 043b587b891b7..8a5d8e519005b 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -19,7 +19,7 @@ use reth_node_core::{ use reth_primitives::{BlockNumber, Chain, ChainSpec, Head, PruneModes, B256}; use reth_provider::{providers::StaticFileProvider, ProviderFactory, StaticFileProviderFactory}; use reth_prune::PrunerBuilder; -use reth_rpc::JwtSecret; +use reth_rpc_layer::JwtSecret; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{error, info, warn}; diff --git a/crates/node/builder/src/lib.rs b/crates/node/builder/src/lib.rs index 11b56ba242f08..ce509658a741d 100644 --- a/crates/node/builder/src/lib.rs +++ b/crates/node/builder/src/lib.rs @@ -46,3 +46,5 @@ pub use reth_node_core::node_config::NodeConfig; pub use reth_node_api::*; use aquamarine as _; + +use reth_rpc as _; diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 3ac553fa3a544..795d02dfd3072 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -15,7 +15,7 @@ use reth_node_core::{ }, }; use reth_payload_builder::PayloadBuilderHandle; -use reth_rpc::JwtSecret; +use reth_rpc_layer::JwtSecret; use reth_tasks::TaskExecutor; use 
reth_tracing::tracing::{debug, info}; use std::{ diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 7e198c9989f6c..9087ff7c7ffc5 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -18,6 +18,7 @@ reth-network-api.workspace = true reth-provider.workspace = true reth-rpc.workspace = true reth-rpc-api.workspace = true +reth-rpc-layer.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true reth-evm.workspace = true diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index 186d61332ab6b..c9d1bb92b215c 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -28,10 +28,13 @@ use reth_rpc::{ cache::EthStateCache, gas_oracle::GasPriceOracle, EthFilterConfig, FeeHistoryCache, FeeHistoryCacheConfig, }, - secret_to_bearer_header, AuthClientLayer, AuthClientService, AuthLayer, EngineEthApi, EthApi, - EthFilter, EthSubscriptionIdProvider, JwtAuthValidator, JwtSecret, + EngineEthApi, EthApi, EthFilter, EthSubscriptionIdProvider, }; use reth_rpc_api::servers::*; +use reth_rpc_layer::{ + secret_to_bearer_header, AuthClientLayer, AuthClientService, AuthLayer, JwtAuthValidator, + JwtSecret, +}; use reth_tasks::{pool::BlockingTaskPool, TaskSpawner}; use reth_transaction_pool::TransactionPool; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 7d86a00562c68..036127fdf28df 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -84,12 +84,12 @@ //! AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, //! ChangeSetReader, EvmEnvProvider, StateProviderFactory, //! }; -//! use reth_rpc::JwtSecret; //! use reth_rpc_api::EngineApiServer; //! use reth_rpc_builder::{ //! auth::AuthServerConfig, RethRpcModule, RpcModuleBuilder, RpcServerConfig, //! 
TransportRpcModuleConfig, //! }; +//! use reth_rpc_layer::JwtSecret; //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::TransactionPool; //! use tokio::try_join; @@ -187,11 +187,11 @@ use reth_rpc::{ traits::RawTransactionForwarder, EthBundle, FeeHistoryCache, }, - AdminApi, AuthLayer, Claims, DebugApi, EngineEthApi, EthApi, EthFilter, EthPubSub, - EthSubscriptionIdProvider, JwtAuthValidator, JwtSecret, NetApi, OtterscanApi, RPCApi, RethApi, - TraceApi, TxPoolApi, Web3Api, + AdminApi, DebugApi, EngineEthApi, EthApi, EthFilter, EthPubSub, EthSubscriptionIdProvider, + NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, TxPoolApi, Web3Api, }; use reth_rpc_api::servers::*; +use reth_rpc_layer::{AuthLayer, Claims, JwtAuthValidator, JwtSecret}; use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, TaskSpawner, TokioTaskExecutor, diff --git a/crates/rpc/rpc-builder/tests/it/auth.rs b/crates/rpc/rpc-builder/tests/it/auth.rs index b5416bf67743e..6809deffabd30 100644 --- a/crates/rpc/rpc-builder/tests/it/auth.rs +++ b/crates/rpc/rpc-builder/tests/it/auth.rs @@ -4,8 +4,8 @@ use crate::utils::launch_auth; use jsonrpsee::core::client::{ClientT, SubscriptionClientT}; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_primitives::{Block, U64}; -use reth_rpc::JwtSecret; use reth_rpc_api::clients::EngineApiClient; +use reth_rpc_layer::JwtSecret; use reth_rpc_types::engine::{ForkchoiceState, PayloadId, TransitionConfiguration}; use reth_rpc_types_compat::engine::payload::{ block_to_payload_v1, convert_block_to_payload_input_v2, diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index 403e12a1b317e..a3272ac026a61 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -5,13 +5,13 @@ use reth_network_api::noop::NoopNetwork; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::MAINNET; use 
reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; -use reth_rpc::JwtSecret; use reth_rpc_builder::{ auth::{AuthRpcModule, AuthServerConfig, AuthServerHandle}, RpcModuleBuilder, RpcModuleSelection, RpcServerConfig, RpcServerHandle, TransportRpcModuleConfig, }; use reth_rpc_engine_api::EngineApi; +use reth_rpc_layer::JwtSecret; use reth_tasks::TokioTaskExecutor; use reth_transaction_pool::test_utils::{TestPool, TestPoolBuilder}; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; diff --git a/crates/rpc/rpc-layer/Cargo.toml b/crates/rpc/rpc-layer/Cargo.toml new file mode 100644 index 0000000000000..21aa3f04971fd --- /dev/null +++ b/crates/rpc/rpc-layer/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "reth-rpc-layer" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[dependencies] +http.workspace = true +hyper.workspace = true +tower.workspace = true +http-body.workspace = true +pin-project.workspace = true +tokio.workspace = true +jsonrpsee.workspace = true +jsonwebtoken = "8" +rand.workspace = true +reth-primitives.workspace = true +serde.workspace = true +thiserror.workspace = true +tempfile.workspace = true +assert_matches.workspace = true +tracing.workspace = true + +[lints] +workspace = true diff --git a/crates/rpc/rpc/src/layers/auth_client_layer.rs b/crates/rpc/rpc-layer/src/auth_client_layer.rs similarity index 100% rename from crates/rpc/rpc/src/layers/auth_client_layer.rs rename to crates/rpc/rpc-layer/src/auth_client_layer.rs diff --git a/crates/rpc/rpc/src/layers/auth_layer.rs b/crates/rpc/rpc-layer/src/auth_layer.rs similarity index 98% rename from crates/rpc/rpc/src/layers/auth_layer.rs rename to crates/rpc/rpc-layer/src/auth_layer.rs index ed22d607c1009..4803d2c987392 100644 --- a/crates/rpc/rpc/src/layers/auth_layer.rs +++ b/crates/rpc/rpc-layer/src/auth_layer.rs @@ -18,7 +18,7 @@ use 
tower::{Layer, Service}; /// ```rust /// async fn build_layered_rpc_server() { /// use jsonrpsee::server::ServerBuilder; -/// use reth_rpc::{AuthLayer, JwtAuthValidator, JwtSecret}; +/// use reth_rpc_layer::{AuthLayer, JwtAuthValidator, JwtSecret}; /// use std::net::SocketAddr; /// /// const AUTH_PORT: u32 = 8551; @@ -167,7 +167,7 @@ mod tests { }; use super::AuthLayer; - use crate::{layers::jwt_secret::Claims, JwtAuthValidator, JwtError, JwtSecret}; + use crate::{jwt_secret::Claims, JwtAuthValidator, JwtError, JwtSecret}; const AUTH_PORT: u32 = 8551; const AUTH_ADDR: &str = "0.0.0.0"; diff --git a/crates/rpc/rpc/src/layers/jwt_secret.rs b/crates/rpc/rpc-layer/src/jwt_secret.rs similarity index 99% rename from crates/rpc/rpc/src/layers/jwt_secret.rs rename to crates/rpc/rpc-layer/src/jwt_secret.rs index a6b09e77496a5..b31cd27c63550 100644 --- a/crates/rpc/rpc/src/layers/jwt_secret.rs +++ b/crates/rpc/rpc-layer/src/jwt_secret.rs @@ -162,7 +162,7 @@ impl JwtSecret { /// and the key. /// /// ```rust - /// use reth_rpc::{Claims, JwtSecret}; + /// use reth_rpc_layer::{Claims, JwtSecret}; /// /// let my_claims = Claims { iat: 0, exp: None }; /// let secret = JwtSecret::random(); diff --git a/crates/rpc/rpc/src/layers/jwt_validator.rs b/crates/rpc/rpc-layer/src/jwt_validator.rs similarity index 96% rename from crates/rpc/rpc/src/layers/jwt_validator.rs rename to crates/rpc/rpc-layer/src/jwt_validator.rs index 86901accc7577..0f5124f9ac95d 100644 --- a/crates/rpc/rpc/src/layers/jwt_validator.rs +++ b/crates/rpc/rpc-layer/src/jwt_validator.rs @@ -4,7 +4,7 @@ use tracing::error; use crate::{AuthValidator, JwtError, JwtSecret}; /// Implements JWT validation logics and integrates -/// to an Http [`AuthLayer`][crate::layers::AuthLayer] +/// to an Http [`AuthLayer`][crate::AuthLayer] /// by implementing the [`AuthValidator`] trait. 
#[derive(Clone)] #[allow(missing_debug_implementations)] @@ -68,7 +68,7 @@ fn err_response(err: JwtError) -> Response { #[cfg(test)] mod tests { - use crate::layers::jwt_validator::get_bearer; + use crate::jwt_validator::get_bearer; use http::{header, HeaderMap}; #[test] diff --git a/crates/rpc/rpc/src/layers/mod.rs b/crates/rpc/rpc-layer/src/lib.rs similarity index 62% rename from crates/rpc/rpc/src/layers/mod.rs rename to crates/rpc/rpc-layer/src/lib.rs index 83a336e5f9452..41310c3059bcb 100644 --- a/crates/rpc/rpc/src/layers/mod.rs +++ b/crates/rpc/rpc-layer/src/lib.rs @@ -1,4 +1,18 @@ +//! Reth RPC testing utilities. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +use assert_matches as _; use http::{HeaderMap, Response}; +use jsonrpsee as _; +use tempfile as _; +use tokio as _; mod auth_client_layer; mod auth_layer; diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index d68f8a0184ea5..e562ee5bfecd0 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -25,11 +25,17 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use http as _; +use http_body as _; +use hyper as _; +use jsonwebtoken as _; +use pin_project as _; +use tower as _; + mod admin; mod debug; mod engine; pub mod eth; -mod layers; mod net; mod otterscan; mod reth; @@ -41,10 +47,6 @@ pub use admin::AdminApi; pub use debug::DebugApi; pub use engine::{EngineApi, EngineEthApi}; pub use eth::{EthApi, EthApiSpec, EthFilter, EthPubSub, EthSubscriptionIdProvider}; -pub use layers::{ - secret_to_bearer_header, AuthClientLayer, AuthClientService, AuthLayer, AuthValidator, Claims, - 
JwtAuthValidator, JwtError, JwtSecret, -}; pub use net::NetApi; pub use otterscan::OtterscanApi; pub use reth::RethApi; From 8288207cdcd85f4aa5d51ee6418bc53e4c5c64e4 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 12 May 2024 11:12:25 +0000 Subject: [PATCH 535/700] chore(deps): weekly `cargo update` (#8205) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 112 ++++++++++++++++++++++++++--------------------------- 1 file changed, 56 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f1a197b371128..c27f4b165704a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,7 +146,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#6c58a0a2f1b2325aeba6a359e5ea5f4faa08bda0" +source = "git+https://github.com/alloy-rs/alloy#af25c53f99a4549ece47625b7d65f088c3487864" dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-primitives", @@ -197,7 +197,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#6c58a0a2f1b2325aeba6a359e5ea5f4faa08bda0" +source = "git+https://github.com/alloy-rs/alloy#af25c53f99a4549ece47625b7d65f088c3487864" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -222,7 +222,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#6c58a0a2f1b2325aeba6a359e5ea5f4faa08bda0" +source = "git+https://github.com/alloy-rs/alloy#af25c53f99a4549ece47625b7d65f088c3487864" dependencies = [ "alloy-primitives", "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -360,7 +360,7 @@ checksum = "1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -408,7 +408,7 @@ dependencies = [ 
[[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#6c58a0a2f1b2325aeba6a359e5ea5f4faa08bda0" +source = "git+https://github.com/alloy-rs/alloy#af25c53f99a4549ece47625b7d65f088c3487864" dependencies = [ "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -490,7 +490,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#6c58a0a2f1b2325aeba6a359e5ea5f4faa08bda0" +source = "git+https://github.com/alloy-rs/alloy#af25c53f99a4549ece47625b7d65f088c3487864" dependencies = [ "alloy-primitives", "serde", @@ -541,7 +541,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", "syn-solidity", "tiny-keccak", ] @@ -559,7 +559,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.61", + "syn 2.0.63", "syn-solidity", ] @@ -724,7 +724,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -968,7 +968,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -985,7 +985,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -1023,7 +1023,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -1152,7 +1152,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.61", + "syn 2.0.63", "which", ] @@ -1363,7 +1363,7 @@ checksum = "6be9c93793b60dac381af475b98634d4b451e28336e72218cad9a20176218dbc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", "synstructure", ] @@ -1472,7 +1472,7 @@ checksum = 
"4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -1686,7 +1686,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -2128,7 +2128,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -2285,7 +2285,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -2318,7 +2318,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core 0.20.8", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -2435,7 +2435,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -2587,7 +2587,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -2784,7 +2784,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -2797,7 +2797,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -2808,7 +2808,7 @@ checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -3241,7 +3241,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -3851,7 +3851,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -4001,7 +4001,7 @@ checksum = 
"d2abdd3a62551e8337af119c5899e600ca0c88ec8f23a46c60ba216c803dcf1a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -4415,7 +4415,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -4978,7 +4978,7 @@ checksum = "38b4faf00617defe497754acde3024865bc143d44a86799b24e191ecff91354f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -5102,7 +5102,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -5388,7 +5388,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -5654,7 +5654,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -5683,7 +5683,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -5874,7 +5874,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -6600,7 +6600,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -7127,7 +7127,7 @@ dependencies = [ "quote", "regex", "serial_test", - "syn 2.0.61", + "syn 2.0.63", "trybuild", ] @@ -8680,7 +8680,7 @@ checksum = "c5e405930b9796f1c00bee880d03fc7e0bb4b9a11afc776885ffe84320da2865" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -8754,7 +8754,7 @@ dependencies = [ "darling 0.20.8", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -8779,7 +8779,7 @@ checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 
2.0.63", ] [[package]] @@ -9063,7 +9063,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ff9eaf853dec4c8802325d8b6d3dffa86cc707fd7a1a4cdbf416e13b061787a" dependencies = [ "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -9121,7 +9121,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -9189,9 +9189,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.61" +version = "2.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c993ed8ccba56ae856363b1845da7266a7cb78e1d146c8a32d54b45a8b831fc9" +checksum = "bf5be731623ca1a1fb7d8be6f261a3be6d3e2337b8a1f97be944d020c8fcb704" dependencies = [ "proc-macro2", "quote", @@ -9207,7 +9207,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -9224,7 +9224,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -9316,7 +9316,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -9355,7 +9355,7 @@ checksum = "e2470041c06ec3ac1ab38d0356a6119054dedaea53e12fbefc0de730a1c08524" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -9519,7 +9519,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -9710,7 +9710,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -10182,7 +10182,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", "wasm-bindgen-shared", ] @@ -10216,7 +10216,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -10569,7 +10569,7 @@ checksum = "9e6936f0cce458098a201c245a11bef556c6a0181129c7034d10d76d1ec3a2b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", "synstructure", ] @@ -10590,7 +10590,7 @@ checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -10610,7 +10610,7 @@ checksum = "e6a647510471d372f2e6c2e6b7219e44d8c574d24fdc11c610a61455782f18c3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", "synstructure", ] @@ -10631,7 +10631,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -10653,7 +10653,7 @@ checksum = "7b4e5997cbf58990550ef1f0e5124a05e47e1ebd33a84af25739be6031a62c20" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] From 487f7e302b379e704517de2688962ad9a5bdc9c5 Mon Sep 17 00:00:00 2001 From: guha-rahul <52607971+guha-rahul@users.noreply.github.com> Date: Sun, 12 May 2024 16:58:26 +0530 Subject: [PATCH 536/700] refactor: move all alloy-compat code to a standalone module (#8192) Co-authored-by: Matthias Seitz --- crates/primitives/src/alloy_compat.rs | 201 +++++++++++++++++++++++ crates/primitives/src/block.rs | 45 ----- crates/primitives/src/header.rs | 47 ------ crates/primitives/src/lib.rs | 3 +- crates/primitives/src/transaction/mod.rs | 104 ------------ 5 files changed, 203 insertions(+), 197 deletions(-) create mode 100644 crates/primitives/src/alloy_compat.rs diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs new file mode 100644 index 0000000000000..8b4368a1211b4 --- /dev/null +++ b/crates/primitives/src/alloy_compat.rs @@ -0,0 +1,201 @@ +//! Common conversions from alloy types. 
+ +use crate::{ + Block, Header, Transaction, TransactionSigned, TxEip1559, TxEip2930, TxEip4844, TxLegacy, + TxType, +}; +use alloy_primitives::TxKind; +use alloy_rlp::Error as RlpError; + +impl TryFrom for Block { + type Error = alloy_rpc_types::ConversionError; + + fn try_from(block: alloy_rpc_types::Block) -> Result { + use alloy_rpc_types::ConversionError; + + let body = { + let transactions: Result, ConversionError> = match block + .transactions + { + alloy_rpc_types::BlockTransactions::Full(transactions) => transactions + .into_iter() + .map(|tx| { + let signature = tx.signature.ok_or(ConversionError::MissingSignature)?; + Ok(TransactionSigned::from_transaction_and_signature( + tx.try_into()?, + crate::Signature { + r: signature.r, + s: signature.s, + odd_y_parity: signature + .y_parity + .unwrap_or(alloy_rpc_types::Parity(false)) + .0, + }, + )) + }) + .collect(), + alloy_rpc_types::BlockTransactions::Hashes(_) | + alloy_rpc_types::BlockTransactions::Uncle => { + return Err(ConversionError::MissingFullTransactions) + } + }; + transactions? 
+ }; + + Ok(Self { + header: block.header.try_into()?, + body, + ommers: Default::default(), + withdrawals: block.withdrawals.map(Into::into), + }) + } +} + +impl TryFrom for Header { + type Error = alloy_rpc_types::ConversionError; + + fn try_from(header: alloy_rpc_types::Header) -> Result { + use alloy_rpc_types::ConversionError; + + Ok(Self { + base_fee_per_gas: header + .base_fee_per_gas + .map(|base_fee_per_gas| { + base_fee_per_gas.try_into().map_err(ConversionError::BaseFeePerGasConversion) + }) + .transpose()?, + beneficiary: header.miner, + blob_gas_used: header + .blob_gas_used + .map(|blob_gas_used| { + blob_gas_used.try_into().map_err(ConversionError::BlobGasUsedConversion) + }) + .transpose()?, + difficulty: header.difficulty, + excess_blob_gas: header + .excess_blob_gas + .map(|excess_blob_gas| { + excess_blob_gas.try_into().map_err(ConversionError::ExcessBlobGasConversion) + }) + .transpose()?, + extra_data: header.extra_data, + gas_limit: header.gas_limit.try_into().map_err(ConversionError::GasLimitConversion)?, + gas_used: header.gas_used.try_into().map_err(ConversionError::GasUsedConversion)?, + logs_bloom: header.logs_bloom, + mix_hash: header.mix_hash.unwrap_or_default(), + nonce: u64::from_be_bytes(header.nonce.unwrap_or_default().0), + number: header.number.ok_or(ConversionError::MissingBlockNumber)?, + ommers_hash: header.uncles_hash, + parent_beacon_block_root: header.parent_beacon_block_root, + parent_hash: header.parent_hash, + receipts_root: header.receipts_root, + state_root: header.state_root, + timestamp: header.timestamp, + transactions_root: header.transactions_root, + withdrawals_root: header.withdrawals_root, + }) + } +} + +impl TryFrom for Transaction { + type Error = alloy_rpc_types::ConversionError; + + fn try_from(tx: alloy_rpc_types::Transaction) -> Result { + use alloy_eips::eip2718::Eip2718Error; + use alloy_rpc_types::ConversionError; + + match tx.transaction_type.map(TryInto::try_into).transpose().map_err(|_| { + 
ConversionError::Eip2718Error(Eip2718Error::UnexpectedType( + tx.transaction_type.unwrap(), + )) + })? { + None | Some(TxType::Legacy) => { + // legacy + if tx.max_fee_per_gas.is_some() || tx.max_priority_fee_per_gas.is_some() { + return Err(ConversionError::Eip2718Error( + RlpError::Custom("EIP-1559 fields are present in a legacy transaction") + .into(), + )) + } + Ok(Transaction::Legacy(TxLegacy { + chain_id: tx.chain_id, + nonce: tx.nonce, + gas_price: tx.gas_price.ok_or(ConversionError::MissingGasPrice)?, + gas_limit: tx + .gas + .try_into() + .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, + to: tx.to.map_or(TxKind::Create, TxKind::Call), + value: tx.value, + input: tx.input, + })) + } + Some(TxType::Eip2930) => { + // eip2930 + Ok(Transaction::Eip2930(TxEip2930 { + chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, + nonce: tx.nonce, + gas_limit: tx + .gas + .try_into() + .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, + to: tx.to.map_or(TxKind::Create, TxKind::Call), + value: tx.value, + input: tx.input, + access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, + gas_price: tx.gas_price.ok_or(ConversionError::MissingGasPrice)?, + })) + } + Some(TxType::Eip1559) => { + // EIP-1559 + Ok(Transaction::Eip1559(TxEip1559 { + chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, + nonce: tx.nonce, + max_priority_fee_per_gas: tx + .max_priority_fee_per_gas + .ok_or(ConversionError::MissingMaxPriorityFeePerGas)?, + max_fee_per_gas: tx + .max_fee_per_gas + .ok_or(ConversionError::MissingMaxFeePerGas)?, + gas_limit: tx + .gas + .try_into() + .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, + to: tx.to.map_or(TxKind::Create, TxKind::Call), + value: tx.value, + access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, + input: tx.input, + })) + } + Some(TxType::Eip4844) => { + // EIP-4844 + Ok(Transaction::Eip4844(TxEip4844 { + chain_id: 
tx.chain_id.ok_or(ConversionError::MissingChainId)?, + nonce: tx.nonce, + max_priority_fee_per_gas: tx + .max_priority_fee_per_gas + .ok_or(ConversionError::MissingMaxPriorityFeePerGas)?, + max_fee_per_gas: tx + .max_fee_per_gas + .ok_or(ConversionError::MissingMaxFeePerGas)?, + gas_limit: tx + .gas + .try_into() + .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, + to: tx.to.map_or(TxKind::Create, TxKind::Call), + value: tx.value, + access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, + input: tx.input, + blob_versioned_hashes: tx + .blob_versioned_hashes + .ok_or(ConversionError::MissingBlobVersionedHashes)?, + max_fee_per_blob_gas: tx + .max_fee_per_blob_gas + .ok_or(ConversionError::MissingMaxFeePerBlobGas)?, + })) + } + #[cfg(feature = "optimism")] + Some(TxType::Deposit) => todo!(), + } + } +} diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 8a029dc05233f..467bca8166ec2 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -147,51 +147,6 @@ impl Deref for Block { } } -#[cfg(feature = "alloy-compat")] -impl TryFrom for Block { - type Error = alloy_rpc_types::ConversionError; - - fn try_from(block: alloy_rpc_types::Block) -> Result { - use alloy_rpc_types::ConversionError; - - let body = { - let transactions: Result, ConversionError> = match block - .transactions - { - alloy_rpc_types::BlockTransactions::Full(transactions) => transactions - .into_iter() - .map(|tx| { - let signature = tx.signature.ok_or(ConversionError::MissingSignature)?; - Ok(TransactionSigned::from_transaction_and_signature( - tx.try_into()?, - crate::Signature { - r: signature.r, - s: signature.s, - odd_y_parity: signature - .y_parity - .unwrap_or(alloy_rpc_types::Parity(false)) - .0, - }, - )) - }) - .collect(), - alloy_rpc_types::BlockTransactions::Hashes(_) | - alloy_rpc_types::BlockTransactions::Uncle => { - return Err(ConversionError::MissingFullTransactions) - } - }; - 
transactions? - }; - - Ok(Self { - header: block.header.try_into()?, - body, - ommers: Default::default(), - withdrawals: block.withdrawals.map(Into::into), - }) - } -} - /// Sealed block with senders recovered from transactions. #[derive(Debug, Clone, PartialEq, Eq, Default)] pub struct BlockWithSenders { diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index d0bd5baf865f6..db75e1b6d4c0c 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -485,53 +485,6 @@ impl Decodable for Header { } } -#[cfg(feature = "alloy-compat")] -impl TryFrom for Header { - type Error = alloy_rpc_types::ConversionError; - - fn try_from(header: alloy_rpc_types::Header) -> Result { - use alloy_rpc_types::ConversionError; - - Ok(Self { - base_fee_per_gas: header - .base_fee_per_gas - .map(|base_fee_per_gas| { - base_fee_per_gas.try_into().map_err(ConversionError::BaseFeePerGasConversion) - }) - .transpose()?, - beneficiary: header.miner, - blob_gas_used: header - .blob_gas_used - .map(|blob_gas_used| { - blob_gas_used.try_into().map_err(ConversionError::BlobGasUsedConversion) - }) - .transpose()?, - difficulty: header.difficulty, - excess_blob_gas: header - .excess_blob_gas - .map(|excess_blob_gas| { - excess_blob_gas.try_into().map_err(ConversionError::ExcessBlobGasConversion) - }) - .transpose()?, - extra_data: header.extra_data, - gas_limit: header.gas_limit.try_into().map_err(ConversionError::GasLimitConversion)?, - gas_used: header.gas_used.try_into().map_err(ConversionError::GasUsedConversion)?, - logs_bloom: header.logs_bloom, - mix_hash: header.mix_hash.unwrap_or_default(), - nonce: u64::from_be_bytes(header.nonce.unwrap_or_default().0), - number: header.number.ok_or(ConversionError::MissingBlockNumber)?, - ommers_hash: header.uncles_hash, - parent_beacon_block_root: header.parent_beacon_block_root, - parent_hash: header.parent_hash, - receipts_root: header.receipts_root, - state_root: header.state_root, - timestamp: 
header.timestamp, - transactions_root: header.transactions_root, - withdrawals_root: header.withdrawals_root, - }) - } -} - /// Errors that can occur during header sanity checks. #[derive(thiserror::Error, Debug, PartialEq, Eq, Clone)] pub enum HeaderValidationError { diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 27c66e69e786f..71d264712b7d3 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -19,6 +19,8 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod account; +#[cfg(feature = "alloy-compat")] +mod alloy_compat; pub mod basefee; mod block; mod chain; @@ -46,7 +48,6 @@ mod storage; pub mod transaction; pub mod trie; mod withdrawal; - pub use account::{Account, Bytecode}; #[cfg(any(test, feature = "arbitrary"))] pub use block::{generate_valid_header, valid_header_strategy}; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index eda139ffdf973..3117615e842d3 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -612,110 +612,6 @@ impl From for Transaction { } } -#[cfg(feature = "alloy-compat")] -impl TryFrom for Transaction { - type Error = alloy_rpc_types::ConversionError; - - fn try_from(tx: alloy_rpc_types::Transaction) -> Result { - use alloy_eips::eip2718::Eip2718Error; - use alloy_rpc_types::ConversionError; - - match tx.transaction_type.map(TryInto::try_into).transpose().map_err(|_| { - ConversionError::Eip2718Error(Eip2718Error::UnexpectedType( - tx.transaction_type.unwrap(), - )) - })? 
{ - None | Some(TxType::Legacy) => { - // legacy - if tx.max_fee_per_gas.is_some() || tx.max_priority_fee_per_gas.is_some() { - return Err(ConversionError::Eip2718Error( - RlpError::Custom("EIP-1559 fields are present in a legacy transaction") - .into(), - )) - } - Ok(Transaction::Legacy(TxLegacy { - chain_id: tx.chain_id, - nonce: tx.nonce, - gas_price: tx.gas_price.ok_or(ConversionError::MissingGasPrice)?, - gas_limit: tx - .gas - .try_into() - .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TxKind::Create, TxKind::Call), - value: tx.value, - input: tx.input, - })) - } - Some(TxType::Eip2930) => { - // eip2930 - Ok(Transaction::Eip2930(TxEip2930 { - chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, - nonce: tx.nonce, - gas_limit: tx - .gas - .try_into() - .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TxKind::Create, TxKind::Call), - value: tx.value, - input: tx.input, - access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, - gas_price: tx.gas_price.ok_or(ConversionError::MissingGasPrice)?, - })) - } - Some(TxType::Eip1559) => { - // EIP-1559 - Ok(Transaction::Eip1559(TxEip1559 { - chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, - nonce: tx.nonce, - max_priority_fee_per_gas: tx - .max_priority_fee_per_gas - .ok_or(ConversionError::MissingMaxPriorityFeePerGas)?, - max_fee_per_gas: tx - .max_fee_per_gas - .ok_or(ConversionError::MissingMaxFeePerGas)?, - gas_limit: tx - .gas - .try_into() - .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TxKind::Create, TxKind::Call), - value: tx.value, - access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, - input: tx.input, - })) - } - Some(TxType::Eip4844) => { - // EIP-4844 - Ok(Transaction::Eip4844(TxEip4844 { - chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, - nonce: tx.nonce, - max_priority_fee_per_gas: tx - 
.max_priority_fee_per_gas - .ok_or(ConversionError::MissingMaxPriorityFeePerGas)?, - max_fee_per_gas: tx - .max_fee_per_gas - .ok_or(ConversionError::MissingMaxFeePerGas)?, - gas_limit: tx - .gas - .try_into() - .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TxKind::Create, TxKind::Call), - value: tx.value, - access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, - input: tx.input, - blob_versioned_hashes: tx - .blob_versioned_hashes - .ok_or(ConversionError::MissingBlobVersionedHashes)?, - max_fee_per_blob_gas: tx - .max_fee_per_blob_gas - .ok_or(ConversionError::MissingMaxFeePerBlobGas)?, - })) - } - #[cfg(feature = "optimism")] - Some(TxType::Deposit) => todo!(), - } - } -} - impl Compact for Transaction { // Serializes the TxType to the buffer if necessary, returning 2 bits of the type as an // identifier instead of the length. From d9f9504dbde67019f8b5da3091bb8996dcb07e74 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Sun, 12 May 2024 12:38:34 +0100 Subject: [PATCH 537/700] chore: refactor `DefaultStages` to take `StageConfig` (#8173) --- bin/reth/src/commands/debug_cmd/execution.rs | 17 +-- bin/reth/src/commands/import.rs | 33 +---- bin/reth/src/commands/stage/run.rs | 60 ++++++-- bin/reth/src/commands/stage/unwind.rs | 29 +--- crates/config/src/config.rs | 13 ++ .../consensus/beacon/src/engine/test_utils.rs | 5 +- crates/node/builder/src/setup.rs | 59 +------- crates/stages/benches/criterion.rs | 8 +- crates/stages/src/lib.rs | 5 +- crates/stages/src/sets.rs | 130 ++++++++++++------ crates/stages/src/stages/execution.rs | 28 ++++ crates/stages/src/stages/hashing_account.rs | 16 +-- crates/stages/src/stages/hashing_storage.rs | 16 +-- .../src/stages/index_account_history.rs | 14 +- .../src/stages/index_storage_history.rs | 14 +- crates/stages/src/stages/sender_recovery.rs | 5 +- crates/stages/src/stages/tx_lookup.rs | 16 +-- 17 files changed, 245 
insertions(+), 223 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 50e93dfbca836..8fecc928ab02c 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -35,7 +35,7 @@ use reth_provider::{ }; use reth_stages::{ sets::DefaultStages, - stages::{ExecutionStage, ExecutionStageThresholds, SenderRecoveryStage}, + stages::{ExecutionStage, ExecutionStageThresholds}, Pipeline, StageSet, }; use reth_static_file::StaticFileProducer; @@ -109,6 +109,7 @@ impl Command { .into_task_with(task_executor); let stage_conf = &config.stages; + let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default(); let (tip_tx, tip_rx) = watch::channel(B256::ZERO); let executor = block_executor!(self.chain.clone()); @@ -124,11 +125,9 @@ impl Command { header_downloader, body_downloader, executor.clone(), - stage_conf.etl.clone(), + stage_conf.clone(), + prune_modes.clone(), ) - .set(SenderRecoveryStage { - commit_threshold: stage_conf.sender_recovery.commit_threshold, - }) .set(ExecutionStage::new( executor, ExecutionStageThresholds { @@ -137,12 +136,8 @@ impl Command { max_cumulative_gas: None, max_duration: None, }, - stage_conf - .merkle - .clean_threshold - .max(stage_conf.account_hashing.clean_threshold) - .max(stage_conf.storage_hashing.clean_threshold), - config.prune.clone().map(|prune| prune.segments).unwrap_or_default(), + stage_conf.execution_external_clean_threshold(), + prune_modes, ExExManagerHandle::empty(), )), ) diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 354787f326c34..7d6b12fd8f089 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -21,7 +21,6 @@ use reth_downloaders::{ file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; -use reth_exex::ExExManagerHandle; use 
reth_interfaces::p2p::{ bodies::downloader::BodyDownloader, headers::downloader::{HeaderDownloader, SyncTarget}, @@ -33,11 +32,7 @@ use reth_provider::{ BlockNumReader, ChainSpecProvider, HeaderProvider, HeaderSyncMode, ProviderError, ProviderFactory, StageCheckpointReader, StaticFileProviderFactory, }; -use reth_stages::{ - prelude::*, - stages::{ExecutionStage, ExecutionStageThresholds, SenderRecoveryStage}, - Pipeline, StageSet, -}; +use reth_stages::{prelude::*, Pipeline, StageSet}; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; use tokio::sync::watch; @@ -273,29 +268,11 @@ where consensus.clone(), header_downloader, body_downloader, - executor.clone(), - config.stages.etl.clone(), - ) - .set(SenderRecoveryStage { - commit_threshold: config.stages.sender_recovery.commit_threshold, - }) - .set(ExecutionStage::new( executor, - ExecutionStageThresholds { - max_blocks: config.stages.execution.max_blocks, - max_changes: config.stages.execution.max_changes, - max_cumulative_gas: config.stages.execution.max_cumulative_gas, - max_duration: config.stages.execution.max_duration, - }, - config - .stages - .merkle - .clean_threshold - .max(config.stages.account_hashing.clean_threshold) - .max(config.stages.storage_hashing.clean_threshold), - config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(), - ExExManagerHandle::empty(), - )) + config.stages.clone(), + PruneModes::default(), + ) + .builder() .disable_all_if(&StageId::STATE_REQUIRED, || should_exec), ) .build(provider_factory, static_file_producer); diff --git a/bin/reth/src/commands/stage/run.rs b/bin/reth/src/commands/stage/run.rs index 59d26fc293069..d34b67db42a10 100644 --- a/bin/reth/src/commands/stage/run.rs +++ b/bin/reth/src/commands/stage/run.rs @@ -16,7 +16,10 @@ use crate::{ use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; use reth_cli_runner::CliContext; -use reth_config::{config::EtlConfig, Config}; +use reth_config::{ + 
config::{EtlConfig, HashingConfig, SenderRecoveryConfig, TransactionLookupConfig}, + Config, +}; use reth_db::init_db; use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; use reth_exex::ExExManagerHandle; @@ -165,6 +168,7 @@ impl Command { Some(self.etl_dir.unwrap_or_else(|| EtlConfig::from_datadir(data_dir.data_dir()))), self.etl_file_size.unwrap_or(EtlConfig::default_file_size()), ); + let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default(); let (mut exec_stage, mut unwind_stage): (Box>, Option>>) = match self.stage { @@ -222,7 +226,12 @@ impl Command { ); (Box::new(stage), None) } - StageEnum::Senders => (Box::new(SenderRecoveryStage::new(batch_size)), None), + StageEnum::Senders => ( + Box::new(SenderRecoveryStage::new(SenderRecoveryConfig { + commit_threshold: batch_size, + })), + None, + ), StageEnum::Execution => { let executor = block_executor!(self.chain.clone()); ( @@ -235,31 +244,52 @@ impl Command { max_duration: None, }, config.stages.merkle.clean_threshold, - config.prune.map(|prune| prune.segments).unwrap_or_default(), + prune_modes, ExExManagerHandle::empty(), )), None, ) } - StageEnum::TxLookup => { - (Box::new(TransactionLookupStage::new(batch_size, etl_config, None)), None) - } - StageEnum::AccountHashing => { - (Box::new(AccountHashingStage::new(1, batch_size, etl_config)), None) - } - StageEnum::StorageHashing => { - (Box::new(StorageHashingStage::new(1, batch_size, etl_config)), None) - } + StageEnum::TxLookup => ( + Box::new(TransactionLookupStage::new( + TransactionLookupConfig { chunk_size: batch_size }, + etl_config, + prune_modes.transaction_lookup, + )), + None, + ), + StageEnum::AccountHashing => ( + Box::new(AccountHashingStage::new( + HashingConfig { clean_threshold: 1, commit_threshold: batch_size }, + etl_config, + )), + None, + ), + StageEnum::StorageHashing => ( + Box::new(StorageHashingStage::new( + HashingConfig { clean_threshold: 1, commit_threshold: batch_size }, + etl_config, + )), + 
None, + ), StageEnum::Merkle => ( - Box::new(MerkleStage::default_execution()), + Box::new(MerkleStage::new_execution(config.stages.merkle.clean_threshold)), Some(Box::new(MerkleStage::default_unwind())), ), StageEnum::AccountHistory => ( - Box::new(IndexAccountHistoryStage::default().with_etl_config(etl_config)), + Box::new(IndexAccountHistoryStage::new( + config.stages.index_account_history, + etl_config, + prune_modes.account_history, + )), None, ), StageEnum::StorageHistory => ( - Box::new(IndexStorageHistoryStage::default().with_etl_config(etl_config)), + Box::new(IndexStorageHistoryStage::new( + config.stages.index_storage_history, + etl_config, + prune_modes.storage_history, + )), None, ), _ => return Ok(()), diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index 1f0c7fc4569fe..d2ebe70db1850 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -15,11 +15,7 @@ use reth_provider::{ }; use reth_stages::{ sets::DefaultStages, - stages::{ - AccountHashingStage, ExecutionStage, ExecutionStageThresholds, IndexAccountHistoryStage, - IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage, - TransactionLookupStage, - }, + stages::{ExecutionStage, ExecutionStageThresholds}, Pipeline, StageSet, }; use reth_static_file::StaticFileProducer; @@ -133,6 +129,7 @@ impl Command { let consensus: Arc = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); let stage_conf = &config.stages; + let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default(); let (tip_tx, tip_rx) = watch::channel(B256::ZERO); let executor = block_executor!(provider_factory.chain_spec()); @@ -148,11 +145,9 @@ impl Command { NoopHeaderDownloader::default(), NoopBodiesDownloader::default(), executor.clone(), - stage_conf.etl.clone(), + stage_conf.clone(), + prune_modes.clone(), ) - .set(SenderRecoveryStage { - commit_threshold: 
stage_conf.sender_recovery.commit_threshold, - }) .set(ExecutionStage::new( executor, ExecutionStageThresholds { @@ -161,20 +156,10 @@ impl Command { max_cumulative_gas: None, max_duration: None, }, - stage_conf - .merkle - .clean_threshold - .max(stage_conf.account_hashing.clean_threshold) - .max(stage_conf.storage_hashing.clean_threshold), - config.prune.clone().map(|prune| prune.segments).unwrap_or_default(), + stage_conf.execution_external_clean_threshold(), + prune_modes, ExExManagerHandle::empty(), - )) - .set(AccountHashingStage::default()) - .set(StorageHashingStage::default()) - .set(MerkleStage::default_unwind()) - .set(TransactionLookupStage::default()) - .set(IndexAccountHistoryStage::default()) - .set(IndexStorageHistoryStage::default()), + )), ) .build( provider_factory.clone(), diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index f6537a04c79f7..aa8b7ee09ab14 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -96,6 +96,19 @@ pub struct StageConfig { pub etl: EtlConfig, } +impl StageConfig { + /// The highest threshold (in number of blocks) for switching between incremental and full + /// calculations across `MerkleStage`, `AccountHashingStage` and `StorageHashingStage`. This is + /// required to figure out if can prune or not changesets on subsequent pipeline runs during + /// `ExecutionStage` + pub fn execution_external_clean_threshold(&self) -> u64 { + self.merkle + .clean_threshold + .max(self.account_hashing.clean_threshold) + .max(self.storage_hashing.clean_threshold) + } +} + /// Header stage configuration. 
#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Eq, Serialize)] #[serde(default)] diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index d9fd67c1308ab..13ffd0c4fff78 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -6,7 +6,7 @@ use crate::{ use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, }; -use reth_config::config::EtlConfig; +use reth_config::config::StageConfig; use reth_consensus::{test_utils::TestConsensus, Consensus}; use reth_db::{test_utils::TempDatabase, DatabaseEnv as DE}; use reth_downloaders::{ @@ -375,7 +375,8 @@ where header_downloader, body_downloader, executor_factory.clone(), - EtlConfig::default(), + StageConfig::default(), + PruneModes::default(), )) } }; diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 8033ab1c68b49..3314891fe9695 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -18,15 +18,7 @@ use reth_node_core::{ primitives::{BlockNumber, B256}, }; use reth_provider::{HeaderSyncMode, ProviderFactory}; -use reth_stages::{ - prelude::DefaultStages, - stages::{ - AccountHashingStage, ExecutionStage, ExecutionStageThresholds, IndexAccountHistoryStage, - IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage, - TransactionLookupStage, - }, - Pipeline, StageSet, -}; +use reth_stages::{prelude::DefaultStages, stages::ExecutionStage, Pipeline, StageSet}; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_tracing::tracing::debug; @@ -131,56 +123,19 @@ where header_downloader, body_downloader, executor.clone(), - stage_config.etl.clone(), + stage_config.clone(), + prune_modes.clone(), ) - .set(SenderRecoveryStage { - commit_threshold: stage_config.sender_recovery.commit_threshold, - }) .set( 
ExecutionStage::new( executor, - ExecutionStageThresholds { - max_blocks: stage_config.execution.max_blocks, - max_changes: stage_config.execution.max_changes, - max_cumulative_gas: stage_config.execution.max_cumulative_gas, - max_duration: stage_config.execution.max_duration, - }, - stage_config - .merkle - .clean_threshold - .max(stage_config.account_hashing.clean_threshold) - .max(stage_config.storage_hashing.clean_threshold), - prune_modes.clone(), + stage_config.execution.into(), + stage_config.execution_external_clean_threshold(), + prune_modes, exex_manager_handle, ) .with_metrics_tx(metrics_tx), - ) - .set(AccountHashingStage::new( - stage_config.account_hashing.clean_threshold, - stage_config.account_hashing.commit_threshold, - stage_config.etl.clone(), - )) - .set(StorageHashingStage::new( - stage_config.storage_hashing.clean_threshold, - stage_config.storage_hashing.commit_threshold, - stage_config.etl.clone(), - )) - .set(MerkleStage::new_execution(stage_config.merkle.clean_threshold)) - .set(TransactionLookupStage::new( - stage_config.transaction_lookup.chunk_size, - stage_config.etl.clone(), - prune_modes.transaction_lookup, - )) - .set(IndexAccountHistoryStage::new( - stage_config.index_account_history.commit_threshold, - prune_modes.account_history, - stage_config.etl.clone(), - )) - .set(IndexStorageHistoryStage::new( - stage_config.index_storage_history.commit_threshold, - prune_modes.storage_history, - stage_config.etl.clone(), - )), + ), ) .build(provider_factory, static_file_producer); diff --git a/crates/stages/benches/criterion.rs b/crates/stages/benches/criterion.rs index 98b97462b9a61..976723dcd2d74 100644 --- a/crates/stages/benches/criterion.rs +++ b/crates/stages/benches/criterion.rs @@ -2,7 +2,7 @@ use criterion::{criterion_main, measurement::WallTime, BenchmarkGroup, Criterion}; #[cfg(not(target_os = "windows"))] use pprof::criterion::{Output, PProfProfiler}; -use reth_config::config::EtlConfig; +use reth_config::config::{EtlConfig, 
TransactionLookupConfig}; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_primitives::{stage::StageCheckpoint, BlockNumber}; @@ -87,7 +87,11 @@ fn transaction_lookup(c: &mut Criterion, runtime: &Runtime) { let mut group = c.benchmark_group("Stages"); // don't need to run each stage for that many times group.sample_size(10); - let stage = TransactionLookupStage::new(DEFAULT_NUM_BLOCKS, EtlConfig::default(), None); + let stage = TransactionLookupStage::new( + TransactionLookupConfig { chunk_size: DEFAULT_NUM_BLOCKS }, + EtlConfig::default(), + None, + ); let db = setup::txs_testdata(DEFAULT_NUM_BLOCKS); diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index 2c6aaff251063..370dd18aca6d5 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -28,7 +28,7 @@ //! # use reth_provider::HeaderSyncMode; //! # use reth_provider::test_utils::create_test_provider_factory; //! # use reth_static_file::StaticFileProducer; -//! # use reth_config::config::EtlConfig; +//! # use reth_config::config::StageConfig; //! # use reth_consensus::Consensus; //! # use reth_consensus::test_utils::TestConsensus; //! # @@ -62,7 +62,8 @@ //! headers_downloader, //! bodies_downloader, //! executor_provider, -//! EtlConfig::default(), +//! StageConfig::default(), +//! PruneModes::default(), //! )) //! .build(provider_factory, static_file_producer); //! ``` diff --git a/crates/stages/src/sets.rs b/crates/stages/src/sets.rs index 7ec85170fc0f3..e8257047e5825 100644 --- a/crates/stages/src/sets.rs +++ b/crates/stages/src/sets.rs @@ -17,7 +17,7 @@ //! # use reth_provider::StaticFileProviderFactory; //! # use reth_provider::test_utils::create_test_provider_factory; //! # use reth_static_file::StaticFileProducer; -//! # use reth_config::config::EtlConfig; +//! # use reth_config::config::StageConfig; //! # use reth_evm::execute::BlockExecutorProvider; //! //! # fn create(exec: impl BlockExecutorProvider) { @@ -30,7 +30,7 @@ //! ); //! 
// Build a pipeline with all offline stages. //! let pipeline = Pipeline::builder() -//! .add_stages(OfflineStages::new(exec, EtlConfig::default())) +//! .add_stages(OfflineStages::new(exec, StageConfig::default(), PruneModes::default())) //! .build(provider_factory, static_file_producer); //! //! # } @@ -43,13 +43,14 @@ use crate::{ }, StageSet, StageSetBuilder, }; -use reth_config::config::EtlConfig; +use reth_config::config::StageConfig; use reth_consensus::Consensus; use reth_db::database::Database; use reth_evm::execute::BlockExecutorProvider; use reth_interfaces::p2p::{ bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, }; +use reth_primitives::PruneModes; use reth_provider::{HeaderSyncGapProvider, HeaderSyncMode}; use std::sync::Arc; @@ -80,12 +81,15 @@ pub struct DefaultStages { online: OnlineStages, /// Executor factory needs for execution stage executor_factory: EF, - /// ETL configuration - etl_config: EtlConfig, + /// Configuration for each stage in the pipeline + stages_config: StageConfig, + /// Prune configuration for every segment that can be pruned + prune_modes: PruneModes, } impl DefaultStages { /// Create a new set of default stages with default values. 
+ #[allow(clippy::too_many_arguments)] pub fn new( provider: Provider, header_mode: HeaderSyncMode, @@ -93,7 +97,8 @@ impl DefaultStages { header_downloader: H, body_downloader: B, executor_factory: E, - etl_config: EtlConfig, + stages_config: StageConfig, + prune_modes: PruneModes, ) -> Self where E: BlockExecutorProvider, @@ -105,10 +110,11 @@ impl DefaultStages { consensus, header_downloader, body_downloader, - etl_config.clone(), + stages_config.clone(), ), executor_factory, - etl_config, + stages_config, + prune_modes, } } } @@ -121,11 +127,12 @@ where pub fn add_offline_stages( default_offline: StageSetBuilder, executor_factory: E, - etl_config: EtlConfig, + stages_config: StageConfig, + prune_modes: PruneModes, ) -> StageSetBuilder { StageSetBuilder::default() .add_set(default_offline) - .add_set(OfflineStages::new(executor_factory, etl_config)) + .add_set(OfflineStages::new(executor_factory, stages_config, prune_modes)) .add_stage(FinishStage) } } @@ -139,7 +146,12 @@ where DB: Database + 'static, { fn builder(self) -> StageSetBuilder { - Self::add_offline_stages(self.online.builder(), self.executor_factory, self.etl_config) + Self::add_offline_stages( + self.online.builder(), + self.executor_factory, + self.stages_config.clone(), + self.prune_modes, + ) } } @@ -159,8 +171,8 @@ pub struct OnlineStages { header_downloader: H, /// The block body downloader body_downloader: B, - /// ETL configuration - etl_config: EtlConfig, + /// Configuration for each stage in the pipeline + stages_config: StageConfig, } impl OnlineStages { @@ -171,9 +183,9 @@ impl OnlineStages { consensus: Arc, header_downloader: H, body_downloader: B, - etl_config: EtlConfig, + stages_config: StageConfig, ) -> Self { - Self { provider, header_mode, consensus, header_downloader, body_downloader, etl_config } + Self { provider, header_mode, consensus, header_downloader, body_downloader, stages_config } } } @@ -198,7 +210,7 @@ where mode: HeaderSyncMode, header_downloader: H, consensus: Arc, 
- etl_config: EtlConfig, + stages_config: StageConfig, ) -> StageSetBuilder { StageSetBuilder::default() .add_stage(HeaderStage::new( @@ -206,7 +218,7 @@ where header_downloader, mode, consensus.clone(), - etl_config, + stages_config.etl, )) .add_stage(bodies) } @@ -226,7 +238,7 @@ where self.header_downloader, self.header_mode, self.consensus.clone(), - self.etl_config.clone(), + self.stages_config.etl.clone(), )) .add_stage(BodyStage::new(self.body_downloader)) } @@ -244,14 +256,16 @@ where pub struct OfflineStages { /// Executor factory needs for execution stage pub executor_factory: EF, - /// ETL configuration - etl_config: EtlConfig, + /// Configuration for each stage in the pipeline + stages_config: StageConfig, + /// Prune configuration for every segment that can be pruned + prune_modes: PruneModes, } impl OfflineStages { /// Create a new set of offline stages with default values. - pub fn new(executor_factory: EF, etl_config: EtlConfig) -> Self { - Self { executor_factory, etl_config } + pub fn new(executor_factory: EF, stages_config: StageConfig, prune_modes: PruneModes) -> Self { + Self { executor_factory, stages_config, prune_modes } } } @@ -261,10 +275,17 @@ where DB: Database, { fn builder(self) -> StageSetBuilder { - ExecutionStages::new(self.executor_factory) - .builder() - .add_set(HashingStages { etl_config: self.etl_config.clone() }) - .add_set(HistoryIndexingStages { etl_config: self.etl_config }) + ExecutionStages::new( + self.executor_factory, + self.stages_config.clone(), + self.prune_modes.clone(), + ) + .builder() + .add_set(HashingStages { stages_config: self.stages_config.clone() }) + .add_set(HistoryIndexingStages { + stages_config: self.stages_config.clone(), + prune_modes: self.prune_modes, + }) } } @@ -274,12 +295,16 @@ where pub struct ExecutionStages { /// Executor factory that will create executors. 
executor_factory: E, + /// Configuration for each stage in the pipeline + stages_config: StageConfig, + /// Prune configuration for every segment that can be pruned + prune_modes: PruneModes, } impl ExecutionStages { /// Create a new set of execution stages with default values. - pub fn new(executor_factory: E) -> Self { - Self { executor_factory } + pub fn new(executor_factory: E, stages_config: StageConfig, prune_modes: PruneModes) -> Self { + Self { executor_factory, stages_config, prune_modes } } } @@ -290,8 +315,13 @@ where { fn builder(self) -> StageSetBuilder { StageSetBuilder::default() - .add_stage(SenderRecoveryStage::default()) - .add_stage(ExecutionStage::new_with_executor(self.executor_factory)) + .add_stage(SenderRecoveryStage::new(self.stages_config.sender_recovery)) + .add_stage(ExecutionStage::from_config( + self.executor_factory, + self.stages_config.execution, + self.stages_config.execution_external_clean_threshold(), + self.prune_modes, + )) } } @@ -299,17 +329,23 @@ where #[derive(Debug, Default)] #[non_exhaustive] pub struct HashingStages { - /// ETL configuration - etl_config: EtlConfig, + /// Configuration for each stage in the pipeline + stages_config: StageConfig, } impl StageSet for HashingStages { fn builder(self) -> StageSetBuilder { StageSetBuilder::default() .add_stage(MerkleStage::default_unwind()) - .add_stage(AccountHashingStage::default().with_etl_config(self.etl_config.clone())) - .add_stage(StorageHashingStage::default().with_etl_config(self.etl_config)) - .add_stage(MerkleStage::default_execution()) + .add_stage(AccountHashingStage::new( + self.stages_config.account_hashing, + self.stages_config.etl.clone(), + )) + .add_stage(StorageHashingStage::new( + self.stages_config.storage_hashing, + self.stages_config.etl.clone(), + )) + .add_stage(MerkleStage::new_execution(self.stages_config.merkle.clean_threshold)) } } @@ -317,15 +353,29 @@ impl StageSet for HashingStages { #[derive(Debug, Default)] #[non_exhaustive] pub struct 
HistoryIndexingStages { - /// ETL configuration - etl_config: EtlConfig, + /// Configuration for each stage in the pipeline + stages_config: StageConfig, + /// Prune configuration for every segment that can be pruned + prune_modes: PruneModes, } impl StageSet for HistoryIndexingStages { fn builder(self) -> StageSetBuilder { StageSetBuilder::default() - .add_stage(TransactionLookupStage::default().with_etl_config(self.etl_config.clone())) - .add_stage(IndexStorageHistoryStage::default().with_etl_config(self.etl_config.clone())) - .add_stage(IndexAccountHistoryStage::default().with_etl_config(self.etl_config)) + .add_stage(TransactionLookupStage::new( + self.stages_config.transaction_lookup, + self.stages_config.etl.clone(), + self.prune_modes.transaction_lookup, + )) + .add_stage(IndexStorageHistoryStage::new( + self.stages_config.index_storage_history, + self.stages_config.etl.clone(), + self.prune_modes.account_history, + )) + .add_stage(IndexAccountHistoryStage::new( + self.stages_config.index_account_history, + self.stages_config.etl.clone(), + self.prune_modes.storage_history, + )) } } diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 9d8cf6ac66387..0f933cea78221 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -1,5 +1,6 @@ use crate::stages::MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD; use num_traits::Zero; +use reth_config::config::ExecutionConfig; use reth_db::{ cursor::DbCursorRO, database::Database, static_file::HeaderMask, tables, transaction::DbTx, }; @@ -111,6 +112,22 @@ impl ExecutionStage { ) } + /// Create new instance of [ExecutionStage] from configuration. 
+ pub fn from_config( + executor_provider: E, + config: ExecutionConfig, + external_clean_threshold: u64, + prune_modes: PruneModes, + ) -> Self { + Self::new( + executor_provider, + config.into(), + external_clean_threshold, + prune_modes, + ExExManagerHandle::empty(), + ) + } + /// Set the metric events sender. pub fn with_metrics_tx(mut self, metrics_tx: MetricEventsSender) -> Self { self.metrics_tx = Some(metrics_tx); @@ -540,6 +557,17 @@ impl ExecutionStageThresholds { } } +impl From for ExecutionStageThresholds { + fn from(config: ExecutionConfig) -> Self { + ExecutionStageThresholds { + max_blocks: config.max_blocks, + max_changes: config.max_changes, + max_cumulative_gas: config.max_cumulative_gas, + max_duration: config.max_duration, + } + } +} + /// Returns a `StaticFileProviderRWRefMut` static file producer after performing a consistency /// check. /// diff --git a/crates/stages/src/stages/hashing_account.rs b/crates/stages/src/stages/hashing_account.rs index 051b6a85f9321..1a63f6d893c65 100644 --- a/crates/stages/src/stages/hashing_account.rs +++ b/crates/stages/src/stages/hashing_account.rs @@ -1,5 +1,5 @@ use itertools::Itertools; -use reth_config::config::EtlConfig; +use reth_config::config::{EtlConfig, HashingConfig}; use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, database::Database, @@ -44,14 +44,12 @@ pub struct AccountHashingStage { impl AccountHashingStage { /// Create new instance of [AccountHashingStage]. - pub fn new(clean_threshold: u64, commit_threshold: u64, etl_config: EtlConfig) -> Self { - Self { clean_threshold, commit_threshold, etl_config } - } - - /// Set the ETL configuration to use. 
- pub fn with_etl_config(mut self, etl_config: EtlConfig) -> Self { - self.etl_config = etl_config; - self + pub fn new(config: HashingConfig, etl_config: EtlConfig) -> Self { + Self { + clean_threshold: config.clean_threshold, + commit_threshold: config.commit_threshold, + etl_config, + } } } diff --git a/crates/stages/src/stages/hashing_storage.rs b/crates/stages/src/stages/hashing_storage.rs index 97da1278d8c0a..97f9154c3829b 100644 --- a/crates/stages/src/stages/hashing_storage.rs +++ b/crates/stages/src/stages/hashing_storage.rs @@ -1,5 +1,5 @@ use itertools::Itertools; -use reth_config::config::EtlConfig; +use reth_config::config::{EtlConfig, HashingConfig}; use reth_db::{ codecs::CompactU256, cursor::{DbCursorRO, DbDupCursorRW}, @@ -45,14 +45,12 @@ pub struct StorageHashingStage { impl StorageHashingStage { /// Create new instance of [StorageHashingStage]. - pub fn new(clean_threshold: u64, commit_threshold: u64, etl_config: EtlConfig) -> Self { - Self { clean_threshold, commit_threshold, etl_config } - } - - /// Set the ETL configuration to use. 
- pub fn with_etl_config(mut self, etl_config: EtlConfig) -> Self { - self.etl_config = etl_config; - self + pub fn new(config: HashingConfig, etl_config: EtlConfig) -> Self { + Self { + clean_threshold: config.clean_threshold, + commit_threshold: config.commit_threshold, + etl_config, + } } } diff --git a/crates/stages/src/stages/index_account_history.rs b/crates/stages/src/stages/index_account_history.rs index d4524065127d4..6c313f0d3e005 100644 --- a/crates/stages/src/stages/index_account_history.rs +++ b/crates/stages/src/stages/index_account_history.rs @@ -1,5 +1,5 @@ use super::{collect_history_indices, load_history_indices}; -use reth_config::config::EtlConfig; +use reth_config::config::{EtlConfig, IndexHistoryConfig}; use reth_db::{ database::Database, models::ShardedKey, table::Decode, tables, transaction::DbTxMut, }; @@ -31,17 +31,11 @@ pub struct IndexAccountHistoryStage { impl IndexAccountHistoryStage { /// Create new instance of [IndexAccountHistoryStage]. pub fn new( - commit_threshold: u64, - prune_mode: Option, + config: IndexHistoryConfig, etl_config: EtlConfig, + prune_mode: Option, ) -> Self { - Self { commit_threshold, prune_mode, etl_config } - } - - /// Set the ETL configuration to use. 
- pub fn with_etl_config(mut self, etl_config: EtlConfig) -> Self { - self.etl_config = etl_config; - self + Self { commit_threshold: config.commit_threshold, etl_config, prune_mode } } } diff --git a/crates/stages/src/stages/index_storage_history.rs b/crates/stages/src/stages/index_storage_history.rs index 6d5b6e2ade842..51fc92f18b145 100644 --- a/crates/stages/src/stages/index_storage_history.rs +++ b/crates/stages/src/stages/index_storage_history.rs @@ -1,5 +1,5 @@ use super::{collect_history_indices, load_history_indices}; -use reth_config::config::EtlConfig; +use reth_config::config::{EtlConfig, IndexHistoryConfig}; use reth_db::{ database::Database, models::{storage_sharded_key::StorageShardedKey, AddressStorageKey, BlockNumberAddress}, @@ -35,17 +35,11 @@ pub struct IndexStorageHistoryStage { impl IndexStorageHistoryStage { /// Create new instance of [IndexStorageHistoryStage]. pub fn new( - commit_threshold: u64, - prune_mode: Option, + config: IndexHistoryConfig, etl_config: EtlConfig, + prune_mode: Option, ) -> Self { - Self { commit_threshold, prune_mode, etl_config } - } - - /// Set the ETL configuration to use. - pub fn with_etl_config(mut self, etl_config: EtlConfig) -> Self { - self.etl_config = etl_config; - self + Self { commit_threshold: config.commit_threshold, prune_mode, etl_config } } } diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index e078fd95421ca..0bb05e0c40e02 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -1,3 +1,4 @@ +use reth_config::config::SenderRecoveryConfig; use reth_consensus::ConsensusError; use reth_db::{ cursor::DbCursorRW, @@ -42,8 +43,8 @@ pub struct SenderRecoveryStage { impl SenderRecoveryStage { /// Create new instance of [SenderRecoveryStage]. 
- pub fn new(commit_threshold: u64) -> Self { - Self { commit_threshold } + pub fn new(config: SenderRecoveryConfig) -> Self { + Self { commit_threshold: config.commit_threshold } } } diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index 342183905ba18..fae08e854dc02 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -1,5 +1,5 @@ use num_traits::Zero; -use reth_config::config::EtlConfig; +use reth_config::config::{EtlConfig, TransactionLookupConfig}; use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, database::Database, @@ -45,14 +45,12 @@ impl Default for TransactionLookupStage { impl TransactionLookupStage { /// Create new instance of [TransactionLookupStage]. - pub fn new(chunk_size: u64, etl_config: EtlConfig, prune_mode: Option) -> Self { - Self { chunk_size, etl_config, prune_mode } - } - - /// Set the ETL configuration to use. - pub fn with_etl_config(mut self, etl_config: EtlConfig) -> Self { - self.etl_config = etl_config; - self + pub fn new( + config: TransactionLookupConfig, + etl_config: EtlConfig, + prune_mode: Option, + ) -> Self { + Self { chunk_size: config.chunk_size, etl_config, prune_mode } } } From 3efab64218fc34a19d3b56b11c9a370ed23b41d9 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Sun, 12 May 2024 15:43:15 +0300 Subject: [PATCH 538/700] refactor(ethereum, primitives, evm): use Alloy EIP-4788 constants (#8208) --- Cargo.lock | 110 +++++++++++++------------ Cargo.toml | 28 +++---- crates/ethereum/evm/Cargo.toml | 4 +- crates/ethereum/evm/src/execute.rs | 13 +-- crates/primitives/src/constants/mod.rs | 12 +-- crates/primitives/src/revm/env.rs | 4 +- crates/revm/Cargo.toml | 3 + crates/revm/src/state_change.rs | 6 +- 8 files changed, 86 insertions(+), 94 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c27f4b165704a..67f7e3a2c96f7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -133,12 +133,12 @@ dependencies = [ [[package]] name = "alloy-consensus" 
version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" +source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "c-kzg", "serde", ] @@ -177,11 +177,11 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" +source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "arbitrary", "c-kzg", "derive_more", @@ -211,10 +211,10 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" +source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "serde", "serde_json", ] @@ -245,7 +245,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" +source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" dependencies = [ "alloy-primitives", "serde", @@ -257,13 +257,13 @@ 
dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" +source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-signer", "alloy-sol-types", "async-trait", @@ -274,9 +274,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" +source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-primitives", "k256", "serde_json", @@ -316,14 +316,14 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" +source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-rpc-types 
0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-rpc-types-trace", "alloy-transport", "alloy-transport-http", @@ -366,7 +366,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" +source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -386,14 +386,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" +source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-sol-types", "arbitrary", "itertools 0.12.1", @@ -426,19 +426,19 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" +source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "serde", ] 
[[package]] name = "alloy-rpc-types-beacon" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" +source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-primitives", "alloy-rpc-types-engine", "serde", @@ -448,14 +448,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" +source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", @@ -468,11 +468,11 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" +source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", - "alloy-serde 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "serde", "serde_json", ] @@ -480,7 +480,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" +source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" dependencies = [ "alloy-primitives", "serde", @@ -500,7 +500,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" +source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" dependencies = [ "alloy-primitives", "async-trait", @@ -513,9 +513,9 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" +source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-network", "alloy-primitives", "alloy-signer", @@ -588,7 +588,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" +source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -606,7 +606,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=899fc51#899fc51af8b5f4de6df1605ca3ffe8d8d6fa8c69" 
+source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -2977,7 +2977,7 @@ dependencies = [ name = "exex-rollup" version = "0.0.0" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-rlp", "alloy-sol-types", "eyre", @@ -6578,8 +6578,8 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.7" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-primitives", "arbitrary", "bytes", @@ -6784,9 +6784,9 @@ dependencies = [ name = "reth-e2e-test-utils" version = "0.2.0-beta.7" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-network", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-signer", "alloy-signer-wallet", "eyre", @@ -6990,6 +6990,7 @@ dependencies = [ name = "reth-evm-ethereum" version = "0.2.0-beta.7" dependencies = [ + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "reth-evm", "reth-interfaces", "reth-primitives", @@ -7540,11 +7541,11 @@ name = "reth-primitives" version = "0.2.0-beta.7" dependencies = [ "alloy-chains", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-genesis 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-trie", "arbitrary", "assert_matches", @@ -7643,6 +7644,7 @@ dependencies = [ name = "reth-revm" version = "0.2.0-beta.7" dependencies = [ + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "reth-consensus-common", "reth-interfaces", "reth-primitives", @@ -7827,7 +7829,7 @@ name = "reth-rpc-types" version = "0.2.0-beta.7" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-rpc-types-anvil", "alloy-rpc-types-beacon", "alloy-rpc-types-engine", @@ -7852,7 +7854,7 @@ name = "reth-rpc-types-compat" version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "reth-primitives", "reth-rpc-types", "serde_json", @@ -7959,7 +7961,7 @@ dependencies = [ name = "reth-testing-utils" version = "0.2.0-beta.7" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "reth-primitives", "secp256k1", ] @@ -8093,10 +8095,10 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=c1b5dd0#c1b5dd0d85dd46ef5ec5258aebd24adc041d103a" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=a5df8a0#a5df8a041d2c82a58840776be37f935a72803917" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=899fc51)", + "alloy-rpc-types 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", diff --git a/Cargo.toml b/Cargo.toml index 0d531f5fc1d88..ead6935d8c72d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -282,7 +282,7 @@ reth-testing-utils = { path = "testing/testing-utils" } # revm revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "c1b5dd0" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "a5df8a0" } # eth alloy-chains = "0.1.15" @@ -291,21 +291,21 @@ alloy-dyn-abi = "0.7.2" alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } -alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } +alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } +alloy-genesis = { git = 
"https://github.com/alloy-rs/alloy", rev = "f1d7085" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "899fc51" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "899fc51" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "f1d7085" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } # misc auto_impl = "1" diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index 6fa61e34ff23a..e9f8bc5ad317b 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -24,4 +24,6 @@ revm-primitives.workspace = true tracing.workspace = true [dev-dependencies] -reth-revm = { workspace = true, features = ["test-utils"] } \ No newline at end of file +reth-revm = { workspace = true, features = ["test-utils"] } +alloy-eips.workspace = true + diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index c80e476bcbdab..15702ba7508d9 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -443,31 +443,26 @@ where #[cfg(test)] mod tests { use super::*; - use reth_primitives::{ - bytes, - constants::{BEACON_ROOTS_ADDRESS, SYSTEM_ADDRESS}, - 
keccak256, Account, Block, Bytes, ChainSpecBuilder, ForkCondition, B256, - }; + use alloy_eips::eip4788::{BEACON_ROOTS_ADDRESS, BEACON_ROOTS_CODE, SYSTEM_ADDRESS}; + use reth_primitives::{keccak256, Account, Block, ChainSpecBuilder, ForkCondition, B256}; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, }; use std::collections::HashMap; - static BEACON_ROOT_CONTRACT_CODE: Bytes = bytes!("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500"); - fn create_state_provider_with_beacon_root_contract() -> StateProviderTest { let mut db = StateProviderTest::default(); let beacon_root_contract_account = Account { balance: U256::ZERO, - bytecode_hash: Some(keccak256(BEACON_ROOT_CONTRACT_CODE.clone())), + bytecode_hash: Some(keccak256(BEACON_ROOTS_CODE.clone())), nonce: 1, }; db.insert_account( BEACON_ROOTS_ADDRESS, beacon_root_contract_account, - Some(BEACON_ROOT_CONTRACT_CODE.clone()), + Some(BEACON_ROOTS_CODE.clone()), HashMap::new(), ); diff --git a/crates/primitives/src/constants/mod.rs b/crates/primitives/src/constants/mod.rs index 4fc0aadfe9dc2..1df3d0284b51e 100644 --- a/crates/primitives/src/constants/mod.rs +++ b/crates/primitives/src/constants/mod.rs @@ -1,9 +1,6 @@ //! Ethereum protocol-related constants -use crate::{ - revm_primitives::{address, b256}, - Address, B256, U256, -}; +use crate::{revm_primitives::b256, B256, U256}; use std::time::Duration; #[cfg(feature = "optimism")] @@ -197,13 +194,6 @@ pub const BEACON_CONSENSUS_REORG_UNWIND_DEPTH: u64 = 3; /// pub const ALLOWED_FUTURE_BLOCK_TIME_SECONDS: u64 = 15; -/// The address for the beacon roots contract defined in EIP-4788. 
-pub const BEACON_ROOTS_ADDRESS: Address = address!("000F3df6D732807Ef1319fB7B8bB8522d0Beac02"); - -/// The caller to be used when calling the EIP-4788 beacon roots contract at the beginning of the -/// block. -pub const SYSTEM_ADDRESS: Address = address!("fffffffffffffffffffffffffffffffffffffffe"); - #[cfg(test)] mod tests { use super::*; diff --git a/crates/primitives/src/revm/env.rs b/crates/primitives/src/revm/env.rs index b13a7018f7ce8..f2c14bbbd743a 100644 --- a/crates/primitives/src/revm/env.rs +++ b/crates/primitives/src/revm/env.rs @@ -1,11 +1,11 @@ use crate::{ - constants::{BEACON_ROOTS_ADDRESS, SYSTEM_ADDRESS}, recover_signer_unchecked, revm_primitives::{BlockEnv, Env, TransactTo, TxEnv}, Address, Bytes, Chain, ChainSpec, Header, Transaction, TransactionSignedEcRecovered, TxKind, B256, U256, }; +use alloy_eips::eip4788::BEACON_ROOTS_ADDRESS; #[cfg(feature = "optimism")] use revm_primitives::OptimismFields; @@ -143,7 +143,7 @@ pub fn tx_env_with_recovered(transaction: &TransactionSignedEcRecovered) -> TxEn /// * if no code exists at `BEACON_ROOTS_ADDRESS`, the call must fail silently pub fn fill_tx_env_with_beacon_root_contract_call(env: &mut Env, parent_beacon_block_root: B256) { env.tx = TxEnv { - caller: SYSTEM_ADDRESS, + caller: alloy_eips::eip4788::SYSTEM_ADDRESS, transact_to: TransactTo::Call(BEACON_ROOTS_ADDRESS), // Explicitly set nonce to None so revm does not do any nonce checks nonce: None, diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 2b621ed76ec06..87d30ca6f2959 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -22,6 +22,9 @@ reth-trie = { workspace = true, optional = true } # revm revm.workspace = true +# alloy +alloy-eips.workspace = true + # common tracing.workspace = true diff --git a/crates/revm/src/state_change.rs b/crates/revm/src/state_change.rs index d2b0a6b5b3805..1279e1b9626f0 100644 --- a/crates/revm/src/state_change.rs +++ b/crates/revm/src/state_change.rs @@ -1,8 +1,8 @@ use 
reth_consensus_common::calc; use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; use reth_primitives::{ - constants::SYSTEM_ADDRESS, revm::env::fill_tx_env_with_beacon_root_contract_call, Address, - ChainSpec, Header, Withdrawal, B256, U256, + revm::env::fill_tx_env_with_beacon_root_contract_call, Address, ChainSpec, Header, Withdrawal, + B256, U256, }; use revm::{interpreter::Host, Database, DatabaseCommit, Evm}; use std::collections::HashMap; @@ -104,7 +104,7 @@ where } }; - state.remove(&SYSTEM_ADDRESS); + state.remove(&alloy_eips::eip4788::SYSTEM_ADDRESS); state.remove(&evm.block().coinbase); evm.context.evm.db.commit(state); From c2a05f07d3ef82ee4cd38a064a2f40d8688b66b2 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Sun, 12 May 2024 16:12:24 +0300 Subject: [PATCH 539/700] chore: bump revm to latest with placeholders for EOF (#7765) --- Cargo.lock | 79 +++++++++++++++++----------- Cargo.toml | 6 +-- crates/primitives/src/account.rs | 68 +++++++++++++----------- crates/primitives/src/lib.rs | 2 +- crates/primitives/src/revm/compat.rs | 14 ++--- crates/primitives/src/revm/env.rs | 3 ++ crates/revm/src/state_change.rs | 2 +- crates/rpc/rpc/src/eth/api/call.rs | 2 +- crates/rpc/rpc/src/eth/error.rs | 61 +++++++++++++-------- crates/rpc/rpc/src/eth/revm_utils.rs | 3 ++ examples/exex/rollup/src/db.rs | 2 +- 11 files changed, 143 insertions(+), 99 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 67f7e3a2c96f7..fc5e8abea4800 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2757,7 +2757,7 @@ dependencies = [ "k256", "log", "rand 0.8.5", - "secp256k1", + "secp256k1 0.28.2", "serde", "sha3", "zeroize", @@ -2997,7 +2997,7 @@ dependencies = [ "reth-tracing", "reth-trie", "rusqlite", - "secp256k1", + "secp256k1 0.28.2", "serde_json", "tokio", ] @@ -4883,7 +4883,7 @@ dependencies = [ "reth-network", "reth-network-types", "reth-primitives", - "secp256k1", + "secp256k1 0.28.2", "tokio", ] @@ -5783,7 
+5783,7 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-tracing", - "secp256k1", + "secp256k1 0.28.2", "serde_json", "tokio", "tokio-stream", @@ -6613,7 +6613,7 @@ dependencies = [ "reth-net-nat", "reth-network", "reth-primitives", - "secp256k1", + "secp256k1 0.28.2", "serde", "tempfile", "toml", @@ -6692,7 +6692,7 @@ dependencies = [ "reth-network-types", "reth-primitives", "reth-tracing", - "secp256k1", + "secp256k1 0.28.2", "serde", "thiserror", "tokio", @@ -6718,7 +6718,7 @@ dependencies = [ "reth-network-types", "reth-primitives", "reth-tracing", - "secp256k1", + "secp256k1 0.28.2", "thiserror", "tokio", "tracing", @@ -6739,7 +6739,7 @@ dependencies = [ "reth-primitives", "reth-tracing", "schnellru", - "secp256k1", + "secp256k1 0.28.2", "serde", "serde_with", "thiserror", @@ -6804,7 +6804,7 @@ dependencies = [ "reth-rpc", "reth-rpc-layer", "reth-tracing", - "secp256k1", + "secp256k1 0.28.2", "serde_json", "tokio", "tokio-stream", @@ -6832,7 +6832,7 @@ dependencies = [ "reth-net-common", "reth-network-types", "reth-primitives", - "secp256k1", + "secp256k1 0.28.2", "sha2 0.10.8", "sha3", "thiserror", @@ -6877,7 +6877,7 @@ dependencies = [ "reth-network-types", "reth-primitives", "reth-tracing", - "secp256k1", + "secp256k1 0.28.2", "serde", "snap", "test-fuzz", @@ -6904,7 +6904,7 @@ dependencies = [ "reth-net-common", "reth-primitives", "reth-tracing", - "secp256k1", + "secp256k1 0.28.2", "serde", "test-fuzz", "thiserror", @@ -7048,7 +7048,7 @@ dependencies = [ "reth-network-api", "reth-network-types", "reth-primitives", - "secp256k1", + "secp256k1 0.28.2", "thiserror", "tokio", "tracing", @@ -7199,7 +7199,7 @@ dependencies = [ "reth-tracing", "reth-transaction-pool", "schnellru", - "secp256k1", + "secp256k1 0.28.2", "serde", "serde_json", "serial_test", @@ -7235,7 +7235,7 @@ dependencies = [ "alloy-rlp", "enr", "rand 0.8.5", - "secp256k1", + "secp256k1 0.28.2", "serde_json", "serde_with", "thiserror", @@ -7370,7 +7370,7 @@ dependencies = [ 
"reth-tracing", "reth-transaction-pool", "reth-trie", - "secp256k1", + "secp256k1 0.28.2", "serde", "serde_json", "shellexpand", @@ -7572,7 +7572,7 @@ dependencies = [ "revm", "revm-primitives", "roaring", - "secp256k1", + "secp256k1 0.28.2", "serde", "serde_json", "strum", @@ -7697,7 +7697,7 @@ dependencies = [ "revm-inspectors", "revm-primitives", "schnellru", - "secp256k1", + "secp256k1 0.28.2", "serde", "serde_json", "tempfile", @@ -7963,7 +7963,7 @@ version = "0.2.0-beta.7" dependencies = [ "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", "reth-primitives", - "secp256k1", + "secp256k1 0.28.2", ] [[package]] @@ -8079,9 +8079,9 @@ dependencies = [ [[package]] name = "revm" -version = "8.0.0" +version = "9.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72a454c1c650b2b2e23f0c461af09e6c31e1d15e1cbebe905a701c46b8a50afc" +checksum = "3a2c336f9921588e50871c00024feb51a521eca50ce6d01494bb9c50f837c8ed" dependencies = [ "auto_impl", "cfg-if", @@ -8095,7 +8095,7 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=a5df8a0#a5df8a041d2c82a58840776be37f935a72803917" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=ff0eca1#ff0eca19e0eee0b3d188d9f179eaf4fd5ace4bea" dependencies = [ "alloy-primitives", "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", @@ -8112,9 +8112,9 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "4.0.0" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d322f2730cd300e99d271a1704a2dfb8973d832428f5aa282aaa40e2473b5eec" +checksum = "a58182c7454179826f9dad2ca577661963092ce9d0fd0c9d682c1e9215a72e70" dependencies = [ "revm-primitives", "serde", @@ -8122,9 +8122,9 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "6.0.0" +version = "7.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "931f692f3f4fc72ec39d5d270f8e9d208c4a6008de7590ee96cf948e3b6d3f8d" +checksum = "dc8af9aa737eef0509a50d9f3cc1a631557a00ef2e70a3aa8a75d9ee0ed275bb" dependencies = [ "aurora-engine-modexp", "c-kzg", @@ -8132,16 +8132,16 @@ dependencies = [ "once_cell", "revm-primitives", "ripemd", - "secp256k1", + "secp256k1 0.29.0", "sha2 0.10.8", "substrate-bn", ] [[package]] name = "revm-primitives" -version = "3.1.1" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbbc9640790cebcb731289afb7a7d96d16ad94afeb64b5d0b66443bd151e79d6" +checksum = "b9bf5d465e64b697da6a111cb19e798b5b2ebb18e5faf2ad48e9e8d47c64add2" dependencies = [ "alloy-primitives", "auto_impl", @@ -8587,10 +8587,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" dependencies = [ "rand 0.8.5", - "secp256k1-sys", + "secp256k1-sys 0.9.2", "serde", ] +[[package]] +name = "secp256k1" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e0cc0f1cf93f4969faf3ea1c7d8a9faed25918d96affa959720823dfe86d4f3" +dependencies = [ + "rand 0.8.5", + "secp256k1-sys 0.10.0", +] + [[package]] name = "secp256k1-sys" version = "0.9.2" @@ -8600,6 +8610,15 @@ dependencies = [ "cc", ] +[[package]] +name = "secp256k1-sys" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1433bd67156263443f14d603720b082dd3121779323fce20cba2aa07b874bc1b" +dependencies = [ + "cc", +] + [[package]] name = "security-framework" version = "2.11.0" diff --git a/Cargo.toml b/Cargo.toml index ead6935d8c72d..1429909df29c7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -280,9 +280,9 @@ reth-node-events = { path = "crates/node/events" } reth-testing-utils = { path = "testing/testing-utils" } # revm -revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features 
= false } -revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "a5df8a0" } +revm = { version = "9.0.0", features = ["std", "secp256k1"], default-features = false } +revm-primitives = { version = "4.0.0", features = ["std"], default-features = false } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "ff0eca1" } # eth alloy-chains = "0.1.15" diff --git a/crates/primitives/src/account.rs b/crates/primitives/src/account.rs index d0bc3788e485d..bbaf420126634 100644 --- a/crates/primitives/src/account.rs +++ b/crates/primitives/src/account.rs @@ -1,11 +1,12 @@ use crate::{ keccak256, - revm_primitives::{Bytecode as RevmBytecode, BytecodeState, Bytes, JumpMap}, + revm_primitives::{Bytecode as RevmBytecode, Bytes}, GenesisAccount, B256, KECCAK_EMPTY, U256, }; use byteorder::{BigEndian, ReadBytesExt}; use bytes::Buf; use reth_codecs::{main_codec, Compact}; +use revm_primitives::JumpTable; use serde::{Deserialize, Serialize}; use std::ops::Deref; @@ -80,27 +81,29 @@ impl Compact for Bytecode { where B: bytes::BufMut + AsMut<[u8]>, { - buf.put_u32(self.0.bytecode.len() as u32); - buf.put_slice(self.0.bytecode.as_ref()); - let len = match self.0.state() { - BytecodeState::Raw => { + let bytecode = &self.0.bytecode()[..]; + buf.put_u32(bytecode.len() as u32); + buf.put_slice(bytecode); + let len = match &self.0 { + RevmBytecode::LegacyRaw(_) => { buf.put_u8(0); 1 } - BytecodeState::Checked { len } => { - buf.put_u8(1); - buf.put_u64(*len as u64); - 9 - } - BytecodeState::Analysed { len, jump_map } => { + // `1` has been removed. 
+ RevmBytecode::LegacyAnalyzed(analyzed) => { buf.put_u8(2); - buf.put_u64(*len as u64); - let map = jump_map.as_slice(); + buf.put_u64(analyzed.original_len() as u64); + let map = analyzed.jump_table().as_slice(); buf.put_slice(map); - 9 + map.len() + 1 + 8 + map.len() + } + RevmBytecode::Eof(_) => { + // buf.put_u8(3); + // TODO(EOF) + todo!("EOF") } }; - len + self.0.bytecode.len() + 4 + len + bytecode.len() + 4 } fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { @@ -109,17 +112,17 @@ impl Compact for Bytecode { let variant = buf.read_u8().expect("could not read bytecode variant"); let decoded = match variant { 0 => Bytecode(RevmBytecode::new_raw(bytes)), - 1 => Bytecode(unsafe { - RevmBytecode::new_checked(bytes, buf.read_u64::().unwrap() as usize) - }), - 2 => Bytecode(RevmBytecode { - bytecode: bytes, - state: BytecodeState::Analysed { - len: buf.read_u64::().unwrap() as usize, - jump_map: JumpMap::from_slice(buf), - }, + 1 => unreachable!("Junk data in database: checked Bytecode variant was removed"), + 2 => Bytecode(unsafe { + RevmBytecode::new_analyzed( + bytes, + buf.read_u64::().unwrap() as usize, + JumpTable::from_slice(buf), + ) }), - _ => unreachable!("Junk data in database: unknown BytecodeState variant"), + // TODO(EOF) + 3 => todo!("EOF"), + _ => unreachable!("Junk data in database: unknown Bytecode variant"), }; (decoded, &[]) } @@ -129,6 +132,7 @@ impl Compact for Bytecode { mod tests { use super::*; use crate::hex_literal::hex; + use revm_primitives::LegacyAnalyzedBytecode; #[test] fn test_account() { @@ -174,17 +178,21 @@ mod tests { #[test] fn test_bytecode() { let mut buf = vec![]; - let mut bytecode = Bytecode(RevmBytecode::new_raw(Bytes::default())); - let len = bytecode.clone().to_compact(&mut buf); + let bytecode = Bytecode::new_raw(Bytes::default()); + let len = bytecode.to_compact(&mut buf); assert_eq!(len, 5); let mut buf = vec![]; - bytecode.0.bytecode = Bytes::from(hex!("ffff").as_ref()); - let len = 
bytecode.clone().to_compact(&mut buf); + let bytecode = Bytecode::new_raw(Bytes::from(&hex!("ffff"))); + let len = bytecode.to_compact(&mut buf); assert_eq!(len, 7); let mut buf = vec![]; - bytecode.0.state = BytecodeState::Analysed { len: 2, jump_map: JumpMap::from_slice(&[0]) }; + let bytecode = Bytecode(RevmBytecode::LegacyAnalyzed(LegacyAnalyzedBytecode::new( + Bytes::from(&hex!("ffff")), + 2, + JumpTable::from_slice(&[0]), + ))); let len = bytecode.clone().to_compact(&mut buf); assert_eq!(len, 16); diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 71d264712b7d3..d7317951e187a 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -116,7 +116,7 @@ pub use alloy_primitives::{ StorageValue, TxHash, TxIndex, TxKind, TxNumber, B128, B256, B512, B64, U128, U256, U64, U8, }; pub use reth_ethereum_forks::*; -pub use revm_primitives::{self, JumpMap}; +pub use revm_primitives::{self, JumpTable}; #[doc(hidden)] #[deprecated = "use B64 instead"] diff --git a/crates/primitives/src/revm/compat.rs b/crates/primitives/src/revm/compat.rs index 9727708823f7b..a153e86e9e734 100644 --- a/crates/primitives/src/revm/compat.rs +++ b/crates/primitives/src/revm/compat.rs @@ -1,8 +1,5 @@ use crate::{revm_primitives::AccountInfo, Account, Address, TxKind, KECCAK_EMPTY, U256}; -use revm::{ - interpreter::gas::validate_initial_tx_gas, - primitives::{MergeSpec, ShanghaiSpec}, -}; +use revm::{interpreter::gas::validate_initial_tx_gas, primitives::SpecId}; /// Converts a Revm [`AccountInfo`] into a Reth [`Account`]. 
/// @@ -38,9 +35,8 @@ pub fn calculate_intrinsic_gas_after_merge( access_list: &[(Address, Vec)], is_shanghai: bool, ) -> u64 { - if is_shanghai { - validate_initial_tx_gas::(input, kind.is_create(), access_list) - } else { - validate_initial_tx_gas::(input, kind.is_create(), access_list) - } + let spec_id = if is_shanghai { SpecId::SHANGHAI } else { SpecId::MERGE }; + // TODO(EOF) + let initcodes = &[]; + validate_initial_tx_gas(spec_id, input, kind.is_create(), access_list, initcodes) } diff --git a/crates/primitives/src/revm/env.rs b/crates/primitives/src/revm/env.rs index f2c14bbbd743a..f519abc058231 100644 --- a/crates/primitives/src/revm/env.rs +++ b/crates/primitives/src/revm/env.rs @@ -171,6 +171,9 @@ pub fn fill_tx_env_with_beacon_root_contract_call(env: &mut Env, parent_beacon_b // enveloped tx size. enveloped_tx: Some(Bytes::default()), }, + // TODO(EOF) + eof_initcodes: vec![], + eof_initcodes_hashed: Default::default(), }; // ensure the block gas limit is >= the tx diff --git a/crates/revm/src/state_change.rs b/crates/revm/src/state_change.rs index 1279e1b9626f0..2799734254730 100644 --- a/crates/revm/src/state_change.rs +++ b/crates/revm/src/state_change.rs @@ -87,7 +87,7 @@ where } // get previous env - let previous_env = Box::new(evm.env().clone()); + let previous_env = Box::new(evm.context.env().clone()); // modify env for pre block call fill_tx_env_with_beacon_root_contract_call(&mut evm.context.evm.env, parent_beacon_block_root); diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index acd5c30e87348..d638251322c8f 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ b/crates/rpc/rpc/src/eth/api/call.rs @@ -467,7 +467,7 @@ where ExecutionResult::Success { .. 
} => { // transaction succeeded by manually increasing the gas limit to // highest, which means the caller lacks funds to pay for the tx - RpcInvalidTransactionError::BasicOutOfGas(U256::from(req_gas_limit)).into() + RpcInvalidTransactionError::BasicOutOfGas(req_gas_limit).into() } ExecutionResult::Revert { output, .. } => { // reverted again after bumping the limit diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index df2aef8006ee0..77bffee400b78 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -4,7 +4,7 @@ use crate::result::{internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error use alloy_sol_types::decode_revert_reason; use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObject}; use reth_interfaces::RethError; -use reth_primitives::{revm_primitives::InvalidHeader, Address, Bytes, U256}; +use reth_primitives::{revm_primitives::InvalidHeader, Address, Bytes}; use reth_rpc_types::{ error::EthRpcErrorCode, request::TransactionInputError, BlockError, ToRpcError, }; @@ -267,14 +267,14 @@ pub enum RpcInvalidTransactionError { /// Thrown when calculating gas usage #[error("gas uint64 overflow")] GasUintOverflow, - /// returned if the transaction is specified to use less gas than required to start the + /// Thrown if the transaction is specified to use less gas than required to start the /// invocation. #[error("intrinsic gas too low")] GasTooLow, - /// returned if the transaction gas exceeds the limit + /// Thrown if the transaction gas exceeds the limit #[error("intrinsic gas too high")] GasTooHigh, - /// thrown if a transaction is not supported in the current network configuration. + /// Thrown if a transaction is not supported in the current network configuration. 
#[error("transaction type not supported")] TxTypeNotSupported, /// Thrown to ensure no one is able to specify a transaction with a tip higher than the total @@ -291,25 +291,29 @@ pub enum RpcInvalidTransactionError { #[error("max fee per gas less than block base fee")] FeeCapTooLow, /// Thrown if the sender of a transaction is a contract. - #[error("sender not an eoa")] + #[error("sender is not an EOA")] SenderNoEOA, - /// Thrown during estimate if caller has insufficient funds to cover the tx. - #[error("out of gas: gas required exceeds allowance: {0:?}")] - BasicOutOfGas(U256), - /// As BasicOutOfGas but thrown when gas exhausts during memory expansion. - #[error("out of gas: gas exhausts during memory expansion: {0:?}")] - MemoryOutOfGas(U256), - /// As BasicOutOfGas but thrown when gas exhausts during precompiled contract execution. - #[error("out of gas: gas exhausts during precompiled contract execution: {0:?}")] - PrecompileOutOfGas(U256), - /// revm's Type cast error, U256 casts down to a u64 with overflow - #[error("out of gas: revm's Type cast error, U256 casts down to a u64 with overflow {0:?}")] - InvalidOperandOutOfGas(U256), + /// Gas limit was exceeded during execution. + /// Contains the gas limit. + #[error("out of gas: gas required exceeds allowance: {0}")] + BasicOutOfGas(u64), + /// Gas limit was exceeded during memory expansion. + /// Contains the gas limit. + #[error("out of gas: gas exhausted during memory expansion: {0}")] + MemoryOutOfGas(u64), + /// Gas limit was exceeded during precompile execution. + /// Contains the gas limit. + #[error("out of gas: gas exhausted during precompiled contract execution: {0}")] + PrecompileOutOfGas(u64), + /// An operand to an opcode was invalid or out of range. + /// Contains the gas limit. 
+ #[error("out of gas: invalid operand to an opcode; {0}")] + InvalidOperandOutOfGas(u64), /// Thrown if executing a transaction failed during estimate/call - #[error("{0}")] + #[error(transparent)] Revert(RevertError), /// Unspecific EVM halt error. - #[error("EVM error {0:?}")] + #[error("EVM error: {0:?}")] EvmHalt(HaltReason), /// Invalid chain id set for the transaction. #[error("invalid chain ID")] @@ -337,8 +341,13 @@ pub enum RpcInvalidTransactionError { #[error("blob transaction missing blob hashes")] BlobTransactionMissingBlobHashes, /// Blob transaction has too many blobs - #[error("blob transaction exceeds max blobs per block")] - TooManyBlobs, + #[error("blob transaction exceeds max blobs per block; got {have}, max {max}")] + TooManyBlobs { + /// The maximum number of blobs allowed. + max: usize, + /// The number of blobs in the transaction. + have: usize, + }, /// Blob transaction is a create transaction #[error("blob transaction is a create transaction")] BlobTransactionIsCreate, @@ -385,7 +394,6 @@ impl RpcInvalidTransactionError { /// Converts the out of gas error pub(crate) fn out_of_gas(reason: OutOfGasError, gas_limit: u64) -> Self { - let gas_limit = U256::from(gas_limit); match reason { OutOfGasError::Basic => RpcInvalidTransactionError::BasicOutOfGas(gas_limit), OutOfGasError::Memory => RpcInvalidTransactionError::MemoryOutOfGas(gas_limit), @@ -462,7 +470,9 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::BlobVersionNotSupported => { RpcInvalidTransactionError::BlobHashVersionMismatch } - InvalidTransaction::TooManyBlobs => RpcInvalidTransactionError::TooManyBlobs, + InvalidTransaction::TooManyBlobs { max, have } => { + RpcInvalidTransactionError::TooManyBlobs { max, have } + } InvalidTransaction::BlobCreateTransaction => { RpcInvalidTransactionError::BlobTransactionIsCreate } @@ -476,6 +486,11 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::HaltedDepositPostRegolith => RpcInvalidTransactionError::Optimism( 
OptimismInvalidTransactionError::HaltedDepositPostRegolith, ), + // TODO(EOF) + InvalidTransaction::EofInitcodesNotSupported => todo!("EOF"), + InvalidTransaction::EofInitcodesNumberLimit => todo!("EOF"), + InvalidTransaction::EofInitcodesSizeLimit => todo!("EOF"), + InvalidTransaction::EofCrateShouldHaveToAddress => todo!("EOF"), } } } diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index c2855163bad40..e0cb8e14d9cf4 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -272,6 +272,9 @@ pub(crate) fn create_txn_env( max_fee_per_blob_gas, #[cfg(feature = "optimism")] optimism: OptimismFields { enveloped_tx: Some(Bytes::new()), ..Default::default() }, + // TODO(EOF) + eof_initcodes: Default::default(), + eof_initcodes_hashed: Default::default(), }; Ok(env) diff --git a/examples/exex/rollup/src/db.rs b/examples/exex/rollup/src/db.rs index 201272a0000b6..2c42beafb93c1 100644 --- a/examples/exex/rollup/src/db.rs +++ b/examples/exex/rollup/src/db.rs @@ -158,7 +158,7 @@ impl Database { for (hash, bytecode) in changeset.contracts { tx.execute( "INSERT INTO bytecode (hash, data) VALUES (?, ?) 
ON CONFLICT(hash) DO NOTHING", - (hash.to_string(), bytecode.bytes().to_string()), + (hash.to_string(), bytecode.bytecode().to_string()), )?; } From aed78a6e4dcf0a9baa43924da35e867dedf3191a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 13 May 2024 10:01:15 +0200 Subject: [PATCH 540/700] chore: bump alloy dd7a999 (#8215) --- Cargo.lock | 112 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 28 +++++++------- 2 files changed, 70 insertions(+), 70 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fc5e8abea4800..1d461214233da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -133,12 +133,12 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" +source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "c-kzg", "serde", ] @@ -177,11 +177,11 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" +source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "arbitrary", "c-kzg", "derive_more", @@ -211,10 +211,10 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" 
+source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "serde", "serde_json", ] @@ -245,7 +245,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" +source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ "alloy-primitives", "serde", @@ -257,13 +257,13 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" +source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "alloy-signer", "alloy-sol-types", "async-trait", @@ -274,9 +274,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" +source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", 
"alloy-primitives", "k256", "serde_json", @@ -316,14 +316,14 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" +source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "alloy-rpc-types-trace", "alloy-transport", "alloy-transport-http", @@ -366,7 +366,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" +source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -386,14 +386,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" +source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "alloy-sol-types", "arbitrary", "itertools 0.12.1", @@ -426,19 +426,19 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" +source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "serde", ] [[package]] name = "alloy-rpc-types-beacon" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" +source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "alloy-primitives", "alloy-rpc-types-engine", "serde", @@ -448,14 +448,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" +source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + 
"alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", @@ -468,11 +468,11 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" +source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "serde", "serde_json", ] @@ -480,7 +480,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" +source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ "alloy-primitives", "serde", @@ -500,7 +500,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" +source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ "alloy-primitives", "async-trait", @@ -513,9 +513,9 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" +source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-consensus 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "alloy-network", "alloy-primitives", "alloy-signer", @@ -588,7 +588,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" +source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -606,7 +606,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=f1d7085#f1d708522e7fe676a3ee86dd198f87182f220927" +source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -2977,7 +2977,7 @@ dependencies = [ name = "exex-rollup" version = "0.0.0" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "alloy-rlp", "alloy-sol-types", "eyre", @@ -6578,8 +6578,8 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.7" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "alloy-primitives", "arbitrary", "bytes", @@ -6784,9 +6784,9 @@ dependencies = [ name = "reth-e2e-test-utils" version = "0.2.0-beta.7" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "alloy-network", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", 
"alloy-signer", "alloy-signer-wallet", "eyre", @@ -6990,7 +6990,7 @@ dependencies = [ name = "reth-evm-ethereum" version = "0.2.0-beta.7" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "reth-evm", "reth-interfaces", "reth-primitives", @@ -7541,11 +7541,11 @@ name = "reth-primitives" version = "0.2.0-beta.7" dependencies = [ "alloy-chains", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "alloy-trie", "arbitrary", "assert_matches", @@ -7644,7 +7644,7 @@ dependencies = [ name = "reth-revm" version = "0.2.0-beta.7" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "reth-consensus-common", "reth-interfaces", "reth-primitives", @@ -7829,7 +7829,7 @@ name = "reth-rpc-types" version = "0.2.0-beta.7" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "alloy-rpc-types-anvil", "alloy-rpc-types-beacon", "alloy-rpc-types-engine", @@ -7854,7 +7854,7 @@ name = "reth-rpc-types-compat" version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "reth-primitives", "reth-rpc-types", "serde_json", @@ -7961,7 +7961,7 @@ 
dependencies = [ name = "reth-testing-utils" version = "0.2.0-beta.7" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "reth-primitives", "secp256k1 0.28.2", ] @@ -8095,10 +8095,10 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=ff0eca1#ff0eca19e0eee0b3d188d9f179eaf4fd5ace4bea" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=21a2db5#21a2db5a3a828a35e82b116e5d046a9efaca1449" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=f1d7085)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", diff --git a/Cargo.toml b/Cargo.toml index 1429909df29c7..3492f7d313834 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -282,7 +282,7 @@ reth-testing-utils = { path = "testing/testing-utils" } # revm revm = { version = "9.0.0", features = ["std", "secp256k1"], default-features = false } revm-primitives = { version = "4.0.0", features = ["std"], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "ff0eca1" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "21a2db5" } # eth alloy-chains = "0.1.15" @@ -291,21 +291,21 @@ alloy-dyn-abi = "0.7.2" alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } -alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } 
-alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } +alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "f1d7085" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "f1d7085" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "dd7a999" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } # misc auto_impl = "1" From 4fa627736681289ba899b38f1c7a97d9fcf33dc6 Mon Sep 17 00:00:00 2001 From: Dan Cline 
<6798349+Rjected@users.noreply.github.com> Date: Mon, 13 May 2024 11:16:37 +0300 Subject: [PATCH 541/700] feat: add prague timestamp conversion from genesis to chainspec (#8216) --- crates/primitives/src/chain/spec.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index de56ff1fe8b67..98ec3d972dcc0 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -1024,6 +1024,7 @@ impl From for ChainSpec { let time_hardfork_opts = [ (Hardfork::Shanghai, genesis.config.shanghai_time), (Hardfork::Cancun, genesis.config.cancun_time), + (Hardfork::Prague, genesis.config.prague_time), #[cfg(feature = "optimism")] (Hardfork::Regolith, optimism_genesis_info.regolith_time), #[cfg(feature = "optimism")] @@ -2971,6 +2972,29 @@ Post-merge hard forks (timestamp based): assert_eq!(genesis.config.cancun_time, Some(4661)); } + #[test] + fn test_parse_prague_genesis_all_formats() { + let s = r#"{"config":{"ethash":{},"chainId":1337,"homesteadBlock":0,"eip150Block":0,"eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"berlinBlock":0,"londonBlock":0,"terminalTotalDifficulty":0,"terminalTotalDifficultyPassed":true,"shanghaiTime":0,"cancunTime":4661, "pragueTime": 
4662},"nonce":"0x0","timestamp":"0x0","extraData":"0x","gasLimit":"0x4c4b40","difficulty":"0x1","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"658bdf435d810c91414ec09147daa6db62406379":{"balance":"0x487a9a304539440000"},"aa00000000000000000000000000000000000000":{"code":"0x6042","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000000","0x0100000000000000000000000000000000000000000000000000000000000000":"0x0100000000000000000000000000000000000000000000000000000000000000","0x0200000000000000000000000000000000000000000000000000000000000000":"0x0200000000000000000000000000000000000000000000000000000000000000","0x0300000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000303"},"balance":"0x1","nonce":"0x1"},"bb00000000000000000000000000000000000000":{"code":"0x600154600354","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000000","0x0100000000000000000000000000000000000000000000000000000000000000":"0x0100000000000000000000000000000000000000000000000000000000000000","0x0200000000000000000000000000000000000000000000000000000000000000":"0x0200000000000000000000000000000000000000000000000000000000000000","0x0300000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000303"},"balance":"0x2","nonce":"0x1"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","baseFeePerGas":"0x3b9aca00"}"#; + let genesis: AllGenesisFormats = serde_json::from_str(s).unwrap(); + + // this should be the genesis format + let genesis = match genesis { + AllGenesisFormats::Geth(genesis) => genesis, + _ => panic!("expected 
geth genesis format"), + }; + + // assert that the alloc was picked up + let acc = genesis + .alloc + .get(&"0xaa00000000000000000000000000000000000000".parse::
().unwrap()) + .unwrap(); + assert_eq!(acc.balance, U256::from(1)); + // assert that the cancun time was picked up + assert_eq!(genesis.config.cancun_time, Some(4661)); + // assert that the prague time was picked up + assert_eq!(genesis.config.prague_time, Some(4662)); + } + #[test] fn test_parse_cancun_genesis_all_formats() { let s = r#"{"config":{"ethash":{},"chainId":1337,"homesteadBlock":0,"eip150Block":0,"eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"berlinBlock":0,"londonBlock":0,"terminalTotalDifficulty":0,"terminalTotalDifficultyPassed":true,"shanghaiTime":0,"cancunTime":4661},"nonce":"0x0","timestamp":"0x0","extraData":"0x","gasLimit":"0x4c4b40","difficulty":"0x1","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"658bdf435d810c91414ec09147daa6db62406379":{"balance":"0x487a9a304539440000"},"aa00000000000000000000000000000000000000":{"code":"0x6042","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000000","0x0100000000000000000000000000000000000000000000000000000000000000":"0x0100000000000000000000000000000000000000000000000000000000000000","0x0200000000000000000000000000000000000000000000000000000000000000":"0x0200000000000000000000000000000000000000000000000000000000000000","0x0300000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000303"},"balance":"0x1","nonce":"0x1"},"bb00000000000000000000000000000000000000":{"code":"0x600154600354","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000000","0x0100000000000000000000000000000000000000000000000000000000000000":"0x0100000000000000000000000000000000000000000000000000000000000000","0x0200000000000000
000000000000000000000000000000000000000000000000":"0x0200000000000000000000000000000000000000000000000000000000000000","0x0300000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000303"},"balance":"0x2","nonce":"0x1"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","baseFeePerGas":"0x3b9aca00"}"#; From 12f1e9c944f75b70a360ed48f17b7a724028038d Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 13 May 2024 17:07:37 +0300 Subject: [PATCH 542/700] chore: remove validate_block_regarding_chain (#8224) --- crates/consensus/common/Cargo.toml | 2 -- crates/consensus/common/src/validation.rs | 39 +---------------------- 2 files changed, 1 insertion(+), 40 deletions(-) diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index af93788ee67d5..accf2d08e0e88 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -13,8 +13,6 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true -reth-interfaces.workspace = true -reth-provider.workspace = true reth-consensus.workspace=true [dev-dependencies] diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index b67d40e98533e..bf94937209cd2 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,7 +1,6 @@ //! Collection of methods for block validation. 
use reth_consensus::ConsensusError; -use reth_interfaces::RethResult; use reth_primitives::{ constants::{ eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, @@ -9,7 +8,6 @@ use reth_primitives::{ }, ChainSpec, GotExpected, Hardfork, Header, SealedBlock, SealedHeader, }; -use reth_provider::{HeaderProvider, WithdrawalsProvider}; /// Validate header standalone pub fn validate_header_standalone( @@ -110,33 +108,6 @@ pub fn validate_block_standalone( Ok(()) } -/// Validate block with regard to chain (parent) -/// -/// Checks: -/// If we already know the block. -/// If parent is known -/// -/// Returns parent block header -pub fn validate_block_regarding_chain( - block: &SealedBlock, - provider: &PROV, -) -> RethResult { - let hash = block.header.hash(); - - // Check if block is known. - if provider.is_known(&hash)? { - return Err(ConsensusError::BlockKnown { hash, number: block.header.number }.into()) - } - - // Check if parent is known. - let parent = provider - .header(&block.parent_hash)? - .ok_or(ConsensusError::ParentUnknown { hash: block.parent_hash })?; - - // Return parent header. - Ok(parent.seal(block.parent_hash)) -} - /// Validates that the EIP-4844 header fields exist and conform to the spec. This ensures that: /// /// * `blob_gas_used` exists as a header field @@ -204,7 +175,7 @@ mod tests { BlockNumber, Bytes, ChainSpecBuilder, Signature, Transaction, TransactionSigned, TxEip4844, Withdrawal, Withdrawals, U256, }; - use reth_provider::AccountReader; + use reth_provider::{AccountReader, HeaderProvider, WithdrawalsProvider}; use std::ops::RangeBounds; mock! 
{ @@ -398,22 +369,14 @@ mod tests { assert_eq!(validate_block_standalone(&block, &chain_spec), Ok(())); let block = create_block_with_withdrawals(&[5, 6, 7, 8, 9]); assert_eq!(validate_block_standalone(&block, &chain_spec), Ok(())); - let (_, parent) = mock_block(); - let provider = Provider::new(Some(parent.clone())); - let block = create_block_with_withdrawals(&[0, 1, 2]); - let res = validate_block_regarding_chain(&block, &provider); - assert!(res.is_ok()); // Withdrawal index should be the last withdrawal index + 1 let mut provider = Provider::new(Some(parent)); - let block = create_block_with_withdrawals(&[3, 4, 5]); provider .withdrawals_provider .expect_latest_withdrawal() .return_const(Ok(Some(Withdrawal { index: 2, ..Default::default() }))); - let res = validate_block_regarding_chain(&block, &provider); - assert!(res.is_ok()); } #[test] From 081796b138fd00a6d0af2ad463123fdbe04765ab Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 13 May 2024 17:39:06 +0300 Subject: [PATCH 543/700] feat: impl `Compact` for `FixedBytes` (#8222) --- .../codecs/derive/src/compact/generator.rs | 3 ++- crates/storage/codecs/src/lib.rs | 24 +++++++++++++++---- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/crates/storage/codecs/derive/src/compact/generator.rs b/crates/storage/codecs/derive/src/compact/generator.rs index 03dab1a144c4c..c28bf8d1a4fbc 100644 --- a/crates/storage/codecs/derive/src/compact/generator.rs +++ b/crates/storage/codecs/derive/src/compact/generator.rs @@ -52,7 +52,8 @@ pub fn generate_from_to(ident: &Ident, fields: &FieldList, is_zstd: bool) -> Tok /// Generates code to implement the `Compact` trait method `to_compact`. 
fn generate_from_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> TokenStream2 { let mut lines = vec![]; - let mut known_types = vec!["B256", "Address", "Bloom", "Vec", "TxHash", "BlockHash"]; + let mut known_types = + vec!["B256", "Address", "Bloom", "Vec", "TxHash", "BlockHash", "FixedBytes"]; // Only types without `Bytes` should be added here. It's currently manually added, since // it's hard to figure out with derive_macro which types have Bytes fields. diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 9c5d757b9fdeb..907fee440d8e5 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -17,7 +17,7 @@ pub use reth_codecs_derive::*; -use alloy_primitives::{Address, Bloom, Bytes, B256, B512, U256}; +use alloy_primitives::{Address, Bloom, Bytes, FixedBytes, U256}; use bytes::Buf; #[cfg(any(test, feature = "alloy"))] @@ -301,9 +301,9 @@ impl Compact for [u8; N] { } } -/// Implements the [`Compact`] trait for fixed size byte array types like [`B256`]. +/// Implements the [`Compact`] trait for wrappers over fixed size byte array types. #[macro_export] -macro_rules! impl_compact_for_bytes { +macro_rules! impl_compact_for_wrapped_bytes { ($($name:tt),+) => { $( impl Compact for $name { @@ -324,8 +324,23 @@ macro_rules! impl_compact_for_bytes { )+ }; } +impl_compact_for_wrapped_bytes!(Address, Bloom); -impl_compact_for_bytes!(Address, B256, B512, Bloom); +impl Compact for FixedBytes { + #[inline] + fn to_compact(self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + self.0.to_compact(buf) + } + + #[inline] + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (v, buf) = <[u8; N]>::from_compact(buf, len); + (Self::from(v), buf) + } +} impl Compact for bool { /// `bool` vars go directly to the `StructFlags` and are not written to the buffer. @@ -378,6 +393,7 @@ const fn decode_varuint_panic() -> ! 
{ #[cfg(test)] mod tests { use super::*; + use alloy_primitives::B256; #[test] fn compact_bytes() { From 19e5fcb003aaa1a1caa7f502d791c91f2f83bd54 Mon Sep 17 00:00:00 2001 From: Delweng Date: Tue, 14 May 2024 20:41:32 +0800 Subject: [PATCH 544/700] docs(network): update command instruction for the --trusted-only (#8246) Signed-off-by: jsvisa Co-authored-by: Matthias Seitz --- crates/net/network/src/peers/manager.rs | 2 +- crates/node-core/src/args/network.rs | 15 ++++++--------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/crates/net/network/src/peers/manager.rs b/crates/net/network/src/peers/manager.rs index d6ae9c4da812b..e13b080afae71 100644 --- a/crates/net/network/src/peers/manager.rs +++ b/crates/net/network/src/peers/manager.rs @@ -1276,7 +1276,7 @@ pub struct PeersConfig { /// How often to recheck free slots for outbound connections. #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] pub refill_slots_interval: Duration, - /// Trusted nodes to connect to. + /// Trusted nodes to connect to or accept from pub trusted_nodes: HashSet, /// Connect to or accept from trusted nodes only? 
#[cfg_attr(feature = "serde", serde(alias = "connect_trusted_nodes_only"))] diff --git a/crates/node-core/src/args/network.rs b/crates/node-core/src/args/network.rs index 9ff93c5a9d233..8202739bc90d7 100644 --- a/crates/node-core/src/args/network.rs +++ b/crates/node-core/src/args/network.rs @@ -21,6 +21,7 @@ use reth_primitives::{mainnet_nodes, ChainSpec, NodeRecord}; use secp256k1::SecretKey; use std::{ net::{IpAddr, Ipv4Addr, Ipv6Addr}, + ops::Not, path::PathBuf, sync::Arc, }; @@ -39,7 +40,7 @@ pub struct NetworkArgs { #[arg(long, value_delimiter = ',')] pub trusted_peers: Vec, - /// Connect only to trusted peers + /// Connect to or accept from trusted peers only #[arg(long)] pub trusted_only: bool, @@ -156,13 +157,9 @@ impl NetworkArgs { self.discovery.apply_to_builder(network_config_builder) } - /// If `no_persist_peers` is true then this returns the path to the persistent peers file path. + /// If `no_persist_peers` is false then this returns the path to the persistent peers file path. pub fn persistent_peers_file(&self, peers_file: PathBuf) -> Option { - if self.no_persist_peers { - return None - } - - Some(peers_file) + self.no_persist_peers.not().then_some(peers_file) } /// Sets the p2p port to zero, to allow the OS to assign a random unused port when @@ -258,12 +255,12 @@ pub struct DiscoveryArgs { /// The interval in seconds at which to carry out boost lookup queries, for a fixed number of /// times, at bootstrap. - #[arg(id = "discovery.v5.bootstrap.lookup-interval", long = "discovery.v5.bootstrap.lookup-interval", value_name = "DISCOVERY_V5_bootstrap_lookup_interval", + #[arg(id = "discovery.v5.bootstrap.lookup-interval", long = "discovery.v5.bootstrap.lookup-interval", value_name = "DISCOVERY_V5_bootstrap_lookup_interval", default_value_t = DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL)] pub discv5_bootstrap_lookup_interval: u64, /// The number of times to carry out boost lookup queries at bootstrap. 
- #[arg(id = "discovery.v5.bootstrap.lookup-countdown", long = "discovery.v5.bootstrap.lookup-countdown", value_name = "DISCOVERY_V5_bootstrap_lookup_countdown", + #[arg(id = "discovery.v5.bootstrap.lookup-countdown", long = "discovery.v5.bootstrap.lookup-countdown", value_name = "DISCOVERY_V5_bootstrap_lookup_countdown", default_value_t = DEFAULT_COUNT_BOOTSTRAP_LOOKUPS)] pub discv5_bootstrap_lookup_countdown: u64, } From d1f38f16613417ef1d964b9dd383fdcef25950c5 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 14 May 2024 17:27:33 +0200 Subject: [PATCH 545/700] feat: proof verification (#8220) --- Cargo.lock | 5 +-- Cargo.toml | 2 +- crates/primitives/src/trie/mod.rs | 2 +- crates/primitives/src/trie/proofs.rs | 44 +++++++++++++++++-- .../storage/provider/src/test_utils/mock.rs | 4 +- .../storage/provider/src/test_utils/noop.rs | 4 +- crates/trie/src/proof.rs | 34 ++++++++------ 7 files changed, 70 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1d461214233da..1d4f74de6a762 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -619,9 +619,9 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "beb28aa4ecd32fdfa1b1bdd111ff7357dd562c6b2372694cf9e613434fcba659" +checksum = "d55bd16fdb7ff4bd74cc4c878eeac7e8a27c0d7ba9df4ab58d9310aaafb62d43" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -633,7 +633,6 @@ dependencies = [ "proptest", "proptest-derive", "serde", - "smallvec", "tracing", ] diff --git a/Cargo.toml b/Cargo.toml index 3492f7d313834..102fffa88a589 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -290,7 +290,7 @@ alloy-primitives = "0.7.2" alloy-dyn-abi = "0.7.2" alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" -alloy-trie = "0.3.1" +alloy-trie = "0.4.0" alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } 
alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } diff --git a/crates/primitives/src/trie/mod.rs b/crates/primitives/src/trie/mod.rs index 848aaed646f47..ed61aca39b583 100644 --- a/crates/primitives/src/trie/mod.rs +++ b/crates/primitives/src/trie/mod.rs @@ -24,4 +24,4 @@ pub use storage::StorageTrieEntry; mod subnode; pub use subnode::StoredSubNode; -pub use alloy_trie::{BranchNodeCompact, HashBuilder, TrieMask, EMPTY_ROOT_HASH}; +pub use alloy_trie::{proof, BranchNodeCompact, HashBuilder, TrieMask, EMPTY_ROOT_HASH}; diff --git a/crates/primitives/src/trie/proofs.rs b/crates/primitives/src/trie/proofs.rs index 094f4d29df315..1949867be5ba6 100644 --- a/crates/primitives/src/trie/proofs.rs +++ b/crates/primitives/src/trie/proofs.rs @@ -1,10 +1,15 @@ //! Merkle trie proofs. -use super::Nibbles; +use super::{ + proof::{verify_proof, ProofVerificationError}, + Nibbles, TrieAccount, +}; use crate::{keccak256, Account, Address, Bytes, B256, U256}; +use alloy_rlp::encode_fixed_size; +use alloy_trie::EMPTY_ROOT_HASH; /// The merkle proof with the relevant account info. -#[derive(PartialEq, Eq, Default, Debug)] +#[derive(PartialEq, Eq, Debug)] pub struct AccountProof { /// The address associated with the account. pub address: Address, @@ -22,7 +27,13 @@ pub struct AccountProof { impl AccountProof { /// Create new account proof entity. pub fn new(address: Address) -> Self { - Self { address, ..Default::default() } + Self { + address, + info: None, + proof: Vec::new(), + storage_root: EMPTY_ROOT_HASH, + storage_proofs: Vec::new(), + } } /// Set account info, storage root and requested storage proofs. @@ -41,6 +52,26 @@ impl AccountProof { pub fn set_proof(&mut self, proof: Vec) { self.proof = proof; } + + /// Verify the storage proofs and account proof against the provided state root. + pub fn verify(&self, root: B256) -> Result<(), ProofVerificationError> { + // Verify storage proofs. 
+ for storage_proof in &self.storage_proofs { + storage_proof.verify(self.storage_root)?; + } + + // Verify the account proof. + let expected = if self.info.is_none() && self.storage_root == EMPTY_ROOT_HASH { + None + } else { + Some(alloy_rlp::encode(TrieAccount::from(( + self.info.unwrap_or_default(), + self.storage_root, + )))) + }; + let nibbles = Nibbles::unpack(keccak256(self.address)); + verify_proof(root, nibbles, expected, &self.proof) + } } /// The merkle proof of the storage entry. @@ -83,4 +114,11 @@ impl StorageProof { pub fn set_proof(&mut self, proof: Vec) { self.proof = proof; } + + /// Verify the proof against the provided storage root. + pub fn verify(&self, root: B256) -> Result<(), ProofVerificationError> { + let expected = + if self.value.is_zero() { None } else { Some(encode_fixed_size(&self.value).to_vec()) }; + verify_proof(root, self.nibbles.clone(), expected, &self.proof) + } } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index db490bd37ca74..96e137ac6fdf9 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -560,8 +560,8 @@ impl StateProvider for MockEthProvider { })) } - fn proof(&self, _address: Address, _keys: &[B256]) -> ProviderResult { - Ok(AccountProof::default()) + fn proof(&self, address: Address, _keys: &[B256]) -> ProviderResult { + Ok(AccountProof::new(address)) } } diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 626bd535115e9..373dc4d7e5f09 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -319,8 +319,8 @@ impl StateProvider for NoopProvider { Ok(None) } - fn proof(&self, _address: Address, _keys: &[B256]) -> ProviderResult { - Ok(AccountProof::default()) + fn proof(&self, address: Address, _keys: &[B256]) -> ProviderResult { + Ok(AccountProof::new(address)) } } diff 
--git a/crates/trie/src/proof.rs b/crates/trie/src/proof.rs index 55eb47710f038..80f0c552e3892 100644 --- a/crates/trie/src/proof.rs +++ b/crates/trie/src/proof.rs @@ -11,7 +11,7 @@ use reth_interfaces::trie::{StateRootError, StorageRootError}; use reth_primitives::{ constants::EMPTY_ROOT_HASH, keccak256, - trie::{AccountProof, HashBuilder, Nibbles, StorageProof, TrieAccount}, + trie::{proof::ProofRetainer, AccountProof, HashBuilder, Nibbles, StorageProof, TrieAccount}, Address, B256, }; @@ -60,8 +60,8 @@ where let walker = TrieWalker::new(trie_cursor, prefix_set.freeze()); // Create a hash builder to rebuild the root node since it is not available in the database. - let mut hash_builder = - HashBuilder::default().with_proof_retainer(Vec::from([target_nibbles])); + let retainer = ProofRetainer::from_iter([target_nibbles]); + let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); let mut account_rlp = Vec::with_capacity(128); let mut account_node_iter = AccountNodeIter::new(walker, hashed_account_cursor); @@ -126,7 +126,8 @@ where ); let walker = TrieWalker::new(trie_cursor, prefix_set); - let mut hash_builder = HashBuilder::default().with_proof_retainer(target_nibbles); + let retainer = ProofRetainer::from_iter(target_nibbles); + let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); let mut storage_node_iter = StorageNodeIter::new(walker, hashed_storage_cursor, hashed_address); while let Some(node) = storage_node_iter.try_next()? { @@ -200,7 +201,7 @@ mod tests { fn insert_genesis( provider_factory: &ProviderFactory, chain_spec: Arc, - ) -> RethResult<()> { + ) -> RethResult { let mut provider = provider_factory.provider_rw()?; // Hash accounts and insert them into hashing table. 
@@ -224,21 +225,21 @@ mod tests { }); provider.insert_storage_for_hashing(alloc_storage)?; - let (_, updates) = StateRoot::from_tx(provider.tx_ref()) + let (root, updates) = StateRoot::from_tx(provider.tx_ref()) .root_with_updates() .map_err(Into::::into)?; updates.flush(provider.tx_mut())?; provider.commit()?; - Ok(()) + Ok(root) } #[test] fn testspec_proofs() { // Create test database and insert genesis accounts. let factory = create_test_provider_factory(); - insert_genesis(&factory, TEST_SPEC.clone()).unwrap(); + let root = insert_genesis(&factory, TEST_SPEC.clone()).unwrap(); let data = Vec::from([ ( @@ -288,6 +289,7 @@ mod tests { expected_proof, "proof for {target:?} does not match" ); + assert_eq!(account_proof.verify(root), Ok(())); } } @@ -295,7 +297,7 @@ mod tests { fn testspec_empty_storage_proof() { // Create test database and insert genesis accounts. let factory = create_test_provider_factory(); - insert_genesis(&factory, TEST_SPEC.clone()).unwrap(); + let root = insert_genesis(&factory, TEST_SPEC.clone()).unwrap(); let target = Address::from_str("0x1ed9b1dd266b607ee278726d324b855a093394a6").unwrap(); let slots = Vec::from([B256::with_last_byte(1), B256::with_last_byte(3)]); @@ -306,15 +308,18 @@ mod tests { assert_eq!(slots.len(), account_proof.storage_proofs.len()); for (idx, slot) in slots.into_iter().enumerate() { - assert_eq!(account_proof.storage_proofs.get(idx), Some(&StorageProof::new(slot))); + let proof = account_proof.storage_proofs.get(idx).unwrap(); + assert_eq!(proof, &StorageProof::new(slot)); + assert_eq!(proof.verify(account_proof.storage_root), Ok(())); } + assert_eq!(account_proof.verify(root), Ok(())); } #[test] fn mainnet_genesis_account_proof() { // Create test database and insert genesis accounts. let factory = create_test_provider_factory(); - insert_genesis(&factory, MAINNET.clone()).unwrap(); + let root = insert_genesis(&factory, MAINNET.clone()).unwrap(); // Address from mainnet genesis allocation. 
// keccak256 - `0xcf67b71c90b0d523dd5004cf206f325748da347685071b34812e21801f5270c4` @@ -332,13 +337,14 @@ mod tests { let provider = factory.provider().unwrap(); let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &[]).unwrap(); similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); + assert_eq!(account_proof.verify(root), Ok(())); } #[test] fn mainnet_genesis_account_proof_nonexistent() { // Create test database and insert genesis accounts. let factory = create_test_provider_factory(); - insert_genesis(&factory, MAINNET.clone()).unwrap(); + let root = insert_genesis(&factory, MAINNET.clone()).unwrap(); // Address that does not exist in mainnet genesis allocation. // keccak256 - `0x18f415ffd7f66bb1924d90f0e82fb79ca8c6d8a3473cd9a95446a443b9db1761` @@ -354,13 +360,14 @@ mod tests { let provider = factory.provider().unwrap(); let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &[]).unwrap(); similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); + assert_eq!(account_proof.verify(root), Ok(())); } #[test] fn holesky_deposit_contract_proof() { // Create test database and insert genesis accounts. 
let factory = create_test_provider_factory(); - insert_genesis(&factory, HOLESKY.clone()).unwrap(); + let root = insert_genesis(&factory, HOLESKY.clone()).unwrap(); let target = Address::from_str("0x4242424242424242424242424242424242424242").unwrap(); // existent @@ -439,5 +446,6 @@ mod tests { let provider = factory.provider().unwrap(); let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &slots).unwrap(); similar_asserts::assert_eq!(account_proof, expected); + assert_eq!(account_proof.verify(root), Ok(())); } } From 2e2c8e1d63c00e92a972c6a414fddfa7c4241d41 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 14 May 2024 20:33:50 +0200 Subject: [PATCH 546/700] fix: allow to call V1 methods post-Shanghai (#8250) --- crates/engine-primitives/src/lib.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/crates/engine-primitives/src/lib.rs b/crates/engine-primitives/src/lib.rs index 99edf521c0b62..aa2c4468158ca 100644 --- a/crates/engine-primitives/src/lib.rs +++ b/crates/engine-primitives/src/lib.rs @@ -159,10 +159,6 @@ pub fn validate_withdrawals_presence( return Err(message_validation_kind .to_error(VersionSpecificValidationError::WithdrawalsNotSupportedInV1)) } - if is_shanghai_active { - return Err(message_validation_kind - .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) - } } EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 | EngineApiMessageVersion::V4 => { if is_shanghai_active && !has_withdrawals { From aa5c52b944ba47f59512700571a19d8ae877abb2 Mon Sep 17 00:00:00 2001 From: Vid Kersic <38610409+Vid201@users.noreply.github.com> Date: Wed, 15 May 2024 09:00:17 +0200 Subject: [PATCH 547/700] chore: add alloy-compat for signature and transaction (#8197) --- crates/primitives/src/alloy_compat.rs | 67 ++++++++++++++++++++++-- crates/primitives/src/transaction/mod.rs | 36 ------------- 2 files changed, 64 insertions(+), 39 deletions(-) diff --git a/crates/primitives/src/alloy_compat.rs 
b/crates/primitives/src/alloy_compat.rs index 8b4368a1211b4..6fdd477dd3bb4 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -1,8 +1,8 @@ //! Common conversions from alloy types. use crate::{ - Block, Header, Transaction, TransactionSigned, TxEip1559, TxEip2930, TxEip4844, TxLegacy, - TxType, + transaction::extract_chain_id, Block, Header, Signature, Transaction, TransactionSigned, + TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, TxLegacy, TxType, }; use alloy_primitives::TxKind; use alloy_rlp::Error as RlpError; @@ -115,7 +115,7 @@ impl TryFrom for Transaction { return Err(ConversionError::Eip2718Error( RlpError::Custom("EIP-1559 fields are present in a legacy transaction") .into(), - )) + )); } Ok(Transaction::Legacy(TxLegacy { chain_id: tx.chain_id, @@ -199,3 +199,64 @@ impl TryFrom for Transaction { } } } + +impl TryFrom for TransactionSigned { + type Error = alloy_rpc_types::ConversionError; + + fn try_from(tx: alloy_rpc_types::Transaction) -> Result { + use alloy_rpc_types::ConversionError; + + let signature = tx.signature.ok_or(ConversionError::MissingSignature)?; + let transaction: Transaction = tx.try_into()?; + + Ok(TransactionSigned::from_transaction_and_signature( + transaction.clone(), + Signature { + r: signature.r, + s: signature.s, + odd_y_parity: if let Some(y_parity) = signature.y_parity { + y_parity.0 + } else { + match transaction.tx_type() { + // If the transaction type is Legacy, adjust the v component of the + // signature according to the Ethereum specification + TxType::Legacy => { + extract_chain_id(signature.v.to()) + .map_err(|_| ConversionError::InvalidSignature)? 
+ .0 + } + _ => !signature.v.is_zero(), + } + }, + }, + )) + } +} + +impl TryFrom for TransactionSignedEcRecovered { + type Error = alloy_rpc_types::ConversionError; + + fn try_from(tx: alloy_rpc_types::Transaction) -> Result { + use alloy_rpc_types::ConversionError; + + let transaction: TransactionSigned = tx.try_into()?; + + transaction.try_into_ecrecovered().map_err(|_| ConversionError::InvalidSignature) + } +} + +impl TryFrom for Signature { + type Error = alloy_rpc_types::ConversionError; + + fn try_from(signature: alloy_rpc_types::Signature) -> Result { + use alloy_rpc_types::ConversionError; + + let odd_y_parity = if let Some(y_parity) = signature.y_parity { + y_parity.0 + } else { + extract_chain_id(signature.v.to()).map_err(|_| ConversionError::InvalidSignature)?.0 + }; + + Ok(Self { r: signature.r, s: signature.s, odd_y_parity }) + } +} diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 3117615e842d3..c441a32774111 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1615,42 +1615,6 @@ impl IntoRecoveredTransaction for TransactionSignedEcRecovered { } } -#[cfg(feature = "alloy-compat")] -impl TryFrom for TransactionSignedEcRecovered { - type Error = alloy_rpc_types::ConversionError; - - fn try_from(tx: alloy_rpc_types::Transaction) -> Result { - use alloy_rpc_types::ConversionError; - let signature = tx.signature.ok_or(ConversionError::MissingSignature)?; - - let transaction: Transaction = tx.try_into()?; - - TransactionSigned::from_transaction_and_signature( - transaction.clone(), - Signature { - r: signature.r, - s: signature.s, - odd_y_parity: if let Some(y_parity) = signature.y_parity { - y_parity.0 - } else { - match transaction.tx_type() { - // If the transaction type is Legacy, adjust the v component of the - // signature according to the Ethereum specification - TxType::Legacy => { - extract_chain_id(signature.v.to()) - .map_err(|_| 
ConversionError::InvalidSignature)? - .0 - } - _ => !signature.v.is_zero(), - } - }, - }, - ) - .try_into_ecrecovered() - .map_err(|_| ConversionError::InvalidSignature) - } -} - #[cfg(test)] mod tests { use crate::{ From e7d85e11dd9a14c81c3b8e872262fa000df83bd8 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 15 May 2024 10:28:21 +0300 Subject: [PATCH 548/700] chore: install apt-get deps before cargo deps (#8262) --- Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index f1cc4d804ee95..1dc54909ff9be 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,6 +4,9 @@ WORKDIR /app LABEL org.opencontainers.image.source=https://github.com/paradigmxyz/reth LABEL org.opencontainers.image.licenses="MIT OR Apache-2.0" +# Install system dependencies +RUN apt-get update && apt-get -y upgrade && apt-get install -y libclang-dev pkg-config + # Builds a cargo-chef plan FROM chef AS planner COPY . . @@ -24,9 +27,6 @@ ENV RUSTFLAGS "$RUSTFLAGS" ARG FEATURES="" ENV FEATURES $FEATURES -# Install system dependencies -RUN apt-get update && apt-get -y upgrade && apt-get install -y libclang-dev pkg-config - # Builds dependencies RUN cargo chef cook --profile $BUILD_PROFILE --features "$FEATURES" --recipe-path recipe.json From 79d505adb29a84674df6f293909fab2192af2431 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Wed, 15 May 2024 01:15:15 -0700 Subject: [PATCH 549/700] fix: Make L1 tx data fee calculation aware of Ecotone hardfork (#8268) --- crates/optimism/evm/src/l1.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 7b605448ff309..66093e857b4ab 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -190,7 +190,9 @@ impl RethL1BlockInfo for L1BlockInfo { return Ok(U256::ZERO) } - let spec_id = if chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, timestamp) { + let spec_id = if 
chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, timestamp) { + SpecId::ECOTONE + } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, timestamp) { SpecId::REGOLITH } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Bedrock, timestamp) { SpecId::BEDROCK From f0681947db048acaaef9bb56a557254be115fc08 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 15 May 2024 14:15:33 +0200 Subject: [PATCH 550/700] chore(rpc): cleanup rpc-layer (#8274) --- crates/rpc/rpc-layer/Cargo.toml | 20 ++++++++++++-------- crates/rpc/rpc-layer/src/lib.rs | 6 +----- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/crates/rpc/rpc-layer/Cargo.toml b/crates/rpc/rpc-layer/Cargo.toml index 21aa3f04971fd..dc4614b178bb3 100644 --- a/crates/rpc/rpc-layer/Cargo.toml +++ b/crates/rpc/rpc-layer/Cargo.toml @@ -6,24 +6,28 @@ rust-version.workspace = true license.workspace = true homepage.workspace = true repository.workspace = true -exclude.workspace = true + +[lints] +workspace = true [dependencies] +reth-primitives.workspace = true + http.workspace = true hyper.workspace = true tower.workspace = true http-body.workspace = true pin-project.workspace = true -tokio.workspace = true -jsonrpsee.workspace = true jsonwebtoken = "8" + rand.workspace = true -reth-primitives.workspace = true serde.workspace = true thiserror.workspace = true -tempfile.workspace = true -assert_matches.workspace = true tracing.workspace = true -[lints] -workspace = true +[dev-dependencies] +hyper = { workspace = true, features = ["client", "tcp"] } +assert_matches.workspace = true +tokio = { workspace = true, features = ["macros"] } +tempfile.workspace = true +jsonrpsee = { workspace = true, features = ["server"] } diff --git a/crates/rpc/rpc-layer/src/lib.rs b/crates/rpc/rpc-layer/src/lib.rs index 41310c3059bcb..dbe0700964194 100644 --- a/crates/rpc/rpc-layer/src/lib.rs +++ b/crates/rpc/rpc-layer/src/lib.rs @@ -1,4 +1,4 @@ -//! Reth RPC testing utilities. +//! 
Layer implementations used in RPC #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", @@ -8,11 +8,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use assert_matches as _; use http::{HeaderMap, Response}; -use jsonrpsee as _; -use tempfile as _; -use tokio as _; mod auth_client_layer; mod auth_layer; From 530dbdbb54ab4630fbf21682cb8dbf0667e924a5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 15 May 2024 14:16:01 +0200 Subject: [PATCH 551/700] chore: dont exclude crate (#8275) --- crates/etl/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/etl/Cargo.toml b/crates/etl/Cargo.toml index 07af6c72968f8..59811f97d2d0e 100644 --- a/crates/etl/Cargo.toml +++ b/crates/etl/Cargo.toml @@ -6,7 +6,6 @@ rust-version.workspace = true license.workspace = true homepage.workspace = true repository.workspace = true -exclude.workspace = true [dependencies] tempfile.workspace = true From aefcfff25fd3ec534b34337c9838c44ccdbab9b5 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 15 May 2024 17:19:41 +0300 Subject: [PATCH 552/700] feat(storage): implement `BundleStateDataProvider` for `BundleStateWithReceipts` (#8282) --- crates/blockchain-tree/src/bundle.rs | 6 +++- crates/blockchain-tree/src/chain.rs | 4 +-- crates/blockchain-tree/src/noop.rs | 6 ++-- crates/blockchain-tree/src/shareable.rs | 4 +-- .../bundle_state_with_receipts.rs | 20 +++++++++-- crates/storage/provider/src/providers/mod.rs | 16 ++++----- .../storage/provider/src/test_utils/mock.rs | 8 ++--- .../storage/provider/src/test_utils/noop.rs | 2 +- crates/storage/provider/src/traits/mod.rs | 5 +-- crates/storage/provider/src/traits/state.rs | 34 ++++++++++++++----- 10 files changed, 71 insertions(+), 34 deletions(-) diff --git a/crates/blockchain-tree/src/bundle.rs b/crates/blockchain-tree/src/bundle.rs index d8c93439e5b17..ccfbc2adc31c1 100644 --- 
a/crates/blockchain-tree/src/bundle.rs +++ b/crates/blockchain-tree/src/bundle.rs @@ -1,7 +1,7 @@ //! [BundleStateDataProvider] implementations used by the tree. use reth_primitives::{BlockHash, BlockNumber, ForkBlock}; -use reth_provider::{BundleStateDataProvider, BundleStateWithReceipts}; +use reth_provider::{BundleStateDataProvider, BundleStateForkProvider, BundleStateWithReceipts}; use std::collections::BTreeMap; /// Structure that combines references of required data to be a [`BundleStateDataProvider`]. @@ -30,7 +30,9 @@ impl<'a> BundleStateDataProvider for BundleStateDataRef<'a> { self.canonical_block_hashes.get(&block_number).cloned() } +} +impl<'a> BundleStateForkProvider for BundleStateDataRef<'a> { fn canonical_fork(&self) -> ForkBlock { self.canonical_fork } @@ -57,7 +59,9 @@ impl BundleStateDataProvider for BundleStateData { fn block_hash(&self, block_number: BlockNumber) -> Option { self.parent_block_hashes.get(&block_number).cloned() } +} +impl BundleStateForkProvider for BundleStateData { fn canonical_fork(&self) -> ForkBlock { self.canonical_fork } diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index db4b4627abe7e..ce6487a060b93 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -21,7 +21,7 @@ use reth_primitives::{ }; use reth_provider::{ providers::{BundleStateProvider, ConsistentDbView}, - BundleStateDataProvider, BundleStateWithReceipts, Chain, ProviderError, StateRootProvider, + BundleStateWithReceipts, Chain, FullBundleStateDataProvider, ProviderError, StateRootProvider, }; use reth_revm::database::StateProviderDatabase; use reth_trie::updates::TrieUpdates; @@ -178,7 +178,7 @@ impl AppendableChain { block_validation_kind: BlockValidationKind, ) -> RethResult<(BundleStateWithReceipts, Option)> where - BSDP: BundleStateDataProvider, + BSDP: FullBundleStateDataProvider, DB: Database + Clone, E: BlockExecutorProvider, { diff --git 
a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index 776a153250bca..18423d3bb7e63 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -12,8 +12,8 @@ use reth_primitives::{ SealedHeader, }; use reth_provider::{ - BlockchainTreePendingStateProvider, BundleStateDataProvider, CanonStateNotificationSender, - CanonStateNotifications, CanonStateSubscriptions, + BlockchainTreePendingStateProvider, CanonStateNotificationSender, CanonStateNotifications, + CanonStateSubscriptions, FullBundleStateDataProvider, }; use std::collections::{BTreeMap, HashSet}; @@ -138,7 +138,7 @@ impl BlockchainTreePendingStateProvider for NoopBlockchainTree { fn find_pending_state_provider( &self, _block_hash: BlockHash, - ) -> Option> { + ) -> Option> { None } } diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 77cc53c2d3096..66f76b0916f1b 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -17,7 +17,7 @@ use reth_primitives::{ SealedHeader, }; use reth_provider::{ - BlockchainTreePendingStateProvider, BundleStateDataProvider, CanonStateSubscriptions, + BlockchainTreePendingStateProvider, CanonStateSubscriptions, FullBundleStateDataProvider, ProviderError, }; use std::{ @@ -199,7 +199,7 @@ where fn find_pending_state_provider( &self, block_hash: BlockHash, - ) -> Option> { + ) -> Option> { trace!(target: "blockchain_tree", ?block_hash, "Finding pending state provider"); let provider = self.tree.read().post_state_data(block_hash)?; Some(Box::new(provider)) diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index 5f6d4af3f843b..947c6609b961d 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -1,4 +1,7 @@ -use 
crate::{providers::StaticFileProviderRWRefMut, StateChanges, StateReverts, StateWriter}; +use crate::{ + providers::StaticFileProviderRWRefMut, BundleStateDataProvider, StateChanges, StateReverts, + StateWriter, +}; use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, tables, @@ -9,8 +12,8 @@ use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ logs_bloom, revm::compat::{into_reth_acc, into_revm_acc}, - Account, Address, BlockNumber, Bloom, Bytecode, Log, Receipt, Receipts, StaticFileSegment, - StorageEntry, B256, U256, + Account, Address, BlockHash, BlockNumber, Bloom, Bytecode, Log, Receipt, Receipts, + StaticFileSegment, StorageEntry, B256, U256, }; use reth_trie::HashedPostState; pub use revm::db::states::OriginalValuesKnown; @@ -365,6 +368,17 @@ impl StateWriter for BundleStateWithReceipts { } } +impl BundleStateDataProvider for BundleStateWithReceipts { + fn state(&self) -> &BundleStateWithReceipts { + self + } + + /// Always returns [None] because we don't have any information about the block header. 
+ fn block_hash(&self, _block_number: BlockNumber) -> Option { + None + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 8a06f0c0d2044..f9969a9500dcc 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -1,11 +1,11 @@ use crate::{ AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - BlockSource, BlockchainTreePendingStateProvider, BundleStateDataProvider, CanonChainTracker, - CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - DatabaseProviderFactory, EvmEnvProvider, HeaderProvider, ProviderError, PruneCheckpointReader, - ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, - StateProviderFactory, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, - TreeViewer, WithdrawalsProvider, + BlockSource, BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotifications, + CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, + EvmEnvProvider, FullBundleStateDataProvider, HeaderProvider, ProviderError, + PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, + StateProviderBox, StateProviderFactory, StaticFileProviderFactory, TransactionVariant, + TransactionsProvider, TreeViewer, WithdrawalsProvider, }; use reth_db::{ database::Database, @@ -638,7 +638,7 @@ where fn pending_with_provider( &self, - bundle_state_data: Box, + bundle_state_data: Box, ) -> ProviderResult { let canonical_fork = bundle_state_data.canonical_fork(); trace!(target: "providers::blockchain", ?canonical_fork, "Returning post state provider"); @@ -871,7 +871,7 @@ where fn find_pending_state_provider( &self, block_hash: BlockHash, - ) -> Option> { + ) -> Option> { self.tree.find_pending_state_provider(block_hash) } } diff --git 
a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 96e137ac6fdf9..893bd052d0b31 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -1,9 +1,9 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - BundleStateDataProvider, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, - ReceiptProviderIdExt, StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, - TransactionVariant, TransactionsProvider, WithdrawalsProvider, + ChainSpecProvider, ChangeSetReader, EvmEnvProvider, FullBundleStateDataProvider, + HeaderProvider, ReceiptProviderIdExt, StateProvider, StateProviderBox, StateProviderFactory, + StateRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use parking_lot::Mutex; use reth_db::models::{AccountBeforeTx, StoredBlockBodyIndices}; @@ -660,7 +660,7 @@ impl StateProviderFactory for MockEthProvider { fn pending_with_provider<'a>( &'a self, - _bundle_state_data: Box, + _bundle_state_data: Box, ) -> ProviderResult { Ok(Box::new(self.clone())) } diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 373dc4d7e5f09..02890eaf1935a 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -419,7 +419,7 @@ impl StateProviderFactory for NoopProvider { fn pending_with_provider<'a>( &'a self, - _bundle_state_data: Box, + _bundle_state_data: Box, ) -> ProviderResult { Ok(Box::new(*self)) } diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index c966cd9efa09d..10984240a5d84 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -35,8 +35,9 @@ pub use receipts::{ReceiptProvider, 
ReceiptProviderIdExt}; mod state; pub use state::{ - BlockchainTreePendingStateProvider, BundleStateDataProvider, StateProvider, StateProviderBox, - StateProviderFactory, StateWriter, + BlockchainTreePendingStateProvider, BundleStateDataProvider, BundleStateForkProvider, + FullBundleStateDataProvider, StateProvider, StateProviderBox, StateProviderFactory, + StateWriter, }; mod trie; diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index 4cb74dec61cbf..ac72d52f9f449 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -187,7 +187,7 @@ pub trait StateProviderFactory: BlockIdReader + Send + Sync { /// Used to inspect or execute transaction on the pending state. fn pending_with_provider( &self, - bundle_state_data: Box, + bundle_state_data: Box, ) -> ProviderResult; } @@ -201,7 +201,7 @@ pub trait BlockchainTreePendingStateProvider: Send + Sync { fn pending_state_provider( &self, block_hash: BlockHash, - ) -> ProviderResult> { + ) -> ProviderResult> { self.find_pending_state_provider(block_hash) .ok_or(ProviderError::StateForHashNotFound(block_hash)) } @@ -210,28 +210,46 @@ pub trait BlockchainTreePendingStateProvider: Send + Sync { fn find_pending_state_provider( &self, block_hash: BlockHash, - ) -> Option>; + ) -> Option>; } -/// Post state data needs for execution on it. -/// This trait is used to create a state provider over pending state. +/// Post state data needed for execution on it. /// -/// Pending state contains: +/// State contains: /// * [`BundleStateWithReceipts`] contains all changed of accounts and storage of pending chain /// * block hashes of pending chain and canonical blocks. -/// * canonical fork, the block on what pending chain was forked from. 
#[auto_impl(&, Box)] pub trait BundleStateDataProvider: Send + Sync { /// Return post state fn state(&self) -> &BundleStateWithReceipts; /// Return block hash by block number of pending or canonical chain. fn block_hash(&self, block_number: BlockNumber) -> Option; - /// return canonical fork, the block on what post state was forked from. +} + +/// Fork data needed for execution on it. +/// +/// It contains a canonical fork, the block on what pending chain was forked from. +#[auto_impl(&, Box)] +pub trait BundleStateForkProvider { + /// Return canonical fork, the block on what post state was forked from. /// /// Needed to create state provider. fn canonical_fork(&self) -> BlockNumHash; } +/// Full post state data needed for execution on it. +/// This trait is used to create a state provider over pending state. +/// +/// This trait is a combination of [`BundleStateDataProvider`] and [`BundleStateForkProvider`]. +/// +/// Pending state contains: +/// * [`BundleStateWithReceipts`] contains all changed of accounts and storage of pending chain +/// * block hashes of pending chain and canonical blocks. +/// * canonical fork, the block on what pending chain was forked from. +pub trait FullBundleStateDataProvider: BundleStateDataProvider + BundleStateForkProvider {} + +impl FullBundleStateDataProvider for T where T: BundleStateDataProvider + BundleStateForkProvider {} + /// A helper trait for [BundleStateWithReceipts] to write state and receipts to storage. 
pub trait StateWriter { /// Write the data and receipts to the database or static files if `static_file_producer` is From 0edf3509a96d1b1eef5108204c3ebf810b1b5058 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Thu, 16 May 2024 13:31:15 +0200 Subject: [PATCH 553/700] chore: remove unused file muxdemux.rs (#8287) --- crates/net/eth-wire/src/errors/eth.rs | 10 +- crates/net/eth-wire/src/errors/mod.rs | 2 - crates/net/eth-wire/src/errors/muxdemux.rs | 47 -- crates/net/eth-wire/src/lib.rs | 2 - crates/net/eth-wire/src/muxdemux.rs | 581 --------------------- 5 files changed, 1 insertion(+), 641 deletions(-) delete mode 100644 crates/net/eth-wire/src/errors/muxdemux.rs delete mode 100644 crates/net/eth-wire/src/muxdemux.rs diff --git a/crates/net/eth-wire/src/errors/eth.rs b/crates/net/eth-wire/src/errors/eth.rs index 04c23a253e7bd..c9bf39882b83a 100644 --- a/crates/net/eth-wire/src/errors/eth.rs +++ b/crates/net/eth-wire/src/errors/eth.rs @@ -1,10 +1,7 @@ //! Error handling for (`EthStream`)[crate::EthStream] use crate::{ - errors::{MuxDemuxError, P2PStreamError}, - message::MessageError, - version::ParseVersionError, - DisconnectReason, + errors::P2PStreamError, message::MessageError, version::ParseVersionError, DisconnectReason, }; use reth_primitives::{Chain, GotExpected, GotExpectedBoxed, ValidationError, B256}; use std::io; @@ -16,9 +13,6 @@ pub enum EthStreamError { /// Error of the underlying P2P connection. P2PStreamError(#[from] P2PStreamError), #[error(transparent)] - /// Error of the underlying de-/muxed P2P connection. - MuxDemuxError(#[from] MuxDemuxError), - #[error(transparent)] /// Failed to parse peer's version. 
ParseVersionError(#[from] ParseVersionError), #[error(transparent)] @@ -52,8 +46,6 @@ impl EthStreamError { pub fn as_disconnected(&self) -> Option { if let EthStreamError::P2PStreamError(err) = self { err.as_disconnected() - } else if let EthStreamError::MuxDemuxError(MuxDemuxError::P2PStreamError(err)) = self { - err.as_disconnected() } else { None } diff --git a/crates/net/eth-wire/src/errors/mod.rs b/crates/net/eth-wire/src/errors/mod.rs index c231e48608e64..be3f8ced7f4f1 100644 --- a/crates/net/eth-wire/src/errors/mod.rs +++ b/crates/net/eth-wire/src/errors/mod.rs @@ -1,9 +1,7 @@ //! Error types for stream variants mod eth; -mod muxdemux; mod p2p; pub use eth::*; -pub use muxdemux::*; pub use p2p::*; diff --git a/crates/net/eth-wire/src/errors/muxdemux.rs b/crates/net/eth-wire/src/errors/muxdemux.rs deleted file mode 100644 index 74ca6e2fcf4cc..0000000000000 --- a/crates/net/eth-wire/src/errors/muxdemux.rs +++ /dev/null @@ -1,47 +0,0 @@ -use thiserror::Error; - -use crate::capability::{SharedCapabilityError, UnsupportedCapabilityError}; - -use super::P2PStreamError; - -/// Errors thrown by de-/muxing. -#[derive(Error, Debug)] -pub enum MuxDemuxError { - /// Error of the underlying P2P connection. - #[error(transparent)] - P2PStreamError(#[from] P2PStreamError), - /// Stream is in use by secondary stream impeding disconnect. - #[error("secondary streams are still running")] - StreamInUse, - /// Stream has already been set up for this capability stream type. - #[error("stream already init for stream type")] - StreamAlreadyExists, - /// Capability stream type is not shared with peer on underlying p2p connection. - #[error("stream type is not shared on this p2p connection")] - CapabilityNotShared, - /// Capability stream type has not been configured in [`crate::muxdemux::MuxDemuxer`]. - #[error("stream type is not configured")] - CapabilityNotConfigured, - /// Capability stream type has not been configured for - /// [`crate::capability::SharedCapabilities`] type. 
- #[error("stream type is not recognized")] - CapabilityNotRecognized, - /// Message ID is out of range. - #[error("message id out of range, {0}")] - MessageIdOutOfRange(u8), - /// Demux channel failed. - #[error("sending demuxed bytes to secondary stream failed")] - SendIngressBytesFailed, - /// Mux channel failed. - #[error("sending bytes from secondary stream to mux failed")] - SendEgressBytesFailed, - /// Attempt to disconnect the p2p stream via a stream clone. - #[error("secondary stream cannot disconnect p2p stream")] - CannotDisconnectP2PStream, - /// Shared capability error. - #[error(transparent)] - SharedCapabilityError(#[from] SharedCapabilityError), - /// Capability not supported on the p2p connection. - #[error(transparent)] - UnsupportedCapabilityError(#[from] UnsupportedCapabilityError), -} diff --git a/crates/net/eth-wire/src/lib.rs b/crates/net/eth-wire/src/lib.rs index e09ba95188071..3830baa1b7e55 100644 --- a/crates/net/eth-wire/src/lib.rs +++ b/crates/net/eth-wire/src/lib.rs @@ -21,7 +21,6 @@ pub mod errors; mod ethstream; mod hello; pub mod multiplex; -pub mod muxdemux; mod p2pstream; mod pinger; pub mod protocol; @@ -39,7 +38,6 @@ pub use crate::{ disconnect::{CanDisconnect, DisconnectReason}, ethstream::{EthStream, UnauthedEthStream, MAX_MESSAGE_SIZE}, hello::{HelloMessage, HelloMessageBuilder, HelloMessageWithProtocols}, - muxdemux::{MuxDemuxStream, StreamClone}, p2pstream::{ DisconnectP2P, P2PMessage, P2PMessageID, P2PStream, ProtocolVersion, UnauthedP2PStream, MAX_RESERVED_MESSAGE_ID, diff --git a/crates/net/eth-wire/src/muxdemux.rs b/crates/net/eth-wire/src/muxdemux.rs deleted file mode 100644 index 18112346ea31e..0000000000000 --- a/crates/net/eth-wire/src/muxdemux.rs +++ /dev/null @@ -1,581 +0,0 @@ -//! [`MuxDemuxer`] allows for multiple capability streams to share the same p2p connection. De-/ -//! muxing the connection offers two stream types [`MuxDemuxStream`] and [`StreamClone`]. -//! 
[`MuxDemuxStream`] is the main stream that wraps the p2p connection, only this stream can -//! advance transfer across the network. One [`MuxDemuxStream`] can have many [`StreamClone`]s, -//! these are weak clones of the stream and depend on advancing the [`MuxDemuxStream`] to make -//! progress. -//! -//! [`MuxDemuxer`] filters bytes according to message ID offset. The message ID offset is -//! negotiated upon start of the p2p connection. Bytes received by polling the [`MuxDemuxStream`] -//! or a [`StreamClone`] are specific to the capability stream wrapping it. When received the -//! message IDs are unmasked so that all message IDs start at 0x0. [`MuxDemuxStream`] and -//! [`StreamClone`] mask message IDs before sinking bytes to the [`MuxDemuxer`]. -//! -//! For example, `EthStream>>` is the main capability stream. -//! Subsequent capability streams clone the p2p connection via EthStream. -//! -//! When [`MuxDemuxStream`] is polled, [`MuxDemuxer`] receives bytes from the network. If these -//! bytes belong to the capability stream wrapping the [`MuxDemuxStream`] then they are passed up -//! directly. If these bytes however belong to another capability stream, then they are buffered -//! on a channel. When [`StreamClone`] is polled, bytes are read from this buffer. Similarly -//! [`StreamClone`] buffers egress bytes for [`MuxDemuxer`] that are read and sent to the network -//! when [`MuxDemuxStream`] is polled. 
- -use crate::{ - capability::{Capability, SharedCapabilities, SharedCapability}, - errors::MuxDemuxError, - CanDisconnect, DisconnectP2P, DisconnectReason, -}; -use derive_more::{Deref, DerefMut}; -use futures::{Sink, SinkExt, StreamExt}; -use reth_primitives::bytes::{Bytes, BytesMut}; -use std::{ - collections::HashMap, - pin::Pin, - task::{ready, Context, Poll}, -}; -use tokio::sync::mpsc; -use tokio_stream::Stream; - -use MuxDemuxError::*; - -/// Stream MUX/DEMUX acts like a regular stream and sink for the owning stream, and handles bytes -/// belonging to other streams over their respective channels. -#[derive(Debug)] -pub struct MuxDemuxer { - // receive and send muxed p2p outputs - inner: S, - // owner of the stream. stores message id offset for this capability. - owner: SharedCapability, - // receive muxed p2p inputs from stream clones - mux: mpsc::UnboundedReceiver, - // send demuxed p2p outputs to app - demux: HashMap>, - // sender to mux stored to make new stream clones - mux_tx: mpsc::UnboundedSender, - // capabilities supported by underlying p2p stream (makes testing easier to store here too). - shared_capabilities: SharedCapabilities, -} - -/// The main stream on top of the p2p stream. Wraps [`MuxDemuxer`] and enforces it can't be dropped -/// before all secondary streams are dropped (stream clones). -#[derive(Debug, Deref, DerefMut)] -pub struct MuxDemuxStream(MuxDemuxer); - -impl MuxDemuxStream { - /// Creates a new [`MuxDemuxer`]. - pub fn try_new( - inner: S, - cap: Capability, - shared_capabilities: SharedCapabilities, - ) -> Result { - let owner = Self::shared_cap(&cap, &shared_capabilities)?.clone(); - - let demux = HashMap::new(); - let (mux_tx, mux) = mpsc::unbounded_channel(); - - Ok(Self(MuxDemuxer { inner, owner, mux, demux, mux_tx, shared_capabilities })) - } - - /// Clones the stream if the given capability stream type is shared on the underlying p2p - /// connection. 
- pub fn try_clone_stream(&mut self, cap: &Capability) -> Result { - let cap = self.shared_capabilities.ensure_matching_capability(cap)?.clone(); - let ingress = self.reg_new_ingress_buffer(&cap)?; - let mux_tx = self.mux_tx.clone(); - - Ok(StreamClone { stream: ingress, sink: mux_tx, cap }) - } - - /// Starts a graceful disconnect. - pub fn start_disconnect(&mut self, reason: DisconnectReason) -> Result<(), MuxDemuxError> - where - S: DisconnectP2P, - { - if !self.can_drop() { - return Err(StreamInUse) - } - - self.inner.start_disconnect(reason).map_err(|e| e.into()) - } - - /// Returns `true` if the connection is about to disconnect. - pub fn is_disconnecting(&self) -> bool - where - S: DisconnectP2P, - { - self.inner.is_disconnecting() - } - - /// Shared capabilities of underlying p2p connection as negotiated by peers at connection - /// open. - pub fn shared_capabilities(&self) -> &SharedCapabilities { - &self.shared_capabilities - } - - fn shared_cap<'a>( - cap: &Capability, - shared_capabilities: &'a SharedCapabilities, - ) -> Result<&'a SharedCapability, MuxDemuxError> { - for shared_cap in shared_capabilities.iter_caps() { - match shared_cap { - SharedCapability::Eth { .. } if cap.is_eth() => return Ok(shared_cap), - SharedCapability::UnknownCapability { cap: unknown_cap, .. 
} - if cap == unknown_cap => - { - return Ok(shared_cap) - } - _ => continue, - } - } - - Err(CapabilityNotShared) - } - - fn reg_new_ingress_buffer( - &mut self, - cap: &SharedCapability, - ) -> Result, MuxDemuxError> { - if let Some(tx) = self.demux.get(cap) { - if !tx.is_closed() { - return Err(StreamAlreadyExists) - } - } - let (ingress_tx, ingress) = mpsc::unbounded_channel(); - self.demux.insert(cap.clone(), ingress_tx); - - Ok(ingress) - } - - fn unmask_msg_id(&self, id: &mut u8) -> Result<&SharedCapability, MuxDemuxError> { - for cap in self.shared_capabilities.iter_caps() { - let offset = cap.relative_message_id_offset(); - let next_offset = offset + cap.num_messages(); - if *id < next_offset { - *id -= offset; - return Ok(cap) - } - } - - Err(MessageIdOutOfRange(*id)) - } - - /// Masks message id with offset relative to the message id suffix reserved for capability - /// message ids. The p2p stream further masks the message id - fn mask_msg_id(&self, msg: Bytes) -> Bytes { - let mut masked_bytes = BytesMut::with_capacity(msg.len()); - masked_bytes.extend_from_slice(&msg); - masked_bytes[0] += self.owner.relative_message_id_offset(); - masked_bytes.freeze() - } - - /// Checks if all clones of this shared stream have been dropped, if true then returns // - /// function to drop the stream. - fn can_drop(&self) -> bool { - for tx in self.demux.values() { - if !tx.is_closed() { - return false - } - } - - true - } -} - -impl Stream for MuxDemuxStream -where - S: Stream> + CanDisconnect + Unpin, - MuxDemuxError: From + From<>::Error>, -{ - type Item = Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut send_count = 0; - let mut mux_exhausted = false; - - loop { - // send buffered bytes from `StreamClone`s. try send at least as many messages as - // there are stream clones. 
- if self.inner.poll_ready_unpin(cx).is_ready() { - if let Poll::Ready(Some(item)) = self.mux.poll_recv(cx) { - self.inner.start_send_unpin(item)?; - if send_count < self.demux.len() { - send_count += 1; - continue - } - } else { - mux_exhausted = true; - } - } - - // advances the wire and either yields message for the owner or delegates message to a - // stream clone - let res = self.inner.poll_next_unpin(cx); - if res.is_pending() { - // no message is received. continue to send messages from stream clones as long as - // there are messages left to send. - if !mux_exhausted && self.inner.poll_ready_unpin(cx).is_ready() { - continue - } - // flush before returning pending - _ = self.inner.poll_flush_unpin(cx)?; - } - let mut bytes = match ready!(res) { - Some(Ok(bytes)) => bytes, - Some(Err(err)) => { - _ = self.inner.poll_flush_unpin(cx)?; - return Poll::Ready(Some(Err(err.into()))) - } - None => { - _ = self.inner.poll_flush_unpin(cx)?; - return Poll::Ready(None) - } - }; - - // normalize message id suffix for capability - let cap = self.unmask_msg_id(&mut bytes[0])?; - - // yield message for main stream - if *cap == self.owner { - _ = self.inner.poll_flush_unpin(cx)?; - return Poll::Ready(Some(Ok(bytes))) - } - - // delegate message for stream clone - let tx = self.demux.get(cap).ok_or(CapabilityNotConfigured)?; - tx.send(bytes).map_err(|_| SendIngressBytesFailed)?; - } - } -} - -impl Sink for MuxDemuxStream -where - S: Sink + CanDisconnect + Unpin, - MuxDemuxError: From, -{ - type Error = MuxDemuxError; - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.inner.poll_ready_unpin(cx).map_err(Into::into) - } - - fn start_send(mut self: Pin<&mut Self>, item: Bytes) -> Result<(), Self::Error> { - let item = self.mask_msg_id(item); - self.inner.start_send_unpin(item).map_err(|e| e.into()) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.inner.poll_flush_unpin(cx).map_err(Into::into) - } - - fn 
poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - while let Ok(item) = self.mux.try_recv() { - self.inner.start_send_unpin(item)?; - } - _ = self.inner.poll_flush_unpin(cx)?; - - self.inner.poll_close_unpin(cx).map_err(Into::into) - } -} - -impl CanDisconnect for MuxDemuxStream -where - S: Sink + CanDisconnect + Unpin + Send + Sync, - MuxDemuxError: From, -{ - async fn disconnect(&mut self, reason: DisconnectReason) -> Result<(), MuxDemuxError> { - if self.can_drop() { - return self.inner.disconnect(reason).await.map_err(Into::into) - } - Err(StreamInUse) - } -} - -/// More or less a weak clone of the stream wrapped in [`MuxDemuxer`] but the bytes belonging to -/// other capabilities have been filtered out. -#[derive(Debug)] -pub struct StreamClone { - // receive bytes from de-/muxer - stream: mpsc::UnboundedReceiver, - // send bytes to de-/muxer - sink: mpsc::UnboundedSender, - // message id offset for capability holding this clone - cap: SharedCapability, -} - -impl StreamClone { - fn mask_msg_id(&self, msg: Bytes) -> Bytes { - let mut masked_bytes = BytesMut::with_capacity(msg.len()); - masked_bytes.extend_from_slice(&msg); - masked_bytes[0] += self.cap.relative_message_id_offset(); - masked_bytes.freeze() - } -} - -impl Stream for StreamClone { - type Item = BytesMut; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.stream.poll_recv(cx) - } -} - -impl Sink for StreamClone { - type Error = MuxDemuxError; - - fn poll_ready(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn start_send(self: Pin<&mut Self>, item: Bytes) -> Result<(), Self::Error> { - let item = self.mask_msg_id(item); - self.sink.send(item).map_err(|_| SendEgressBytesFailed) - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } -} - -impl CanDisconnect for 
StreamClone { - async fn disconnect(&mut self, _reason: DisconnectReason) -> Result<(), MuxDemuxError> { - Err(CannotDisconnectP2PStream) - } -} - -#[cfg(test)] -mod tests { - use crate::{ - capability::{Capability, SharedCapabilities}, - muxdemux::MuxDemuxStream, - protocol::Protocol, - EthVersion, HelloMessageWithProtocols, Status, StatusBuilder, StreamClone, - UnauthedEthStream, UnauthedP2PStream, - }; - use futures::{Future, SinkExt, StreamExt}; - use reth_network_types::pk2id; - use reth_primitives::{ - bytes::{BufMut, Bytes, BytesMut}, - ForkFilter, Hardfork, MAINNET, - }; - use secp256k1::{SecretKey, SECP256K1}; - use std::{net::SocketAddr, pin::Pin}; - use tokio::{ - net::{TcpListener, TcpStream}, - task::JoinHandle, - }; - use tokio_util::codec::{Decoder, Framed, LengthDelimitedCodec}; - - const ETH_68_CAP: Capability = Capability::eth(EthVersion::Eth68); - const ETH_68_PROTOCOL: Protocol = Protocol::new(ETH_68_CAP, 13); - const CUSTOM_CAP: Capability = Capability::new_static("snap", 1); - const CUSTOM_CAP_PROTOCOL: Protocol = Protocol::new(CUSTOM_CAP, 10); - // message IDs `0x00` and `0x01` are normalized for the custom protocol stream - const CUSTOM_REQUEST: [u8; 5] = [0x00, 0x00, 0x01, 0x0, 0xc0]; - const CUSTOM_RESPONSE: [u8; 5] = [0x01, 0x00, 0x01, 0x0, 0xc0]; - - fn shared_caps_eth68() -> SharedCapabilities { - let local_capabilities: Vec = vec![ETH_68_PROTOCOL]; - let peer_capabilities: Vec = vec![ETH_68_CAP]; - SharedCapabilities::try_new(local_capabilities, peer_capabilities).unwrap() - } - - fn shared_caps_eth68_and_custom() -> SharedCapabilities { - let local_capabilities: Vec = vec![ETH_68_PROTOCOL, CUSTOM_CAP_PROTOCOL]; - let peer_capabilities: Vec = vec![ETH_68_CAP, CUSTOM_CAP]; - SharedCapabilities::try_new(local_capabilities, peer_capabilities).unwrap() - } - - struct ConnectionBuilder { - local_addr: SocketAddr, - local_hello: HelloMessageWithProtocols, - status: Status, - fork_filter: ForkFilter, - } - - impl ConnectionBuilder { - fn 
new() -> Self { - let (_secret_key, pk) = SECP256K1.generate_keypair(&mut rand::thread_rng()); - - let hello = HelloMessageWithProtocols::builder(pk2id(&pk)) - .protocol(ETH_68_PROTOCOL) - .protocol(CUSTOM_CAP_PROTOCOL) - .build(); - - let local_addr = "127.0.0.1:30303".parse().unwrap(); - - Self { - local_hello: hello, - local_addr, - status: StatusBuilder::default().build(), - fork_filter: MAINNET - .hardfork_fork_filter(Hardfork::Frontier) - .expect("The Frontier fork filter should exist on mainnet"), - } - } - - /// Connects a custom sub protocol stream and executes the given closure with that - /// established stream (main stream is eth). - fn with_connect_custom_protocol( - self, - f_local: F, - f_remote: G, - ) -> (JoinHandle, JoinHandle) - where - F: FnOnce(StreamClone) -> Pin + Send)>> - + Send - + Sync - + 'static, - G: FnOnce(StreamClone) -> Pin + Send)>> - + Send - + Sync - + 'static, - { - let local_addr = self.local_addr; - - let local_hello = self.local_hello.clone(); - let status = self.status; - let fork_filter = self.fork_filter.clone(); - - let local_handle = tokio::spawn(async move { - let local_listener = TcpListener::bind(local_addr).await.unwrap(); - let (incoming, _) = local_listener.accept().await.unwrap(); - let stream = crate::PassthroughCodec::default().framed(incoming); - - let protocol_proxy = - connect_protocol(stream, local_hello, status, fork_filter).await; - - f_local(protocol_proxy).await - }); - - let remote_key = SecretKey::new(&mut rand::thread_rng()); - let remote_id = pk2id(&remote_key.public_key(SECP256K1)); - let mut remote_hello = self.local_hello.clone(); - remote_hello.id = remote_id; - let fork_filter = self.fork_filter; - - let remote_handle = tokio::spawn(async move { - let outgoing = TcpStream::connect(local_addr).await.unwrap(); - let stream = crate::PassthroughCodec::default().framed(outgoing); - - let protocol_proxy = - connect_protocol(stream, remote_hello, status, fork_filter).await; - - 
f_remote(protocol_proxy).await - }); - - (local_handle, remote_handle) - } - } - - async fn connect_protocol( - stream: Framed, - hello: HelloMessageWithProtocols, - status: Status, - fork_filter: ForkFilter, - ) -> StreamClone { - let unauthed_stream = UnauthedP2PStream::new(stream); - let (p2p_stream, _) = unauthed_stream.handshake(hello).await.unwrap(); - - // ensure that the two share capabilities - assert_eq!(*p2p_stream.shared_capabilities(), shared_caps_eth68_and_custom(),); - - let shared_caps = p2p_stream.shared_capabilities().clone(); - let main_cap = shared_caps.eth().unwrap(); - let proxy_server = - MuxDemuxStream::try_new(p2p_stream, main_cap.capability().into_owned(), shared_caps) - .expect("should start mxdmx stream"); - - let (mut main_stream, _) = - UnauthedEthStream::new(proxy_server).handshake(status, fork_filter).await.unwrap(); - - let protocol_proxy = - main_stream.inner_mut().try_clone_stream(&CUSTOM_CAP).expect("should clone stream"); - - tokio::spawn(async move { - loop { - _ = main_stream.next().await.unwrap() - } - }); - - protocol_proxy - } - - #[test] - fn test_unmask_msg_id() { - let mut msg = Vec::with_capacity(1); - msg.put_u8(0x07); // eth msg id - - let mxdmx_stream = - MuxDemuxStream::try_new((), Capability::eth(EthVersion::Eth67), shared_caps_eth68()) - .unwrap(); - _ = mxdmx_stream.unmask_msg_id(&mut msg[0]).unwrap(); - - assert_eq!(msg.as_slice(), &[0x07]); - } - - #[test] - fn test_mask_msg_id() { - let mut msg = Vec::with_capacity(2); - msg.put_u8(0x10); // eth msg id - msg.put_u8(0x20); // some msg data - - let mxdmx_stream = - MuxDemuxStream::try_new((), Capability::eth(EthVersion::Eth66), shared_caps_eth68()) - .unwrap(); - let egress_bytes = mxdmx_stream.mask_msg_id(msg.into()); - - assert_eq!(egress_bytes.as_ref(), &[0x10, 0x20]); - } - - #[test] - fn test_unmask_msg_id_cap_not_in_shared_range() { - let mut msg = BytesMut::with_capacity(1); - msg.put_u8(0x11); - - let mxdmx_stream = - MuxDemuxStream::try_new((), 
Capability::eth(EthVersion::Eth68), shared_caps_eth68()) - .unwrap(); - - assert!(mxdmx_stream.unmask_msg_id(&mut msg[0]).is_err()); - } - - #[tokio::test(flavor = "multi_thread")] - async fn test_mux_demux() { - let builder = ConnectionBuilder::new(); - - let request = Bytes::from(&CUSTOM_REQUEST[..]); - let response = Bytes::from(&CUSTOM_RESPONSE[..]); - let expected_request = request.clone(); - let expected_response = response.clone(); - - let (local_handle, remote_handle) = builder.with_connect_custom_protocol( - // send request from local addr - |mut protocol_proxy| { - Box::pin(async move { - protocol_proxy.send(request).await.unwrap(); - protocol_proxy.next().await.unwrap() - }) - }, - // respond from remote addr - |mut protocol_proxy| { - Box::pin(async move { - let request = protocol_proxy.next().await.unwrap(); - protocol_proxy.send(response).await.unwrap(); - request - }) - }, - ); - - let (local_res, remote_res) = tokio::join!(local_handle, remote_handle); - - // remote address receives request - assert_eq!(expected_request, remote_res.unwrap().freeze()); - // local address receives response - assert_eq!(expected_response, local_res.unwrap().freeze()); - } -} From 31b6bdd13c619bb2a7fc815d7fbe8ebf94292d10 Mon Sep 17 00:00:00 2001 From: Waylon Jepsen <57912727+0xJepsen@users.noreply.github.com> Date: Thu, 16 May 2024 19:31:47 -0600 Subject: [PATCH 554/700] wip: lru changes (#7484) Co-authored-by: Emilia Hane --- Cargo.lock | 2 +- crates/blockchain-tree/Cargo.toml | 2 +- crates/blockchain-tree/src/block_buffer.rs | 18 ++--- crates/blockchain-tree/src/config.rs | 6 +- crates/blockchain-tree/src/state.rs | 2 +- crates/net/network/src/cache.rs | 75 +++++++++---------- crates/net/network/src/lib.rs | 2 +- crates/net/network/src/state.rs | 5 +- .../net/network/src/transactions/constants.rs | 15 ++-- .../net/network/src/transactions/fetcher.rs | 56 +++++++------- crates/net/network/src/transactions/mod.rs | 23 +++--- 11 files changed, 96 insertions(+), 110 
deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1d4f74de6a762..024cbfff55378 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6545,7 +6545,6 @@ dependencies = [ "aquamarine", "assert_matches", "linked_hash_set", - "lru", "metrics", "parking_lot 0.12.2", "reth-consensus", @@ -6554,6 +6553,7 @@ dependencies = [ "reth-evm-ethereum", "reth-interfaces", "reth-metrics", + "reth-network", "reth-primitives", "reth-provider", "reth-revm", diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 70ce9a2901c26..dc9e13866e3ef 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -21,11 +21,11 @@ reth-provider.workspace = true reth-stages-api.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-trie-parallel = { workspace = true, features = ["parallel"] } +reth-network = { workspace = true } reth-consensus.workspace = true # common parking_lot.workspace = true -lru = "0.12" tracing.workspace = true tokio = { workspace = true, features = ["macros", "sync"] } diff --git a/crates/blockchain-tree/src/block_buffer.rs b/crates/blockchain-tree/src/block_buffer.rs index 14e89633729ff..19ff9368b695b 100644 --- a/crates/blockchain-tree/src/block_buffer.rs +++ b/crates/blockchain-tree/src/block_buffer.rs @@ -1,10 +1,7 @@ use crate::metrics::BlockBufferMetrics; -use lru::LruCache; +use reth_network::cache::LruCache; use reth_primitives::{BlockHash, BlockNumber, SealedBlockWithSenders}; -use std::{ - collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet}, - num::NonZeroUsize, -}; +use std::collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet}; /// Contains the tree of pending blocks that cannot be executed due to missing parent. /// It allows to store unconnected blocks for potential future inclusion. @@ -32,19 +29,19 @@ pub struct BlockBuffer { /// first in line for evicting if `max_blocks` limit is hit. /// /// Used as counter of amount of blocks inside buffer. 
- pub(crate) lru: LruCache, + pub(crate) lru: LruCache, /// Various metrics for the block buffer. pub(crate) metrics: BlockBufferMetrics, } impl BlockBuffer { /// Create new buffer with max limit of blocks - pub fn new(limit: usize) -> Self { + pub fn new(limit: u32) -> Self { Self { blocks: Default::default(), parent_to_child: Default::default(), earliest_blocks: Default::default(), - lru: LruCache::new(NonZeroUsize::new(limit).unwrap()), + lru: LruCache::new(limit), metrics: Default::default(), } } @@ -76,7 +73,7 @@ impl BlockBuffer { self.earliest_blocks.entry(block.number).or_default().insert(hash); self.blocks.insert(hash, block); - if let Some((evicted_hash, _)) = self.lru.push(hash, ()).filter(|(b, _)| *b != hash) { + if let (_, Some(evicted_hash)) = self.lru.insert_and_get_evicted(hash) { // evict the block if limit is hit if let Some(evicted_block) = self.remove_block(&evicted_hash) { // evict the block if limit is hit @@ -85,7 +82,6 @@ impl BlockBuffer { } self.metrics.blocks.set(self.blocks.len() as f64); } - /// Removes the given block from the buffer and also all the children of the block. /// /// This is used to get all the blocks that are dependent on the block that is included. 
@@ -157,7 +153,7 @@ impl BlockBuffer { let block = self.blocks.remove(hash)?; self.remove_from_earliest_blocks(block.number, hash); self.remove_from_parent(block.parent_hash, hash); - self.lru.pop(hash); + self.lru.remove(hash); Some(block) } diff --git a/crates/blockchain-tree/src/config.rs b/crates/blockchain-tree/src/config.rs index 733cb6a1a0e0a..54ea855a42a3d 100644 --- a/crates/blockchain-tree/src/config.rs +++ b/crates/blockchain-tree/src/config.rs @@ -10,7 +10,7 @@ pub struct BlockchainTreeConfig { /// The number of blocks that can be re-orged (finalization windows) max_reorg_depth: u64, /// The number of unconnected blocks that we are buffering - max_unconnected_blocks: usize, + max_unconnected_blocks: u32, /// Number of additional block hashes to save in blockchain tree. For `BLOCKHASH` EVM opcode we /// need last 256 block hashes. /// @@ -43,7 +43,7 @@ impl BlockchainTreeConfig { max_reorg_depth: u64, max_blocks_in_chain: u64, num_of_additional_canonical_block_hashes: u64, - max_unconnected_blocks: usize, + max_unconnected_blocks: u32, ) -> Self { if max_reorg_depth > max_blocks_in_chain { panic!("Side chain size should be more than finalization window"); @@ -84,7 +84,7 @@ impl BlockchainTreeConfig { } /// Return max number of unconnected blocks that we are buffering - pub fn max_unconnected_blocks(&self) -> usize { + pub fn max_unconnected_blocks(&self) -> u32 { self.max_unconnected_blocks } } diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs index 5013be8c1ad56..75b6b4a919341 100644 --- a/crates/blockchain-tree/src/state.rs +++ b/crates/blockchain-tree/src/state.rs @@ -26,7 +26,7 @@ impl TreeState { pub(crate) fn new( last_finalized_block_number: BlockNumber, last_canonical_hashes: impl IntoIterator, - buffer_limit: usize, + buffer_limit: u32, ) -> Self { Self { block_chain_id_generator: 0, diff --git a/crates/net/network/src/cache.rs b/crates/net/network/src/cache.rs index c7fa271f1cb66..2be4180d4886b 100644 --- 
a/crates/net/network/src/cache.rs +++ b/crates/net/network/src/cache.rs @@ -1,24 +1,27 @@ +//! Network cache support + use core::hash::BuildHasher; use derive_more::{Deref, DerefMut}; use itertools::Itertools; -use linked_hash_set::LinkedHashSet; +// use linked_hash_set::LinkedHashSet; use schnellru::{ByLength, Limiter, RandomState, Unlimited}; -use std::{borrow::Borrow, fmt, hash::Hash, num::NonZeroUsize}; +use std::{fmt, hash::Hash}; /// A minimal LRU cache based on a `LinkedHashSet` with limited capacity. /// /// If the length exceeds the set capacity, the oldest element will be removed /// In the limit, for each element inserted the oldest existing element will be removed. -#[derive(Clone)] -pub struct LruCache { - limit: NonZeroUsize, - inner: LinkedHashSet, +pub struct LruCache { + limit: u32, + inner: LruMap, } -impl LruCache { +impl LruCache { /// Creates a new [`LruCache`] using the given limit - pub fn new(limit: NonZeroUsize) -> Self { - Self { inner: LinkedHashSet::new(), limit } + pub fn new(limit: u32) -> Self { + // limit of lru map is one element more, so can give eviction feedback, which isn't + // supported by LruMap + Self { inner: LruMap::new(limit + 1), limit } } /// Insert an element into the set. @@ -37,14 +40,12 @@ impl LruCache { /// Same as [`Self::insert`] but returns a tuple, where the second index is the evicted value, /// if one was evicted. pub fn insert_and_get_evicted(&mut self, entry: T) -> (bool, Option) { - if self.inner.insert(entry) { - if self.limit.get() < self.inner.len() { - // remove the oldest element in the set - return (true, self.remove_lru()) - } - return (true, None) - } - (false, None) + let new = self.inner.peek(&entry).is_none(); + let evicted = + if new && (self.limit as usize) <= self.inner.len() { self.remove_lru() } else { None }; + _ = self.inner.get_or_insert(entry, || ()); + + (new, evicted) } /// Remove the least recently used entry and return it. 
@@ -53,26 +54,22 @@ impl LruCache { /// configured, this will return None. #[inline] fn remove_lru(&mut self) -> Option { - self.inner.pop_front() + self.inner.pop_oldest().map(|(k, _v)| k) } /// Expels the given value. Returns true if the value existed. pub fn remove(&mut self, value: &T) -> bool { - self.inner.remove(value) + self.inner.remove(value).is_some() } /// Returns `true` if the set contains a value. - pub fn contains(&self, value: &Q) -> bool - where - T: Borrow, - Q: Hash + Eq + ?Sized, - { - self.inner.contains(value) + pub fn contains(&self, value: &T) -> bool { + self.inner.peek(value).is_some() } /// Returns an iterator over all cached entries in lru order pub fn iter(&self) -> impl Iterator + '_ { - self.inner.iter().rev() + self.inner.iter().map(|(k, _v)| k) } /// Returns number of elements currently in cache. @@ -90,11 +87,11 @@ impl LruCache { impl Extend for LruCache where - T: Eq + Hash, + T: Eq + Hash + fmt::Debug, { fn extend>(&mut self, iter: I) { for item in iter.into_iter() { - self.insert(item); + _ = self.insert(item); } } } @@ -175,17 +172,15 @@ mod test { #[test] fn test_cache_should_insert_into_empty_set() { - let limit = NonZeroUsize::new(5).unwrap(); - let mut cache = LruCache::new(limit); + let mut cache = LruCache::new(5); let entry = "entry"; assert!(cache.insert(entry)); - assert!(cache.contains(entry)); + assert!(cache.contains(&entry)); } #[test] fn test_cache_should_not_insert_same_value_twice() { - let limit = NonZeroUsize::new(5).unwrap(); - let mut cache = LruCache::new(limit); + let mut cache = LruCache::new(5); let entry = "entry"; assert!(cache.insert(entry)); assert!(!cache.insert(entry)); @@ -193,25 +188,23 @@ mod test { #[test] fn test_cache_should_remove_oldest_element_when_exceeding_limit() { - let limit = NonZeroUsize::new(2).unwrap(); - let mut cache = LruCache::new(limit); + let mut cache = LruCache::new(2); let old_entry = "old_entry"; let new_entry = "new_entry"; cache.insert(old_entry); 
cache.insert("entry"); cache.insert(new_entry); - assert!(cache.contains(new_entry)); - assert!(!cache.contains(old_entry)); + assert!(cache.contains(&new_entry)); + assert!(!cache.contains(&old_entry)); } #[test] fn test_cache_should_extend_an_array() { - let limit = NonZeroUsize::new(5).unwrap(); - let mut cache = LruCache::new(limit); + let mut cache = LruCache::new(5); let entries = ["some_entry", "another_entry"]; cache.extend(entries); for e in entries { - assert!(cache.contains(e)); + assert!(cache.contains(&e)); } } @@ -243,7 +236,7 @@ mod test { #[derive(Debug, Hash, PartialEq, Eq)] struct Key(i8); - let mut cache = LruCache::new(NonZeroUsize::new(2).unwrap()); + let mut cache = LruCache::new(2); let key_1 = Key(1); cache.insert(key_1); let key_2 = Key(2); diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index 4b7e28023901a..e02778f854640 100644 --- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -116,7 +116,7 @@ pub mod test_utils; mod budget; mod builder; -mod cache; +pub mod cache; pub mod config; mod discovery; pub mod error; diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 309184ca32553..b0824e0f348a5 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -24,7 +24,6 @@ use reth_provider::BlockNumReader; use std::{ collections::{HashMap, VecDeque}, net::{IpAddr, SocketAddr}, - num::NonZeroUsize, sync::{ atomic::{AtomicU64, AtomicUsize}, Arc, @@ -35,7 +34,7 @@ use tokio::sync::oneshot; use tracing::{debug, trace}; /// Cache limit of blocks to keep track of for a single peer. -const PEER_BLOCK_CACHE_LIMIT: usize = 512; +const PEER_BLOCK_CACHE_LIMIT: u32 = 512; /// The [`NetworkState`] keeps track of the state of all peers in the network. 
/// @@ -142,7 +141,7 @@ where capabilities, request_tx, pending_response: None, - blocks: LruCache::new(NonZeroUsize::new(PEER_BLOCK_CACHE_LIMIT).unwrap()), + blocks: LruCache::new(PEER_BLOCK_CACHE_LIMIT), }, ); } diff --git a/crates/net/network/src/transactions/constants.rs b/crates/net/network/src/transactions/constants.rs index 107d9758beff0..59ec103cdace3 100644 --- a/crates/net/network/src/transactions/constants.rs +++ b/crates/net/network/src/transactions/constants.rs @@ -40,7 +40,7 @@ pub mod tx_manager { /// Default limit for number of transactions to keep track of for a single peer. /// /// Default is 10 KiB. - pub const DEFAULT_CAPACITY_CACHE_SEEN_BY_PEER: usize = 10 * 1024; + pub const DEFAULT_CAPACITY_CACHE_SEEN_BY_PEER: u32 = 10 * 1024; /// Default maximum pending pool imports to tolerate. /// @@ -52,7 +52,7 @@ pub mod tx_manager { /// Default limit for number of bad imports to keep track of. /// /// Default is 10 KiB. - pub const DEFAULT_CAPACITY_CACHE_BAD_IMPORTS: usize = 100 * 1024; + pub const DEFAULT_CAPACITY_CACHE_BAD_IMPORTS: u32 = 100 * 1024; } /// Constants used by [`TransactionFetcher`](super::TransactionFetcher). @@ -129,23 +129,24 @@ pub mod tx_fetcher { /// /// Default is 100 times the [`SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST`], /// which defaults to 256 hashes, so 25 600 hashes. - pub const DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH: usize = - 100 * SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST; + pub const DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH: u32 = + 100 * SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST as u32; /// Default max size for cache of inflight and pending transactions fetch. /// /// Default is [`DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH`] + /// [`DEFAULT_MAX_COUNT_INFLIGHT_REQUESTS_ON_FETCH_PENDING_HASHES`], which is 25600 hashes and /// 65 requests, so it is 25665 hashes. 
- pub const DEFAULT_MAX_CAPACITY_CACHE_INFLIGHT_AND_PENDING_FETCH: usize = + pub const DEFAULT_MAX_CAPACITY_CACHE_INFLIGHT_AND_PENDING_FETCH: u32 = DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH + - DEFAULT_MAX_COUNT_INFLIGHT_REQUESTS_ON_FETCH_PENDING_HASHES; + DEFAULT_MAX_COUNT_INFLIGHT_REQUESTS_ON_FETCH_PENDING_HASHES as u32; /// Default maximum number of hashes pending fetch to tolerate at any time. /// /// Default is half of [`DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH`], which defaults to 25 600 /// hashes, so 12 800 hashes. - pub const DEFAULT_MAX_COUNT_PENDING_FETCH: usize = DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH / 2; + pub const DEFAULT_MAX_COUNT_PENDING_FETCH: usize = + DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH as usize / 2; /* ====== LIMITED CAPACITY ON FETCH PENDING HASHES ====== */ diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 7c60b54979980..491c24f0fe15e 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -48,7 +48,6 @@ use schnellru::ByLength; use smallvec::{smallvec, SmallVec}; use std::{ collections::HashMap, - num::NonZeroUsize, pin::Pin, task::{ready, Context, Poll}, time::{Duration, Instant}, @@ -576,11 +575,8 @@ impl TransactionFetcher { #[cfg(debug_assertions)] previously_unseen_hashes.push(*hash); - // todo: allow `MAX_ALTERNATIVE_PEERS_PER_TX` to be zero - let limit = NonZeroUsize::new(DEFAULT_MAX_COUNT_FALLBACK_PEERS.into()).expect("MAX_ALTERNATIVE_PEERS_PER_TX should be non-zero"); - if self.hashes_fetch_inflight_and_pending_fetch.get_or_insert(*hash, || - TxFetchMetadata{retries: 0, fallback_peers: LruCache::new(limit), tx_encoded_length: None} + TxFetchMetadata{retries: 0, fallback_peers: LruCache::new(DEFAULT_MAX_COUNT_FALLBACK_PEERS as u32), tx_encoded_length: None} ).is_none() { debug!(target: "net::tx", @@ -669,11 +665,14 @@ impl TransactionFetcher { { for hash in new_announced_hashes.iter() { if 
self.hashes_pending_fetch.contains(hash) { - debug!(target: "net::tx", "`%new_announced_hashes` should been taken out of buffer before packing in a request, breaks invariant `@hashes_pending_fetch` and -`@inflight_requests`, -`@hashes_fetch_inflight_and_pending_fetch` for `%new_announced_hashes`: {:?}", - new_announced_hashes.iter().map(|hash| - (*hash, self.hashes_fetch_inflight_and_pending_fetch.get(hash).cloned())).collect::)>>()) + debug!(target: "net::tx", "`{}` should have been taken out of buffer before packing in a request, breaks invariant `@hashes_pending_fetch` and `@inflight_requests`, `@hashes_fetch_inflight_and_pending_fetch` for `{}`: {:?}", + format!("{:?}", new_announced_hashes), // Assuming new_announced_hashes can be debug-printed directly + format!("{:?}", new_announced_hashes), + new_announced_hashes.iter().map(|hash| { + let metadata = self.hashes_fetch_inflight_and_pending_fetch.get(hash); + // Assuming you only need `retries` and `tx_encoded_length` for debugging + (*hash, metadata.map(|m| (m.retries, m.tx_encoded_length))) + }).collect::)>)>>()) } } } @@ -908,12 +907,14 @@ impl TransactionFetcher { debug_assert!( self.active_peers.get(&peer_id).is_some(), - "`%peer_id` has been removed from `@active_peers` before inflight request(s) resolved, broken invariant `@active_peers` and `@inflight_requests`, -`%peer_id`: {}, -`@hashes_fetch_inflight_and_pending_fetch` for `%requested_hashes`: {:?}", - peer_id, requested_hashes.iter().map(|hash| - (*hash, self.hashes_fetch_inflight_and_pending_fetch.get(hash).cloned()) - ).collect::)>>()); + "`{}` has been removed from `@active_peers` before inflight request(s) resolved, broken invariant `@active_peers` and `@inflight_requests`, `%peer_id`: {}, `@hashes_fetch_inflight_and_pending_fetch` for `%requested_hashes`: {:?}", + peer_id, + peer_id, + requested_hashes.iter().map(|hash| { + let metadata = self.hashes_fetch_inflight_and_pending_fetch.get(hash); + (*hash, metadata.map(|m| (m.retries, 
m.tx_encoded_length))) + }).collect::)>)>>() + ); self.decrement_inflight_request_count_for(&peer_id); @@ -1057,14 +1058,9 @@ impl Default for TransactionFetcher { Self { active_peers: LruMap::new(DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS), inflight_requests: Default::default(), - hashes_pending_fetch: LruCache::new( - NonZeroUsize::new(DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH) - .expect("buffered cache limit should be non-zero"), - ), + hashes_pending_fetch: LruCache::new(DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH), hashes_fetch_inflight_and_pending_fetch: LruMap::new( - DEFAULT_MAX_CAPACITY_CACHE_INFLIGHT_AND_PENDING_FETCH - .try_into() - .expect("proper size for inflight and pending fetch cache"), + DEFAULT_MAX_CAPACITY_CACHE_INFLIGHT_AND_PENDING_FETCH, ), filter_valid_message: Default::default(), info: TransactionFetcherInfo::default(), @@ -1074,7 +1070,7 @@ impl Default for TransactionFetcher { } /// Metadata of a transaction hash that is yet to be fetched. -#[derive(Debug, Constructor, Clone)] +#[derive(Debug, Constructor)] pub struct TxFetchMetadata { /// The number of times a request attempt has been made for the hash. 
retries: u8, @@ -1464,17 +1460,21 @@ mod test { peers.insert(peer_1, peer_1_data); peers.insert(peer_2, peer_2_data); - // insert peer_2 as fallback peer for seen_hashes let mut backups = default_cache(); backups.insert(peer_2); // insert seen_hashes into tx fetcher for i in 0..3 { - let meta = TxFetchMetadata::new(0, backups.clone(), Some(seen_eth68_hashes_sizes[i])); + // insert peer_2 as fallback peer for seen_hashes + let mut backups = default_cache(); + backups.insert(peer_2); + let meta = TxFetchMetadata::new(0, backups, Some(seen_eth68_hashes_sizes[i])); tx_fetcher.hashes_fetch_inflight_and_pending_fetch.insert(seen_hashes[i], meta); } - let meta = TxFetchMetadata::new(0, backups.clone(), None); + let meta = TxFetchMetadata::new(0, backups, None); tx_fetcher.hashes_fetch_inflight_and_pending_fetch.insert(seen_hashes[3], meta); - // + + let mut backups = default_cache(); + backups.insert(peer_2); // insert pending hash without peer_1 as fallback peer, only with peer_2 as fallback peer let hash_other = B256::from_slice(&[5; 32]); tx_fetcher diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index ee14e4c82f08c..070b9c7a1478e 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -37,7 +37,6 @@ use reth_transaction_pool::{ }; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, - num::NonZeroUsize, pin::Pin, sync::{ atomic::{AtomicUsize, Ordering}, @@ -285,9 +284,7 @@ impl TransactionsManager { pending_pool_imports_info: PendingPoolImportsInfo::new( DEFAULT_MAX_COUNT_PENDING_POOL_IMPORTS, ), - bad_imports: LruCache::new( - NonZeroUsize::new(DEFAULT_CAPACITY_CACHE_BAD_IMPORTS).unwrap(), - ), + bad_imports: LruCache::new(DEFAULT_CAPACITY_CACHE_BAD_IMPORTS), peers: Default::default(), command_tx, command_rx: UnboundedReceiverStream::new(command_rx), @@ -1513,9 +1510,7 @@ impl PeerMetadata { /// Returns a new instance of [`PeerMetadata`]. 
fn new(request_tx: PeerRequestSender, version: EthVersion, client_version: Arc) -> Self { Self { - seen_transactions: LruCache::new( - NonZeroUsize::new(DEFAULT_CAPACITY_CACHE_SEEN_BY_PEER).unwrap(), - ), + seen_transactions: LruCache::new(DEFAULT_CAPACITY_CACHE_SEEN_BY_PEER), request_tx, version, client_version, @@ -1629,7 +1624,7 @@ mod tests { use reth_provider::test_utils::NoopProvider; use reth_transaction_pool::test_utils::{testing_pool, MockTransaction}; use secp256k1::SecretKey; - use std::{future::poll_fn, hash}; + use std::{fmt, future::poll_fn, hash}; use tests::fetcher::TxFetchMetadata; async fn new_tx_manager() -> TransactionsManager { @@ -1655,9 +1650,8 @@ mod tests { transactions } - pub(super) fn default_cache() -> LruCache { - let limit = NonZeroUsize::new(DEFAULT_MAX_COUNT_FALLBACK_PEERS.into()).unwrap(); - LruCache::new(limit) + pub(super) fn default_cache() -> LruCache { + LruCache::new(DEFAULT_MAX_COUNT_FALLBACK_PEERS as u32) } // Returns (peer, channel-to-send-get-pooled-tx-response-on). 
@@ -2054,12 +2048,15 @@ mod tests { let retries = 1; let mut backups = default_cache(); backups.insert(peer_id_1); + + let mut backups1 = default_cache(); + backups1.insert(peer_id_1); tx_fetcher .hashes_fetch_inflight_and_pending_fetch - .insert(seen_hashes[1], TxFetchMetadata::new(retries, backups.clone(), None)); + .insert(seen_hashes[1], TxFetchMetadata::new(retries, backups, None)); tx_fetcher .hashes_fetch_inflight_and_pending_fetch - .insert(seen_hashes[0], TxFetchMetadata::new(retries, backups, None)); + .insert(seen_hashes[0], TxFetchMetadata::new(retries, backups1, None)); tx_fetcher.hashes_pending_fetch.insert(seen_hashes[1]); tx_fetcher.hashes_pending_fetch.insert(seen_hashes[0]); From 78c02299d86f8aad58d20ac5de9894bf8ae717b3 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 17 May 2024 03:37:42 +0200 Subject: [PATCH 555/700] rpc `txpool_content` typo (#8292) --- crates/rpc/rpc/src/txpool.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index 9fb61c3916339..e6f7d66b51e8a 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -127,9 +127,9 @@ where /// block(s), as well as the ones that are being scheduled for future execution only. 
/// /// See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_content) for more details - /// Handler for `txpool_inspect` + /// Handler for `txpool_content` async fn txpool_content(&self) -> Result { - trace!(target: "rpc::eth", "Serving txpool_inspect"); + trace!(target: "rpc::eth", "Serving txpool_content"); Ok(self.content()) } } From b177c29f93f81bcbf9551fe3898be44c897f8280 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 17 May 2024 11:01:13 +0300 Subject: [PATCH 556/700] feat(e2e): add helpers to TransactionTestContext (#8237) --- crates/e2e-test-utils/src/transaction.rs | 28 +++++++++++++++++++----- crates/ethereum/node/tests/e2e/blobs.rs | 4 ++-- crates/ethereum/node/tests/e2e/eth.rs | 4 ++-- crates/ethereum/node/tests/e2e/p2p.rs | 2 +- 4 files changed, 27 insertions(+), 11 deletions(-) diff --git a/crates/e2e-test-utils/src/transaction.rs b/crates/e2e-test-utils/src/transaction.rs index ea066304b35c6..8fe7efd0e77b9 100644 --- a/crates/e2e-test-utils/src/transaction.rs +++ b/crates/e2e-test-utils/src/transaction.rs @@ -12,15 +12,20 @@ use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, B256}; pub struct TransactionTestContext; impl TransactionTestContext { - /// Creates a static transfer and signs it - pub async fn transfer_tx(chain_id: u64, wallet: LocalWallet) -> Bytes { + /// Creates a static transfer and signs it, returning bytes + pub async fn transfer_tx(chain_id: u64, wallet: LocalWallet) -> TxEnvelope { let tx = tx(chain_id, None, 0); - let signer = EthereumSigner::from(wallet); - tx.build(&signer).await.unwrap().encoded_2718().into() + Self::sign_tx(wallet, tx).await + } + + /// Creates a static transfer and signs it, returning bytes + pub async fn transfer_tx_bytes(chain_id: u64, wallet: LocalWallet) -> Bytes { + let signed = Self::transfer_tx(chain_id, wallet).await; + signed.encoded_2718().into() } /// Creates a tx with blob sidecar and sign it - pub async fn 
tx_with_blobs(chain_id: u64, wallet: LocalWallet) -> eyre::Result { + pub async fn tx_with_blobs(chain_id: u64, wallet: LocalWallet) -> eyre::Result { let mut tx = tx(chain_id, None, 0); let mut builder = SidecarBuilder::::new(); @@ -30,8 +35,19 @@ impl TransactionTestContext { tx.set_blob_sidecar(sidecar); tx.set_max_fee_per_blob_gas(15e9 as u128); + let signed = Self::sign_tx(wallet, tx).await; + Ok(signed) + } + + /// Signs an arbitrary TransactionRequest using the provided wallet + pub async fn sign_tx(wallet: LocalWallet, tx: TransactionRequest) -> TxEnvelope { let signer = EthereumSigner::from(wallet); - let signed = tx.clone().build(&signer).await.unwrap(); + tx.build(&signer).await.unwrap() + } + + /// Creates a tx with blob sidecar and sign it, returning bytes + pub async fn tx_with_blobs_bytes(chain_id: u64, wallet: LocalWallet) -> eyre::Result { + let signed = Self::tx_with_blobs(chain_id, wallet).await?; Ok(signed.encoded_2718().into()) } diff --git a/crates/ethereum/node/tests/e2e/blobs.rs b/crates/ethereum/node/tests/e2e/blobs.rs index d8fca42d6257d..3411d6db52186 100644 --- a/crates/ethereum/node/tests/e2e/blobs.rs +++ b/crates/ethereum/node/tests/e2e/blobs.rs @@ -46,7 +46,7 @@ async fn can_handle_blobs() -> eyre::Result<()> { let second_wallet = wallets.last().unwrap(); // inject normal tx - let raw_tx = TransactionTestContext::transfer_tx(1, second_wallet.clone()).await; + let raw_tx = TransactionTestContext::transfer_tx_bytes(1, second_wallet.clone()).await; let tx_hash = node.rpc.inject_tx(raw_tx).await?; // build payload with normal tx let (payload, attributes) = node.new_payload(eth_payload_attributes).await?; @@ -55,7 +55,7 @@ async fn can_handle_blobs() -> eyre::Result<()> { node.inner.pool.remove_transactions(vec![tx_hash]); // build blob tx - let blob_tx = TransactionTestContext::tx_with_blobs(1, blob_wallet.clone()).await?; + let blob_tx = TransactionTestContext::tx_with_blobs_bytes(1, blob_wallet.clone()).await?; // inject blob tx to the 
pool let blob_tx_hash = node.rpc.inject_tx(blob_tx).await?; diff --git a/crates/ethereum/node/tests/e2e/eth.rs b/crates/ethereum/node/tests/e2e/eth.rs index 4f566e7c8d4fa..6153a55d7f65e 100644 --- a/crates/ethereum/node/tests/e2e/eth.rs +++ b/crates/ethereum/node/tests/e2e/eth.rs @@ -30,7 +30,7 @@ async fn can_run_eth_node() -> eyre::Result<()> { let mut node = nodes.pop().unwrap(); let wallet = Wallet::default(); - let raw_tx = TransactionTestContext::transfer_tx(1, wallet.inner).await; + let raw_tx = TransactionTestContext::transfer_tx_bytes(1, wallet.inner).await; // make the node advance let tx_hash = node.rpc.inject_tx(raw_tx).await?; @@ -78,7 +78,7 @@ async fn can_run_eth_node_with_auth_engine_api_over_ipc() -> eyre::Result<()> { // Configure wallet from test mnemonic and create dummy transfer tx let wallet = Wallet::default(); - let raw_tx = TransactionTestContext::transfer_tx(1, wallet.inner).await; + let raw_tx = TransactionTestContext::transfer_tx_bytes(1, wallet.inner).await; // make the node advance let tx_hash = node.rpc.inject_tx(raw_tx).await?; diff --git a/crates/ethereum/node/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs index 768d1ac5a11b1..c5d00b824c573 100644 --- a/crates/ethereum/node/tests/e2e/p2p.rs +++ b/crates/ethereum/node/tests/e2e/p2p.rs @@ -21,7 +21,7 @@ async fn can_sync() -> eyre::Result<()> { ) .await?; - let raw_tx = TransactionTestContext::transfer_tx(1, wallet.inner).await; + let raw_tx = TransactionTestContext::transfer_tx_bytes(1, wallet.inner).await; let mut second_node = nodes.pop().unwrap(); let mut first_node = nodes.pop().unwrap(); From 9441d984ae9e20b219a5c6c2dbc1032b1f90ee0e Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Fri, 17 May 2024 20:31:31 +1200 Subject: [PATCH 557/700] Move reth-primitives::fs module to fs-utils crate (#8286) Co-authored-by: Matthias Seitz --- Cargo.lock | 17 +++++++++++++++- Cargo.toml | 15 +++++++++++--- bin/reth/Cargo.toml | 1 + 
bin/reth/src/commands/db/stats.rs | 9 +++++---- .../src/commands/debug_cmd/build_block.rs | 2 +- bin/reth/src/commands/debug_cmd/execution.rs | 3 ++- .../commands/debug_cmd/in_memory_merkle.rs | 3 ++- bin/reth/src/commands/debug_cmd/merkle.rs | 3 ++- .../src/commands/debug_cmd/replay_engine.rs | 3 ++- bin/reth/src/commands/stage/drop.rs | 3 ++- bin/reth/src/commands/test_vectors/tables.rs | 2 +- bin/reth/src/utils.rs | 3 ++- crates/fs-util/Cargo.toml | 18 +++++++++++++++++ .../src/fs.rs => fs-util/src/lib.rs} | 12 +++++++++-- crates/interfaces/Cargo.toml | 1 + crates/interfaces/src/error.rs | 2 +- crates/interfaces/src/provider.rs | 4 ++-- crates/node-core/Cargo.toml | 1 + crates/node-core/src/args/secret_key.rs | 3 ++- crates/node-core/src/args/utils.rs | 3 ++- crates/node-core/src/engine/engine_store.rs | 2 +- crates/node-core/src/utils.rs | 5 ++--- crates/primitives/Cargo.toml | 20 ++++++++++++++++--- crates/primitives/src/lib.rs | 1 - crates/rpc/rpc-layer/Cargo.toml | 1 + crates/rpc/rpc-layer/src/jwt_secret.rs | 7 ++----- .../stages/benches/setup/account_hashing.rs | 4 ++-- crates/stages/benches/setup/mod.rs | 4 ++-- crates/storage/db/Cargo.toml | 1 + crates/storage/db/benches/utils.rs | 3 ++- crates/storage/db/src/lib.rs | 12 +++++------ crates/storage/db/src/static_file/mod.rs | 5 ++--- crates/storage/nippy-jar/Cargo.toml | 2 +- crates/storage/nippy-jar/src/error.rs | 2 +- crates/storage/nippy-jar/src/lib.rs | 4 ++-- crates/storage/provider/Cargo.toml | 1 + .../src/providers/static_file/manager.rs | 8 ++++---- crates/transaction-pool/Cargo.toml | 1 + crates/transaction-pool/src/maintain.rs | 16 ++++++++------- 39 files changed, 142 insertions(+), 65 deletions(-) create mode 100644 crates/fs-util/Cargo.toml rename crates/{primitives/src/fs.rs => fs-util/src/lib.rs} (95%) diff --git a/Cargo.lock b/Cargo.lock index 024cbfff55378..c049d8c960d2e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6416,6 +6416,7 @@ dependencies = [ "reth-ethereum-payload-builder", 
"reth-evm", "reth-exex", + "reth-fs-util", "reth-interfaces", "reth-network", "reth-network-api", @@ -6660,6 +6661,7 @@ dependencies = [ "proptest-derive", "rand 0.8.5", "reth-codecs", + "reth-fs-util", "reth-interfaces", "reth-libmdbx", "reth-metrics", @@ -7033,6 +7035,14 @@ dependencies = [ "tokio-util", ] +[[package]] +name = "reth-fs-util" +version = "0.2.0-beta.7" +dependencies = [ + "serde_json", + "thiserror", +] + [[package]] name = "reth-interfaces" version = "0.2.0-beta.7" @@ -7044,6 +7054,7 @@ dependencies = [ "rand 0.8.5", "reth-consensus", "reth-eth-wire-types", + "reth-fs-util", "reth-network-api", "reth-network-types", "reth-primitives", @@ -7253,7 +7264,7 @@ dependencies = [ "memmap2 0.7.1", "ph", "rand 0.8.5", - "reth-primitives", + "reth-fs-util", "serde", "sucds", "tempfile", @@ -7351,6 +7362,7 @@ dependencies = [ "reth-engine-primitives", "reth-etl", "reth-evm", + "reth-fs-util", "reth-interfaces", "reth-metrics", "reth-net-nat", @@ -7602,6 +7614,7 @@ dependencies = [ "reth-codecs", "reth-db", "reth-evm", + "reth-fs-util", "reth-interfaces", "reth-metrics", "reth-nippy-jar", @@ -7814,6 +7827,7 @@ dependencies = [ "jsonwebtoken 8.3.0", "pin-project", "rand 0.8.5", + "reth-fs-util", "reth-primitives", "serde", "tempfile", @@ -8006,6 +8020,7 @@ dependencies = [ "proptest", "rand 0.8.5", "reth-eth-wire", + "reth-fs-util", "reth-metrics", "reth-network-types", "reth-primitives", diff --git a/Cargo.toml b/Cargo.toml index 102fffa88a589..c81b76a7e7c3a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -239,6 +239,7 @@ reth-etl = { path = "crates/etl" } reth-evm = { path = "crates/evm" } reth-evm-ethereum = { path = "crates/ethereum/evm" } reth-exex = { path = "crates/exex" } +reth-fs-util = { path = "crates/fs-util" } reth-optimism-payload-builder = { path = "crates/optimism/payload" } reth-interfaces = { path = "crates/interfaces" } reth-ipc = { path = "crates/rpc/ipc" } @@ -280,8 +281,13 @@ reth-node-events = { path = "crates/node/events" } 
reth-testing-utils = { path = "testing/testing-utils" } # revm -revm = { version = "9.0.0", features = ["std", "secp256k1"], default-features = false } -revm-primitives = { version = "4.0.0", features = ["std"], default-features = false } +revm = { version = "9.0.0", features = [ + "std", + "secp256k1", +], default-features = false } +revm-primitives = { version = "4.0.0", features = [ + "std", +], default-features = false } revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "21a2db5" } # eth @@ -381,7 +387,10 @@ secp256k1 = { version = "0.28", default-features = false, features = [ "recovery", ] } # TODO: Remove `k256` feature: https://github.com/sigp/enr/pull/74 -enr = { version = "0.12.0", default-features = false, features = ["k256", "rust-secp256k1"] } +enr = { version = "0.12.0", default-features = false, features = [ + "k256", + "rust-secp256k1", +] } # for eip-4844 c-kzg = "1.0.0" diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index c1ed8981a4ca3..b1d9b1638efa8 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -16,6 +16,7 @@ workspace = true # reth reth-config.workspace = true reth-primitives = { workspace = true, features = ["arbitrary", "clap"] } +reth-fs-util.workspace = true reth-db = { workspace = true, features = ["mdbx"] } reth-exex.workspace = true reth-provider = { workspace = true } diff --git a/bin/reth/src/commands/db/stats.rs b/bin/reth/src/commands/db/stats.rs index b47e7980b02e2..a59a904eb7424 100644 --- a/bin/reth/src/commands/db/stats.rs +++ b/bin/reth/src/commands/db/stats.rs @@ -15,6 +15,7 @@ use reth_db::{ Tables, TransactionBlocks, TransactionHashNumbers, TransactionSenders, Transactions, VersionHistory, }; +use reth_fs_util as fs; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_primitives::static_file::{find_fixed_range, SegmentRangeInclusive}; use reth_provider::providers::StaticFileProvider; @@ -203,16 +204,16 @@ impl Command { let columns = jar_provider.columns(); let 
rows = jar_provider.rows(); - let data_size = reth_primitives::fs::metadata(jar_provider.data_path()) + let data_size = fs::metadata(jar_provider.data_path()) .map(|metadata| metadata.len()) .unwrap_or_default(); - let index_size = reth_primitives::fs::metadata(jar_provider.index_path()) + let index_size = fs::metadata(jar_provider.index_path()) .map(|metadata| metadata.len()) .unwrap_or_default(); - let offsets_size = reth_primitives::fs::metadata(jar_provider.offsets_path()) + let offsets_size = fs::metadata(jar_provider.offsets_path()) .map(|metadata| metadata.len()) .unwrap_or_default(); - let config_size = reth_primitives::fs::metadata(jar_provider.config_path()) + let config_size = fs::metadata(jar_provider.config_path()) .map(|metadata| metadata.len()) .unwrap_or_default(); diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 72cc9e1fa5328..31585c2f6ea3b 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -22,12 +22,12 @@ use reth_cli_runner::CliContext; use reth_consensus::Consensus; use reth_db::{init_db, DatabaseEnv}; use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; +use reth_fs_util as fs; use reth_interfaces::RethResult; use reth_node_api::PayloadBuilderAttributes; use reth_payload_builder::database::CachedReads; use reth_primitives::{ constants::eip4844::{LoadKzgSettingsError, MAINNET_KZG_TRUSTED_SETUP}, - fs, revm_primitives::KzgSettings, stage::StageId, Address, BlobTransaction, BlobTransactionSidecar, Bytes, ChainSpec, PooledTransactionsElement, diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 8fecc928ab02c..3e6474236801f 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -22,12 +22,13 @@ use reth_downloaders::{ headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; 
use reth_exex::ExExManagerHandle; +use reth_fs_util as fs; use reth_interfaces::p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}; use reth_network::{NetworkEvents, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_node_core::init::init_genesis; use reth_primitives::{ - fs, stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, PruneModes, B256, + stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, PruneModes, B256, }; use reth_provider::{ BlockExecutionWriter, HeaderSyncMode, ProviderFactory, StageCheckpointReader, diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index f51426015f0ea..6f7a580a4b703 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -16,10 +16,11 @@ use reth_cli_runner::CliContext; use reth_config::Config; use reth_db::{init_db, DatabaseEnv}; use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; +use reth_fs_util as fs; use reth_interfaces::executor::BlockValidationError; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; -use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, ChainSpec, Receipts}; +use reth_primitives::{stage::StageId, BlockHashOrNumber, ChainSpec, Receipts}; use reth_provider::{ AccountExtReader, BundleStateWithReceipts, HashingWriter, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index f452e2e52200e..40d79a85d5779 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -18,10 +18,11 @@ use reth_config::Config; use reth_consensus::Consensus; use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx, DatabaseEnv}; use reth_evm::execute::{BatchBlockExecutionOutput, BatchExecutor, 
BlockExecutorProvider}; +use reth_fs_util as fs; use reth_interfaces::p2p::full_block::FullBlockClient; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; -use reth_primitives::{fs, stage::StageCheckpoint, BlockHashOrNumber, ChainSpec, PruneModes}; +use reth_primitives::{stage::StageCheckpoint, BlockHashOrNumber, ChainSpec, PruneModes}; use reth_provider::{ BlockNumReader, BlockWriter, BundleStateWithReceipts, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter, diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index b86e707a86740..72031ce1b86b6 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -18,11 +18,12 @@ use reth_cli_runner::CliContext; use reth_config::Config; use reth_consensus::Consensus; use reth_db::{init_db, DatabaseEnv}; +use reth_fs_util as fs; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; use reth_node_core::engine::engine_store::{EngineMessageStore, StoredEngineApiMessage}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::{fs, ChainSpec, PruneModes}; +use reth_primitives::{ChainSpec, PruneModes}; use reth_provider::{ providers::BlockchainProvider, CanonStateSubscriptions, ProviderFactory, StaticFileProviderFactory, diff --git a/bin/reth/src/commands/stage/drop.rs b/bin/reth/src/commands/stage/drop.rs index 625a3f36b8078..73ac898c970a0 100644 --- a/bin/reth/src/commands/stage/drop.rs +++ b/bin/reth/src/commands/stage/drop.rs @@ -11,9 +11,10 @@ use crate::{ use clap::Parser; use itertools::Itertools; use reth_db::{open_db, static_file::iter_static_files, tables, transaction::DbTxMut, DatabaseEnv}; +use reth_fs_util as fs; use reth_node_core::init::{insert_genesis_header, insert_genesis_history, insert_genesis_state}; use reth_primitives::{ - fs, stage::StageId, 
static_file::find_fixed_range, ChainSpec, StaticFileSegment, + stage::StageId, static_file::find_fixed_range, ChainSpec, StaticFileSegment, }; use reth_provider::{providers::StaticFileWriter, ProviderFactory, StaticFileProviderFactory}; use std::sync::Arc; diff --git a/bin/reth/src/commands/test_vectors/tables.rs b/bin/reth/src/commands/test_vectors/tables.rs index 181ed0e3e3a35..c44739ffbe2e0 100644 --- a/bin/reth/src/commands/test_vectors/tables.rs +++ b/bin/reth/src/commands/test_vectors/tables.rs @@ -11,7 +11,7 @@ use reth_db::{ table::{DupSort, Table, TableRow}, tables, }; -use reth_primitives::fs; +use reth_fs_util as fs; use tracing::error; const VECTORS_FOLDER: &str = "testdata/micro/db"; diff --git a/bin/reth/src/utils.rs b/bin/reth/src/utils.rs index 650fc9d700d94..025b059bcef12 100644 --- a/bin/reth/src/utils.rs +++ b/bin/reth/src/utils.rs @@ -9,7 +9,8 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, DatabaseError, RawTable, TableRawRow, }; -use reth_primitives::{fs, ChainSpec}; +use reth_fs_util as fs; +use reth_primitives::ChainSpec; use reth_provider::ProviderFactory; use std::{path::Path, rc::Rc, sync::Arc}; use tracing::info; diff --git a/crates/fs-util/Cargo.toml b/crates/fs-util/Cargo.toml new file mode 100644 index 0000000000000..aa8c322d20166 --- /dev/null +++ b/crates/fs-util/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "reth-fs-util" +version.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true +description = "Commonly used fs utils in reth." 
+ +[lints] +workspace = true + +[dependencies] + +# misc +serde_json.workspace = true +thiserror.workspace = true diff --git a/crates/primitives/src/fs.rs b/crates/fs-util/src/lib.rs similarity index 95% rename from crates/primitives/src/fs.rs rename to crates/fs-util/src/lib.rs index 1bcb908dbdc58..c6c56dd44181b 100644 --- a/crates/primitives/src/fs.rs +++ b/crates/fs-util/src/lib.rs @@ -1,11 +1,21 @@ //! Wrapper for `std::fs` methods +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + use std::{ fs::{self, ReadDir}, io, path::{Path, PathBuf}, }; +/// Result alias for [FsPathError]. +pub type Result = std::result::Result; + /// Various error variants for `std::fs` operations that serve as an addition to the io::Error which /// does not provide any information about the path. 
#[derive(Debug, thiserror::Error)] @@ -187,8 +197,6 @@ impl FsPathError { } } -type Result = std::result::Result; - /// Wrapper for `std::fs::read_to_string` pub fn read_to_string(path: impl AsRef) -> Result { let path = path.as_ref(); diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index c2e276a3359e7..27e2d8f390eca 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -12,6 +12,7 @@ workspace = true [dependencies] reth-primitives.workspace = true +reth-fs-util.workspace = true reth-network-api.workspace = true reth-eth-wire-types.workspace = true reth-consensus.workspace = true diff --git a/crates/interfaces/src/error.rs b/crates/interfaces/src/error.rs index 38498c312ab1d..ec3da8ad01b76 100644 --- a/crates/interfaces/src/error.rs +++ b/crates/interfaces/src/error.rs @@ -5,8 +5,8 @@ use crate::{ provider::ProviderError, }; use reth_consensus::ConsensusError; +use reth_fs_util::FsPathError; use reth_network_api::NetworkError; -use reth_primitives::fs::FsPathError; /// Result alias for [`RethError`]. 
pub type RethResult = Result; diff --git a/crates/interfaces/src/provider.rs b/crates/interfaces/src/provider.rs index 7221c0cdfc9d9..6ae7aad8ed30c 100644 --- a/crates/interfaces/src/provider.rs +++ b/crates/interfaces/src/provider.rs @@ -130,8 +130,8 @@ pub enum ProviderError { ConsistentView(Box), } -impl From for ProviderError { - fn from(err: reth_primitives::fs::FsPathError) -> Self { +impl From for ProviderError { + fn from(err: reth_fs_util::FsPathError) -> Self { ProviderError::FsPathError(err.to_string()) } } diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index 7637b3b2dcb1c..b0ed1fae159cd 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true +reth-fs-util.workspace = true reth-db = { workspace = true, features = ["mdbx"] } reth-interfaces = { workspace = true, features = ["clap"] } reth-provider.workspace = true diff --git a/crates/node-core/src/args/secret_key.rs b/crates/node-core/src/args/secret_key.rs index b93d909b4a1ba..a8b401bbb1cc0 100644 --- a/crates/node-core/src/args/secret_key.rs +++ b/crates/node-core/src/args/secret_key.rs @@ -1,5 +1,6 @@ +use reth_fs_util::{self as fs, FsPathError}; use reth_network::config::rng_secret_key; -use reth_primitives::{fs, fs::FsPathError, hex::encode as hex_encode}; +use reth_primitives::hex::encode as hex_encode; use secp256k1::{Error as SecretKeyBaseError, SecretKey}; use std::{ io, diff --git a/crates/node-core/src/args/utils.rs b/crates/node-core/src/args/utils.rs index cf0bee96c92d6..5e3b0271c8076 100644 --- a/crates/node-core/src/args/utils.rs +++ b/crates/node-core/src/args/utils.rs @@ -1,6 +1,7 @@ //! 
Clap parser utilities -use reth_primitives::{fs, AllGenesisFormats, BlockHashOrNumber, ChainSpec, B256}; +use reth_fs_util as fs; +use reth_primitives::{AllGenesisFormats, BlockHashOrNumber, ChainSpec, B256}; use std::{ net::{IpAddr, Ipv4Addr, SocketAddr, ToSocketAddrs}, path::PathBuf, diff --git a/crates/node-core/src/engine/engine_store.rs b/crates/node-core/src/engine/engine_store.rs index 524e2c89bc269..2a1ffc3b0ed20 100644 --- a/crates/node-core/src/engine/engine_store.rs +++ b/crates/node-core/src/engine/engine_store.rs @@ -3,7 +3,7 @@ use futures::{Stream, StreamExt}; use reth_beacon_consensus::BeaconEngineMessage; use reth_engine_primitives::EngineTypes; -use reth_primitives::fs; +use reth_fs_util as fs; use reth_rpc_types::{ engine::{CancunPayloadFields, ForkchoiceState}, ExecutionPayload, diff --git a/crates/node-core/src/utils.rs b/crates/node-core/src/utils.rs index 32dc509fa140a..84a3bef7be58e 100644 --- a/crates/node-core/src/utils.rs +++ b/crates/node-core/src/utils.rs @@ -3,15 +3,14 @@ use eyre::Result; use reth_consensus_common::validation::validate_block_standalone; +use reth_fs_util as fs; use reth_interfaces::p2p::{ bodies::client::BodiesClient, headers::client::{HeadersClient, HeadersRequest}, priority::Priority, }; use reth_network::NetworkManager; -use reth_primitives::{ - fs, BlockHashOrNumber, ChainSpec, HeadersDirection, SealedBlock, SealedHeader, -}; +use reth_primitives::{BlockHashOrNumber, ChainSpec, HeadersDirection, SealedBlock, SealedHeader}; use reth_provider::BlockReader; use reth_rpc_layer::{JwtError, JwtSecret}; use std::{ diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index d9d6c592e79d1..34100b24b70e1 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -30,7 +30,11 @@ alloy-eips = { workspace = true, features = ["serde"] } nybbles = { workspace = true, features = ["serde", "rlp"] } # crypto -secp256k1 = { workspace = true, features = ["global-context", "recovery", "rand"] } 
+secp256k1 = { workspace = true, features = [ + "global-context", + "recovery", + "rand", +] } # for eip-4844 c-kzg = { workspace = true, features = ["serde"], optional = true } @@ -83,7 +87,11 @@ plain_hasher = "0.2" sucds = "0.8.1" criterion.workspace = true -pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] } +pprof = { workspace = true, features = [ + "flamegraph", + "frame-pointer", + "criterion", +] } [features] default = ["c-kzg", "zstd-codec"] @@ -100,7 +108,13 @@ arbitrary = [ "dep:proptest-derive", "zstd-codec", ] -c-kzg = ["dep:c-kzg", "revm/c-kzg", "revm-primitives/c-kzg", "dep:tempfile", "alloy-eips/kzg"] +c-kzg = [ + "dep:c-kzg", + "revm/c-kzg", + "revm-primitives/c-kzg", + "dep:tempfile", + "alloy-eips/kzg", +] zstd-codec = ["dep:zstd"] clap = ["dep:clap"] optimism = [ diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index d7317951e187a..1dd2562e9d63d 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -30,7 +30,6 @@ pub mod constants; pub mod eip4844; mod error; mod exex; -pub mod fs; pub mod genesis; mod header; mod integer_list; diff --git a/crates/rpc/rpc-layer/Cargo.toml b/crates/rpc/rpc-layer/Cargo.toml index dc4614b178bb3..546770f94377e 100644 --- a/crates/rpc/rpc-layer/Cargo.toml +++ b/crates/rpc/rpc-layer/Cargo.toml @@ -12,6 +12,7 @@ workspace = true [dependencies] reth-primitives.workspace = true +reth-fs-util.workspace = true http.workspace = true hyper.workspace = true diff --git a/crates/rpc/rpc-layer/src/jwt_secret.rs b/crates/rpc/rpc-layer/src/jwt_secret.rs index b31cd27c63550..b3d536078e71b 100644 --- a/crates/rpc/rpc-layer/src/jwt_secret.rs +++ b/crates/rpc/rpc-layer/src/jwt_secret.rs @@ -1,10 +1,7 @@ use jsonwebtoken::{decode, errors::ErrorKind, Algorithm, DecodingKey, Validation}; use rand::Rng; -use reth_primitives::{ - fs, - fs::FsPathError, - hex::{self, encode as hex_encode}, -}; +use reth_fs_util::{self as fs, FsPathError}; +use 
reth_primitives::hex::{self, encode as hex_encode}; use serde::{Deserialize, Serialize}; use std::{ path::Path, diff --git a/crates/stages/benches/setup/account_hashing.rs b/crates/stages/benches/setup/account_hashing.rs index d300265355e7f..a1df1757b65ca 100644 --- a/crates/stages/benches/setup/account_hashing.rs +++ b/crates/stages/benches/setup/account_hashing.rs @@ -4,12 +4,12 @@ use super::constants; use reth_db::{ cursor::DbCursorRO, database::Database, tables, transaction::DbTx, DatabaseError as DbError, }; -use reth_primitives::{fs, stage::StageCheckpoint, BlockNumber}; +use reth_primitives::{stage::StageCheckpoint, BlockNumber}; use reth_stages::{ stages::{AccountHashingStage, SeedOpts}, test_utils::TestStageDB, }; -use std::{ops::RangeInclusive, path::Path}; +use std::{fs, ops::RangeInclusive, path::Path}; /// Prepares a database for [`AccountHashingStage`] /// If the environment variable [`constants::ACCOUNT_HASHING_DB`] is set, it will use that one and diff --git a/crates/stages/benches/setup/mod.rs b/crates/stages/benches/setup/mod.rs index 2151f26c80714..06e15192b159f 100644 --- a/crates/stages/benches/setup/mod.rs +++ b/crates/stages/benches/setup/mod.rs @@ -14,13 +14,13 @@ use reth_interfaces::test_utils::{ random_eoa_accounts, }, }; -use reth_primitives::{fs, Account, Address, SealedBlock, B256, U256}; +use reth_primitives::{Account, Address, SealedBlock, B256, U256}; use reth_stages::{ stages::{AccountHashingStage, StorageHashingStage}, test_utils::{StorageKind, TestStageDB}, }; use reth_trie::StateRoot; -use std::{collections::BTreeMap, path::Path, sync::Arc}; +use std::{collections::BTreeMap, fs, path::Path, sync::Arc}; use tokio::runtime::Handle; mod constants; diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 97b556346d7c4..a764f270d618e 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -14,6 +14,7 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true 
+reth-fs-util.workspace = true reth-interfaces.workspace = true reth-codecs.workspace = true reth-libmdbx = { workspace = true, optional = true, features = [ diff --git a/crates/storage/db/benches/utils.rs b/crates/storage/db/benches/utils.rs index 0e904558419b0..a5f7d4bbd7140 100644 --- a/crates/storage/db/benches/utils.rs +++ b/crates/storage/db/benches/utils.rs @@ -5,7 +5,8 @@ use reth_db::{ transaction::DbTxMut, DatabaseEnv, }; -use reth_primitives::{fs, Bytes}; +use reth_fs_util as fs; +use reth_primitives::Bytes; use std::{path::Path, sync::Arc}; /// Path where the DB is initialized for benchmarks. diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index 5425c80743969..6b6a22319f84f 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -99,7 +99,7 @@ pub fn create_db>(path: P, args: DatabaseArguments) -> eyre::Resu let rpath = path.as_ref(); if is_database_empty(rpath) { - reth_primitives::fs::create_dir_all(rpath) + reth_fs_util::create_dir_all(rpath) .wrap_err_with(|| format!("Could not create database directory {}", rpath.display()))?; create_db_version_file(rpath)?; } else { @@ -175,8 +175,8 @@ pub mod test_utils { database_metrics::{DatabaseMetadata, DatabaseMetadataValue, DatabaseMetrics}, models::client_version::ClientVersion, }; + use reth_fs_util; use reth_libmdbx::MaxReadTransactionDuration; - use reth_primitives::fs; use std::{path::PathBuf, sync::Arc}; use tempfile::TempDir; @@ -202,7 +202,7 @@ pub mod test_utils { fn drop(&mut self) { if let Some(db) = self.db.take() { drop(db); - let _ = fs::remove_dir_all(&self.path); + let _ = reth_fs_util::remove_dir_all(&self.path); } } } @@ -318,7 +318,6 @@ mod tests { }; use assert_matches::assert_matches; use reth_libmdbx::MaxReadTransactionDuration; - use reth_primitives::fs; use tempfile::tempdir; #[test] @@ -342,7 +341,8 @@ mod tests { // Database is not empty, version file is malformed { - fs::write(path.path().join(db_version_file_path(&path)), 
"invalid-version").unwrap(); + reth_fs_util::write(path.path().join(db_version_file_path(&path)), "invalid-version") + .unwrap(); let db = init_db(&path, args.clone()); assert!(db.is_err()); assert_matches!( @@ -353,7 +353,7 @@ mod tests { // Database is not empty, version file contains not matching version { - fs::write(path.path().join(db_version_file_path(&path)), "0").unwrap(); + reth_fs_util::write(path.path().join(db_version_file_path(&path)), "0").unwrap(); let db = init_db(&path, args); assert!(db.is_err()); assert_matches!( diff --git a/crates/storage/db/src/static_file/mod.rs b/crates/storage/db/src/static_file/mod.rs index eed27e0de954d..17736b56522e1 100644 --- a/crates/storage/db/src/static_file/mod.rs +++ b/crates/storage/db/src/static_file/mod.rs @@ -31,12 +31,11 @@ type SortedStaticFiles = pub fn iter_static_files(path: impl AsRef) -> Result { let path = path.as_ref(); if !path.exists() { - reth_primitives::fs::create_dir_all(path) - .map_err(|err| NippyJarError::Custom(err.to_string()))?; + reth_fs_util::create_dir_all(path).map_err(|err| NippyJarError::Custom(err.to_string()))?; } let mut static_files = SortedStaticFiles::default(); - let entries = reth_primitives::fs::read_dir(path) + let entries = reth_fs_util::read_dir(path) .map_err(|err| NippyJarError::Custom(err.to_string()))? 
.filter_map(Result::ok) .collect::>(); diff --git a/crates/storage/nippy-jar/Cargo.toml b/crates/storage/nippy-jar/Cargo.toml index d979759b4873f..fb485e32ac9cc 100644 --- a/crates/storage/nippy-jar/Cargo.toml +++ b/crates/storage/nippy-jar/Cargo.toml @@ -16,7 +16,7 @@ name = "reth_nippy_jar" [dependencies] # reth -reth-primitives.workspace = true +reth-fs-util.workspace = true # filter ph = "0.8.0" diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index d447770580803..2f7bcf804cf83 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -8,7 +8,7 @@ pub enum NippyJarError { #[error(transparent)] Disconnect(#[from] std::io::Error), #[error(transparent)] - FileSystem(#[from] reth_primitives::fs::FsPathError), + FileSystem(#[from] reth_fs_util::FsPathError), #[error("{0}")] Custom(String), #[error(transparent)] diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index 1abbfba75cc26..435e91e877d4d 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -245,7 +245,7 @@ impl NippyJar { // Read [`Self`] located at the data file. 
let config_path = path.with_extension(CONFIG_FILE_EXTENSION); let config_file = File::open(&config_path) - .map_err(|err| reth_primitives::fs::FsPathError::open(err, config_path))?; + .map_err(|err| reth_fs_util::FsPathError::open(err, config_path))?; let mut obj: Self = bincode::deserialize_from(&config_file)?; obj.path = path.to_path_buf(); @@ -290,7 +290,7 @@ impl NippyJar { [self.data_path().into(), self.index_path(), self.offsets_path(), self.config_path()] { if path.exists() { - reth_primitives::fs::remove_file(path)?; + reth_fs_util::remove_file(path)?; } } diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 1272a824c34b3..4fe4ffbb95994 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -14,6 +14,7 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true +reth-fs-util.workspace = true reth-interfaces.workspace = true reth-db.workspace = true reth-trie = { workspace = true, features = ["metrics"] } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 7814a709768ce..610021d70cf09 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -132,16 +132,16 @@ impl StaticFileProvider { entries += jar_provider.rows(); - let data_size = reth_primitives::fs::metadata(jar_provider.data_path()) + let data_size = reth_fs_util::metadata(jar_provider.data_path()) .map(|metadata| metadata.len()) .unwrap_or_default(); - let index_size = reth_primitives::fs::metadata(jar_provider.index_path()) + let index_size = reth_fs_util::metadata(jar_provider.index_path()) .map(|metadata| metadata.len()) .unwrap_or_default(); - let offsets_size = reth_primitives::fs::metadata(jar_provider.offsets_path()) + let offsets_size = reth_fs_util::metadata(jar_provider.offsets_path()) .map(|metadata| metadata.len()) 
.unwrap_or_default(); - let config_size = reth_primitives::fs::metadata(jar_provider.config_path()) + let config_size = reth_fs_util::metadata(jar_provider.config_path()) .map(|metadata| metadata.len()) .unwrap_or_default(); diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index ebb6e497f9aad..82020e17257a1 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-eth-wire.workspace = true reth-primitives.workspace = true +reth-fs-util.workspace = true reth-provider.workspace = true reth-tasks.workspace = true revm.workspace = true diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 52f39cd360f25..923683d8a9bec 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -11,10 +11,11 @@ use futures_util::{ future::{BoxFuture, Fuse, FusedFuture}, FutureExt, Stream, StreamExt, }; +use reth_fs_util::FsPathError; use reth_primitives::{ - fs::FsPathError, Address, BlockHash, BlockNumber, BlockNumberOrTag, - FromRecoveredPooledTransaction, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, - TransactionSigned, TryFromRecoveredTransaction, + Address, BlockHash, BlockNumber, BlockNumberOrTag, FromRecoveredPooledTransaction, + IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, TransactionSigned, + TryFromRecoveredTransaction, }; use reth_provider::{ BlockReaderIdExt, BundleStateWithReceipts, CanonStateNotification, ChainSpecProvider, @@ -578,7 +579,7 @@ where } debug!(target: "txpool", txs_file =?file_path, "Check local persistent storage for saved transactions"); - let data = reth_primitives::fs::read(file_path)?; + let data = reth_fs_util::read(file_path)?; if data.is_empty() { return Ok(()) @@ -598,7 +599,7 @@ where let outcome = pool.add_transactions(crate::TransactionOrigin::Local, pool_transactions).await; info!(target: "txpool", txs_file 
=?file_path, num_txs=%outcome.len(), "Successfully reinserted local transactions from file"); - reth_primitives::fs::remove_file(file_path)?; + reth_fs_util::remove_file(file_path)?; Ok(()) } @@ -623,7 +624,7 @@ where info!(target: "txpool", txs_file =?file_path, num_txs=%num_txs, "Saving current local transactions"); let parent_dir = file_path.parent().map(std::fs::create_dir_all).transpose(); - match parent_dir.map(|_| reth_primitives::fs::write(file_path, buf)) { + match parent_dir.map(|_| reth_fs_util::write(file_path, buf)) { Ok(_) => { info!(target: "txpool", txs_file=?file_path, "Wrote local transactions to file"); } @@ -680,7 +681,8 @@ mod tests { blobstore::InMemoryBlobStore, validate::EthTransactionValidatorBuilder, CoinbaseTipOrdering, EthPooledTransaction, Pool, PoolTransaction, TransactionOrigin, }; - use reth_primitives::{fs, hex, PooledTransactionsElement, MAINNET, U256}; + use reth_fs_util as fs; + use reth_primitives::{hex, PooledTransactionsElement, MAINNET, U256}; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; use reth_tasks::TaskManager; From ac0a005dd0ca7a03ae61af7ab7a3ef4bf07b1f3b Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Fri, 17 May 2024 10:49:52 +0200 Subject: [PATCH 558/700] docs: add comment about potential overflow of base fee per gas (#8280) --- crates/rpc/rpc-types-compat/src/engine/payload.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index f504c169cb7b0..f3478d189ab33 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -42,6 +42,10 @@ pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result Date: Sat, 18 May 2024 23:59:00 +0800 Subject: [PATCH 559/700] fix(make): use the default rust dir for build/build-op (#8259) Signed-off-by: jsvisa --- Makefile | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) 
diff --git a/Makefile b/Makefile index ada2149b81d8c..a6a385a133145 100644 --- a/Makefile +++ b/Makefile @@ -59,11 +59,11 @@ install-op: ## Build and install the op-reth binary under `~/.cargo/bin`. .PHONY: build build: ## Build the reth binary into `target` directory. - $(MAKE) build-native-$(shell rustc -Vv | grep host | cut -d ' ' -f2) + cargo build --bin reth --features "$(FEATURES)" --profile "$(PROFILE)" .PHONY: build-op build-op: ## Build the op-reth binary into `target` directory. - $(MAKE) op-build-native-$(shell rustc -Vv | grep host | cut -d ' ' -f2) + cargo build --bin op-reth --features "optimism,$(FEATURES)" --profile "$(PROFILE)" # Builds the reth binary natively. build-native-%: @@ -303,8 +303,7 @@ db-tools: ## Compile MDBX debugging tools. @echo "Run \"$(DB_TOOLS_DIR)/mdbx_chk\" for the MDBX db file integrity check." .PHONY: update-book-cli -update-book-cli: ## Update book cli documentation. - cargo build --bin reth --features "$(FEATURES)" --profile "$(PROFILE)" +update-book-cli: build ## Update book cli documentation. @echo "Updating book cli doc..." @./book/cli/update.sh $(BUILD_PATH)/$(PROFILE)/reth From 3541edf86235b2b8e1d2a2fcfdaf42c043eb4844 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 19 May 2024 10:39:19 +0200 Subject: [PATCH 560/700] fix(op): reorder supported chain (#8310) --- crates/node-core/src/args/utils.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/node-core/src/args/utils.rs b/crates/node-core/src/args/utils.rs index 5e3b0271c8076..72b84914f5ba0 100644 --- a/crates/node-core/src/args/utils.rs +++ b/crates/node-core/src/args/utils.rs @@ -20,7 +20,7 @@ use reth_primitives::{GOERLI, HOLESKY, MAINNET, SEPOLIA}; #[cfg(feature = "optimism")] /// Chains supported by op-reth. First value should be used as the default. 
-pub const SUPPORTED_CHAINS: &[&str] = &["base", "base-sepolia", "optimism", "optimism-sepolia"]; +pub const SUPPORTED_CHAINS: &[&str] = &["optimism", "optimism-sepolia", "base", "base-sepolia"]; #[cfg(not(feature = "optimism"))] /// Chains supported by reth. First value should be used as the default. pub const SUPPORTED_CHAINS: &[&str] = &["mainnet", "sepolia", "goerli", "holesky", "dev"]; From e45496a2a401e7c8e230d320091b5231bf33f724 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 19 May 2024 08:58:27 +0000 Subject: [PATCH 561/700] chore(deps): weekly `cargo update` (#8311) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 299 ++++++++++++++++++++++++++++------------------------- 1 file changed, 156 insertions(+), 143 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c049d8c960d2e..6c4ed49154a05 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -118,9 +118,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-chains" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe6c2674230e94ea98767550b02853bf7024b46f784827be95acfc5f5f1a445f" +checksum = "03fd095a9d70f4b1c5c102c84a4c782867a5c6416dbf6dcd42a63e7c7a89d3c8" dependencies = [ "alloy-rlp", "arbitrary", @@ -146,7 +146,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#af25c53f99a4549ece47625b7d65f088c3487864" +source = "git+https://github.com/alloy-rs/alloy#092efd72526cc88b08f930a3e67358283ca11eb6" dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-primitives", @@ -158,9 +158,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"545885d9b0b2c30fd344ae291439b4bfe59e48dd62fbc862f8503d98088967dc" +checksum = "8425a283510106b1a6ad25dd4bb648ecde7da3fd2baeb9400a85ad62f51ec90b" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -197,7 +197,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#af25c53f99a4549ece47625b7d65f088c3487864" +source = "git+https://github.com/alloy-rs/alloy#092efd72526cc88b08f930a3e67358283ca11eb6" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -222,7 +222,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#af25c53f99a4549ece47625b7d65f088c3487864" +source = "git+https://github.com/alloy-rs/alloy#092efd72526cc88b08f930a3e67358283ca11eb6" dependencies = [ "alloy-primitives", "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -232,9 +232,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "786689872ec4e7d354810ab0dffd48bb40b838c047522eb031cbd47d15634849" +checksum = "7e30946aa6173020259055a44971f5cf40a7d76c931d209caeb51b333263df4f" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -288,9 +288,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525448f6afc1b70dd0f9d0a8145631bf2f5e434678ab23ab18409ca264cae6b3" +checksum = "db8aa973e647ec336810a9356af8aea787249c9d00b1525359f3db29a68d231b" dependencies = [ "alloy-rlp", "arbitrary", @@ -360,7 +360,7 @@ checksum = "1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -408,7 +408,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy#af25c53f99a4549ece47625b7d65f088c3487864" +source = "git+https://github.com/alloy-rs/alloy#092efd72526cc88b08f930a3e67358283ca11eb6" dependencies = [ "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -490,7 +490,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#af25c53f99a4549ece47625b7d65f088c3487864" +source = "git+https://github.com/alloy-rs/alloy#092efd72526cc88b08f930a3e67358283ca11eb6" dependencies = [ "alloy-primitives", "serde", @@ -529,9 +529,23 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.7.2" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dbd17d67f3e89478c8a634416358e539e577899666c927bc3d2b1328ee9b6ca" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.64", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89c80a2cb97e7aa48611cbb63950336f9824a174cdf670527cc6465078a26ea1" +checksum = "2c6da95adcf4760bb4b108fefa51d50096c5e5fdd29ee72fed3e86ee414f2e34" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", @@ -541,16 +555,16 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58894b58ac50979eeac6249661991ac40b9d541830d9a725f7714cc9ef08c23" +checksum = "32c8da04c1343871fb6ce5a489218f9c85323c8340a36e9106b5fc98d4dd59d5" dependencies = [ "alloy-json-abi", "const-hex", @@ -559,24 +573,24 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.63", + "syn 2.0.64", 
"syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8e71ea68e780cc203919e03f69f59e7afe92d2696fb1dcb6662f61e4031b6" +checksum = "368cae4dc052cad1d8f72eb2ae0c38027116933eeb49213c200a9e9875f208d7" dependencies = [ "winnow 0.6.8", ] [[package]] name = "alloy-sol-types" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399287f68d1081ed8b1f4903c49687658b95b142207d7cb4ae2f4813915343ef" +checksum = "40a64d2d2395c1ac636b62419a7b17ec39031d6b2367e66e9acbf566e6055e9c" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -708,9 +722,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.83" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25bdb32cbbdce2b519a9cd7df3a678443100e265d5e25ca763b7572a5104f5f3" +checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" [[package]] name = "aquamarine" @@ -723,7 +737,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -896,12 +910,11 @@ dependencies = [ [[package]] name = "async-channel" -version = "2.2.1" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136d4d23bcc79e27423727b36823d86233aad06dfea531837b038394d11e9928" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" dependencies = [ "concurrent-queue", - "event-listener 5.3.0", "event-listener-strategy 0.5.2", "futures-core", "pin-project-lite", @@ -967,7 +980,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -984,7 +997,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.63", + "syn 2.0.64", ] [[package]] @@ -1022,7 +1035,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -1151,7 +1164,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.63", + "syn 2.0.64", "which", ] @@ -1247,7 +1260,7 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "495f7104e962b7356f0aeb34247aca1fe7d2e783b346582db7f2904cb5717e88" dependencies = [ - "async-channel 2.2.1", + "async-channel 2.3.1", "async-lock", "async-task", "futures-io", @@ -1362,7 +1375,7 @@ checksum = "6be9c93793b60dac381af475b98634d4b451e28336e72218cad9a20176218dbc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", "synstructure", ] @@ -1456,9 +1469,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15" +checksum = "78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5" dependencies = [ "bytemuck_derive", ] @@ -1471,7 +1484,7 @@ checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -1505,9 +1518,9 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +checksum = "e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239" dependencies = [ "serde", ] @@ -1685,7 +1698,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -1821,9 +1834,9 @@ dependencies = [ [[package]] name = "const-hex" -version 
= "1.11.3" +version = "1.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ba00838774b4ab0233e355d26710fbfc8327a05c017f6dc4873f876d1f79f78" +checksum = "70ff96486ccc291d36a958107caf2c0af8c78c0af7d31ae2f35ce055130de1a6" dependencies = [ "cfg-if", "cpufeatures", @@ -2127,7 +2140,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -2237,12 +2250,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391" +checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" dependencies = [ - "darling_core 0.20.8", - "darling_macro 0.20.8", + "darling_core 0.20.9", + "darling_macro 0.20.9", ] [[package]] @@ -2275,16 +2288,16 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f" +checksum = "622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", - "syn 2.0.63", + "strsim 0.11.1", + "syn 2.0.64", ] [[package]] @@ -2311,13 +2324,13 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" +checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" dependencies = [ - "darling_core 0.20.8", + "darling_core 0.20.9", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -2434,7 +2447,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -2586,7 +2599,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -2698,9 +2711,9 @@ dependencies = [ [[package]] name = "either" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" +checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" [[package]] name = "elliptic-curve" @@ -2783,7 +2796,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -2796,7 +2809,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -2807,7 +2820,7 @@ checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -3077,9 +3090,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38793c55593b33412e3ae40c2c9781ffaa6f438f6f8c10f24e71846fbd7ae01e" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "findshlibs" @@ -3240,7 +3253,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -3492,9 +3505,9 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ "hashbrown 0.14.5", ] @@ 
-3850,7 +3863,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -4000,7 +4013,7 @@ checksum = "d2abdd3a62551e8337af119c5899e600ca0c88ec8f23a46c60ba216c803dcf1a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -4186,9 +4199,9 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ "cfg-if", ] @@ -4414,7 +4427,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -4557,9 +4570,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.154" +version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libffi" @@ -4810,9 +4823,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "litemap" @@ -4977,7 +4990,7 @@ checksum = "38b4faf00617defe497754acde3024865bc143d44a86799b24e191ecff91354f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -5058,9 +5071,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.2" +version = "0.7.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" dependencies = [ "adler", ] @@ -5101,7 +5114,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -5387,7 +5400,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -5653,7 +5666,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -5682,7 +5695,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -5699,9 +5712,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "piper" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +checksum = "464db0c665917b13ebb5d453ccdec4add5658ee1adc7affc7677615356a8afaf" dependencies = [ "atomic-waker", "fastrand 2.1.0", @@ -5873,7 +5886,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -6600,7 +6613,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -7138,7 +7151,7 @@ dependencies = [ "quote", "regex", "serial_test", - "syn 2.0.63", + "syn 2.0.64", "trybuild", ] @@ -8328,7 +8341,7 @@ dependencies = [ "bitflags 2.5.0", "fallible-iterator", "fallible-streaming-iterator", - "hashlink 0.9.0", + "hashlink 0.9.1", "libsqlite3-sys", "smallvec", ] @@ -8403,7 +8416,7 @@ dependencies = [ "log", "ring 0.17.8", "rustls-pki-types", - 
"rustls-webpki 0.102.3", + "rustls-webpki 0.102.4", "subtle", "zeroize", ] @@ -8470,9 +8483,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.3" +version = "0.102.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3bce581c0dd41bce533ce695a1437fa16a7ab5ac3ccfa99fe1a620a7885eabf" +checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -8481,9 +8494,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "092474d1a01ea8278f69e6a358998405fae5b8b963ddaeb2b0b04a128bf1dfb0" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] name = "rusty-fork" @@ -8549,9 +8562,9 @@ dependencies = [ [[package]] name = "schnellru" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b0cf7da6fc4477944d5529807234f66802fcb618fc62b9c05bedca7f9be6c43" +checksum = "c9a8ef13a93c54d20580de1e5c413e624e53121d42fc7e2c11d10ef7f8b02367" dependencies = [ "ahash", "cfg-if", @@ -8691,9 +8704,9 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.201" +version = "1.0.202" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "780f1cebed1629e4753a1a38a3c72d30b97ec044f0aef68cb26650a3c5cf363c" +checksum = "226b61a0d411b2ba5ff6d7f73a476ac4f8bb900373459cd00fab8512828ba395" dependencies = [ "serde_derive", ] @@ -8709,13 +8722,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.201" +version = "1.0.202" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e405930b9796f1c00bee880d03fc7e0bb4b9a11afc776885ffe84320da2865" +checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -8743,9 +8756,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" dependencies = [ "serde", ] @@ -8786,10 +8799,10 @@ version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65569b702f41443e8bc8bbb1c5779bd0450bbe723b56198980e80ec45780bce2" dependencies = [ - "darling 0.20.8", + "darling 0.20.9", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -8814,7 +8827,7 @@ checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -9098,7 +9111,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ff9eaf853dec4c8802325d8b6d3dffa86cc707fd7a1a4cdbf416e13b061787a" dependencies = [ "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -9156,7 +9169,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -9224,9 +9237,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.63" +version = "2.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf5be731623ca1a1fb7d8be6f261a3be6d3e2337b8a1f97be944d020c8fcb704" +checksum = "7ad3dee41f36859875573074334c200d1add8e4a87bb37113ebd31d926b7b11f" dependencies = [ "proc-macro2", "quote", @@ -9235,14 +9248,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa0cefd02f532035d83cfec82647c6eb53140b0485220760e669f4bad489e36" +checksum = "b8db114c44cf843a8bacd37a146e37987a0b823a0e8bc4fdc610c9c72ab397a5" dependencies = [ 
"paste", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -9259,7 +9272,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -9345,13 +9358,13 @@ version = "5.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abe1689311f7edc6bab4033a259a3c37510b41063e4b01e57970105c0c764428" dependencies = [ - "darling 0.20.8", + "darling 0.20.9", "itertools 0.12.1", "once_cell", "prettyplease", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -9375,22 +9388,22 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.60" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "579e9083ca58dd9dcf91a9923bb9054071b9ebbd800b342194c9feb0ee89fc18" +checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.60" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2470041c06ec3ac1ab38d0356a6119054dedaea53e12fbefc0de730a1c08524" +checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -9554,7 +9567,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -9607,21 +9620,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.12" +version = "0.8.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3" +checksum = "a4e43f8cc456c9704c851ae29c67e17ef65d2c30017c17a9765b89c382dc8bba" dependencies = [ "serde", 
"serde_spanned", "toml_datetime", - "toml_edit 0.22.12", + "toml_edit 0.22.13", ] [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" dependencies = [ "serde", ] @@ -9639,9 +9652,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.12" +version = "0.22.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef" +checksum = "c127785850e8c20836d49732ae6abfa47616e60bf9d9f57c43c250361a9db96c" dependencies = [ "indexmap 2.2.6", "serde", @@ -9745,7 +9758,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -9944,9 +9957,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.95" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ddb747392ea12569d501a5bbca08852e4c8cd88b92566074b2243b8846f09e6" +checksum = "33a5f13f11071020bb12de7a16b925d2d58636175c20c11dc5f96cb64bb6c9b3" dependencies = [ "glob", "serde", @@ -10161,9 +10174,9 @@ dependencies = [ [[package]] name = "waker-fn" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" +checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" [[package]] name = "walkdir" @@ -10217,7 +10230,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", "wasm-bindgen-shared", ] @@ -10251,7 +10264,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -10604,7 +10617,7 @@ checksum = "9e6936f0cce458098a201c245a11bef556c6a0181129c7034d10d76d1ec3a2b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", "synstructure", ] @@ -10625,7 +10638,7 @@ checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -10645,7 +10658,7 @@ checksum = "e6a647510471d372f2e6c2e6b7219e44d8c574d24fdc11c610a61455782f18c3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", "synstructure", ] @@ -10666,7 +10679,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] @@ -10688,7 +10701,7 @@ checksum = "7b4e5997cbf58990550ef1f0e5124a05e47e1ebd33a84af25739be6031a62c20" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.64", ] [[package]] From bcfa5780e8db7afddf35b31819b570d01876b878 Mon Sep 17 00:00:00 2001 From: Brecht Devos Date: Sun, 19 May 2024 11:57:44 +0200 Subject: [PATCH 562/700] fix: calculate parity of legacy EIP155 txs correctly in alloy compat (#8302) --- crates/primitives/src/alloy_compat.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index 6fdd477dd3bb4..5e2bff8173cae 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -28,7 +28,7 @@ impl TryFrom for Block { s: signature.s, odd_y_parity: signature .y_parity - .unwrap_or(alloy_rpc_types::Parity(false)) + .unwrap_or_else(|| alloy_rpc_types::Parity(!signature.v.bit(0))) .0, }, )) From 4b2d5eb83b7732ea61ad2f96a2a4fbf940a8126f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 19 May 2024 13:20:24 +0200 Subject: [PATCH 563/700] chore: 
bump alloy (#8312) --- Cargo.lock | 114 +++++++++++++++++++++++++++-------------------------- Cargo.toml | 28 ++++++------- 2 files changed, 72 insertions(+), 70 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6c4ed49154a05..8a8c769bb311b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -133,12 +133,12 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "c-kzg", "serde", ] @@ -177,11 +177,11 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "arbitrary", "c-kzg", "derive_more", @@ -211,10 +211,10 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "serde", "serde_json", ] 
@@ -245,7 +245,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-primitives", "serde", @@ -257,13 +257,13 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-signer", "alloy-sol-types", "async-trait", @@ -274,9 +274,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-primitives", "k256", "serde_json", @@ -316,14 +316,14 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" +source = 
"git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-rpc-types-trace", "alloy-transport", "alloy-transport-http", @@ -334,6 +334,7 @@ dependencies = [ "futures", "futures-utils-wasm", "lru", + "pin-project", "reqwest 0.12.4", "serde_json", "tokio", @@ -366,7 +367,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -386,14 +387,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-serde 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-sol-types", "arbitrary", "itertools 0.12.1", @@ -426,36 +427,37 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "serde", ] [[package]] name = "alloy-rpc-types-beacon" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-primitives", "alloy-rpc-types-engine", "serde", "serde_with", + "thiserror", ] [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + 
"alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", @@ -468,11 +470,11 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "serde", "serde_json", ] @@ -480,7 +482,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-primitives", "serde", @@ -500,7 +502,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-primitives", "async-trait", @@ -513,9 +515,9 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-network", 
"alloy-primitives", "alloy-signer", @@ -602,7 +604,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -620,7 +622,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=dd7a999#dd7a999d9efe259c47a34dde046952de795a8f6a" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -2989,7 +2991,7 @@ dependencies = [ name = "exex-rollup" version = "0.0.0" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-rlp", "alloy-sol-types", "eyre", @@ -6591,8 +6593,8 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.7" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-primitives", "arbitrary", "bytes", @@ -6798,9 +6800,9 @@ dependencies = [ name = "reth-e2e-test-utils" version = "0.2.0-beta.7" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-network", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-signer", "alloy-signer-wallet", "eyre", @@ -7004,7 +7006,7 @@ 
dependencies = [ name = "reth-evm-ethereum" version = "0.2.0-beta.7" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "reth-evm", "reth-interfaces", "reth-primitives", @@ -7565,11 +7567,11 @@ name = "reth-primitives" version = "0.2.0-beta.7" dependencies = [ "alloy-chains", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-trie", "arbitrary", "assert_matches", @@ -7669,7 +7671,7 @@ dependencies = [ name = "reth-revm" version = "0.2.0-beta.7" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "reth-consensus-common", "reth-interfaces", "reth-primitives", @@ -7855,7 +7857,7 @@ name = "reth-rpc-types" version = "0.2.0-beta.7" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-rpc-types-anvil", "alloy-rpc-types-beacon", "alloy-rpc-types-engine", @@ -7880,7 +7882,7 @@ name = "reth-rpc-types-compat" version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "reth-primitives", "reth-rpc-types", "serde_json", @@ -7987,7 +7989,7 @@ dependencies = [ name = "reth-testing-utils" version = "0.2.0-beta.7" 
dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "reth-primitives", "secp256k1 0.28.2", ] @@ -8122,10 +8124,10 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=21a2db5#21a2db5a3a828a35e82b116e5d046a9efaca1449" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=5a4fd5e#5a4fd5e394d8bdf1337ac076d0b5fde4f2dd617c" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=dd7a999)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", diff --git a/Cargo.toml b/Cargo.toml index c81b76a7e7c3a..2759aff2a76a2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -288,7 +288,7 @@ revm = { version = "9.0.0", features = [ revm-primitives = { version = "4.0.0", features = [ "std", ], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "21a2db5" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "5a4fd5e" } # eth alloy-chains = "0.1.15" @@ -297,21 +297,21 @@ alloy-dyn-abi = "0.7.2" alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" alloy-trie = "0.4.0" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } -alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } -alloy-provider 
= { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } +alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "dd7a999" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "dd7a999" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "64feb9b" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } # misc auto_impl = "1" From 3daec1d9b97c2d654c7f84e8521ac552dcaf4928 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 19 May 2024 14:31:56 +0200 Subject: [PATCH 564/700] feat: remove relay types (#8314) --- Cargo.lock | 62 --- crates/rpc/rpc-api/src/validation.rs | 4 +- 
crates/rpc/rpc-types/Cargo.toml | 5 - crates/rpc/rpc-types/src/lib.rs | 4 +- crates/rpc/rpc-types/src/relay/error.rs | 40 -- crates/rpc/rpc-types/src/relay/mod.rs | 391 ------------------ .../relay/signed_bid_submission_capella.ssz | Bin 352239 -> 0 bytes .../test_data/relay/single_payload.json | 246 ----------- 8 files changed, 6 insertions(+), 746 deletions(-) delete mode 100644 crates/rpc/rpc-types/src/relay/error.rs delete mode 100644 crates/rpc/rpc-types/src/relay/mod.rs delete mode 100644 crates/rpc/rpc-types/test_data/relay/signed_bid_submission_capella.ssz delete mode 100644 crates/rpc/rpc-types/test_data/relay/single_payload.json diff --git a/Cargo.lock b/Cargo.lock index 8a8c769bb311b..7097fb1e769f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -185,8 +185,6 @@ dependencies = [ "arbitrary", "c-kzg", "derive_more", - "ethereum_ssz", - "ethereum_ssz_derive", "once_cell", "proptest", "proptest-derive", @@ -458,8 +456,6 @@ dependencies = [ "alloy-rlp", "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", - "ethereum_ssz", - "ethereum_ssz_derive", "jsonrpsee-types", "jsonwebtoken 9.3.0", "rand 0.8.5", @@ -2240,16 +2236,6 @@ dependencies = [ "darling_macro 0.10.2", ] -[[package]] -name = "darling" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" -dependencies = [ - "darling_core 0.13.4", - "darling_macro 0.13.4", -] - [[package]] name = "darling" version = "0.20.9" @@ -2274,20 +2260,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "darling_core" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.10.0", - "syn 1.0.109", -] - [[package]] name = 
"darling_core" version = "0.20.9" @@ -2313,17 +2285,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "darling_macro" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" -dependencies = [ - "darling_core 0.13.4", - "quote", - "syn 1.0.109", -] - [[package]] name = "darling_macro" version = "0.20.9" @@ -2879,18 +2840,6 @@ dependencies = [ "smallvec", ] -[[package]] -name = "ethereum_ssz_derive" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6085d7fd3cf84bd2b8fec150d54c8467fb491d8db9c460607c5534f653a0ee38" -dependencies = [ - "darling 0.13.4", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "event-listener" version = "2.5.3" @@ -7864,17 +7813,13 @@ dependencies = [ "alloy-rpc-types-trace", "arbitrary", "bytes", - "ethereum_ssz", - "ethereum_ssz_derive", "jsonrpsee-types", "proptest", "proptest-derive", "rand 0.8.5", "serde", "serde_json", - "serde_with", "similar-asserts", - "thiserror", ] [[package]] @@ -8313,7 +8258,6 @@ dependencies = [ "ark-ff 0.3.0", "ark-ff 0.4.2", "bytes", - "ethereum_ssz", "fastrlp", "num-bigint", "num-traits", @@ -9140,12 +9084,6 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6446ced80d6c486436db5c078dde11a9f73d42b57fb273121e160b84f63d894c" -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - [[package]] name = "strsim" version = "0.11.1" diff --git a/crates/rpc/rpc-api/src/validation.rs b/crates/rpc/rpc-api/src/validation.rs index deff73e47cf89..d66cb0efa6bb7 100644 --- a/crates/rpc/rpc-api/src/validation.rs +++ b/crates/rpc/rpc-api/src/validation.rs @@ -1,7 +1,9 @@ //! API for block submission validation. 
use jsonrpsee::proc_macros::rpc; -use reth_rpc_types::relay::{BuilderBlockValidationRequest, BuilderBlockValidationRequestV2}; +use reth_rpc_types::beacon::relay::{ + BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, +}; /// Block validation rpc interface. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "flashbots"))] diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml index 83ad91f5c4586..7ef01886850d1 100644 --- a/crates/rpc/rpc-types/Cargo.toml +++ b/crates/rpc/rpc-types/Cargo.toml @@ -20,20 +20,15 @@ alloy-rpc-types-anvil.workspace = true alloy-rpc-types-trace.workspace = true alloy-rpc-types-beacon.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["jsonrpsee-types"] } -ethereum_ssz_derive = { version = "0.5", optional = true } -ethereum_ssz = { version = "0.5", optional = true } # misc -thiserror.workspace = true serde = { workspace = true, features = ["derive"] } -serde_with = "3.3" serde_json.workspace = true jsonrpsee-types = { workspace = true, optional = true } [features] default = ["jsonrpsee-types"] arbitrary = ["alloy-primitives/arbitrary", "alloy-rpc-types/arbitrary"] -ssz = ["dep:ethereum_ssz" ,"dep:ethereum_ssz_derive", "alloy-primitives/ssz", "alloy-rpc-types/ssz", "alloy-rpc-types-engine/ssz"] [dev-dependencies] diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs index 5966a9b72c6ea..442f00769ec8d 100644 --- a/crates/rpc/rpc-types/src/lib.rs +++ b/crates/rpc/rpc-types/src/lib.rs @@ -14,7 +14,6 @@ mod eth; mod mev; mod net; mod peer; -pub mod relay; mod rpc; // re-export for convenience @@ -31,6 +30,9 @@ pub mod trace { // Anvil specific rpc types coming from alloy. pub use alloy_rpc_types_anvil as anvil; +// re-export beacon +pub use alloy_rpc_types_beacon as beacon; + // Ethereum specific rpc types related to typed transaction requests and the engine API. 
pub use eth::{ engine, diff --git a/crates/rpc/rpc-types/src/relay/error.rs b/crates/rpc/rpc-types/src/relay/error.rs deleted file mode 100644 index 92c51049801a4..0000000000000 --- a/crates/rpc/rpc-types/src/relay/error.rs +++ /dev/null @@ -1,40 +0,0 @@ -//! Error types for the relay. - -use alloy_primitives::B256; - -/// Error thrown by the `validateBuilderSubmission` endpoints if the message differs from payload. -#[derive(Debug, thiserror::Error)] -pub enum ValidateBuilderSubmissionEqualityError { - /// Thrown if parent hash mismatches - #[error("incorrect ParentHash {actual}, expected {expected}")] - IncorrectParentHash { - /// The expected parent hash - expected: B256, - /// The actual parent hash - actual: B256, - }, - /// Thrown if block hash mismatches - #[error("incorrect BlockHash {actual}, expected {expected}")] - IncorrectBlockHash { - /// The expected block hash - expected: B256, - /// The actual block hash - actual: B256, - }, - /// Thrown if block hash mismatches - #[error("incorrect GasLimit {actual}, expected {expected}")] - IncorrectGasLimit { - /// The expected gas limit - expected: u64, - /// The actual gas limit - actual: B256, - }, - /// Thrown if block hash mismatches - #[error("incorrect GasUsed {actual}, expected {expected}")] - IncorrectGasUsed { - /// The expected gas used - expected: u64, - /// The actual gas used - actual: B256, - }, -} diff --git a/crates/rpc/rpc-types/src/relay/mod.rs b/crates/rpc/rpc-types/src/relay/mod.rs deleted file mode 100644 index 2a46d7ffbe45a..0000000000000 --- a/crates/rpc/rpc-types/src/relay/mod.rs +++ /dev/null @@ -1,391 +0,0 @@ -//! 
Relay API bindings: - -use crate::engine::{ - BlobsBundleV1, ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, -}; -use alloy_primitives::{Address, B256, U256}; -use alloy_rpc_types_beacon::{BlsPublicKey, BlsSignature}; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DisplayFromStr}; - -pub mod error; - -/// Represents an entry of the `/relay/v1/builder/validators` endpoint -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Validator { - /// The slot number for the validator entry. - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - /// The index of the validator. - #[serde_as(as = "DisplayFromStr")] - pub validator_index: u64, - /// Details of the validator registration. - pub entry: ValidatorRegistration, -} - -/// Details of a validator registration. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ValidatorRegistration { - /// The registration message. - pub message: ValidatorRegistrationMessage, - /// The signature for the registration. - pub signature: BlsSignature, -} - -/// Represents the message of a validator registration. -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ValidatorRegistrationMessage { - /// The fee recipient's address. - #[serde(rename = "fee_recipient")] - pub fee_recipient: Address, - - /// The gas limit for the registration. - #[serde_as(as = "DisplayFromStr")] - pub gas_limit: u64, - - /// The timestamp of the registration. - #[serde_as(as = "DisplayFromStr")] - pub timestamp: u64, - - /// The public key of the validator. - pub pubkey: BlsPublicKey, -} - -/// Represents public information about a block sent by a builder to the relay, or from the relay to -/// the proposer. Depending on the context, value might represent the claimed value by a builder -/// (not necessarily a value confirmed by the relay). 
-#[serde_as] -#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "ssz", derive(ssz_derive::Encode, ssz_derive::Decode))] -pub struct BidTrace { - /// The slot associated with the block. - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - /// The parent hash of the block. - pub parent_hash: B256, - /// The hash of the block. - pub block_hash: B256, - /// The public key of the builder. - pub builder_pubkey: BlsPublicKey, - /// The public key of the proposer. - pub proposer_pubkey: BlsPublicKey, - /// The recipient of the proposer's fee. - pub proposer_fee_recipient: Address, - /// The gas limit associated with the block. - #[serde_as(as = "DisplayFromStr")] - pub gas_limit: u64, - /// The gas used within the block. - #[serde_as(as = "DisplayFromStr")] - pub gas_used: u64, - /// The value associated with the block. - #[serde_as(as = "DisplayFromStr")] - pub value: U256, -} - -/// SignedBidTrace is a BidTrace with a signature -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "ssz", derive(ssz_derive::Encode, ssz_derive::Decode))] -pub struct SignedBidTrace { - /// The BidTrace message associated with the submission. - pub message: BidTrace, - /// The signature associated with the submission. - pub signature: BlsSignature, -} - -/// Submission for the `/relay/v1/builder/blocks` endpoint (Bellatrix). -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(deny_unknown_fields)] -#[cfg_attr(feature = "ssz", derive(ssz_derive::Decode, ssz_derive::Encode))] -pub struct SignedBidSubmissionV1 { - /// The BidTrace message associated with the submission. - pub message: BidTrace, - /// The execution payload for the submission. - #[serde(with = "alloy_rpc_types_beacon::payload::beacon_payload_v1")] - pub execution_payload: ExecutionPayloadV1, - /// The signature associated with the submission. 
- pub signature: BlsSignature, -} - -/// Submission for the `/relay/v1/builder/blocks` endpoint (Capella). -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(deny_unknown_fields)] -#[cfg_attr(feature = "ssz", derive(ssz_derive::Decode, ssz_derive::Encode))] -pub struct SignedBidSubmissionV2 { - /// The BidTrace message associated with the submission. - pub message: BidTrace, - /// The execution payload for the submission. - #[serde(with = "alloy_rpc_types_beacon::payload::beacon_payload_v2")] - pub execution_payload: ExecutionPayloadV2, - /// The signature associated with the submission. - pub signature: BlsSignature, -} - -/// Submission for the `/relay/v1/builder/blocks` endpoint (Deneb). -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(deny_unknown_fields)] -#[cfg_attr(feature = "ssz", derive(ssz_derive::Decode, ssz_derive::Encode))] -pub struct SignedBidSubmissionV3 { - /// The BidTrace message associated with the submission. - pub message: BidTrace, - /// The execution payload for the submission. - #[serde(with = "alloy_rpc_types_beacon::payload::beacon_payload_v3")] - pub execution_payload: ExecutionPayloadV3, - /// The Deneb block bundle for this bid. - pub blobs_bundle: BlobsBundleV1, - /// The signature associated with the submission. - pub signature: BlsSignature, -} - -/// SubmitBlockRequest is the request from the builder to submit a block. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct SubmitBlockRequest { - /// The BidTrace message associated with the block submission. - pub message: BidTrace, - /// The execution payload for the block submission. - #[serde(with = "alloy_rpc_types_beacon::payload::beacon_payload")] - pub execution_payload: ExecutionPayload, - /// The signature associated with the block submission. 
- pub signature: BlsSignature, -} - -/// A Request to validate a [SubmitBlockRequest] -#[serde_as] -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct BuilderBlockValidationRequest { - /// The [SubmitBlockRequest] data to be validated. - #[serde(flatten)] - pub request: SubmitBlockRequest, - /// The registered gas limit for the validation request. - #[serde_as(as = "DisplayFromStr")] - pub registered_gas_limit: u64, -} - -/// A Request to validate a [SubmitBlockRequest] -#[serde_as] -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct BuilderBlockValidationRequestV2 { - /// The [SubmitBlockRequest] data to be validated. - #[serde(flatten)] - pub request: SubmitBlockRequest, - /// The registered gas limit for the validation request. - #[serde_as(as = "DisplayFromStr")] - pub registered_gas_limit: u64, - /// The withdrawals root for the validation request. - pub withdrawals_root: B256, -} - -/// Query for the GET `/relay/v1/data/bidtraces/proposer_payload_delivered` -/// -/// Provides [BidTrace]s for payloads that were delivered to proposers. 
-#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ProposerPayloadsDeliveredQuery { - /// A specific slot - #[serde(skip_serializing_if = "Option::is_none")] - pub slot: Option, - /// Maximum number of entries (200 max) - #[serde(skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// Search for a specific blockhash - #[serde(skip_serializing_if = "Option::is_none")] - pub block_hash: Option, - /// Search for a specific EL block number - #[serde(skip_serializing_if = "Option::is_none")] - pub block_number: Option, - /// Filter results by a proposer public key - #[serde(skip_serializing_if = "Option::is_none")] - pub proposer_pubkey: Option, - /// Filter results by a builder public key - #[serde(skip_serializing_if = "Option::is_none")] - pub builder_pubkey: Option, - /// How to order results - #[serde(skip_serializing_if = "Option::is_none")] - pub order_by: Option, -} - -impl ProposerPayloadsDeliveredQuery { - /// Sets the specific slot - pub fn slot(mut self, slot: u64) -> Self { - self.slot = Some(slot); - self - } - - /// Sets the maximum number of entries (200 max) - pub fn limit(mut self, limit: u64) -> Self { - self.limit = Some(limit); - self - } - - /// Sets the specific blockhash - pub fn block_hash(mut self, block_hash: B256) -> Self { - self.block_hash = Some(block_hash); - self - } - - /// Sets the specific EL block number - pub fn block_number(mut self, block_number: u64) -> Self { - self.block_number = Some(block_number); - self - } - - /// Sets the proposer public key - pub fn proposer_pubkey(mut self, proposer_pubkey: BlsPublicKey) -> Self { - self.proposer_pubkey = Some(proposer_pubkey); - self - } - - /// Sets the builder public key - pub fn builder_pubkey(mut self, builder_pubkey: BlsPublicKey) -> Self { - self.builder_pubkey = Some(builder_pubkey); - self - } - - /// Configures how to order results - pub fn order_by(mut self, order_by: OrderBy) -> Self { - self.order_by = Some(order_by); - self - } 
- - /// Order results by descending value (highest value first) - pub fn order_by_desc(self) -> Self { - self.order_by(OrderBy::Desc) - } - - /// Order results by ascending value (lowest value first) - pub fn order_by_asc(self) -> Self { - self.order_by(OrderBy::Asc) - } -} - -/// OrderBy : Sort results in either ascending or descending values. * `-value` - descending value -/// (highest value first) * `value` - ascending value (lowest value first) Sort results in either -/// ascending or descending values. * `-value` - descending value (highest value first) * `value` -/// - ascending value (lowest value first) -#[derive( - Default, Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, -)] -pub enum OrderBy { - /// Sort result by descending value (highest value first) - #[default] - #[serde(rename = "-value")] - Desc, - /// Sort result by ascending value (lowest value first) - #[serde(rename = "value")] - Asc, -} - -/// Query for the GET `/relay/v1/data/bidtraces/builder_blocks_received` endpoint. -/// This endpoint provides BidTraces for builder block submissions that match the query and were -/// verified successfully. -#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct BuilderBlocksReceivedQuery { - /// A specific slot - #[serde(skip_serializing_if = "Option::is_none")] - pub slot: Option, - /// Maximum number of entries (200 max) - #[serde(skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// Search for a specific blockhash - #[serde(skip_serializing_if = "Option::is_none")] - pub block_hash: Option, - /// Search for a specific EL block number - #[serde(skip_serializing_if = "Option::is_none")] - pub block_number: Option, - /// Search for a specific builder public key. 
- #[serde(skip_serializing_if = "Option::is_none")] - pub builder_pubkey: Option, -} - -impl BuilderBlocksReceivedQuery { - /// Sets the specific slot - pub fn slot(mut self, slot: u64) -> Self { - self.slot = Some(slot); - self - } - - /// Sets the maximum number of entries (200 max) - pub fn limit(mut self, limit: u64) -> Self { - self.limit = Some(limit); - self - } - - /// Sets the specific blockhash - pub fn block_hash(mut self, block_hash: B256) -> Self { - self.block_hash = Some(block_hash); - self - } - - /// Sets the specific EL block number - pub fn block_number(mut self, block_number: u64) -> Self { - self.block_number = Some(block_number); - self - } - - /// Sets the specific builder public key - pub fn builder_pubkey(mut self, builder_pubkey: BlsPublicKey) -> Self { - self.builder_pubkey = Some(builder_pubkey); - self - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn serde_validator() { - let s = r#"[{"slot":"7689441","validator_index":"748813","entry":{"message":{"fee_recipient":"0xbe87be8ac54fb2a4ecb8d7935d0fc80f72c28f9f","gas_limit":"30000000","timestamp":"1688333351","pubkey":"0xb56ff6826cfa6b82fc6c2974988b1576fe5c34bd6c672f911e1d3eec1134822581d6d68f68992ad1f945b0c80468d941"},"signature":"0x8b42028d248f5a2fd41ab425408470ffde1d941ee83db3d9bde583feb22413608673dc27930383893410ef05e52ed8cf0e0291d8ed111189a065f9598176d1c51cabeaba8f628b2f92626bb58d2068292eb7682673a31473d0cdbe278e67c723"}},{"slot":"7689443","validator_index":"503252","entry":{"message":{"fee_recipient":"0x388c818ca8b9251b393131c08a736a67ccb19297","gas_limit":"30000000","timestamp":"1680328764","pubkey":"0xa8ac80cd889110f407fd6e42d08c71faf158aa917c4c5f5d65bfcea7c4ae5231df9e14721c1980e18233fb1e79316cf6"},"signature":"0xa3e0e3190acc0be2c3a5f9c75cea5d79bebbb955f3f477794e8d7c0cbf73cd61fa0b0c3bfeb5cd9ba53a60c7bf9958640a63ecbd355a8ddb717f2ac8f413dbe4865cbae46291cb590410fa51471df9eaccb3602718df4c17f8eee750df8b5491"}},{"slot":"7689445","validator_index":"792322","entry":{"message
":{"fee_recipient":"0x388c818ca8b9251b393131c08a736a67ccb19297","gas_limit":"30000000","timestamp":"1691185175","pubkey":"0xa25455216e254c96d7ddfc029da033f6de715e91ab09b2fb6a97613b13a8e9189c43f0eaabd671b3c9aab051f6ff0630"},"signature":"0xb63ea72aac017cdfaa7b0fb9cbaffbe3c72b8ce2dd986b6e21834fc2f0accf9184e301de6c5f066bb7075d3f5d020a9e169dee80998685e20553c197ab27ef4d7b0a19f062796a228308ef33a01d0a08dbe35b02e7dca762e247c84e5ea9d170"}},{"slot":"7689446","validator_index":"888141","entry":{"message":{"fee_recipient":"0x73b9067aeeeab2e6263951ad9637971145566ba6","gas_limit":"30000000","timestamp":"1692606575","pubkey":"0x921b0309fffd798599f9564103f9ab34ebe2a3ea17ab39439e5e033ec5affb925a5ad65c0863a1d0f84365e4c1eec952"},"signature":"0x8a88068c7926d81348b773977050d093450b673af1762c0f0b416e4fcc76f277f2f117138780e62e49f5ac02d13ba5ed0e2d76df363003c4ff7ad40713ed3ef6aa3eb57580c8a3e1e6fe7245db700e541d1f2a7e88ec426c3fba82fa91b647a4"}},{"slot":"7689451","validator_index":"336979","entry":{"message":{"fee_recipient":"0xebec795c9c8bbd61ffc14a6662944748f299cacf","gas_limit":"30000000","timestamp":"1680621366","pubkey":"0x8b46eb0f36a51fcfab66910380be4d68c6323291eada9f68ad628a798a9c21ed858d62f1b15c08484b13c9cdcd0fc657"},"signature":"0x910afc415aed14a0c49cc1c2d29743018a69e517de84cee9e4ff2ea21a0d3b95c25b0cd59b8a2539fbe8e73d5e53963a0afbcf9f86db4de4ba03aa1f80d9dc1ecca18e597829e5e9269ce08b99ff347eba43c0d9c87c174f3a30422d4de800c8"}},{"slot":"7689452","validator_index":"390650","entry":{"message":{"fee_recipient":"0x388c818ca8b9251b393131c08a736a67ccb19297","gas_limit":"30000000","timestamp":"1678460723","pubkey":"0x854464f0b798d1510a0f76a77190f15e9e67d5ac348647f5fe906539cf4ff7101fb1463f4c408b72e6fae9cfbd21ffd3"},"signature":"0xb5c3aa515cdf723f03fafd092150d6fc5453f6bcec873194927f64f65aa96241f4c5ed417e0676163c5a07af0d63f83811268096e520af3b6a5d5031e619609a0999efc03cc94a30a1175e5e5a52c66d868ebb527669be27a7b81920e44c511a"}},{"slot":"7689453","validator_index":"316626","entry":{"message":{"fee_recipien
t":"0x388c818ca8b9251b393131c08a736a67ccb19297","gas_limit":"30000000","timestamp":"1680791443","pubkey":"0xa7a4ecdc53283698af917eaf0ba68e350f5cd136c2b12fa10e59f1e1fd15130da1889a28975777887d84638c85e9036c"},"signature":"0xa3f7604dafd63d9a22d66c228d7d90e83a69d9fa4b920dceeb2e7d43ef49307645725f7c4890fefce18ba41d36b4d23c09cef5109827c7fb3bc78f8526ba0bfeceb950a6f0b5d5d8ad1c8dc740267f053b4f9113141c27e1528c8557b9175e3f"}},{"slot":"7689455","validator_index":"733684","entry":{"message":{"fee_recipient":"0x388c818ca8b9251b393131c08a736a67ccb19297","gas_limit":"30000000","timestamp":"1688895576","pubkey":"0xb2731b8efe43d723c4b621bd23e48dd00e1fe7816414e708f3607da6f17f5ba5c172ceac005591cb3e28320ae6aa81cf"},"signature":"0x85c8fd5201705413e004b0b4ef773c28d95864deab2e7698da6ea8b27c46867d03e50dae4ad523cebc1ea6205b7925810347e14f8db5396d11200c0cd099faefe254bc2844b29bf15d4d62e15f876f08ee53e2cd33ceee698d69f4f70e8eac82"}},{"slot":"7689457","validator_index":"950865","entry":{"message":{"fee_recipient":"0x8b733fe59d1190b3efa5be3f21a574ef2ab0c62b","gas_limit":"30000000","timestamp":"1696575191","pubkey":"0xa7c8fbb503de34fb92d43490533c35f0418a52ff5834462213950217b592f975caa3ac1daa3f1cdd89362d6f48ef46c1"},"signature":"0x9671727de015aa343db8a8068e27b555b59b6dcfc9b7f8b47bce820fe60cd1179dfdfc91270918edf40a918b513e5a16069e23aaf1cdd37fce0d63e3ade7ca9270ed4a64f70eb64915791e47074bf76aa3225ebd727336444b44826f2cf2a002"}},{"slot":"7689459","validator_index":"322181","entry":{"message":{"fee_recipient":"0x388c818ca8b9251b393131c08a736a67ccb19297","gas_limit":"30000000","timestamp":"1677755315","pubkey":"0x9381507f58bd51e0ce662e6bb27796e416988bd1f64f219f8cffd2137e933a5fb4c8196ca80c653fd5f69ef378f038aa"},"signature":"0xaeb27e1b729dded42d28efcfadfc6691851749ccb19779f488a37a31e12a56cfd856e81d251fe0e7868aa539f66116b11216a9599bd51a06f386d2255381fcd9d7c698df980964a7e54428cee458e28e3ca5078db92e6837cba72276e7af3334"}},{"slot":"7689461","validator_index":"482285","entry":{"message":{"fee_recipient":"0x6d2e03b7ef
feae98bd302a9f836d0d6ab0002766","gas_limit":"30000000","timestamp":"1680010474","pubkey":"0x940c6e411db07f151c7c647cb09842f5979db8d89c11c3c1a08894588f1d561e59bc15bd085dc9a025aac52b1cf83d73"},"signature":"0xb4b9fab65a2c01208fd17117b83d59b1e0bb92ede9f7ac3f48f55957551b36add2c3d880e76118fecf1e01496bc3b065194ae6bcb317f1c583f70e2a67cf2a28f4f68d80fc3697941d3700468ac29aafd0118778112d253eb3c31d6bcbdc0c13"}},{"slot":"7689466","validator_index":"98883","entry":{"message":{"fee_recipient":"0x388c818ca8b9251b393131c08a736a67ccb19297","gas_limit":"30000000","timestamp":"1680479471","pubkey":"0x8d50dad3c465c2c5cd327fd725e239915c0ba43adfdc106909665222c43b2705e9db77f8102308445ae5301131d2a843"},"signature":"0x8a597b9c3160f12bed715a0311134ce04875d05050eb6a349fcc470a610f039ce5a07eebf5332db4f2126b77ebdd1beb0df83784e01061489333aba009ecdb9767e61933f78d2fd96520769afffa3be4455d8dfc6de3fb1b2cee2133f8dd15cf"}},{"slot":"7689470","validator_index":"533204","entry":{"message":{"fee_recipient":"0x388c818ca8b9251b393131c08a736a67ccb19297","gas_limit":"30000000","timestamp":"1695122498","pubkey":"0x88b31798f15c2857200c60ac923c5a310c204edbce0d49c755f3f0c65e688ab065870385a5b2b18972917c566ecc02a4"},"signature":"0x96033c3ac9d7083d889a723ecd99c79cb2ab3caebeac5435d2319fd300b796ca4f6261eca211b0dbb6df98ce23eba75b04203e026c5aee373d2ba579904ea06284ff58df7bd45ea9a4d9cc9b24ef15ee57e8894193d1c6c8883dace63feb77b7"}},{"slot":"7689471","validator_index":"129205","entry":{"message":{"fee_recipient":"0xed33259a056f4fb449ffb7b7e2ecb43a9b5685bf","gas_limit":"30000000","timestamp":"1695122497","pubkey":"0xb83dc440882e55185ef74631329f954e0f2d547e4f8882bd4470698d1883754eb9b5ee17a091de9730c80d5d1982f6e7"},"signature":"0xabc4d7cc48c2b4608ba49275620837a543e8a6a79d65395c9fca9794442acacf6df2fb1aca71f85472c521c4cf5797f702bd8adef32d7cd38d98334a0a11f8d197a702fa70f48d8512676e64e55a98914a0acc89b60a37046efb646f684a3917"}},{"slot":"7689472","validator_index":"225708","entry":{"message":{"fee_recipient":"0x388c818ca8b9251b393131c08a7
36a67ccb19297","gas_limit":"30000000","timestamp":"1680528109","pubkey":"0xa35ae9c944764f4abb247471ad4744b5acec3156a5ec0e72f67a4417268c7c0a8c2775a9639e70ef345176c41b3cd1ba"},"signature":"0x96dd275d5eadd7648e0b8ef44070da65bd2e8d597b51e292d1f775af36595300dcd69e466b51e422f938e695c9cbacd71249d7dfadc9fdf358170082645f63a2ddc6fd82e6a68100460b6beac7603af09012ef06705900410204e8adb6c08d21"}},{"slot":"7689473","validator_index":"959108","entry":{"message":{"fee_recipient":"0xf197c6f2ac14d25ee2789a73e4847732c7f16bc9","gas_limit":"30000000","timestamp":"1696666607","pubkey":"0xaa4c7bc4848f7ea9939112c726e710db5603bc562ef006b6bf5f4644f31b9ab4daf8e3ff72f0140c077f756073dbe3bd"},"signature":"0x8de420ab9db85a658c2ba5feb59020d1e5c0c5e385f55cb877304a348609ad64ec3f3d7be1c6e847df8687bf9c045f2c06e56b4302a04df07801fbcccaf32dbeaeb854680536e13c7c2dc9372272fbf65651298bfb12bdedb58ddda3de5b26c2"}},{"slot":"7689477","validator_index":"258637","entry":{"message":{"fee_recipient":"0x388c818ca8b9251b393131c08a736a67ccb19297","gas_limit":"30000000","timestamp":"1693935108","pubkey":"0x944bad9bc9ad0fa7b287b1e85e3bf0a5e246c871f2ce62c974d46b2968f853bdc06c22933e2be0549967343d4b01310b"},"signature":"0x906badf5ea9b63a210e7e5baa2f9b4871c9176a26b55282b148fb6eb3f433a41cabe61be612b02c1d6c071f13a374ee118b1fe71d0461c12a9595e0ed458123b0a1bbfef389aec8954803af60eca8ae982f767aa2f7f4c051f38ef630eaef8bf"}},{"slot":"7689479","validator_index":"641748","entry":{"message":{"fee_recipient":"0x388c818ca8b9251b393131c08a736a67ccb19297","gas_limit":"30000000","timestamp":"1685364312","pubkey":"0xaf548fad3364238b9a909dc56735249b91e0fd5e330f65aa9072fe7f73024b4d8febc7cc2bd401ad8ace9a1043440b22"},"signature":"0xb36cb46aeb188463e9fec2f89b6dcb2e52489af7c852915011ff625fb24d82ded781ae864ccbd354cbbed1f02037a67a152fecc2735b598ab31c4620e7151dd20eb761c620c49dcb31704f43b631979fc545be4292bc0f193a1919255db7a5b8"}},{"slot":"7689481","validator_index":"779837","entry":{"message":{"fee_recipient":"0x388c818ca8b9251b393131c08a736a67ccb19297","
gas_limit":"30000000","timestamp":"1687790504","pubkey":"0xa39f287b3195ecaeb1c18c4f957d881a86a31f809078fe1d7525acfa751b7b0c43f369813d63d72fdd35d0e58f73bea9"},"signature":"0xb0bd69f16df95d09f02768c0625eb1d99dd6385a7db8aa72606a4b0b05a36add5671b02c554914e087f302bf9e17698f0b551a5b517ebdad68118f672bf80ea8de118d9e06808a39cf107bbc13a0cdfbfd0d5e1daf39ad4d66364a0047609dea"}},{"slot":"7689484","validator_index":"903816","entry":{"message":{"fee_recipient":"0xebec795c9c8bbd61ffc14a6662944748f299cacf","gas_limit":"30000000","timestamp":"1695066839","pubkey":"0xa1401efd4b681f3123da1c5de0f786e8c7879ceebc399227db994debf416e198ec25afecd1ee443808affd93143d183e"},"signature":"0x90234ccb98ca78ba35ae5925af7eb985c3cf6fd5f94291f881f315cf1072ab45a7dd812af52d8aede2f08d8179a5a7eb02b2b01fc8a2a792ef7077010df9f444866a03b8ec4798834dc9af8ff55fcd52f399b41d9dd9b0959d456f24aa38ac3c"}},{"slot":"7689485","validator_index":"364451","entry":{"message":{"fee_recipient":"0x388c818ca8b9251b393131c08a736a67ccb19297","gas_limit":"30000000","timestamp":"1678669663","pubkey":"0x879658976e272aafab49be7105b27f9dea07e79f386dc4a165e17b082059c4b5628d8677203cd809a9332580d9cc28fe"},"signature":"0x8a3013425fd933630521a88a96dcc51e82f8f23cc5c243102415f7782799d39a3450bc5dc6b8a59331f78199e7729896186e700b166841195057ed6efbbd36a5a352cbe6a69ecbec27d74f9b2336f290e19940623a9712b70b59724f879f4e77"}},{"slot":"7689487","validator_index":"641598","entry":{"message":{"fee_recipient":"0x2370d6d6a4e6de417393d54abb144e740f662e01","gas_limit":"30000000","timestamp":"1683216978","pubkey":"0xa226a5c9121aee6a0e20759d877386bb50cb07de978eb89cb0a09dd9d7159117e4d610b3b80233c48bd84a1b9df5f1b2"},"signature":"0x983a0e5721dc39ae3bc35a83999f964ff7840e25e1033f033d51f40408acd07b4f8bda2bbd27f9fe793dd26e2dfe150c03577d0e2ff16d88cef0fb3bb849602d7287aac89199a4b39b1903a8dd9cd9e206ff68c391732fc6e6ef5ff2c89cb439"}},{"slot":"7689490","validator_index":"954682","entry":{"message":{"fee_recipient":"0x388c818ca8b9251b393131c08a736a67ccb19297","gas_limit":"3000
0000","timestamp":"1696695974","pubkey":"0xadbe2aeecfc01016938dc0a90986d36975acdd1e3cbb3461bb917a7eaf260c1a387867e47f3d9a1dd56778f552a0ed6a"},"signature":"0x85dea1b1b01ecaf3240571ecddcfc4eaa06b4e23b1c2cc6db646164716f96b8ad46bf0687f5bb840a7468514ac18439205bfb16941cdafc6853c4c65271cd113be72f9428469d0815a7169c70ae37709a19ad669e709d6a9cfd90bc471309bc6"}},{"slot":"7689496","validator_index":"833362","entry":{"message":{"fee_recipient":"0x388c818ca8b9251b393131c08a736a67ccb19297","gas_limit":"30000000","timestamp":"1690132607","pubkey":"0xa4e9ee836facfaf67dab10c59c10ed6d3a0f007799129f13b009be56ed87ad6b08b30b315f38b6cc38f2fdb747dac587"},"signature":"0xb73b40c9766004d6471b3355fc6ffa765a442c0687852687ed89120cdcebf03c37ed2c087fd254492b2b7f11c2446ec5116e40f442367196af967e6905ca5fb333a2b3a9705c0d302817038199b43c1dd36124fe6085d610c491176d1d5d0cff"}},{"slot":"7689498","validator_index":"202233","entry":{"message":{"fee_recipient":"0x388c818ca8b9251b393131c08a736a67ccb19297","gas_limit":"30000000","timestamp":"1678891455","pubkey":"0x8cb0401c024bb74a481b9253ce61af57ad263e7ab542166922c13e46d76c2593b4735a85e5fbaba9d1cd63d8996981e1"},"signature":"0xb9364c714be2c11651b8c6854b0fc5872d8b109fa1c8b67e4e1bf71555a364e3003a88d98292a09487d20b3e89a0716c1030a559ce70aeef582fab3d6820fde678249d3952c809c53e56940cc74ba6fcc112bb94adf72d41451e5e69788f98da"}},{"slot":"7689500","validator_index":"380674","entry":{"message":{"fee_recipient":"0x388c818ca8b9251b393131c08a736a67ccb19297","gas_limit":"30000000","timestamp":"1693299181","pubkey":"0x9801236e1e78426d7b4be81f828419bd1aac3b2041ebed2af9683eca259e9915c6f5d45d9e3021a8218f8b6a73550ee4"},"signature":"0x98961cb2f920898e656ddaf66d11bcfd808179cf8f313687260eb760bd065f1f5ae79a37587575a1c6c46671845c63e409cc01bca97823adc0e6dbbc280327df886df4fb8aa7e1d310311bc80e29fed90a6ae3346017d1b5d20b32beed8fd477"}},{"slot":"7689502","validator_index":"486859","entry":{"message":{"fee_recipient":"0x0b91a6bafdae6ae32d864ed9a0e883a5ca9a02dd","gas_limit":"30000000","timestamp
":"1680003847","pubkey":"0x99225f70bb2c9310835a367e95b34c0d6021b5ec9bf350f4d8f0fc4dce34c9c16f4299b788ad0d27001b0efd2712d494"},"signature":"0x86ab16d4b4e686b20d1bb9d532975961acd797e02d9c9e48d13805ec2ba71df9e69a63c3b254b8e640fcc26f651ad243155430095caa6c5770b52039f1d6a9312e0d8f9dd2fb4fe2d35d372075a93b14e745be91e7eb1f28f0f5bf2c62f7584e"}},{"slot":"7689503","validator_index":"70348","entry":{"message":{"fee_recipient":"0x6d2e03b7effeae98bd302a9f836d0d6ab0002766","gas_limit":"30000000","timestamp":"1680011532","pubkey":"0x820dd8b5396377da3f2d4972d4c94fbad401bbf4b3a56e570a532f77c1802f2cc310bf969bb6aa96d90ea2708c562ed6"},"signature":"0xb042608d02c4ca053c6b9e22804af99421c47eda96ce585d1df6f37cbf59cfd6830a3592f6de543232135c42bb3da9cd13ecf5aea2e3b802114dc08877e9023a7cf6d75e28ca30d1df3f3c98bddd36b4f521e63895179a7e8c3752f5cbc681ea"}}]"#; - - let validators: Vec = serde_json::from_str(s).unwrap(); - let json: serde_json::Value = serde_json::from_str(s).unwrap(); - assert_eq!(json, serde_json::to_value(validators).unwrap()); - } - - #[test] - fn bellatrix_bid_submission() { - let s = r#"{"message":{"slot":"1","parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","builder_pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a", "proposer_pubkey": 
"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a","proposer_fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","gas_limit":"1","gas_used":"1","value":"1"},"execution_payload":{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"1","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions":["0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"]},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}"#; - - let bid = serde_json::from_str::(s).unwrap(); - let json: serde_json::Value = 
serde_json::from_str(s).unwrap(); - assert_eq!(json, serde_json::to_value(bid).unwrap()); - } - - #[test] - fn capella_bid_submission() { - let s = r#"{"message":{"slot":"1","parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","builder_pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a", "proposer_pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a","proposer_fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","gas_limit":"1","gas_used":"1","value":"1"},"execution_payload":{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"1","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions":["0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8
894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"],"withdrawals":[{"index":"1","validator_index":"1","address":"0xabcf8e0d4e9587369b2301d0790347320302cc09","amount":"32000000000"}]},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}"#; - - let bid = serde_json::from_str::(s).unwrap(); - let json: serde_json::Value = serde_json::from_str(s).unwrap(); - assert_eq!(json, serde_json::to_value(bid).unwrap()); - } - - #[test] - fn deneb_bid_submission() { - let s = r#"{"message":{"slot":"1","parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","builder_pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a", "proposer_pubkey": 
"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a","proposer_fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","gas_limit":"1","gas_used":"1","value":"1"},"execution_payload":{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"1","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions":["0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"],"withdrawals":[{"index":"1","validator_index":"1","address":"0xabcf8e0d4e9587369b2301d0790347320302cc09","amount":"32000000000"}], 
"blob_gas_used":"1","excess_blob_gas":"1"},"blobs_bundle":{"commitments":[],"proofs":[],"blobs":[]},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}"#; - - let bid = serde_json::from_str::(s).unwrap(); - let json: serde_json::Value = serde_json::from_str(s).unwrap(); - assert_eq!(json, serde_json::to_value(bid).unwrap()); - } - - #[cfg(feature = "ssz")] - #[test] - fn capella_bid_submission_ssz() { - use ssz::{Decode, Encode}; - - let bytes = - include_bytes!("../../test_data/relay/signed_bid_submission_capella.ssz").to_vec(); - let bid = SignedBidSubmissionV2::from_ssz_bytes(&bytes).unwrap(); - assert_eq!(bytes, bid.as_ssz_bytes()); - } - - #[test] - fn test_can_parse_validation_request_body() { - const VALIDATION_REQUEST_BODY: &str = - include_str!("../../test_data/relay/single_payload.json"); - - let _validation_request_body: BuilderBlockValidationRequest = - serde_json::from_str(VALIDATION_REQUEST_BODY).unwrap(); - } -} diff --git a/crates/rpc/rpc-types/test_data/relay/signed_bid_submission_capella.ssz b/crates/rpc/rpc-types/test_data/relay/signed_bid_submission_capella.ssz deleted file mode 100644 index b0dd2999d1026d256ad787241acc47e352ffb5b1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 352239 zcmeFa1zc6#(lEZyfkR7&gmj32Al==ebf+NQ4bmMdDj-U3q>&a015rs4K}4`XL|Oy{ zrG;-}JRbBQ&vV~<-|zlo`&;Z;Gb?7z%9&aFSks^j>^ygPFGOo}FxJl+8nJYPZ<8=8 z53e7Nd26BljH|zH?f3w7kMfzZ611i+*6+)uY+F3 z*pjj6NvEhl*n>{k4)5Ll318oKV0FNwH=8>sl#eEq>gw=kS)se{4_$N+G>;b3by!Xe zReeTN6dNM#T%y)dqIOgFo-}jK+6STM4!Di$t*k)(wsxaal=yS)?CeL(Y2fU$SCO8h z=oj=y)|2_4-K(0n;ihEuxe+tlDaMYoLisJ+6b%-(?M9RiH^Je5ICfoj>;&E9)z9nWrqe<~hvMXwQZ+ zGFh=AN?)lTUOy+jqO8C$`e_xQeNh#TNNv#+#*(kJ!j@+UNAIIy1P+%vlHtT0;{X6@ zX-e#Ig-pTWxXgQFO>-)q%WGq=jpDwJhhN7_L#DRP zR{|UWwu#r=(NT_?Z^-?|KOI!Y8xsVe;_U3>;^1oKeTrV*#m(B&%G)6j0=>LF9Zvdq 
zpNfN$twF&74FC>7_4Pr42@?R4p@4=30A5gN!2tlW0{{?1001jcuqOn7=TJ}~0)R#+ z@DT&R0u<;-0pJo8)X4yVksJWRC;(s;3LXamfSVEkY^WeSH2|bTq5cq5HZ1@grUQUc zD5TN@03HJXG(&-(5dh|(pvnxzq2R>=m2nsV9h;;yf`;Gwc#R&j}T_Cx*0zicu08G0B0JA5Q z*9(%ZHvlXK0)Sc`glh+Y;0dTaXa(XXgaL=4^@iaz3`nnn0ZR)oV3ryVj4Q%{VGlTv zmJbJd``|$QCLFlWfCjiSq5<-(Xux$Sgs`CjB!|&}awrI~qXE-UNI8NAba0>n5u9iM z9v2#r1_jllXuu{Ein!4LQXVuw>lhl)3WXqEG=Sn9I?z0f4%}Qs2RxS0ftf9I;O#a# zps$Yr)Qu3ps#_#FnR&Fp`he!e2yLvCD`Pr1S5CbyD=zIN@=$*(Z)-R9J`QHB;BS@y z4XmSJ(}GkRuY!vh0U3saI;J?DWgz2w$4(oaFvAAk4lcYJkW<^jn$3Id3QW7|5PX^s zNMN!)(>&Jd&?Brvc2qT@bJaBhb|?Di`47E7BrFC@>C3c#N>zOiZyHd#6tgA3Vw*#; z6^nK@B)=^T2;%~`8CUQ23Iw8?diVqv`nio>Ph5C0n=VHzoXyeDempc9^wMaNdbz&# z7y~3(V4lIs6kB6f47gi=Ie+M+N&V^CSMb$p1W`2ole^a}qK~Mm+XzLBNvB}On7YO` zuE6z#Lg1>8ZF%m^uHcCJ=NM1a7@PK4f95W+5`f#FScmDx}r0J9<=U=Km0D>_=iU#@H+$Bk<-cK57hml zq{{l|(MF!zB@C{?k4IuZpo%8BvB^RoNfb7cQfTPS#rMANF*oj;6#=r+fY2vaFTxsO zV=UazWcc#~&9I-(i9kC=H6!0a-Jlp4_Lv-(X65yBKdC4nv;)#HQWqQ^C`vt_W!F7J zeUqwN=!WKJab+(z*9?~&`(`@f{pcu4MMgCvM^M0@kZ~Gb<@Fp7h6LknKlCotkVe6B z{YSsjF)z&nu0sy5Se6Iqi*7!Ji<|NK9ukm{mHQck|SSjP*-fL(vf839frZ;V0Z{>Ks;GBjK)U(FbNG zx1U5TA56LZB#AH77_WnpUK4#gvDa)dB0xP>>A={Vh_5SChpGMS`DJ1CHIAp+Qnmph zPKF8gFj3YV8+)T=XUbd#klu=l^QPBb*V=q56_?;<-A@Dpr9Nj~C11Yi(zw+iIR@2- zerMrleV8KC;jYU!pPp!Bp};`HqltIDpkK^OiTCMbeg>lC|T z*!(_iBK4{j=Krw7bKSmrm{>SRD6W|O33OaR{cwS}54u^hS#sB+85yWt&mBs|E_krg z;$FB;GiSOP)XsPu)J{3d-C%-tG&rqWC)wqaosY+jOiFPvFx&%Ua4C4s!1R!?4bQa+eDkdiSHF7SmtY=D#4+z~3EEAO8ym z6db=*U6m@=WC3+k{=$-enII2Jd#5cnXm^A^OsNW1kUR#R>rp>k;6sr+X4-3Wr_NTW zcKID>h%XkI*nIw_Z6&RL;)>$Yxh9YcFgiiPWEUD`8?JaLy5YK;L`CVBGlZ=IkytK| zZCgCo;I5G{ACKY#;HgGMQtFZ=8q5Q|1kfb3XcdM>26fuTK3-<;tOOV zYSQheUd}oAI6wNM`K8Ut2jkbY7pc+5NYEV{u3e4DCAd(P&eKqe5q>6-kMPONilc)O zHQllhhg)0P7p&r#YSsbHCbnC>=SN`#((%AQmfpu9F9M<_ zQ;<*KQ44wQ1EY(9>UXr#FDpY862r!$vC&^Ri0xEpOyE-8 zXCR{CHL#MK;zh3{ru zNaR$mt=X(Dk4p|-(mwc-f-yFS?}5yQ2o|K?m9}|^(smvuHe}SQ9ren*W45ATtje0v zDlOo+urBzx0yHNtCF@swZUt`?mMqA+F?u4vAeXV!E0@p;qgJ2~{pr^RPDW$KVrDn* 
zG;l~h6mK|QJB}I4K2^{e%kJ{|=Gr*e!A$V7l&S_)VVzy*(lILCI3D^aJOPa34J^2a z-ea2?soM17PQFNdhz9cV;m|M^TVD1Gj3J9UW_sdea4f$hjJ!qbswSIa_DJy822Mre zz&qT$dtwfl z+ZuB#y{9oGiQ+1J z)ejMY$rFtclsW`_`x}YIubFZjH-FlmWGjzG=iC>VahuIa#7mqwv}1G~I0n%16jHD% zd%@NDOXsi$n8!4Oif@SXBxFB0ro}!tB)&1yH2KW6|^$X66wpStQO@J!THb=$2a#XXBwf=Yx)l(I5AYmwG^4aOM} zR~wDE6tMtA!pEvoI+@)Ex37|VSy9!sjc#tl2z4y+gf+AnmZU8s^I)9v+PTBxrONZ^ z;s&$f7oB+r_j?4=9e+pe!hpMCC?ZF}K*EU(e)^JQ zew}K^$~$8tYD|^uTF>OxE?GZeJZg$%0{St@blIG-?Rlye1e*~nyEOcU2L9^gz)Xi` z%oBCx7s~KeZ>X;kUN6H9zx~}A+MX+>rUtsvO6SW+)xj+;`=QEV!8igDfd96Pf^-;U zcvwyldq9bvO~8XIqn!Y1XkI?}oMU7D9vQ}F9{~ff&H-v)mY$G}V}4sNj1It>CMFTWdp;d(oK^*SOFCgT^C3SR@Fv3pI=2u3_gPC?ZPD9k10t5tELH@%s- zSrxfFzUbPDY2>^_A)X!7Uwr{_ef3DDpl?#X-7=L=-wSL{i`x5moB(5CZ9%+T(x-~! zw?XaU@stFtKy+W$?1vQZ$SG>|qM|d8@^v4+>s5?qQYQvqeTs;HWk&f4qyW)aC2Xx- zk+1`vDVcn|A%a_uk6)JGSpwMW^^PrbwuCj+zXZ^X z@ikU-nvP|Ex%$rFRKp=Qo%Uia`WeT|#2>tBlHJCBfECJ+ZLH4DT^IET!`+~3Oytf_Ii-VHUIQp-)HaH>zs*$L9D~58dlw$VH|j!5rqIADs?{Az^ld z9IOrGnSuM9DQ!%;A#3l?r#5&p>RK#(R-kpJhGk&+v69Qsu7lmrls_Rzx+1Ow=B(;n z&jRLYT@5d0ilC+B zGX4S*SD&%+8wsm!_By;z;j6wo^+x?tuh)Dl8oJgyb?L0np9PDp-A;tD8OFgn1Xst# z2%(C@KwHLkSln-CjrSp_Yni%=;)mzZicJ}*LF``bE*F6#*Uz~EeRGxGG;X)BxFkxI4 zX7BOy*Fy?^*k2pu4dbaGBmU3FLs;t5a3b}gVugF}wxk!t5i*{8nt;CNfL?r4>9CyBJ>(qH zd?UM9{XMeN?i&oYcRWks=f0YQmkSwu@NleCKQhiZ53;CL0`BArMWs&9JY5OYl@;MfA3<=!R(JV>kPSTXM$aEAi92qx< z;z6eGg1>u9cHw@HH2;o5#=ydmV<_OT?R_2;dA^}g@Q0##b9?6hM%Zsk?UNmHX@-IC zr*&QJQcvV7>RnP;!*&y;<4a|tR?N>WSC)3HlFASV`Tbx;4n|c5+g`WxnFTRgvsP}x zJbf?R<;f$kcm}AU!&e&-5%7p8BYSS>@c+tLDJbGuV2aH61iHnAI;yhH?APF>V6CKD z^^I7wRmK<%`*#iU()S1;C+yGX-t8uD9%KK@Uel3fghf*dY?rE8e^Pft3GGbi@l#)U zKNLCidc>zwKAl|FLNr=@)6R%4XY`6N>pP6X+-h>c6QDl7p*K3Q#ftiy`kSAp&&WDE z6*4T4`xjYKUB__9^?$wNRf>R_BlE7;kkWjlRHTM!>nskw=?S)(7g*vx&i7Gxm5LpO z%P*oCMv1t5nkwuuf9R7EmAub**oFjtlI0Qkf?a2$7{ ziCHcTgAy+T2g!7MO33Xt9B@PDyD4FxmFt|IOMa{`y4mz9TLQjXkMKrj84|Z}rNiO7 zbOYDh?R}1VIIFymz!=5*VJQDy+!;}_mAW`K9OgH9dlw$#X9BE;MRp7MuNcnYx!cBV 
z^k2n|7u3p>{O%08nqVq>x8cFlzCO~i2kV|r4Ye|OJs!YJ1_ck3Iu;F-)id_iS?T+t zwYMB%w-g=3Nb^23Gj;qbc`AIh3UL6L^MVxWB(F9ZW zYI${c>p_>SLqc5o5$%ChF7x2gy8QaIVdo8N1z|22&Id&U9aG_-?(!YQHott8jvup_TF(NH zv2oJOMOSFLyga$9VX6WQ4sHjDB;t02l9l|47hG#6Um=;ACOu^T&PGcyu)Dx z-`L!7(MllI8o6`KMb*iFQ&fNT=WgI<@4~>5bWl-yQ`_%!tBt&fjF66Ul=M7r|ET6- za#i1j3(DJ|km~t@i-^mGZ}_J(^Ig39WgPj2I&+e=PTwz&JM1$m4qx>^ghwMYjgcCh z61JYjJ)TIctHm{h|D-wak>9q(SXdOXN^2q{r*O#xkL>5LS05Y}4O^zS#97yekE5@s zN0bSY#b!{Jwu90p34=A~8lO-Q2bMn1uH}1PcU@4en`ih)@5R@H?O{YabAjkvm6%xk zZl@xi#h?_^U)sQLOAp=>t3;;P{nx&nw3xo>AsFgRgV22kln=tG-JHvs5Sj}xu#T(O z=qM~St$B>V!teTG6ThSed1z=L*LqG%dWgv#U(i~V6SQycd*S5MZ_!C5`e>=&jDp&< z?dbKR=PL|LqfMaWo8hj>@U@HMGM}?QW}vxvo$k%sLG3Hbhec*M{^I=~Ycf+;pGaWh z64o6!jvgJ^5F5vldb1T}%YGnPS->A4>Nh+UHU1mc#$=!anf@~fl7ay{? z$u=iUs5O)UOzMHm0^qUhM2`x1gCeOd&x!^H&^nb&;VaaP+?FIOVv1dU zUvLw?>V=?18i+e;3}Lx%{m0HIBdInH0?W9gI+9hdX@1%QYNy~hK)AWU5 z+xhwH$|+~Vb*_({BB8LKVtx4>bY_Zlll3Iz*WP3+JlmpZryt5 zdT(cSBSjvIwU<$k{s#H0oVB|K87jRTS&%4T7_^nlC?{Xa%rV`3CB}z%&{*mGiX_qB z!K?Ir^vTOPMmoGQAkRGkR$yfzjt%?e7|UyPIVCH9w94ss&Rr_21G&@cQYsz!K}K(an`)##6pU!~zZk_Y`FowaFyOA(KaX(KZY+9`N}afS`a;3*(}RdH4o0j2fQ|6=u{nw;cBX)h znoDY8nKI1JS2Hb9mJUtWx0$PtiY83w-WmrNUzrQ2PvZX^-S^qK#eGivhEMg~&!E() z_4}|BB;yy4R<$$RbV!tbl5Y2nKR40c&*ZlpQ!)!J!A85*iLXoWi??`A)O{{vqu!ZQ zl~Y4D>}p+~;}l{Kde`3gSFA~2BtH6$6HM#kO>}Rxr3+iCkQYJs4N*S|))yk=cX>^B zr<3n2X)yQ9Z`+OC9fQ{3C&-v^dq&NEeJA#qtJfu&CR@3rsRQeg>AT?XHZy2x$;`mG z5Zmh7`w6RelV{ZN_+;dXS)x;s7jDWth%>)as|^O-yHC@R*^(C*b@<-ftBdilE|c3W zGb7O=gS}$UM;QBePB0wEE6dIaW{*o|zuwnEwF~az^Vi7bOFIT3raLM)=I)1VYNQIv zJcmdR5wUxnX}T9^y>_X{URJQI2&DFr-A>1pRwHtD$W{r$KV6vfe+^VHlr9Hv_FH(Hj|UiGmyvxvPasrgAwZN6Ao@aFJi>96N_ z2_-dZD{Q|d$754g+PxHBmKP)2v-O{cR_6=sM>W@QFWmPFYlPV> zwkPQCA&ehr`X9g^?>!g(@9Fo+kKVFJxj)chKY%^n&3oYgE_yHiF~aro1${^MEoQI$ z-!r1ILgPPqYozb2j1j?%EzOgAPJ>as$@F<=Q|r-vJ20e7Bd1hAjTapEKixKUa&c(6 zY{n}3o?LsxbU_Zkd{9eZtxzN#@~$)>;K-edu1$i{>j)j)H|{Q+2mb7I1iml3e{S4Y zrz6g}k^vI~HnExquz-eP9ZnHa+?c?O{=&$~fSy;~-3Fkl@aOK~NS>#>Z%gHP+|~nI 
z>+K{Qn#jXBTYMI4hFy;AOx^Da(>Ghq`J1I*%z*z*p^EB2fI_)qUIb^0^ zL!3sIHr%}yr6(&6W~BeHnNI#iLsaXnWA=X*Cm_8S{YNQ#(ZA#O8EI^Xpth%`u1%iR zAUd2&K3iGPw=H`%zf;`PE!<`X*I&2a^xA2VgoOWcUG`^_6N##-0ioT298$O3xeV(1 zrtZ5KC#5y9?Q|CdQrjbHyHXA%BqhMH*09o~E$0><9d`_PaUvMSO8Y2Bix2%+5JueX z&z@a*W;csZWYX@{q`z!xFcACPYtuSf3~B>Qu{*0Z&G!<^3iJAf+Pn`L>tk^rx3d9V zof#RP868ZRS+9(F8>pmcctYsN@Fn-^qahdHY#HT!`DzA;LExShe?{~1N7?kBNH{Lm&&a<3?Wy^EFH3r#GQoZ5 z`{m!W{vTrgbNTzsdgv0|UitScACtmEsP*ZIV|(TQjtBRl??>L%ew11mQVFQOuYiuc zK@MEGtJyt+9ulI=S+H`_@{(=NLdor`f&NT7fh;Bbpbj?)OAoH>E!@CN0*Ovd6~`F0 zMVdF^N{NGP#_syOzR`;Lrx_;7 zI0V))Rgye4rbZa^dpqV`UIT#?zoHHNw&dpdiDJSR>XrwM7|M~D-vuHt&Y9jAm}A?8 z{5>-AI|>Pdb7LQm6&gKKY82q^!SPS{M^P9`6twBsGb|l_jy=v`;4`Mx^uUce)-%U8K-nP$`E8YtV$7e)+toe>-*B{vV2!)}XcCskuqh|HY71X|f;jTM zb9RiXk!S~rQ$qP3CVg@VzJ*h$*|aF*BN!qPNda9Q5t_hhiCA3>8Ka;?u zcpRCrH@})&3xfobWQs&>ON-Zqbm9wnS8Z}nc!(P>2m?ghqz1uh;BlMHqi%U*$@-gP zNe(08gGTLPpvEx~;@k{0gZ|W<75Ew&bV&`y_Ib1_i|}bgL;o)@ zbD`nF@O}#9pF?Q-WU<_b-u!1eY~;^$KzbkguEprLv`{zvhTDbMYvlZuXwL*c4m^LQ zLtZ3hG$=LZEE|zAjq5^wD%>YWiQ)Svu zc6>B2q?68$j|P>sAh!DI?W2%?;&*=~`Hemrz0QW#C+SuXHluFVTviUv{=wld@1JKh z7^v~&uJPLIFZ@JW@K0I?3k#(cnH~!3Czl=>g_*P$@=1Xz-JG^zkL|4;2U)#J&dy9c z;AcK`={;ry$yM%*!`Dua?dW;PXSBV%OMlseerLUeA^t$Eblpsj-}Q%D#8Y!3uQMBd zVfjb&=LJ9**o($`hdqCbQPq>dAs~kvUtfxEwBJlbi=ANAEKQ~12`GD;3STu$?TE>( zZiZl04Bam(O4y7z9U4&*0ceVpZLgpm)(}CAyom4E-%xo`|8@7$C;7H17r5(VOilX^ zqp?n6rm^#MKN|DPo0@8DV02#d7wpd{)hZu8!=r(p2viVc+AKiE|4Z(rFz}4zfuj2# ziqqFUX1hwIqSx*bAw&$;1T$|`r@rUn%D@37&mXeL@;$PB$*!mUpjnp?PcgaMPgOYH5VF! 
zk~2=19TRbM0xTShb+Hq2{<=e*Ea8@Ewm>*f*BHerLR6+5BQ9eI*k- zf!T~hnIwB6bM*GO{^#$OA*vNUHQ?^B8SD4rn z<8JXYq5*dv441SU1zO5k@lW7yn|Gr#=eL1!hwRRTnq^2uad5P7`wS63_?$iZP&uB^ zA&@4HtA6Yge6Z@j*yNC~|&e_=jHu8dD(@ z#O#;n#9BH_0I8M_(PnMQ*#bxDVpoqR1J<_mEZ0H)2#d&YMcF48-ZCw_B`$F>lD{MI zC1Ml0-F{fs_<1bzw=tE7+^YY7OnntL=`ocF{sG?d>pj`kN6;xE^eKbfVv)N68dJ{< zNGEJ1Dz99!oAo1F#M4;N3>_C>`M4O&6vXbIXr2nv(5Ye4)!2v~!=^Tt$Dz6sFknt> zUUGF@jFJI>XR#cIuf9N>`Y!yh6B;hikP>2j!TN1UOmxSYn;w}d`d9&sl_G!LhiR~> z&Ml5W=HoYb{|^kLf6Ma=0vcKos@IQvUiv7IkU75^DBl?(dR$s^`}M&&^p6v1cL>kO zfI6KI-57urOf%8CjlqGeTRjoH+YLIEq+(6;LnUbHA9rrDA&>_ax;73Z5j!ZAOjpqisUZiVf*t`zt_(&D`E zSl5{PXxsjRHr!ZU-QuZB%E+WtQ^z2ayb^e*SBdYEbx*aXOr8Kvx~SK?I5rguvYbeh zlGUEJlPb`cKVE+w?RG}P&bDHEVfto3azWfWa9VXsMb2(2*hfANUN(epazya$&bCtV zBC>F0R{mpS!~h*_7f!(XNDC&cf{EE>C0YCx)v^U>Zj7euJJT*C5W4)qG!FLREbD=r2dW2(-ze5x^gdf=H3JC$)GkBeJ+rrm{M zy(kU%Be}AoZ+8WMD3D+8IsS+DeHa*9yW;SkF$WXty!DxxiyrmP+}$6A&*~4?aAl)S zy)v=~g&7&IMqcj^De4nSBW zlYi15*txW_@a|DSmRRFU|1@r~P_LJ{OT3e@LqVt9Cnw0dU$h0tgF;dETCqMVcNlJ5 zRZ0>xjkvNAejjH$ZTiSLV1U}ERCee1waZ}&-6BM#PoOaNqVhieja!7;XSVSUm&iX$ zyl_Zm?o0!@x9ZC>y-eS`Z*;ZJZ?;tsfkgeS`J83d_dPiUY|qnSUv2TqO~t%zRenIh zR9Fy`_u9_Nf~*2X?zytSLxagU?(Mg-K&fzFT|`<$P=a4q79lz9nN@=i@b{TD{rOJM z`MYZm1lRG3_;>V^nDd`Ya0e;O+fH4H8d4BX9c%wYrT0$WY81cnMylUjwcN7?@edR` z;-*F#LOZA3fB#LypUO`dXsbt>&&smy_f(U3(B6b8*S?!YTgfk`L65$ZJ=LYB1mx44 zWt%wNjoraMKC&4Vi3rB#olnImze)(bCJ)!=9JWKiaB zKI5TN=gL;%eST)=GgVAv(V~5LoKjMlt12kxZ~z`-pyK<<_}0tkpAkPa;;7zt2dbd0 zqvI|A`F94(Mt40)li#%6g)7Rr&Cm}%2CbydHr|n?F9WH~a+@BUp_D06yb?69n$Z55 zdZ8@bc)V|0UzBWI!$)Yxju4)OJcs_V34~voc9gYx>zz3aI#K(V|)fyRHgETYLek&Y{i_tdrTHvy`>LG!Ok71kwyKaH=@lX>wqHt z1IfD=K?&<}45}Ni4=-UF?#%ulKZ=YV{O}S_Fpx|iY0Z|!j6pQUY z@T~p3>H+O}pPP)ivGeF8nd7Lx+=W@6qB$O2^ZC8jsy^j_-$1`UNJ264#cb${kt>)! 
zabJMFsI2^vYTxo$GGD~)lhF_QNWNb6$jbaC?!T^jtSS|@&dlk)AvA2$h_B?aamk1cL45Z=-CT>h!(MkiQdua9F0#DW7%~Rrz z<+m2gEtus9PV(u$Cuey;q&mv{wUeii##h8M{gm|q!H_hDpjK-2=faP$?2oM;628Cp zs zfp~#j$$ewW<5_{!MlDGB>iCee`KS?rj(f?W6EcU2+Sj$6WIx$T74ms*MyiZLtIn|> zS64BnZPFvwyu&w4%=N z?y+rtRokab=Nl_tpDP>}Fy7zN_Tkus5vR<)dITO$@H5A{(kPWgcvsOlj&d+kj0VHk ziMj91htGMrK%n2wCWCyz?Vp=9)1KihO!3D(mN3$`>rT}+5iKwI_&cv(F+b;Y7+8?+ z(pxvLO4|K?Es`5sDh}SgdGpi=!+n1{)svdHV+KLlt$Y!c_+om#bQdy}>$77md64tj zCy!ZdE6ng`W$m@5m z0_Kv_GeX&Cu{5v1*U*i3YZ(~K5-2@uKdpRdWB+;gTJGNV=5OLa!v+3Fml^)U?lt&c zo%RDS{R7zJy=MV`&oTj{M=p16HUnee)vEn70s_TC;fyqYez^!_Ix_tC%16$D5(ZIX z|2|R{r5_EqfoMz3@vX!WOTCU4J*Qet%6M-)mk0iRUbi0%?w-9rut;xM7*gb)fM~CS zUU+P*3ha{y+=sp&dDr+sDNCdhP~H^@1M5Cck>4GFzLvSFdnt0DiL7T%sD!Q7{Bj(4 zs4<;};vDD@RAar+-@~!wSWe;RUsvi!kW`_~Rrboy^VYqzmv45yb2mbSzeU=w_PnMa zdycyEF7#Ut|NJ)}XtWtzIkDW}ol;n;x( zUOVJO5@$MEUQ&ebT$xAg@nzTd9?*uH>_~A%EjD6Oi@LnS`lQZ?&3IzRpuY+9{y*YX zyH(MukYq<0JnhDr_d>&3j=TfC1i3>Y;b%$^wVqG;ymC~k{-BYLsBPj4i7O6U7n|Pm zjtJs%QPhB@q<5>gZgtYDWyf4%3An;SHO8J@XmyiIilWl^>2_HA-?n+fWT)4}98z(1 zw9DC~55oGDY6X-F>*fL-rTHEBU3x5vX)g?#J1^E`CC7q(4s-s=;!+ls>2yN8foA$b z*Ox;V&QYn4J~+K8$lV0_@~RN>NIm|ax5BklUFbHw5baF8w55AqGmBrkZf=|5O`Km^ zJX2ued6_KmPn*U$@>h%@8Gpzw0YjOfW%X+hO?-GRrfBDao7# zCfN(uv5zMeGYed=Z9U+@BL|rde*GSj7Etk*0uq2|O6Ws037}gS5cfdtM-=dmno+Pk zW@r<2+9xu--!(BKh4E2?)mtBj^Mr=et?iL8-s>RcRUIaxk(-DEUJJK_tPw78wr^f6 z$fl6K9qOxna-JXhgxPD?XV*Laj3w5vF3-!fDXgY<21iN?2(RW!WX6#`AwF=L9Qf)c zKWqWFR-6#sXs!@@6MHLg@MPHhp=-^H3^8KH_`Y;L>>yPn93wLAZAQ&~3p=6D_hv(m zPp3xdTe0iX*;8Jfhv-<5@C_4&ZmucNd>i57ca&@utPVb%cPx|Zwj@EN zJ6B2LBMRLZMGq4d`06DD)2=GzmQyytFy_Mk+Z~=h)nmD%93Ja~%PKLL{y4)O8Tri2 z(scLxXyooUTZMOS9;GzR={%#ko{@_C!7T%)MW)?L%6tRr0bb12q&u^|iA9>B>m#xq zLC>Pz9Av^~sfr_rd54#n@fx(0)f1egRkXvQRa=o0>y+(nQ5Z{3S7Mlt4)QUCA#BK5nNksJFvyzjodX9C!Mrz$m=ZUJaVBzvMCUO}2kog}H}<6DwzrhdP&h zkeXBSPVCwa9EkgIQ-8-W$y27vFiKy6#;vgXc9o6$0%9hi_YTBJ+BA5lC_{?5*p;mMKQ(E}o&?yDF)B9@EE zvn|p7n=>0i4LCw&T_ek4J9XUVcr;(jNP1nGm|$`KMps1W^1e 
z((hyI>xXbJaUV;{-^inKA9_7}X`uj18^4pczEt=Pzix<~9J%fp(BVRH0LwuYbkQ){ zPhTqa=dj$ICzzJ+=NiPD6G!m1ckP&OrI1*UxOnp)`UuC~_pDYNtfllBxOV4yB~`zE zXtnR}+bd-I6E zBM!dm27NENWB89o9H{E#jCkstqHSEPt>H`D6r;$mW9?SJR&ndJZdlZDj~84}g;DyT zpVQiDt*VS@h5m)C!r`;g7`Rr|jM49}E3(j0tAdyfhax}P6dZk5&vxkA8-KH>=$8KB zis7^;ZEX@1z8H}}3#kxv;qhygxcOq5h|b$Y{Y$oyHT!DRuwSNQw2CeU?cTq$|KB4z)icsdbh;m}ZeBduJ&HJc=61fp z)Du0U^=i6YXLGs<|maLqbXC(YvDAiCDE zs3ICl^Yw$p$GJ1x1o&_sr$Rphx5x9}B03mo*K#_mOE)ams4{Vm_8{TvE8LY)6F${v zwXRbL%$DOzpyReYGw0M@=~uCRl9PcqW!%j}RqrbLJkTkHyL`c`vHv%S4pZtGs{ncI zX#>3BidTIVrm++BBYOxA`u`cf?bCbGcL@I*J1h(|a0|E@PibeSnyzA>i0OJ}FczFKWxW+&^rn%Np~}fJ8OvDJF>9%r0eD$$mP@ zj3uZ#Q4TGUr9VoI^MWtQ1(Ek(D$5{$3x+Nt0x=UMobw_q`Upqb2oQCs>(OZ( zWzh>|BSp(5*MdeqgZDAR8WCecr|b7KSM4db?*!>TCqUTfFh=qGISoL+4}DLQ-1mI@ z<=>}x0I^Sked>peFquicK91>c7K62=XD%OG_2u)$XdI4dyr{%J_R3^Q2fCbQBkj93 zF)nwy4x9N})w(O(_Pt#bM@U5WF5oINIbunPMbg8&6?SHLs^a6Z=phw+ z$}o>d1!2sDhp@LzL_N2Wh1t(*O$)99<&N`tp|NI<|$_knNaa}eM81r$Qq#k`clk}^e zpaAxsA(|)3YQcB=mO?9s#?%gkYsM>ys4Sg=Cp4v1WR6f|ZT#n~!@i>(VJtS?W8HL{IO89N@{r&Zve=}h#%ReujAb+)mzK7DTfz{PGeqAj+Q zsuA9r55WF26LZRc&JUm!B%SPUvvuZMFV!{Bnq^$Z9PbL*Cl3-CMgm{iXy5F5i}U;* zRV|wn@e~~FVxLSP&&0h9e&#k=|KZFDo5YWjVK#R^5IE!#atMWUS2M%|>NrcX zKILysb(PfCz5qGHL6((HW^&|JwjAR983X-5AKo%Sd-~n<)6Q7pO7ai16Deg=3Nvr2 ziBl4WNRg&ud^i$dIDS$$=HdGm`SMw9!N!^!q zGDTDhsmn3BL}|Ig^X?e4@#w-Yv3lFLLHw&pVsvO zc&&J;u8&|~!w3Cr0T&?bbr&~P(W6YDw7;u!zUf4|n63vyP_bq3Ga060$k~}bsdNt7tg)bD? zM0PrBnl!DV%?}a`J=nmNXltnAp2{r8LBr0I=emC*8mRE1aS}5}&*G$%$L|qQmE*1r zHI9@?4id4C% zu=1KA&&@SwcXvzDqqj5kX6VRH#Sh&+h?x=h6l3UMI68ry;PRO9YW|&87x_W=LVh;e zcr1sy>#m0piq~~CNHI(D#au9sWK1i&``YtRM}~xmet|X1#B;JlNzZ*DtHfkIC`f4P zHBls=O?CcwkB)q>p6g@9m`mqAZGE~Y4bEd+?lcuh6=b@C&Fd6Vpcx;8n=}vm(jlzl+Rd`ouZ7YAGUIB_z1Jgb>`_gL}}BV8JE0ySvVN z&di)zyWU&Bd#h&ntE)fjd6snVmT&i3taT7RHN~^eUr@=2I3cjXI8lsMEt_Tf3ZyW? 
zTbl*$sKSWvgxR7|HiwQ)>lR^VkS?LC&0V;-2EV?AM`*0KAx;3r*bZi}&MpD191al#2 zSrYD(ChXXBpX4&VqnE%1^3G#f5x_hDOzu=Y4L3w;Qjbdi7oG2Kb04ybY47r>cq;sB z6w|ky5>H}PTi8QEeSLKU5(TZ3yllBBD0j;&tMUkp7EWQ3N!mFXlkyVakKcXbG9(x1bHuRu z9MYX>q<@zn$TW4-)R~nhDYoGYe~K4MIQXIHx-LS$y`Nbog39dNjqEvyN4e)t)gp-u zl-~5DzD9dIATIk}a*b+0>ytx4Bw>#HZ(RR}RLbWon7xOC46DYlI*giG{+GxELV}%w zg8ekym!c@{H;9@7U?)4+(}MVq7fU|z#35<7gl>01@xz)-=n7nY>MUA|02S4w4T)*v`^CsYHB(>t z4xN6NMaOevQhaUv+~T-Y0bP7JzAldqmg?vaR+uVH&##uf1i5_AO~R3LF*cHhhH+^V zNxL)aFklo1jYgf^UZ~9us)gD5kn$#E%p;tWK}DF=*{jE;@m*=?hjP=2vyv_AAquHZ zd1E(PuHw{&!yWzl^Vq12HlSk18GSRN#>uFZ z?3rUpx9h7ivyOfGj{++T+DMyT7T7MrB zbMQHYVvVu58=`#kNRm6xlcwaD1%T%e52qQBMg9kRqX(dD?+^INb=pdqZ&>Sat zZ|YY2d0|7fIt84oiXN||S#&4DDr&@4Lg}g9sYIWfE~a970O6H0EK)r%7l>A zDR2GS^DSS0rZJG4SYR9*9TLW-`A^)};it#C^K1Bj;6DkqfGN+y=x*13asA)23tLpM(6Sn_i`ugBTIgFc>xM(Z|^rDYVF)b(%0<}e2ur4 zp;U#~_Tb~~`I@5b>YU6Ou1(k}tZm+A1rcAB;gzb)`uOSANjWG0`^|ooxcZV8(7)R? ze!csFIy6qhWE8Y+*rUGa1@S&%$hA8g)t+Gv#dmm*ttCoEu57D5pq^rkNPXlFmugw? z5aqC`7J9D$Tp*D9nuu$Or3=K;pco{+H6mprZ))fQ^7-CUpCc&*EjkksQmD*OI+vfj zRY0zAP$U#OG#1-)wa)hCD4Jf}hDH&{XIoszWB$hFQP07Rg=wR55qT^@@Z63e&jFjC zBj7uQTfvIV-nhh31)x6i>^^HD2te|!i>5T4_~mI|_d;$1k08G4DUCpId2X8X&7O<5 zD^PEB9{`QdGlKDTuP!dDEUzg!MNbvTea|*DCH7+p3i{qO{pzf|Y#Moj_%Kn<)E#!D z)y;*dBPy|M`P#9IT}mrZf46hymGC)=jm0~)$G7Klx{^~ZAINP~Dk~3|kwaMia7pj` zHx@cmMMBhX5vgXv&6YNWX4aVrsV@W(T<|A51Ryn2CApA2Y2?Vb`bTC zNIiIp!z^Kbwa%d>!v?-@@l(NPpgxiRI$$bEQbxLkHaCkPn%IK7gbK*Z=mX29Mxc*f z_-q7X%`J^JuRT_PTzsZ%Ey~x)$`7ip`j~&jnOnwH1IStWapBcX>$+9M(~jN4bA6Hi zc{3Bp35}xZ<=$G>(6#!1-2yrE4(T211No96nkVDN*$rn~f%0j+Q;h1RUk#8?X+8Pk zUh&Y;|5h?5l%}d03QZIM@@0d?{)WiD88*#Wy*2XdG8l0;Cm?^w1lcP6OfOjMYM=md z`m3^OGeMMJAighu>yte_?69s?A;GDDq9}nSP)~<>ST+=R%`k*ffw(`u-v0NVQWVHL zdykBs%89{|-yMGUqG=}F^fo}mTrgLEjcK`!>T2YYaeTr(YdBR^;RNdMG3cU9y+fg6 zwS?aL7&U#HOGY3 zYHy4TqbDAqKBux!Spr$W{>iS`JtW_KB${Vt4#;gN@_r$LM99y)w|<+wP&6kvwnMXgH2;ni& zaTPEE{r8bFPYTC*+k}46RoRFAG~T`2{(r*I|MyYiCxe=&%IEZN0}b7)^Of+G+%F;e z56`Zjp-tX<5$oC$g^eB3$vdy(5dCK^Z@XuS{`Ft@lA+uyK4hcD6Rv&W`P)6R 
zwtiDB+*+AYGS8mvoH7_?Z()y6i2hYygkXCI{duiVnyKuf!1Io3{3`^Ph6}z;r&DZs zTaLC;@F?dwM<(_Tczz$HgQsVHs;H9GDTc%Hw{rZca1ie>2~}uxZ#5Mu-NtlGxXJjn z%cpN;0rlC^uh9hft#vg5&`0tm4WmMfb1*=ze~7L44fElhK*cc?!Q}lLXlq3d#gx+3K=qg#fvCY47<@FKZk=F3Aeys4M=RvO#d^x}>naBKc5m zbh7~v4bWR9~QeCG{W~;k|qHWL>(k$-SnQ?w=DrAjhsv z7_Z93By6vTtvo4I&33v-@B{KZ^5GJdhigi?1wILjUnkBNTzEb}o_vQMo>Xr>Z!iWk zp{uGc-HSd!2jpMlP?+Cwf8A*b&q3M9h?B{mG=OOTIK_D8T@O4A>5iHg;b=ZCv6~`x z0`=(SmEpfL_Fz%4bjE)*fsMZe=ls9RasPdoLU&J>jg}L?w%^mCp-O-cNHQb=sNbSW z-l*d5GsDQBT(N$auPQPcCl2I9k!Dn?jZ5%99$!hhqsx~&d*vYJPY$=dfrV^?h4?7V zI_XM&4v*m6Pe8p|GSr$vA2*1&zc3(dL`oo5!66RFhkvGCdp}lrPzQWGBg9RR7UPkF z7@vgv@XJJa`}>XCE`s4McxTqZ@D*ZyvM^li*k$0jlojRej9lj@y0K5z4fKyYe*Pn< zP4$9(%A$wZXFChWa+nC@VVdrimu4ehh{Ljj+lVp*1Ae0`0XeTozl-EZMzpU(Q+KYJRe=y1zNaH>MgKY~NUBYz;D#+l7j3~)kyEFmEChZr9S)Rey#{}xqV zyF+5ZUQ6S@ox2bP`UeQ~j1RYj8ZIcFkvY%8GJ!uHo&fpqFanlee{a;M;EAeXPUXtD}AHb~X2%9Hmp%?=>Jf*Ajk=tTR}Fe4t7 zrLf4;L<-FqUBTD@^-Y-p*thYa^v*DZ(BZtalHSdGhCm+Ob?}<{Qt>depZKt$%l$as zlw=L$Wac}O6Dyw-@MA#N(w;AVZ*Vdo%1`akR#Z!Ci;D~D0Bx0tf=#rMVJmAgXda029lZT;DHg$M_LQ(RPs;Ko8_}Jm0H_bl;n{yR)ZXXCyPWHKV{3kF zQ*r?0p>GGUy6w4bCkoZ?@vO_V7dIIpo?n3H6M8e5{YO7~LptuCiH%g~d=UNTPOwD` ze+Bt0&NPd(`mRm9;e*mB(Er^3oY02bO$p}AKb6cN*6m5f0%Cl~4Ab8?70&=l@glhQ z_Yw1VcYcESU{iSp=?4RW`A$QB>xM&E!uS$&Cy4rwDQ6I645wMQ`B@8 zc>Xhzyk^0bQuW>1ARC#`0p=_F7^E3ymF|1wgSY9EbX*-~^>0Lk1-j!SaQ4ahs{ zr`ZG4&JIi(a;88wZuwtfUm^OZk-D6P$=WPFPXWWYy5=FjjO6Gupx)?3vGd!ORtd(u z!{Q3ewzj9^8ASaTq`yBw@Q7$}ob3OFmTG9~+xh+tP(LF4J)KPV$TVqtJ8e$euZg|t zf)dCtM(c7q;Sd!%B_i;~oz3E~KS{s>xl9D_Uq1VlBL|6#Sd>BZ08yDci22Qt1>Sok zABx#c;lWil`D^(_+h&OS=c}p>QZD*J=zV1OcXMAq1LxHXqJFF*R4NEZJ}qi06U52RtwBbIel7G56G%^snx9UVD4?cE7%>M*d$X}1<)ZID`4TU|^q&GMgr6A@< z;*N?|gEKgq-^9S-u2^JWf{q21f%iw=S5US&3G(eYF~R#nEQZ&%Bo^j%d5)3K3s%unwZw_t0P8P7C2yC#@|CjGu*Qzk5GD z2Ed@OC~?p;Ut2RJxzI^JuLAw~>PvQqPr1pO&gr=kz6qIO_~PmUc^xO8p0re20ZY1GmyjXk4Rs5vGU2e+~f=Cug&K+VL5gJ}JHa8M zKz$Sf>%~Va_SF`Z*K{)3=V=`D9*FvFd$mg6nMt=GM7{gP-%P~dtNEurpkBRMSuOJR 
zpkt9I4Yuiodwr6972@}EX|r@G8TveIRypW$8tL&xCO>!=sQ=%jA;t>-IyQz+2Tt6B zY$^fQmh3&ThCS9z;ClhNE6Z8OaEv4cnqzdALpY6G@dC~sLoDu}*KCiuaG?(*dCamV zVz>KUHi=2ysQ>Z=W)yHLLQ#FGRJ1d-Q!%9R5Z|31w2-*6`?&HJiJJX$u{{k=8myWx z2g_3tuN)gQH6JE>EURMb7(Kp0sm#Ph84=faUZCB-(oOr#Pz!Xk590gj2=wQ>nfZ6L zIa(=OiT-^@xVQri9&~8^aK2(P?(9u`c>z}GNrsswz4HirKVtvRswIin^@65zf{^K)ZjX2WE)jg2+59{G0{VZ+MhN-`nw5{|b3QvUTWk{a}i2 zwC$@8N;|CGc@Mh!z`-XJtMp61<{m?-?7ce~W(mWfDEW5ozdV5%qcv-wl*Ec}PSpvT zzHeLiGt0-H2FyNGe+-o37}WbSi|bSbKSe&pJH?cr@$MpG$Up!F@44;p`+S9Hx%gPN zIF~WRSO94K-|0F21h~%^Enf+ZTh`K>8t^j6)t-r^UQ4~4A93yE5&NC+zJfDL2bvt4 z@m;wYzp&S=J(afL(ET3!&I!tKOZS#~2Qilam;cs_DpY^ZTt}HM<-JeK%OIRta!K~c zXeBaq>O$@Q1OyhBsSetHM18A~Tq zv=-!v=jdEVH58x@5vvDU|9AS9$O^v2y{I-woMKCjnsCWyB7U;g*lHBh(7m!q6fP0FLK zf+UYrZDwF*-iZZP#B|!VOcY!~NjQFJ1!-P!C``UMWyw#ek-_aqjrx-W5n(>>>tv0J zAo_}b`M+FWLp|84xTg)|D_`ox3@VF55jTm@GLq^}^gDY{TzIrbfS?^aN}pT0USujO zb*uiqk8ZE>dc}RFRHhRA3|Jqon z_J6jqklOtJ`^G}DT$pW?A1IpzvQU)i?HZzu0ud%F0}SzQAYu8JoM`MfE&r>V1?0h` z!@0`}W>5!z*wDo&vtiiV$C8o)@c&)%6z07GuTbnxnMLSU@w!*tvY8U*yBBrl9|6DA z$<{a{UZIdQ0wx&msJOUP@zzaEx_A_owuBi9paS0Rua*6*Cx&_F3x1y6WecTA7hs<$ zAH?Sv8P4J854sqiq_}N~c!Dj-vCWmnnU%=+ji!kIpdn8*U{c8@hA7e3^vT72!y3B& z`+*t$1+=)C61RC$e=J#h0{G|0>;f(4W)1(}(*^Kxd`so$e8Lui?3wQ`6Z5v|+SG6F zg9q6ni^F!HardDXWzgVx{}@X6h*v5;g-z-DU$-?S&gsc!v{*B`8ef1SGv%dqC$f=?(JrH19qB()INf31$gw z8)1S9hQTfzFk(pjMyD>R2q#99Z~{fh`m4};aJAfas6r5K%Z{5w=(eDy5N#H>-3jdO z)AZ+_YvFE0*r)ikYpMuZt2^~-raxJwSuBNv2Q}0A+rK%sFXk1=sN0Pu1Hjc$H=+10;xrc+jHdN)#tH-s_(Zq&@ADo@cYZ} zA05)uKX`{rSG+UA*pmuSsF-t{=SIq}hh`{9yUdZ(N~`+DBJdQy$=zCN+Dp}XadE8g z)fVoh0$q4!>OrDsBFl^aO)A4Vw>n>YkR-3pYGnXya?lcB0yDO=DmWSQf#2(&t#pPN z^m&}LS6~?B7V=;fyJV^B8t{Xtr$TImz}tPaU;BkAF1^MrE_Y7->kvC@l0Z z)_c#nJoOkWyRUfu#2?f%&K8X5*%k2o*tWQMet>>USk{B{uMZTohh&D!B+`Bo*Ajx4 z5sFhyoEimaPGLB&e*Ubxh&mZnJ*GxwmUEjU068Zymbb5{UIZlco`FZZiNs^>S!KSI zaNQ_`l{;_2{qV$bL4@U2aiLuPQ~@tw&S-e+VQIW8IizGNIeArXKO=(@vWxvanNf$w zPYk{!x2;O@V|<~(O%krfL@$^7!7PXKs z%g>ca4($7iKnPPyJ<;wj@ul{n2O;B}7F`X-`K!62h#tx#0TcmZhIPTg+oZa!$ZnKw 
zHldl^tua_AwjY^zDk)W|B@?p3ECl7B}?5YT}3iqJ~EmXK=KX~f`awrVrq@hG;DlAZ~%%>Pf75*tS zE6*(DFCs60pzvXOQ=lsw87lI*rWIUyUZ#^0r)?YcPoue1C?CT-gjurXBw!|j8cUKd z?sV#E1$6eqH9J>mU#x1z$E5irM7nbtJ>Xr0(UQ%N=KG!1LY{n{Z&kEQ32PBk_2AtM zxRrBQIiOSQOL(&SaT3XzvFjzdqbItW9>|fsO8+PGC5>`RtVc7 zT0cytT_4Z3{=kVJVEu>lqnou49YHW>Ja0BE%X{cwqR%plw~-`4Hgu&k{-SprEN>a{ zpa1+y49h=J!N-L~Ir#Jv|7!kAGt)UD#q*P%z2z-Up>KNr_swpp68R_SpxI_KqcQ9x zq1DtbA$NL>ymSwHY<_1x=Rq@fRd0G&*tjRq*U4(coP$on^t6%H--oAc!wT%!=kmEu z$RCejd;(j22k<4na|rYGCnQ+2T-J?)bD!A{R@dFOb}L#zQ(*JWZ!CRU^B|eSs>(vx zip<9>wsz}x87I9Ak|pk7LJ5+Xjl3L&Se@)T1x*otbFv!A_^5P{YOa($Pw^R4c0oYe zahl`@nlFNmjp}DCO#ZFcGzcc8Dv!fm8jT-7>NnmMJ9cToY}6SU75eBZX^DC9G37Hq z2P14Nq)Ex4h#r!p@D5f9I<<*?gkB`aZ4JX^x>Jl$tsXq@eR163zYRrErZI}k?Heqy zpH*^h*&Bx#Dp2}7WxNw0(p_JoREU`hntwa!>so@>%~9+Hm-~@v}p!PEQ{$b;w#<78Glj)A331GkpBFP1-3Eo958VA z_|SRIy+p)DM24<@GZ($A>((+ z-tA=MH7uPLr_cCqGpfUkjj1r1?Xk10`=n4X5)PV)SpMGAIcPV?v=R~C=U~>xr6==s z<<>3tvH6z>gVx`_4tfGocn6&}NC6NNBftQ(d(kCRTgNT|*P;PGQfdFYyr|lBf@T?RS+8su0&J zpOTEWnWQ8TV~1Z7whrpLJ3?sI;u||>$}fs4}k4r-UGJ7izQpY)Q3)o$rAD7a_E%y@wOO3OhXW2g0t@IzM2!kfMv z>Vfh+AZKwB>fRpyo^7`?O)mAPDO{ixbrQ(8?aGMABUFb2azB*&=8N4!QQ|`cjvYy$ z)RmibTdnJ!+X6QmwS!bPae;ajhF2e}h)Jl@y*sq|K%-0=n@SHLzjqtPWg1JX%!g&& zT7OhcENjz!1@bbC5ym=X5hp4EHiVEn0iN9cl0qPVe;gebfKNOcoYd`DaB%;&pNs9lD3l~{^z5V0iABcsSgOwwMCW|TgVPt)ozhMjehVd{nK!4N(Zbj4N z-7)V}l#4NmsNwwS(HtN*7pIxv)J_sVK#mEqvEV#^t)S(CG)t2F3Dcj$*6Jw)KsfW30c zD!4S=qSS2&E?+T|F3lYx7{?bwjD-b;y_kCZo7y~Nb>V>m6$3oK7ygpwJCX0RwA|t#S{hL)ts3gU8eC6lhW>AUCxm$m>_{ zJjD}`=)3n~0&xbxL)6dLq*Qt8cd@_bV^Su{zi)3M;hENy9uneRcCeVuIMjB$ z2SoWjNoO{&6*?Q8Yxjz|54$5FSK~nhJLjTN^3G%Qpx2e*pp0}qW(l2f5rF5I$dsG|NBChjXU?C2e0gw<>~A`BpW`5{vlW|D>Sdl21(0_X)aC3c4yA&E!#`5*|X^$>TWVKf>>Di zZSKNkeWl*aNFA4Zhp>`79TyByzweF%MtvK0%7~Eu)WG)Zyb*N8tONS1tK@V$^&e#m zAa?Nd_SAn6`J4uEeSa4UrZE_*jhK=qdfly1uKtpmB_yAux{!^g^?IPLHw!P@j#f8riYUS4#HbqdM={;UamMG8O?rqb$l!Z z^81??ye6`DAHbdN7Pf}7HNl#&e}Mc{Eok{+>%jqqG;C6fwLBB%b7%^X>)17EiIUZY zz~hFTemxGBt#WsTcs`YMiYXj1nP^>m(250(#wOoU(!zoIJ!b?AkYQl+7G}8!%6xfn 
z@2qYWkY8^sHTaIl`Q6PL zK3Z(JHNkva`D;iv8OWKM*)J|8-KRTIpzTXp#EClAmBfHtLGsA|ij7O^oaCvU$WA6| z`*#>QkZbow@D>x?Vppew9<- zrqSq(NQu{-kb$&>Z@~r61>}46)?PC8k@yLF=%Z1Ek)*E1Wa7wMWu z%5&&@A{KtO9a#lK#B{qYTiqO9`2@|PURB2s;@(!#LNNgKZ9)YHwnI|5f6+dVb4o16 z*Wq`m0=aEd^ezL!p=JBngrszxLT47q4aD!~K+7me3WKW4)7Ebo!A9jSQ6Nqm_HJijMh6IS7NWi@5a zG{f|`w%|8cr$Hc3)UGOJ{4|9%_r9JIPCzRewNkJd$g_ME57>=P1p<2lKTUG0?=1A| zKrCz<&gO4%449k}KZ~qiNRxlq6y(bX>KDF}!uP|~sIfYusN{V%%Yw1`ZVcqVM>?bb z<`l#xck>q$JGgCplm7s*a8HF>IT4Y`8XvmDqb!)FG-9202J!on&`#Urs3)nNL*(>T zo8Jw{MsFOB8Sx_)$ z9Ww4LgP02t?Xxt;SCGx;-%Pp((M9Q?obc`8WGbNEWmt7}2;Y4l({mcjJ~e!mk;#%0 z$f4u+`$EE5OcknaiPgCez@`^Lnd)VUDew$JQnpeQdu)c-LC ziohTPRLKt$zls+urAO!fd3^z>=NYize9iAL(&~|3Wso)K3dwnV0&>_N7I&ZamU|?| z^C+og6L4!=Rv`FUKZm6OS_=Eo-$TNt-vaCt>DtpkeTqb8=YtzrOJm8pzd#mvJc=W; z2FUSSPCBHklA2sCEA0}Cqp`;`c_8|iFZ`u08Q*7)v`2}jEG#9Kzv)aujL(9;e1A?A zMP|fj@r~u3xA%7V0Ywe;M{q)|?CjS_tP8&x!(EwBEaq|c2Xex-Tf8CllfX&;zf8w@ z4I{<(ht5D=8t$HV@ySZB?Gsq}+sZGy2Ty$+An$g{S=^TW4da@1&haxsK>yy7_4`)1Hj5syVj?-jA8iSt z-yqs=QR$JPE&aWQ%%z8)&RiK-l!{3Z^*g6K6HY_11fliClRD|@^4Nr>7@~fK`2J}> zzRkont{$yDIF6d6n?;5gfAB|hU@9{(h*RjW zE}%jx-L7m5$X$wd>u!cWviFfoP&S!haIX_}aR7N#nM8A*bKiR!d!Jhgf7;9VVdFj^ zcTsz;6u2$;Mq#ObIfn09ECW|!k z>>IAVlrYxi(_eFIKrXP8BmOdlKdh_yKA0!hctDL^3L-u;MJ3!yf2LbvB1}*Aj$t3! 
z^02N5s4v44#FG_zJh1irl%V5iLNCv0g8=0Am>bQD8(vN^nxuI&7lIkOB*+jqB}I!W!g05+6{as5iCafAN_GGZ9wbY-d8M2 zi7a9c{ILvgxCEf6w{fqj@02$CUl7o9dZ3rB-=oFQ+atw)M>vJ;+mnvgoh6y& z7nfZ_;xx0&tv_x!K~%f`e5Yd?F<-*|pZ!n&z?whUk;}-Ael05#3@x!fw>A~}es%s> zWBSlXGu&*n075nrFWKa2lIf5OHm&PjiKU^c4Rkj_bS;71Pl;BrX$8lFsw+f zb!R+j_a&_TiOIh_ff=OYb)W?g(SrJk;6tL%KB84DlgCungvKgm5k}WpHlJIB=HTP) zRqt8%1q44fK3tV53%*k_U+P(;{!}QDjENKgIaUN(|96^fQvq)34V$2@AI-T=o#^h+mpi9)N-W{p z_cDpQ^nndev2=bv_a*>WJ&7AE}>>OA;frBJ6yN{>ebsJ-0nwfms4-B&YXDp7L!Ia6 z>maWt$AU%%%>UKR0`g#B%*o-sKo#k)9(_xwlsq{Z)*o5`{C}4mhLefse3w3vSEr~$ z`1MB!)zLEZ>%l5$)R*p@DA{oGBmDPJ9b((C)j5tl@#X$Gt_HSEDUXCvQHKFO=&x^v zzQD$nvpT*bvYUM&NSdcgCX{rvs`o=ktyu|U?_#+wSA#B<;Op3|s2dFx*5Yzf(yYZJ zA@kJ0+FMp)S0PInzJcjmo06nPT_$elLAK{Wuo%L8e>V5lu0ZK>;xZ+kbsbbGs-Mzr zk6DlRNRIfVgL}{$Anot)I8eSlhw8}EyaGb4sihfu|MgT_iXG<(_E$qghA{8wghmBt z6&K}XmnD?>lSJ>9V$;$*#S(a6v-H;)(}a4?4do0rp3}YLS`{>F;14GK?^G#>n5-g~ zRTHIFZ+Jh$Ts@?bjxqhJ46TNXOUJ9pf_<{O`AG|%e9myHm9L&TbEIDCKzKWl{*wWE zAqaM{R@*<5vn8(bWc!O_(JDnt_UY6Nf88KlDcv8i=fK;KuuqNZTMKa@kvu6(K9?F6 zNfV=Ce}blPT<<~9h(9|=pdFgSEC{!})DslPakTctl-jo3URl{Mzf2D^LHprs+@Ku^ zaZW{CfV5J+gmUUK2dO-WI5yid`IV}gG(@m%%MDE5v5N3>FX|IK{o7AT(E_E9n5#75uzt9y z&bs3&v}i4#Hb3ZvU8pX=sQXwOy_2#Ml4FqR0{zC8lk=lhcTEo#6T(xJ`QCE|eZSp5 z^;qm-(M5yly!tayM~U}iOzE)Y$A306g{{<5f^?XDMDo!ZuNdCdWM*i6Wkw7BK20pQ zJVygwY=d=CfC`=)eD7=U$?4sPr(ONqr6yTu#gwaKM_z`Ae|E-zgJIv|Ye+rOFn++2 z86W#`w|1LY_hozBh(YD(RJ~bv${ZTO+0sjuEOaF7MCa@WY?c=>?@yA+-R-2<2IS8~ ze?7n<2US+i;W>}*|JjJ*xsjGSI0E{DL2Q%79piSfq>KaN-@B>o7WS7XR?OxTE`0r^ zaeXscyMHu+7pbtN=!F17faIoWL2y^1gm85qt>88CSHvu0km86*#(aU_E({&C$Zt~0 z&9W#S$2OyXI-zNdc^Ow}8n#i5?*3&%T5AWy#p2ZTD(r0Urnl667Z#fG%3v}UFepVh z*zaM!kWG)pU}g8xPcYZa9|9h`N+)r zW=$TUx>y;!Vx9N*+9vg-mEGWnU<94BPUjMK8fGZ0vun zNcs}TdMfcgif(U0Yhiml(ea}7PwfUr9knWi4velOsU4n-W4rJ_Y=91-FPM9qdMgj! 
z*GZhjy440!sTqdF*ovas5qK0%9d+u%XD%RuP_+JuC&F3?)}L4H%o3XVCJLpkbD2M~ z;ciX9Ys~VlZd{Nw|2Z0MQBUFBJ3LvIj5^oNUx30qFrlu1-X8W|@nS^YGC=DTxXM2d zU(p@*JYY$mp-?Q;ZIgJ=ZSayz5@(uXsa z+t0vxMj$NrFW25g{=bp5?hjVNVErPPC<6DoM3aYeGo9L0~b7?1B$4}Z- z!9agKnUX-PTMXIKk@BNyL)`Ta=;+%7v3vSig>m(pKov@Av!Q0@GE#8(KB~rJNrUjncy83l#T|@zf{U__G!&{@B3=yq zm#@ojEDRzAT}hCS3&;BV+<$~EP3_>R7ehz04=6pJ6DE8^rA=@B=;SjWWc_D^(`!5S z)#Fp8ZyikRb%(}~gl@{EPEXZWlDIEW!N0yXaoX;2Z|#8h%woWjW#0YU){*9$c3J>Ko9X*W!vw^BajG}o|& zj#|pkR33Vd^Cif9HtZ;>4v*Wc8yCd~-+IlV%@@J-M4lYZEYlybE_rUf~xB85|Z2$Pu3iDY1{~g96De-sSIDsC0s8J zozT03b(UTXQ#Y#{D_C2ut$EMB!E!jaVp`mY(IL+G`Mzdp`{#=CY1ks)FwgVM_x`?- zXGT!H@<;?e*LA8nk;_$EuP@LPeNePQtA{>3>P;9;yGG#WK9vg(gXpofyt;gh^>p6@IvVVGjTNv6{rw>)l1(k>@G0(5Nz;Tw^g|+(I-|i4z0aNUZ)f2_gs0Vt zhG)D;d;KHH4ZTcM~Orr@$c*QqH)M9AU zsMkACANcZzb#-c5O{L}B55@v)j6Xcg&HnIllWttXtaLP;fdYu|eA+VQzqHHC`@ z146|VPQY0F%>fLE8;`5{?kw_t}jt%j*|AXAsHtDs zgEVEh!^5_4mkyD}m*ZmIyTNO^o)7 zeVBYYREh~WXg#%j5rmc+c02#gKXnuN)Srll(NxxtjCITpxGGs6~)<-BBC?76AWF&1kZl&c^Dmz(#p z>jVc-Se!^rMUtXhm-L=2+(3PvKj^*p`rFzc4&8nhesUDbihU6N0}L&%Iok+m=PE{cI;y3k?Y}=&ZN&!nLo?O4Sb2iBxa01M+N)nwd<;_?tFrXvT9L=MHa1B8Y{D9V(pG<7W}w zyY4rU5;zhP@>>!R`m+tQQID&f2cr&SzOQy)-YOH2&j9_UKGA2oXw8_)TbocuMYIPP5vZ@jV1g3x4b!z(+f~o|2IgD39)`HS>8MfqZ{(u!C9~B#v!8Q>hs!g& zf%>Bc39_BE{q!?^w)y#awoN5UMTmugq~X3(77r_Z0={no9YsdI{*pKKK>ai|E&YDP z;qfd{*f50WKTd%I!q;|BJo%j*6oB)`f=|G6)DLl7l1(5>#@|Ip-`0 zN>&5~Bn(KBDA7O;B1(`T20(&H5>P<|B!dc)kt9g^W_%U9X?^9KbH97nS?kyT)b9P% zv#YAByL#8I>X{(N7^AaB70tLF@}T=al!lsbWFaF8_ZZvVM_b_|RFzvnK>HLSM=Er+ z$8hdEcr$*1wcGljThKZX*TPbJ_Rz05yD-f7ynF4)mEyGPvOrwp((DW};R~l5?zLEt zQ27Xf*V73={3AWT*+HhVEc5Yq%!>pA0_i)i&49S@F?e`5{(}Xw8|3vys`>r!lf^(7 z=4hUVU1_0(dxPqr9!z;^$SE-BHZVhV)19cNx&!YLf(1x>rUV4&NyI#WxK^lLv5>o> zgt;sauHwmLlegC8pp9c>G^g3V9DV$bi;d8&-{AA{k}Oe9AfK4ENmHG={ce%2cn<5O ztPFEED|{fn_`b;HNS@AI7vUrNTahlk#)Q4vKzzr&PE3UOBc0r{Gd!uANBG0=v_T7- zT;odlGjwq&y$ET8o64nLH<=AV3u_ujzv(WUr*%*two?|kG4?!0c@8xG)=T#y=vuBR z&tEPe3=^R%X^&+A-9I9oP0Dn!n@O+^W`0J!muyreJ+FcFs|=#OdQDc2)obb9qs$f& 
z@l%Yl)<8U63MFRn3oCkqrRLuRLnoVwnAn@aQMEzBQXSQJ&GbV?f+POVt_Mc6p%9 zUH&QgiTKNkFKR&xKPnyPXqEeJn27{?*ce>wH%q(q?E{d%5$)uaM#Ve%*p2V$QYBg0_`8g z`4W^P>Jcw*I3yx_|H*xV9wpHDu-x*Nmpd5$+C}IUrw}oaga@=-$_2 z@(IzDH{274EayUixP#tllDQjCFh>G%6Ls)4Zs^F$vH@`+Le`GwHOE|TNUgWW^JKI7 zUn6=8#BYm<^?7b%(?t+15l=*C2)AM1o&e&Lp*%Waw>xiH+1ZVH6=1Ho!wNy;yI$tU zA-$`8Zb!-a2y?xVM&WYw_kjFsry^K6CEl{)x{uZ?pXJ@G${)7`;wQdQ+i!3Va|Hy- z)WZpmIuNkW2>|hX-eW??-0={fiew`Zo5WH&b$TE^a=^h%fZ{Bkp=n0fg!};u$#&Y0 zKz`~_^v?Fogr}>xq#H@X;K`367wCX^ce|W1%Td>u+TlbOgRiz@_+vGojV)ZdB%_@N zvI0J-KkXH-UY8l^Ndqm+G9K=FA{3`e`tHq+FazmFaUsWmFra;n*@Umv&D@-Wod_-C z6hT(c;sSdhZhPfGVdPm2+p6-TZ)$Xc1^mu3gPxDLg_I?&jd0gQC0>3r@3lI65oS>R z`$~LP7R?@HG-wx(Z|F+TI8kR<0PW`>qNp=Q7Uj9JCBKN{sG9J-+*}6Y;ncML83o+w zilrs)nB24j@2^pTHtwA+J}#tc-_38Hf12yA^uv_7r~Ms3{(SM2Y`bDbAp7xmB9%FW zcW;l#fyS3u8cV#p%(Z71K7RQo$0I}D>xcomzK%Lm7m9Tv+>R&-c3kD<3NiGn0A1f> ztfIH&GM{N1YR=>v+p*y5o%ToqmhXI5@kaRTsaKBV*7%(r2iYn5#zEuj<3``-xkI>k z&cvSKmLhP2iuoZBz915>(=|MOU3KBwA+v6dp*JBKbwK-v;24{vwV!!f;mB*T?kvt{ z#Y;eoam$qW!sJJo2;Pt4NvX^hvq-X*`vLheO|^C|*T-&DNf;MbCHB}w;9Q&n;&74q zBRWHI=0^jW6F4UwMMCP}JUdW9K#o5U0{|q)Uu?*D5D8 zJDVNOk)X^MO9RBcmjtG99CExcvgdBGQfUmvt6pdT;;}hLtIoZfN;rM%g4;X6^u;Sy zPN0oFPa<9zcbbH$YgxTBfVp3he^vad7RYB1b}XOPvv?VwSbMm<{6N~E&=zQZz<#E6 z5$AO1JMDp2Gxj%YZt-ypUI6lspW%pgdf|P#7ypmsGoA6>gcg-E zhe-u_3j1DL^e_PVJeNJW(ucccgSG>&kJ!9@gL!Qcw0;nlLP_J(NcDiF+0Np8heqZ# z16v{_KS{c#KLf$6PrfNlMZ|5ynX# z-=LZ$ubooLclk?7><-DqC(_@P`AY?lY{w+?pA^_U=_SKo2JgfZz|BPcaklvJ8s(Gh z>4*t(O$_Xh^rxkhqVz;ez1Is_CF{o-JQF6@k{$V@U$AM_Pn|O$m z4)-43=woLrR|r$RNOPXm(G_S8A|uSkF&lO7Y;&kPn;*(b?-y!`rS)dlqFS0hiWQt^ zq4SUvfiWxUqTp!#>+SpH4KH^zo2TB3|y3rx2S!1tNYO$9r|Ha;@pu z@F~|Y4r!STC%XkjQ8xYK*#(dJ4$=*gAtYoM@kP|j58Ors2t7GHwp0Q`q0K%Nf)0~me2SBCV7csl?Bu9 zOq$Q92RBnc95={!MLH8?Qi$o8bAMhNhu8!v5Cisz(`O`R?E_B@KOd`QWjBkm1LaFtU`K}F5n;6zSH+7$bYZQ6^zHtTdQ3JcE zBRoh+nnlD#jR>hFt1MR0*ZC;(i)F5#-{w{Mcwt0y^gRKar%%r~C)gMZ-HQHHgGH8d zlA>#I@zcAuV_dg^)rH82jUH_4kf@^JqbyVLq1a#hgvZ*8)4kJK1uzByZ@z4}c}@Cfb|>Dm=$<(l(HBBw!fRfv6*0=2L%c={?vAME8JlRQG= 
zxZot(6AShQ@9~1wcA}Fxm(AUAGFXQRN6PFLckCU+Ih#y{C#0vZ+{ExhyD*NtRG~6w_Z(>|G3V&&3l1Br_nb4e0a&Z^n9aC)IW!9%=O*x z{9R)KIymuWRY$Pb8+AtBS~}5oopdBl7|1aN@A{}ZYs3M-IAYV|)@?~VxIlwgj%`FA2oN4|mRhcTIx6Y-&&B#7o8&eLS_bC6e2bpRNV;@IR=sg7XLXU6YE~7=JqU zCV7?4h>km4cw*70mUy|=V(hi4CFXTx50U1h;)q+x1I)~|pYth9YxEjoCa+tkGz-*Y z;9{L$O{g5(3POGLE~8Deo|}y!Q(iUIQ4u}EZb{2JLk%PAV(gW~ls{CX7<9Z*3~BTD zwdEzTXh9*(@R;If=LfMKoN#cPlQLwCJ-f)5=4SrDU#8V?1lL`tOe?yK)?UY;vl z{V+qQ)@H8?cNAc)sT219s5sDK=kK3PgJ#nQh_ayWzxLCiYp4m&@PI zdx@Metri+~VHNcYjy85tsmGzfVZiHudcQ5ZMMS-5U_MaR-pfe1WuRGhU~ztzh73MS)}0Ay(0 zg&QZEOeZx31rhO{)vkm7g!7K(4mU@!&smiK2P%q0EDXvoWT_5Waht3<(YM09MiBMngXh!@3Dk`k9xY9pOce6{g(-PnJr9S12zA0s;0Hu zlJ?Oi2Y`IFVHT#j{9BmxVfOb)Bad=V9*hM|9KN}l+-YTO8mZ=weEP-6;#S7IH^>=& zLE=-)_XKC?fiXMVov9DjQ5d$MiE*I)C)_YaN~eeRymwRG2@HYg|09^dv7pKp4|3tE^V zVW7F4z6FD;bKu~0Nv{<<7YbJZ`6M*{qY2D~kGmIzf-Gvv>?9A_YXb4{g*TfLQu6W= zIMUdKFZovta>mJkxY9aZk1Ey#%mR6 zF?ex-c=dsdM;30Rb2H*@_F=&RJH_NBpdHt0X2!XQmy-`3A#DA)Gl~vN0`WqX1$U}p{mbJ@9ML;nK8hdJ z$J&5+lmm?*2~&O5DMKbj-<+{5Myrj7K%7m#Yhb=sZ|wDlv6~*>#u!2G4aX`M#xaT z0per6S6V~46iT~p5c|AERLv$aE8GR*(VbIoZ(H0>n+#p2YJML^dxk(>8Hk%Ea0Nei ztSNLxeY6nR*4yy>jE4jeuk>zQHT)nB%Vv6bGOYv2Y;8;j+U3i-YAZL-(0|zAL>tFf zHa3mh+C8AjkS0I-sf~=ncV2JD7T6t=+I|L0$H#GqTHmJ(OP7kg$ zZ!0=0&GbE7WvXr4u>tbmKP#Jx_2;uG&b)IzPvvudyfLf>h*!!{uNI$#1!kP#IKi+p zpjz;a2^3f)nllyTULUcmcaM6E#wa%^@SV@M!kTaQl&G|n114S^syFP-&#uukZ)tDc4N{@r!WpyGE6RSJslTEw6Rz7r-ay2Z8wKgfvTcdLe~b(al?A3fiGP z7s^0^*N}maIyTdlrzZQ_VseSAdsfNCtAPAt4zreDx7VLOSJO+g@m7{mmymf5#P4{E zUCcak-*8!Iz%T*xDy(lr0~FYCEhQ0-2~}QfE*|L~)MucW4?E=y_bWq`64;GVW`NopXpcw=C%XlMX+v zEZ(^;*kg_d8xWdKVwvaX2lC4sa?iRwagboI=s+gvNVp`C|wkN{UHd*9}cZ}`GAQexz)N2ey7dBL6=+_ z6nJz>%n`ue@*=L6)YX%zH+V9g_XQLTkv`{EB6mPN@xyqhjQtCW!MWz-TO<%&1NlYk;8A;LC;TsXOR8oRoRie*YbF; zV(SkdA>&8{^5bon)clm>`sWvT8Vh-xy~kumK?_${UEi*=n2_yrNlUTl8F4vi+^1E_oNa<= zvw|h1P$2(O`8M}ut8T^%SMMO5Zq27-^dX3WIK{QI`7u{6m^>4H^?HN}Ux4CF$uGB^yp(ckF9Z4)5m&ecz?HRcvO?jMPPbf3oC9@#G4J#}baFV|A-%oMmh#496Vz*R_`jIP2!y8fn<|tdV2hwWPNZG#3F{{zo>g!G4#` 
z)X!%+?k!crNo+&EAQ{^MZJt7>W5goSK|W8MxpSm? zYlMVreA8RYa-lYX8tW-|_gGuLo9GzUqOO6QUhj2A$HPuho6D=L1~JHDiM(>mchzGj zSFT6j4g1>1-LK!ffclu_BDRv7V^$s)$Nef%pf#v$*Ms>A*?r&r`4iHXuUo4*vUx4% zc-B8jRXrBOq#=-M#8ZV0wtE+d$sC%ETon<-$FT(=En#7bU=$hQ2UN4 zn8&RxbC*}{#vIIf^5yzPb)x&X(nD$GUld}$`8IAx-UaI#p;4Z$OMIG2N1UdLD9oIp zV-so<=dg*2k@9Px0?oys_TBv9EB*M~7_9NH3#Du7j+rX>wx%bpu7^B6c29}*+}Mc# zc$pFD70ZT{fXJ6$?GaN>6{YHr615AOLYC^j#@Rm*gW3dYJn#|sIAT1WugWJc9lxea zYmk$4fIW47Ek?DO|0!cw3hqbveea-|ficl>s^y1O=VE5@k=PCv4;#M4TimdXm09Bk zT7$}0qA+*@)SoxLz+=80_`X8rl=v6nC-w5CJ>5kyR*kYF*2)+O4k6#Fkk*)oFNP@y zYEFEOiNSsoD&&=2f3A}nKO`Dz6R7dIh9jN-s3R-O7mM!Ns~q<^Z9lqdlq9ZCRvRx?wfgRqUYz?R_64r}B=JP+ zkeZY+u(rRGQ9s2T{;A5%4@32yLA=U_3#HQCQc(+9VrJ}{gsBA-p~+iQ87#W5Po1Xz z%+&C)F!k6mxdhaICM(Q{(52Cq4(K{R&BKMSlYI{?>+dhHO!|KyuuQ~&5$QQqp1P>v zy=1CV_IlJ!HEf|0^U`>?WdUaYX!HWt?-~ox!AmSRzR;Nte-x)NecH9fMcRA%WGw*y zo9KvmF57hI-fZB(`SoYCz0+o=NobnLGXS9j3x)1djWCx6T_=6@>}$ zxPs;V6O#svnvNa#$(GTbvX})U$~NW-GsLP^X31@&rDB)b*bYbSs6Wz+Rinv(FW^}0 zWKVs z>G(wZu-LcLnoPUyeB0#2LzskCNm;$Pq=O7JueK_23u90!#>UtC5^+De_^$KIl$v=SHcIaX@iPcOZrd`_hP`Ug~ws7n#lp*p^GuHqL=M zjVt8_a%CqjXyrF6muDBaHLz(XJT2psK@i;yVSRXyBUqgYKTbNU`sPA;cF0wm&(#@| z&1QrAlnB9lkqn7BBBV!42FF99Ub*loJWJ|$Rq^TN=H0uqxn(#>vAGL(zm*M}->SPu z_sXzfazmC9V`Ff2y!$|a!5l88qECtM8tzFu^BB{XJ9dpxx>Y2J#AHQtiDXeEU1tk0 zrwwYGBM2E8Us(4&EYJ$3xyhT!Gl~!+$j~ld&J1XTeLnGp+H~QS$H8h@n~Kmcj^dRu z!xi<<@2?#(#y*y}hSM)xI_FB)b|sNpe3D7b{i>Y}HEm}m-of=;r7KhHp@>)U9(4nU zLklQd>jFi`9(iZ^EX0RWCCxU5`CDuWy3%Lk8e&vStI2t4OW+kB z6@h~mef4!sNpg-9`50xKo>86*X2kP@K-|_f|DBheqeMsWv8ldcKSFF>l`BB}n8CO~ z`?xawK%4RbUmcswPvou(Kz!Es9qw15RzY{e3k1J%KU@XkvK)^rE!)%1V-+RR z(Og&VkUNnJnwZ{tUp31a?0@{MejRJ~oJINeCvFoU|6sOrIfiNoBFsK+@p$34Hs(43 zkYi4_$lJ+^YomDA`gAiPwvwdd+0BbUe$Z{AmP;E~nVVL^KQ;L0B$3g;9s+TEIrEls zAJG<85mHQqfUPCo*+I~blBKtgcP=vKG>0li+L|OkDt~8PA_wH((U{9I&@R}?auqY& zlDZdyX`lw0xF#q+F?x9}Th@wN^NC@{Ia04BwR|9d&F9@oY56sJGsqy z1(D&t-CV5U#!gZNHlT^y8G=FgghC&yoSn40g|N^;eU*7o{qJ0h{$QK1={#?CVfHc7 zwsvY+eH3Wl&UsqYiOz|y4P(FxPie8^0O3>u5Wi{SnP*lx-k7kY9~#Wau*fbm_5z4g 
z@zE1yZ_`Iu_<kMjt(~<|8%9urJg|MGgGIS>M(6!Pnl>3^MH8i zam+5yJ4!xZc1%RVE!iIHy}u?5#1U;)lIvQvgq4`8#_N_-M|yMlnSuDxQ4^C7OxRc* z^;8;}PxuA~xFzpr)SN2>$ z{dsqIr@82Pobw1)6a3aCP+(_RiJ04xY&)-9C4%HyYF_Z2M;@U2XTpD8D((y|v3ldz z2Pry%&H)bWUO@Y2P0pWuem7WT;cj7W_oPw$IlD|PAg-=^uWfqGz@-{%QQ_dRE<|%T z?`t6b>fWZ&3i1sxciRGgOtIZ+0_=km5WhMAv+heC3FS-ek>z=(pQJ9Pkp;wu9}s7Y z35@4^_%5HqookY~tUX%`#HkNe`UkFmrM}Hd^Pu?NRz*57ofHtaDV@3Ym9YGtI(4Uw zk@qn=jVOm_K>QdJmVRl;7ukhSLZOMoooJGa=Wu|y0N=6B(+ZV~w%f9B1%?10b=USV zAYR=4!gW;ik?IG*R>So1`qyvMzJY=PFACEW{5}$ht!|yyH=@ir1}}^|59F&p>VCCA z{B@HlV*O&tHDt4gZVV_e!cF9@Md5tiy?!KQo|ZIs$!zDv7a;%i#(RO@Cv;(f@SYcK z89NsioAg2Dr_aqfMm>Ai{=sVfyVD9 z?_2oy)G}lX?anNS^C}`fquM_v>A3#cM7mfg~mx8)@6kPHRB^3mH@F^rm{_75M~Bgwyu& z%WLwW_pd6of(Pm^Rl3F04_Pr&=A27ak)ZOM>Q0}V-a^}1x&8`ge&Gf7c<&20^)y>{}Na`7>a1l zosaz3&g5@0lbqxkauz-sXSWNu}e}g-Hm&;Kf&Y#4>r`chZLk3tBnC00O(@x#)I`{AOKRl1I zsHD`HBNO`BC?{pwx6Jgeos{1pSpHG=b9P&uO(}0-mvmnRbt|&wE`O7ez}#%%D-a;J z^zS6IiF_R{cYY2umO{%qQh`wN`J6osFO!>nV!F2KK0;A7lE582=gQH+gYV`O^cdl^ zb=CN_(?ZqT>4+#T3Elq0Z@~qc@0>i0rxR||hm$~U0yUK7Sm#4RHb!oR^nS{|SU{c= z89p$=Rhy&~YyRn$v=AQaO{_(#m#^F?Eo^TVuzNPIEUz6?y;-eouIK6X!#$RRt6DpoJWt{ z{zjZ9wd}b`?~7*Z2~oPA283e9I_9Qoi^%qQP5=Uo7FMvG3V%g>D7E|d#OiZqYZKQ#=YU* zB;OrYUV1%tA1BA-796IpP8Jn*HVV?#n1kNsK+FHg^h}7cEta-cq;%v5g50KP&B2)) z^>mD)0`GjeC{k^C=u)e@Z|4m}Sgfdwdce)NzktE4rZJ(n)4c-RzmADsd`}OKP+N$c9=@IZkex3HV%r`<=*0S|x&4r^mHA&$$pP z`lx?=WuGc|2WAv@02^rScQPul_NuC|S?O6GxlcQ%Nj{6)wAWnMY{g+Z+=jcvEzt3yeqt%R7d|McUDJntmb;%XA_;Cg( zMan<&IUVNnZ81&Ku!bMUlnx2ZZ{_X1AQDGR_44dJ9lM}=Gz`UDJ`IEiQmb(Pu?L!L z0*b%@$iERWRuQ^e@`u$25>D{jB}XaBZcfb8(tI5ZB(O`n8EBbw%jY*FpbGR7{SNRc zYse~b?@m6kUY1vgmnYCTeF`fzZ)_%rVh9PdPvM$!YnGX9XH<)pa*Yo=J&N1ASn56NTd zgsM<-x)K^^w@@Fh`5aSu`tY1GE%^7s|0!Qh)(B8OPQiQSGu#idVrTjNNJX_?ALvVO z%5-hi7uJw^)bLf}1hjlLMNL2juq3x)UfM>l<>?BhJy0fEDjc;uBBI%RxT19Sⅅ; zq~Gwhl%#)s&W^3~&4;Yc-j$zjQDIOPHi5r2Xoj7N7S&l&`LPau7cjygTJPUq)ID$cGPeHc7C0Nevyf zJEc`5HI|PVm{Q~@`ejP7X{k>xB?eM|8EL&=y$6F~+Na*R()sc|@lI8jRzK>4I}uuJ zuMhEVzOCDVmiE=4?A}g+bW3l(1_O~qs 
zKbMs-tcjnr-BeanT?=plu*LBR!arg&U3{jeax)u0dZOAh=#|evWKPp*)^^I}kGs5| zf~<}Ku>CkSm1bt8d0R&hl_Zh07B&l?HQPCW^PZqmXxPZ4>t5_n+HNYRDF>>5=oQ^`6(hed4LA zs^mG{6wkS?C9hmKd7=~j8R+vTr>O_Lf3bFsCC*tC>i2{mE^6!Mobo*FLov85g!$FN zTll!_UFeFToR%`+^1-m#rcQb?m4#3oipp`TNIQ@^;8r0h5IX5bUt?-fSr2vGUM0R(comPU5mFEf$v5VE%&VF7Kxxuc-?xANzzDGwnS?GR6C+x1uSuWSDR- zC=2>vz% zsc&t_pT+K`@_I_#fY1}Bu*-#&&UJ$eTOY4y+@ygAzZg$BM??QAGiFUC!J83!{S}nK z%E+Iq0m% zyxjTWwjLGXCjY6^Yxh#bOrNL@OPS+H@7DXLprEG?ynb+v%oBkf3St!L)0L@-H&s7& zcTJnO2#DU&PPYxL>w~^O6!bv%AL^&R3OKnj3TMLjyV9C1reCSyoB6zZzk&6jP>>$; z>y@9y?xu>;YJk@t3@6##Y->C`4~MKbFEcdBc=VKoH@-eHq_n{|HPSH(G)ew$Ls1!;k^9_R=*g-UzBT)ZAQ3o`BGZ## z*VfA_EQQ;%2|BX8!hchKt?#H2)W1?fjh}$~2M5M5rq6INdraX?6+AxgokzBXM~ujI z9r(up?wkAt>$~m#DJaWo0r?n4>Jw9?t@mT!w)!5N`c_TNd!^k)S%Lac#6az{P4|qS zwB1xyR~`8L!bR21eOwr8_;;?7ynd(N+Dp0PFIm+{6yXfN`>3t}I)1AuYp4P$faC9C zbILLzXtK1DM_ZpmsM_z|g=3#c?FXuCTcu-|KtcZRbx_mP0Ny_^`|9SYJn5%Z@?N9E z{yGQ4hl!^cHaGbv#d>fVTY{kDw;JmC1HAtb^hBpy7*`fZPm%})m8mWE#a{Oa+^Fd+ z()FiVBW$bQZTC+>Ox&orWEhk3OR3=Oo5_fo7U=!2 zE)C-2AS`HtoOn$)R0@UJS)Opc#DBCkR7qsUQI;WWhe;gvv)J8KU0P3B_SY4_d`igp zIN*NFnrXW430Xw7)uqy_^q582#gr|S-IZ&gAmw)(>dHF5*r@Sm{DD8u6?{DfxfM-s zodD_496j;k{1E(99?W*$#H7I8a(@cysye{&2a8qkEGGqXrVSR+t@bsg`wb7h`TDW# z#7@~q4qqCsIR2Bin`+2Q11kVOol``Qe17kxKQg8NByJh$ippnh)aF4dX-{GXgW$tJ{FAnu>geilpWHoz!JdowTsT;Gsce?t=&B zJb2#sc}#V-B7Tsp3D04J*q$^m4PVTq307W0yBwVpd$SH(}LseU`$7CMBX> z-cLbSUsp-_*8qn7nXj(ab>-0+wfnqFsJ|3H8$XIym{B}iFuNWrQCu4OleU}c$?F0K zaEy*JLhHQir*kAc(PKQX6=jTSzFCqZx3SCnDd;K5>j4KKdf)p_dT-ZV z1G2EmNEpPM#nXj^k36{7C1zo_9w|@!leU}c=@?Kr6QVe@+Bb|$ zofHlhffh->ThP}4Ts_qIgYZmn^7?2f@JuxJLr0_H)l1tf&4C8`-@;$@Fh@Mu+^zRd z!9ZRQRR44VBlTtPd(ANNHEd-eHX@72TXj*fUQbT@m&BxJL+2lcsQEv@1H@jxT}*bt z*%UWf;oR1_^=QtjK4JTrMTVf58kXxvh&I(TRig>+ENq_R4|)y> zY@A@u;~#rXD_aqdmlP)l9X|{;HG$&?7E&{$w3|?B>DxuIA|qn@biG*t7`wp(`46xB zg;*=0fmtK){0;Fct&X!!PyEfv!?ss8hm!i~1=h%tSYSfDL46+{KZFLhjMUVDh7{7nE?8H1+3mLbr(mS61A2d2B)?(?@3oZ;dI8{c;Q2$udmb_#F9e+(w&!51qH5dk2xp2mZ!W<>x_)!_0 
z=K~k8D;L$xxW>k!R%Whwrj7l|`CBOiC+*3IhPHqiqm~*m?E*DHx|k%s&-S{Np{7kL zFJ;;4Z&|;TQaI}3?-@A!FjE|&ge(;Dz2pw^lwW;!Rz#ofDWN+vzm=qZtNo<} zEnVhXT$2Hp9#e!S}>p zAUJ?Ooudr;diFcCCw}s8{0S|&M-Y5Z`~-skP#{q$W(D>#1m6??fZ#tA==Lv%AwZij zzC(NB7yrgzjTR1u;Ctd15d4P%iAoX8FKr|DiylQZkI_Gzh*YegVOMD3GWW zUyrX9g72CCLhv67^z~be&;V^x_zvxvzy67jL;=_fbLS!Wp7|#P|DiyazjoPM7lQAZ ze?ssd3gn-FJ@dyuWkZ)gpiO4qp*{15AK4)Z z5~YF5rb7MSJ^nj{_(OrNe<9*L^#0%DzyFDkL;>&>{nrqDkN*zAe<;x9ua${0LhwEQ zCj|eYK%!E(`tS`1zQ_NB;6D`T@@H+YgT9XZ4(;(j|Be53;~E(R-{XHm@E-~!D#b{9 z1NDFQ_@DpAKfu5UeShrnAODRHf4+15PdHkCMu+KNaa0y){rBm=q3sg{OD&;O(6LYd z4b3TLy$Owf?bCll^Q~VR=Al#2u}}XE&EXDGhsIy`>A#`q7?g2D5umcsC;Rl@(ER+y zy9*HhKK(Z|pU0;W8b8>l|Aykj2QNo;L(1Q$|AywpwM3hE=7pN8;zRiR^xx3DM1Gw^ z5dJ>>H#D!*C>**mYoGobnm-zN?>GJMsK?~ey|)KdtyPyY?g+ep_>hVsGjdk79t zl)xmW0H^_xEv>D3LV(9*hefn=GI`%?!K4=sB_p0sFe?#XzCFuj*AGS~b4b2Z}@@hh-pktr@ z8=5m4VxbG-qEGhezoBwsgxo)42IYg}hY%c~*c8)t0G$3K?$dumXKuBimxu88>A#_Q z=Hs`ahwa(+tduVpXr~hR$I~=x6jY9N4HW)3{wbDDEH; z5E**6f`uEH`8To@mUyu^Uksyv>ZxU%vDCp0Er|-@Lu36B`u4@TJ4>t3_!QWP6!;W! z_TTm|k!Vv*aZvwapZ+D9kFk`bi8cc|_UT_DQ6$Wvl{OF;eX>vg5I9yB>n?)4xRXC$uJ^{>6VOKIo?oDSw~-CA$3O zFdb<5|CRV)4ADBYeDMAz2>wHXM5TzArg0E_pZ+D9Plo050>a;?e~ISf>tz%{`1|xP z(bsP=qTvpNzfb=XT|V{~qg4?b_GLRABN?9;zQqF8X* zGD$EQb+%9c5?%j7JQ^+tf1my(5=Dfs1V};o;Prh7{zHK-f3563)IZ**e~Codfoo)O zLhSF;zeJZmYxm|Ll)pd!63zd*Y4{Mr-=}|xL=iF4FG1h``}8l-`~wUw(D}hW{Yx|- zUVnlHmHhEIqr;o=NAMruzwN)F=>$G!p!2(Z`fq6dgDfrR{)&D2Z)pDiI6jy~Qh~Mu zI{q)l2Md%dEI?(WPxk4*p>qE3j}HbJr9j&MUx^RCvwIEU@6&%n^P2fsq5Bi}>A#`* z|Ks?e8xu4>vrqpGRs8?+_~76hXXyIWKK(cJ_4}X32jktVpz(oy`fup+|F_2nw|J;B z(Cq*n`}E(?oD#2x?I14tWS{;UDknxK-K#YSf1myvn*YB(KDbE<)G;{u3Vr?JxXsRezhb&SvmJfPMOJXx{(-@j)RDNd15c946@g zXdn)Q;lQ+?H)**=5_}UprNtMseJhG_)rxyv4og3R_)J?Ss|Gmx-ZPr>do@4LQSLL! 
zfj+W<6SE+DrUPk0|CYD>?q-Y50V+xsmD?_*VVw2Ul)*3l5?`c&exLE9fPd`kHe=Ec zXS}5saZX!Y9As`_7Jhog(Rk0oXt5^ZS%$;P-kM?7RIfQ3ixvVBf6a zGZweu*h(Gdl>U({e(Z^|MV}@(4X-CQ+m{2HN|W?2##mmYm9ewx;=C0bCqN02f4e;Ejbvp zw#dhkU2Yi{-%>EemGr&|jV-+RPC4;!J zf0_8fs4$#_8{h|@es=D~jnx{<5W1ImTc`LdN0YSFebYN1#I6F%1IkDk%p>dW&I(Ys zR|eH@&w@AV{+~!N|6IGv*|VU7!oQiCRs6ztGlB$YU20dhMO%Jq&?s>BUuUTD3tgl9_eJ~B5Q14^bRiB*4`AED~6`i({zgii(Jl;w_U|N_8_O^Lp8iDvG%Er3f8oC@0j0n7?-d+| z!0>sJ!Gnwgb@HBPl#>{HZ7W7ChmX495o3QPTM7Xh1InoS_q~?Hbp-16%Ao55ZQq@*u ztOYVjum|{iuCL_|O8Pe)E&8ohloAPp;U-3NtNaN^>;J8NsQ%q9iJq7GH$DpcTmPW` zqLzdR^=s0;7f(9U5Y%aA~ZQw$<5Q1dpW%2Tix-xeoQI z%q?rmHK-XIa`+Z`IuZWfEDU}9Mq_JwxBrBr^`QIXH`fPe?jdrQ4{aZXB(*~AF8+Jr zZ|#%8V8vV_o(k#=u#a&F&kuJrDOXb`RuK~7h>arIx)d$?d$Zr4--EoR;n0O^00GoK z==g+@R&f`(n;qKzd3flZUHA_I&L8wY)NpxhpJSz@DGyl6b{y1aQGW;aY2(SY=QHU} znFLeGu@s-j{=`S3nrPAm{#(Gkc|q-i)_?lY%}c*1dlm#y_s14)sj}2=hCTg@!sn$M zgnr?>89~C|`qv+uZ@FeYq_D-H{RkdEcUi4no^H#%lQhI^hVILU2>3Wb)&M7{@{_!> zWY_>IhzM%`Z~9lR*ABT$f?l850VH1SF8+JrZ~cqJx$R?r8Kz$|FJelGL#|Ka`>mCF z^L_q=C)09PIp1aIhd)2c@!#2=ydAfs`1@l2(Sq6s9l!Hr)8SIPd}#X-<422-yZD~| zMFE6fSbD$k-HagNFYU)nfm5+n-Y1H+K6!NXF;X9+HgkYrBc5RCYk7CniR{xujv#A* z6IB1I*vt6L0#p$3FYV_$5*97BOM{0x!|I#o6dEs1hj>{el?)?MKNy1fD+HTu@Nx8>Osy4ky#i1LUe-wb~zxd=)hHrby z1D%5Ezo&mt{B6-En!oZx+c)|lFiC1R!=Cpq5(Ya&A;P)~|3Uc6_=K5`BTg<6OcQa5 z>DzJo5oWsJ?wa&WrZF>|ZoV;v6UAQEGIq;=^3+{`LNxy?&vy z7M1Paj=$ZX8SpcX33K(2s$Fy(aC0}lB)mI-!O!Bzn+eZ1_po8426Oh9)G((GqGH5e$rLcr44_}y5gj{CUqJkPC3N2?i6cY zbJJJrV7)(CN0on|O*;ng0)*!LxBfx#w^Fv*q;^@*`*Q>*mVFn7?P)&}22;7JA6)w@ z!*8X(-Je+)T-uMm)T%I*B3wl0YIaUhY2n$ENq>6sw0%R9!t=lAQ2!Xt_$}}k{(I?f_h&ZdoJ64Lq^Kv!m~DH>>Bq&j?}-ea zQ5h1_?y&vw{zLW8z~OEE_rEXpA1$bTd&VCm42E^Qhyj)S>+zTVA;yApS`e6|KaQ;; zeJdl_nV1-|9fj{#onRA2R+E+tRW)tf`0kA;Axd{Q z(w)+s(o#}_fTVyRAtE82(t>oSN+U|6pr}Zfpma*ZkNrIA1MJ&d-uwOD?>pY(_$~gJ znd_SKx@OH@%sS^Ci2ENPm6r7EzwfF4KScX? 
zD-Hf*9(nvZ+_n!uxcT?-7w7+$wDYI-cj+I-AA&%zkw6{s*qUwbhXB1dz=%Xh{A22{ zt65hn)L^(Rt?v52Gwgk&cp)|?`#U{Ce&YK6i~rw)H}C%rj?h8=z5qziXg{(VA^B7LZvWGTZ{A1qle|G`;=gwLZ|VPm_Syf_g=dKVQ(^68`5$gy z^gmtrb=1BsCUGB;|Kax4|I>wsi1uk&=>`1{w{Pg5E?hin-~PxQ``^xA5$*mzaQzMc z(}juNN8w)shW@iZAN%W${=@i4YzZDwugb=2=!N{6znqx<7ay=`lhKu0)-%+YHo?onU(TV?^eT3sXY|bjb(<9_3 z+W*%T|Ij|4Jeb6ssjw~A?Rk)XM+;g1K8h%B)<8?0rT^F^hh=gsrN1@{06h3=PV#w4 z_P6t|pY3Y|rpd}#eb106+=iq+4U*gxXlht|?6zGkCQ55~yHWH*M|;|3A^NzqZSxNf zZoxC?eFy2z5&T#Iz1k*fjxc-&rAu377J4H=I z6FXF1m2&oN&pwpAy)9>Q?L?mrJ-@&%es!>bO$p8B_5yXKvug_NaXgsPzKcv}s=n#! zyAlR!d_gTD426xZR%ieWIpXNY+3z6U-~_ju$GJA+UK5viP!*xt~;FTqNc@xwB-D zY~){K8Eytp{YG-PO>d6W6slRGQ_L4n`+E64-b;rO| ztSH_PwypC+Kfv4r*w#2lu)c<%NSf_<-&eb7X`kkrHm!p8q|&;Y7hn=~tXtnU1v@6z zFKW7sDn{LW`HedoV!d2MDgNR;^)sN}H7OZv6AvTm``$F2D>UnOjX6`BCCCkJ7=pAc2`pQ0suwfRE|*i>7{LJJ74Cj(k})t1)3Llw`pH?Gq^MKGW4nQ3(T4XjaxB#=P>srnSt?0b?a_f$Gi z1Drn@i=TMqvEh5~oVb3Bj%L!7{?vLto{}kI{Tna97R29t8S9?aVqjDU>|?^WXQSJ# zGl7?1QCx0#{@_p^9t!tLyuZl2Dociw5PZ9p*8sBx)BHuGnvn}>Zs6Q7aR2cA0Q7QH zboMOh`(;^jYT|v=6yLfp7Ll@BhBC!)__EwQ{f++0MboRol`Pvu&CLlHz3b&!S_45P zlqKq}z{5`@0v$|3~^vMfLETWGZ^5iRE3iP?vNG+I4-?Ve@z0kvuj zjl5kh2tC6bmjzUGV`=T>MYIY`lh}1f%L4R`!Bs*rsPsZ)^^OAEAsP%_&l(j{op|A( z$DqOTidadZ)Be_nG5Pl-X604ac})QQtLFnE&jqcTd%(mVFO-B{^O}|fDyYX0nVHd^U1CoL-6DDlg>{8*N%dpyGiO@fo) zJzq$6pM+C5e@#g!guHl0nr^>ky%pp?fRfP+6LhM-NoJN>=3Qsg#lKNk*&{}srOO>5 zA2WPzL=JTcVio^{=*F$w2|C>H_FUA<3?$>h^jjO)uIOCKBYlrIVHYbInm`MY?Z(mr zt0TJIL5+BgWTJRsW1-)iXW$(4koBU)Kr!)+hdbg5O_?>((|W!%l+t%xr zH5M++Av^NOy2OrO6^55C(d3E`(BDWnqU>@OU_?cMr?BIO6}5& z@Fs#B-rV?_97<>UnN_vFHw|`}tv@U`2_$ZQfMHuS`D!SWOY?0U?MENF=uI8F5pn2E za}NuhRl*o!Hup{rX&~z-pDFn+JHO{(L#|ia4|bvF4k~NQsHSz{LK3&w!nmI}n%*Ur zxei|_(7hX^WO@!-w4G2JGBZIK*3L^e{!|ao@P+j-kq>5NlMeMxM%5mGCI15Ddv~3S zd(+J({uEr-Ch4!1rd#D&+k1Xz61?#ex_ME%I4@UATqfPM-Z(hFxr}=VV0~~fV}hF5 zrLIN{6c(kwEk*L)TVU^jz{Dkt!`Rw}KJG`;i*@sCj4fLZ{N!_-gQW*RC{l#otd#(^x#9&^Nel!*e3-6qi}eN zCWaRUtF~2>=AA{O+j|o2R`&L6>#dTr4KP2NcaYgnuV(LFR>!@T%k#Jm`(|x(88%H4 
zUMB&Mw}Q(yI0{&{>P)Q>6(Yf)?a?L{MHW%&RGKwKA$I5&_MQbc&kRqYBdgj{pbU*- zM=PV`Pp&PEvkqs6b6wO8NSt|K;0aYEZ(>;V8LzOW*MDE{oTB!vH1*jX!rZlB=X79mp30~W@r%kC;dj#^ID{a8-A9_Pcaz~diJ@C&nvR$jInmW~{#si^?2F70p2a&b z)S~%-MpHTdx=nCt*SQy@Bto2RhV7Sf>u$hc*qg$dpSuD~(d4kO9|Uk|>;@bzDSAnD zM#Vurlpms3E4CI7 zY4BDq0e;s@tcv^QTtEmJz~~~e@_I5})T2S{4CT5F&}eOKoCUtdHjh{lycnE!(VMqo zo*X}3QNR@YeFx(tBxS){)U~i6<%MZ*vbL)>ni^;Qe4WDzOkAdP{Z{=h_(q=r=0}5Y zaaH{t5tSEBDnu(gl3N7JnM!H_>NJU8pcfN|&?d`r?7>fVrZ~)oWN;Z6+7trw4s{n+q)bSV6~Dfxi@CYtb+ zZgduGKOXXJj5|{wJ)u>zE$7&U`%~`-XR`EJ%YTL9-7B7WNQ^?#@`?LN<27F8bG4!h zL;(u+>lQxr1VuvdmAcgTg23?Tkx>gI+0HfDY{cYhT-~lLzFBmD7c=wg>B+@B zIOETlf{jCUtaJRDHRrx_N{r*Z?f)VKfm==c++s65;Dp8{+$?jeG{(+<@0I_R`{@Wi z_OP)Cq#7sjJy;)?WvRGax#%EC`Bx(`}+ga-!XzAqQ+z%WBS4I(;d$wG~^i6e!f zr1qqye8ZLd6gkP#|5C=PQ5axT@h0x;aO8t_Df;94#l9z_ihO#Lz< z7j)QOkK+|C9M(8)1985l#QgF!u+g*y6Ik8Xem0SDcfxSQsjKkRXXA&N2WiW3RHxh$@@vt-gnHel;mwxHn~GeNpx*9;FD~angiv?==B4Tt2wx5>)mkGK9SHeOsMp^7)rCgso)F3XsIeruNp_6D* zdO8#j_7Q3e5~xa=>zA-np}FZLUFt%-aqzib-!COoFw-RhYSmsIpZNwqZ9`Mco7)>a zSoFc-p^%--U}?3SW+he`C>!OmB#D*=`>h#S8rf$Q&hdGQS*xEc2Yv3yn|vqv3X;-f zs*Y9SUcxA^6i<9<+8H#=D^-g_-4pv&F&0q{~Rr)@H?)-6`(Le@)-Ksg8UKqF@!ZSV46m z5x)4^=Kk%u;*U!{7V!Os_G|%#7mu$N;agHWa|bxsJb=ThrPzyFx?C_{u6i#@*f2HJ z^^}VQi+)voE$LgZ$OSJMbg-$dM0vLy#%OHQpW775YbPw;ZpL_1fF%O`-Xbio$R2>U z!!8Lv^oh^S( z8$qft2X&C6vT>D@ko@&M&3duTrpU0|sXbP@VS?gEv2xJ;DQQjU9_ZgRtMP(s$H%6MWJTG8aPFpyyR z?fxOvq#Be+Cac_OH=(K=&g4iX)k>KJ5dXR{HCLm!9WttRVX4nanmwp$nqsU4g8%Mm z^N3$QXdsgLJZ@2?*9#uc?TeJJMoRrexa()L2qg`7f?CU}K{yqZVBzHX^oTiBlUQKO zofWkeVFDIWQYgsr=pxPrG2Rrlgx%j&{^FJ;TLGpOUgmTa!+%^9&2hlEX%edsi%*L~ zWv9vQHuk99zV`a*?G#Ydjha1fPt3uRDIuvZaAgV+=+#vkCaC5(m}Ls5>E;_!&W4(> zy)lK6m*R!xaQ|HS;T3#IA=^iKZr-UyYkV@W=>Ny$sQbG<;xPU5wo(YxB6?^X=04_-y4;PM(IA>O94F_?ONT59<@4PO^fDl=p8; z=wRmFr2F~}OH%SU9&Abqy93HkICO3vLL=u+O|-o6{J0)tueSGrTaO|+Ya@w3w_2(< zJRsn>zSMAcg+y<3N{C4BMc+OXR=+W4<8eJuvJKzb0KNA#KhZROAVO$OkZQN5CP7KAKf)cgA{rq`7#*Ou3X5W`#T=Qyys$Elyv8XlS z=LDC1O5vtl>5}7*m-Z{^yy-PwKd>OxmW|6m@#ZpgY~><6N7T*R3zu?{FL)S!m6 
zgV)kZ;-40XJHGyWMR}fsY5}bMwJ;L8#FV4!fa)=Z*NTURYQ&@UfF^wn)wYZ&iVUmRBTa@lDKso7zn%N@-J3DSpjgp6L?~-ULOXy;eih#L3)A>bulTW|XPoZZvFb^Pwl%fj{eVV}OJyZ9%_M0b{Ml) zg<=*}WCuOu-jeG1staD-lPZ$UO5NJPKx4U=3UL`yM#L18Sznb3(KWG~FYExmWIkhO zpfAIPE4}Oy#BKDg`Mx5I?}iJBA@NF*G??QYe_5{Nq`12est@_R>T;`&QL7x5%Rb%7 z>?t=(lez*kSy>{ElhCt+eHxsVyu;cwJ)ZHV#E!_b-gxc#Ges5b`ZiVzjR`7gFi+bv z`yntO2OcdM&c}!)3}-RIG-W8b@?lNkw#+I&#CA$-oZ}Ukp+=5~-Q6se&)lugBEBId zW_X325*Hg4gmc)X{$*yzZn zt{zgn)vfD{fr8}54?&jc!WHAVx$52H6SsPBJy#ymk>gRM33iPJ_%Vq2XhIllyFNTD z9YP&Er%dZ3>r3aVXOX2CfVaXR;!l^B%nwQcSkTnGvZ{_Be}rDz{cb)gggap34P9u= zXSPp^?!*Av8l_%-72!K|{FEOG?fs>aw}LHKcqTi#r{@;Cv}piJY>X~BZ!T8+V7K|W?jKwowmzuc(IGtu_YBx-(#;Q-tQjBUPg^K|TTts|FO(|NXVpLH^& zDxW273Ja3<wu~8Rmw!w150ZOHb+TiupMkvhu<;#arpCeKY_b}== zM?W^#-;P~m3!&9R%b$UpumXw9ePeJ!*Qqp@`iJH0b{d|EyMp)bNjz+ByJrKzXa|#I z><4UQF)#$U&5hF6g@)9_OlP6~z3*0*74<&1&p}9C^C>#MthELW=Sf>%0uHRK+%ag1taDq)2tR?F+QH={@pRlSpxsSJ zwkQaA1ceCoUKIDcU2unQL{S0@eGqH8Jb0yjLAZwy?Tt1xGMF?;{H^~xn{&+{vY>_S z6Go2faUFM9bqE<5Mn+NK5BBpIQGH#4XAaVN>x=N8Gblnox=R^pX|cq=Mc;o40viu=PR}HSzL5bi9A`OKTPtW}2&s z&YwHK)WUma00zWlSHrX_go^4ZhQ+);N`+`)V}x3Nim&`)FB;^i40vqQsPODTHk*x? 
zd%%4F?r!r(jg-cDNX*@EY_7UsY9q{F3l$!Xi@VVp z-wIyECcWhuU_OAMp8!;(@}v%?P9m9RvqSs7`>{|L_YzofJcp-yyT_uJj-EG(PdHBC zP;R{Q`P~)To^-M1^l=#1Lp}LUKHf<$40O!Je)kZSj<0Li*?=)3pbd^PyKQJy_H!a@ zy8>%<;etkk?GfTIMtn;DwE5>U+vHp2U(bP=Uczvdr>JP38G`4p_V#e-+*{SDkJ25WbIpguXF+fFwAzrGmj@IeOxss4)R)F5-<3qG@_*CEdkX-PnqI@f1>=SE7@Jxq z?8>{nnqz9)zV7F@tkT%;{I=MiHmU+c8MyN%S;bjFoqS!ulj+V99fPVQ9G$A(KIy*x zlk2ttb#RM1)*CA2E7u3^CWQA{e^}sGXiu^80Qy%tT!d5}6<2KIHsbbWPTo|M z+hvGYN?pM+`_^B$raOQxVFDsvdlwpaZ})BTPC39T9q75Js$^k82VwdCY5zI<$S7>{ zyp#!9L+Aj5C|LxnA+zyQ{S6J@J6*iit?eC?zL=n0wcZ|q)(02wv$jX9&P=_%(t>iQ zLAThsmM>FR+tU>0bwAkvZ`;a=sye{Sz2VY2*d1QbxPitVQ$+shaDN{t;Ma0^9yZaX zCp-4caWOq&u?Yx$*-K6#Ge#>^>+7&A;= zRS)ihB))FE(;5+dXMEr{A933P&F}O0tc~-EVDJ|uJ@!2Vz?yfZZeVGyBg?m5-dxw| zp}gDp4i@4;KB}|gI~>rPfL-gow`&IaA#9j1wT1fP zlWF)(YSD5da2(D_p#55M+6t-y1DfmeOUCC)j@HjZvS0LEN{Mjoo$>r=n^ zoR@$9mZ~uOkW$j{efldr!MckaG=S9|mnlp9JcDh#{EZL?+eKhexX{z}=427xd&M8r zl>v0y!aF6+0ILVQjuuTD4%4+%l6x$c>FX@;diq6OLHLYW*D!iU@}p{|(0BLFmsFAv zRaFaoSWf;DlPmdHyaw6@NKHO~=C7^5<__!+r24LG8Q#S3A#~pw zp66P@=-{*vslGS~%evteWf^N3pD&yI8A4}0m8Z>1E@B@N=mJ*HQKz&46moHw*Yl3o zJ6l?Ri+aR-->(UfgXW8I$S$Fy=j>h#@1>g%Grd~l5=AffTIpN9vDPi;!0D1#vQO#u zBVB5^Ae9>N&FerSlN5=Emx%-qa&L>0jWazn*~YOE9xy&10g_|%ew}YGRd0WQN{9_o zadyVbe!w*g#p6$(86mW8h6mvr_rK+Jx}_IFO;%R)g|&z;Rz$Mu%M4gO-ZTF065yc4 zEH=XEcanHt8{|XG+oEv~eBj`kv1)22{3{!5lhn~(^L+Hrm2P3>rf~9ecj2`O z+)V}EtCKQy3GFg^aG_>c#6~K=^!)DF+}MHfHT5iJ8E72H{Yn@m-d5p@2t<4b{42`RVXfylNAJRUN)xNo>S`lO%W%0m1^pz=``Y43*~x+5*VZ%kE{*XVf;k=7 zDq%R>OT}PNl`P#`JACF&oplE$H&nioYO2RFOBZjbLO_^H3lv3MDU4YR&#M#hV7v7e z1Tr2Cx|3CxeLtbwhJ!_XRT{oJd5XzflQH;58B%}*2b7iwLj-f!iVU9Z-GN3oIAfJ? 
z?G)oo=X}*DQkD0!Or)2jpZ6AS3!m9m)ixvo|cTk8CzG$sv8ekC?8F2l8HeZdsE&96kMJtNM4ZM zFa14Le4{lhtm2CiL6%!AG8jTZZHc#Zea0k+34L) zx1zK(s^Jk9kT?hXTIuRbT$3KpNj@13AK9pV2b^x5Dc&VPDUPlQj=B!Rl1QvuUhan; zTCl0ZB5n)tUc!pt?IJPw=$g*nbjW^}8wruZXE^XxVPc#C!tzX1 z-x_WgM_`OmyIhYui2SbKj3Nf}vbGXhE>$53lg<_G z!blK5RE==^0z6p#P9SY~aTET<&8^|V`faPE{IL}Oyw3j4mcHJH7BD6z$s0}zpOr*i zZ9`{@Z+Z#Fe|g-PvgHo5$de8?76o${Q=&Z0MQuhStU%# zn+SkLvNl1q?NV%GSa!!tA9sd#igJeiI!o=+mEX2)^q}y^=i2q8?+fIup@wo))+&=@ z;PKTO)~3E~DtD#Jrci-$@OH2ICSKcNw;$)~_3olnP%?}vHhn};V}NeB_mKq_L>%h3 zYPn$ZifeuPrOW_|(-c8;<{ZBqbsINd`1x7zP@Mb6hu#u06$J5Hv>x=A{b%G|w3H>D zC1UHyQK6#$2(yG}a|u69E2@2MtVBYEdT z=3>#EY$O;Y)O zA4wG(yEAry_)0`^LJ;aBHf65+hTGTpe3#+rg8pPLb!pzb*%}b(@i25uJ7}&!yOUF9 z(_DIkAx;q@gmS^#w1BWPz{EaPq`yoQQv4b_YISt;|y~vrE4`YMfZ+S}SVnoS&WjH$h zep|TJ?Ilo#)s0cnF5(tJo9%#tlDe&^T9?~0+~`w}?qs9ieU}9^9_pOZ81vJL@ctyt z?dGsk@-i!alaXV?bM}k7h{9F)f*Q`3y~2x8dVLEq@7Xf(xV{18!AnCIx^yjGNa~!elYR11&-mdwO5LhhFXxF&}yZUtOLehhw zNOV^s_JN1mwJi?|EU~{)I$pE+NajSz3mY*=?I#g=^SX7nE*WI}@<|D;&Uc5yrHuQ5 zm$EAF$pf7wvK6p4&{4a#%&X_rr=2i^6e#Q?p3%8A4m}(rwE`8F^^BaO){Obs6WnGj zUEN5Oz%`S*kBVbUjd|gI6eU1@t>dlaE)O-{x{VIil(aIYuQ16};oDDCs%ZFj_BTOX zo~s`4@hwTqO`sTqxRFJghnKdGL#1w$8tpho9vtH7b1%*l0%{GaCIQS|*>32Z8e<*h zfX@X`#A~SSmBKj>OWsiO>v2oI4i0TXn?ZRf=3NGjP8I4;V4)4}x(ESmy=162xQ#Io zWI?{x9<)fjj4@ukwnLb}gAKXv2T1P^Dw=^B#>nFzMMelzzxbtEmogf%piqT0CjK)Ks6^woJE zA9TT>x;K2}N?LaDFYc9t2tpgV2e~1#Zkrpbv4L($cP{4w-1_GSQkHgx6JB>f`W|DY zZ}t}8==KhYh{$@+7R-HQ!JCYYx;|J9mDxiFwdHa9u{@K>jbB+d!`rUD((ZW2#RkXj z!b^Sq#TTr@KuNcbabK+>G1N6l^4HYNU+7hM+J@qu#GA;79>AZd!>BLOe#Ghvy(Dlg z>D(*TxJ5G=mL;Qm>i$LTY~N5GkAi0To(te5l9CkeOtWx@zA8!nGW>*L1;C_9T$Svy zC=35~dy+2>DiA)e;eWYzg#T@~o<;B$*DgyeR>s4ZG#}xOvWMS*7Gz<$4dM8sPb}Je zCt6G%H~KFvam0DZX)FRh;JBKmFvnYNW{n4z#%UB$b*Mho(Z0zSh`-7^@?&5o=_F=3u%vqi%#$;c1ceExNDuD78!Xw7r%5A zCdh2d*7OVP;UmiVM&{}b9WB%{3VgEis?sFLbCu7xZMuU6u3RC4N@|BodG=ui#4V&h zTCgcs@vxC)`9cv2SBR*;)G^i!bq2)Eg|Sv%-}q#l|D>j@-u!LkrH28*;M)lxf~n1M z&|AW{jD<9t-ODXLdISfGh7vt<`B$jDBA?GmDr^zWfXrC#%@}WqJbF`wwvu^%HKrlj 
z!zCwB|MGi!347w*7Kjw}(=l(hSOSXNC($}gcA*Ow&?r-`2L{G%-=8`ElwCug04LFN$P zkTlu3+{=%JDjI()2zBVf?dNJr zCdVyBD7@PVuxn8e;>| z>;d^Y)Xp8BGU)~TcvM$l3_(|TOng6U8T%c7ruARQpIGbs7r4KDarEfhT2ip0bpm@p z?xo29{t#Uchs}X?8S(Gyc#qK;8Y90+!@mB7&Euj!tl$3Q`usn%@3^bK%>T(h#b2WS zBgFns_MhYq4}XKYZRT*>%*omK)^GGb#eX{gKds-N{%PnhQ#`K|&vKFK{84|O9SL(> zfcFPIfjYzS>PUIQKj4YN|Ng^Z#DBmO2bLiDJT4#({`>PI4(2>w{V(#r{rxUNyn#o5 ze*rlm`qS^bP#C18A88#!tu{cgx3KNLl9a(l-Ws#QoN_~A&f76~^n|t31w60C)1s4Q z;P>=33d6hI4?@36hsy1zk_5iWeFI=HetLjPCc-zr{-O0`{Zl2pKYjQfsB{Pye)DB@ ziS1+2wfil6(==au@?u8>S$2zr-4b4?0yjX@fN5*4IlQ&k9sXtwef&&e?=jRLE!=X& z4@4iElL~@pgp!kW@9`-EuSQ~urZkHW-)FH{K-;3% z5HDWooPP1@z-3`gQ>M#NNQ(?6)k)kU1eLnt{X%3)JuLc9FAj;G`YBEokJcmn?E%~Z ztM5zVC=Nw)u$w1_NQ>{yED;lJ4*Q-rPl@+6jD76!w{QSJ0ZD)A%LCAk%4(s?XrfzR z0?Mwpv=>&I^+Qb(Srq$c4s2(e!A(f`(_78i#t1-q|lqJ?fzHy2vh-)+R z{%RJy;IphLb8&ja0{cBI0hPCC1F5Y~>{_nzDT!9S-W2BeRqO3jMvgm7tx`$|IoQ`F{aNo9Kr7tNt__whJ6~C;$9ITM6 zZ+dx|N#$j!r%HV6&5`JQs&dVpEu^#UW^=d97zp;L5t3WsogGJRygnSFm zgbK>tsrB+Jm=U=Aao7uLiR-C_&B*tf%UTNdbY%q>#WW53i7OQ*IvgdWnzEE*JE`}7c}sOux+tgEpH?(hLG zWYfT%k3~FN5ZS+q?D6XdMmx*tN>T2QiyiXRp*jUEdB~xfhx_3-2hs*TgPLX^B>btZ zu7McagdKk8JA3&$839rl;+tvuw_ZaHx}&aJX+}+?e)tRgiT_`CN_qtz#r4o^Os$b@yhDQx*q`J5Tqi`)w(2|XS^SZt8$eB#a@=jgK@6! 
z^S1{TCijrX56P2TWy4BoVw6W;Np{^L)Nha40V&6c2`cC+?G)N{8FKA(c_aJRkYVQg zj}*dZYrpYsTho4jDm*Qo2=w69bdn_z>6<5ETXNCvi7R5RbE0p$CK zl!nGJ_W&?3wt$ftSU4ZR-j~v5F!x3QRo~;o=Ur%VArC2J^DdHqX)%Rkw?6i{9Ii(?;zr4EQ@&8|tfZ}xzTk;0>!~2+TKPYzg z;~#p7)V81p+kmpGnhKD|?^9cC!o6&7a2W>seJiWEqI!*ixBgzPJ^dG~n?to`onm1W z6-fRSl$DNOKY*L{ZL>LIFKQ*-#z(!>E(VQa%#!Wxv40c%ghJl#hdh2OoZM<1Op0#Y zPQI~>#fi!5SFW%+5E1U|yW8-tNZpHQ>q1vOvj1yJV#gW)?pk!XJbCVs{fo#uI*ckS zZgk%QC}cZCA7sQQg6G?juYX0c6L>h-fXMF_tKP0$As-#}bEXc|s{N4~bbW^MG(K|( zMgWrkQ(tC+ytqLA%qK5wOYdmii#WL6$ffK)DWoW)1iu~sW6f3_GG12gSn~m}>0+-d zcTqK@BsP@Y)qKRppK1t|Dy}yuNWEX?c6fOw4RWHCV{OQNWxmo#$QvCJk|s7&()s6@G(Vh0(vswcBjJitT{&YC_w)-dHRD=Z?-BVytJRk-xZl>dSaQ`EGsomI#L%(9z-Bo-f1dGo(@d#I0ZS zm%4BcYj`5xf7O*w@MjpOJcov8WA6Nn$JaoET@mYS=@l@8>}{(y{rDW}>D6Bi{h3i$ zJ8}PkW&p~cV0Y{_RSNs}PW8Ey#H2@-xL>GUZ%&u8ZOGC1nqP;eKW`B-M`(;K1JU3nn??#!rP^CDO1j zkiAP%AO*f`6d(_2i!XgDvHKJDE7g#Z)Hpr>k$UFc(s;jZ=aUY*gh?jUCX~v{hgY22 zCum^4{YVny7w%`~)K;^=+Q)X)tj{R8h(ls--Tp8ydQV2Tc?%}XON(ys z=y6j>gxhmtuhOv84CMKTmYAmGF$M_RPAIu#Z=;8jC}p{CxgE+_JHU5yewEBGtbsb5 zQex<*+h3`clD6W1etrPBOWw40Jlctx)^)o06E^oQ2UQOH#9LzV>Zo(EkrK$`hnBMP z@$m!dt?Xa+@q*~uj-z0aHU?>`#sW8hPP15Y@{JcSO$~BjS6f`>`283BCZ&n7NkgcI z6UXfC-bm~qDfiZ;SZV+-tKY!nvkK(+j<$m0asOay32EH`x|#gBzWy6igP!}TwEB8w zjsAtC0DF^aH}hZT{WGJjsCL4?LE_(_r>g+oIE($C zuwSXRhVJp<2V7Ow4(Q;psJyg3%Wr*yzZ64Y!r^@Kb1QQmgI(!|w7+maGdfZyw(12Q zhEhL{>Kf=)4|;#}W` ziCi$fD{)O1dHg-~Wk}$qO1`wo!lyEPQR7$PFNf@5HLHsHOcN6!8}os#DHA{4{w7U> zr#v51QsvesS5d$qUM4)BDky^Laj?C-3q^$wS6AEn^74&?af zsV@)y9sUw9GzCe2Dt>{a{~_=`N>Me~I+66J=D$e#9|A=Gtp+PaA?Z)eUy<}b1l~s} zYk!RACmru!22jA73Y>i(x3WrIFkN{!22i#^NnjF=}*l+kn}$Ui1XiN zZD>N$pPGLl>3;~kk5b&m0|zAisrdtv{)fQ(C^Q~dpR z_<#A|xb_xFe~P~&>3;|i{h0mA=+*I@K-!qkmCnu51cv{0)H__s#ta{&+V2_Aj5X zkQXO>BMN8aZ-}b9sn-$_>51cv{0)IA1qjccz#Hi}DG5*!~dq)0-FoQo{g1r9XjQs5g4qL9zPybJIe&o)` z-w?IAL_Cle=ADthA@KgKZmo#)#BoOchQKWN8>pYaA_`~ZZ$~wO{&}YQ$oP}-LnQq% z!JeS1)v@BgvuEUQh{~M?q$fTy^e+QvIX-pLi0H{8Qt2P_OK*g!)<|T2aYlZLzysG}l@Vr6 
z9B1U0-bWzF$VS%*ETV8meuWlqrS}o=eDt?V$oQk@>qjp_{^{^Osz70jiGx({jQkSO{{3l=EM)xI_$30rGk@nX zGX8A*5`mw~gf%1K&&V$k_!*UN$o%5J6d$~&h~)o_{1V}RJwOfF|9>Su7>2%$?EmEY zCM5k2f%j1g?$C=w(w~uEBJh`>9*%|2N}mC^gaS!D7rF(gg+y{MBqs!Dv{Tx zoRME5@L>K`<{&ys zetVo@$D)7C-w;mEdzd26@6O2I5cuMB73B35XXI}P{C_(>n2uSEuyf-0zZf4ZkgYa2 z;TusnBY!)p`M*Ct=%<~8bpB_2zKg)kd@z5Dgg+yHL*UxjXpq+@o{_&H@c-@jpaTVR zeCCY&?a2NA^Z4L!j}7wksWb97MF0JF-Gx9ft|Nrgr!Tl=)X^8VUah#FA zAuy$G72PMWh{759+fhy6JCZkBNcc1IHw6BFeSC1e8H#lNXXI~3IM`ezDlZcLjQkCO z<3e{tem?mB#|KZWFGT9!V}j0wG~__X8TlK+%>R6RkeA_DbCI)f3gq?CCu_m~>5j<% zHbZp#q`Ci;j_CK{;U?bhKi!e{hkfMtGk&=Ew|(`tf4ZaY5Bo#^Y4OS2zwGY|kp0sg znSa=~KWX(prGMLJg#6PTcYoMNUcdUo#lP%-=X&x_cjW$IADO=&x4)12|1tNLVOgy2 zzc=08tuzACAl)D#AcBB|v~)Lul!SDMbeA9@-QA6JBQ4!h0(-6f?6_vF|NcG4@f^>K z{fe3S%=esOX0Eg5y6;=;|LZS8{}210|A)DM{rP|HFU|75!2i!1G=^tA?hZ<6_cpK( z^@PeW#j=9Q&4L9GD631}X!;fZ`}2j67@6T62_OX2lS^d|Ft;)YP{bR#l zUo60!a)-$`FCnvCGkma1{kv#eM%38sIw`2{j$0*J*{&i}N5PSXXF!nvKa^&pQ6G^& z(QXP3`5;2$M^DSs0I{G9xNGK%zrR>O`=4Jd;OS#Sh&Lc^UCy=1{%aysaHd^l(6^rX zqu4s}zZk7jh2K5ri;B`5PFSxiS?;~+1AT)!8t8TV4Sq*J?l5paZ~!bfoG zrjTYoqe{@H6;+Q*PieZ_8#|s&>W^I7p#5ShIJo|WnBNXoYDDpr!wPa_o53JQWOfU~ zs{O9~OTdW=g{td(`>|Uy$UT^Gd`(Y2piVIudz1D|e&BjGgbi}-BMW_Ylhd8=4{$e* zh8-oPK3h&-YWK-m3Ol0N4kmAq zGqa|kb9ci%kUDO5>o!ft^F%%y1Njz?T4K!;q4X-fhs)cgEzXca0Y8u*7}i5j|-E?eNN7+Y39ukTaYJAo$XMdf2agl;NLq%wns)b;`c`)#v zXX#r>vJK)E-BkpcDB1&`5fyvwxTa{Lz6@7%#F&%T2+ zBiw$f*<#*|zCS}Gd|%PPi2H+irW({M;Q5UHh|^iJXUJq! 
zIzQI~m2~QC)R%j(?E9z(6vlhVpgxtV34OPaVy0VSzNzWE`2_DuOgqTA7ZVa3&dk^5 zW7RupYb*JY<{BXJ<9CqKSqJRm;HUmxwXF17QJ3Oq$jgqd7&>YfIl&aK(TV!-XnRbb zJgZ#Lz7vJ8gbX=F>bsI!bJ&erep!Q6JCIY=GSdn3k7%8Ea>2!4s%9Ekf4&2G1ErH% z**9MJs03Nju0awn5}k-A5YEe~g@=)Eh;HyFbk!PPmyXav0_0{yyIO(~HJ>P38hvYz z8z@syiy{6gUN6J>CF`G+^xy0PX(P|V#`yOYK)se{gv@X%-J^_h8oL37hv3nRCm>=$_3!8ftqD@g5LHl!D&qxV75sXCBKGr*hP-=?$ zK0wCj1n6T)R^et3RD0uQz_xmgPwMjLv3?b0|AgBMc z&9}|oJjyzIN>;v$WNyf}AeXc(_Q2mhoyvU~ZOOa#*Z0WQ_d$~3diln2|KjH!E_@ew zgiqq0Nq@3<1L{{bBvOeCT}7{9e|^08fgC+3gAC!tYL+Pq@D^x+uJ1p@+YJe#2i>ZJ z`js0vuUeRl>5Vu1Z%e*aoZWOiG=W?|R*?EI`w9jX{*%d63Foe;QfEKNw}!&?eYOmz z3XW6vj{T=}7>PQcf;_7|+Dt3ec5N6J)l@46Pp8SD%@^cFp9T=n9Ne|U8qIV*P1Gol zn+8KBgYesk-%&Ea1V5Lu@yk!wTkR0ga{~1yQ6ILqz2V%XZX)Q1bhS2bl%hmIzUcd! z(_$gFBR?5N^V4fy1$l1z8<2lROC%k0vCFHvHK|50Mg@6nPOw+{(O$T=+}LR!@(b*(9YaV? zeVR_*;Kgonr$Z*dTH`NS&zJ5b31@Z;y&dqLteF5ytjy=>#n|;QC35b6n)aMRm z35d=(fqg6S2vfD*Ct3vy)W7l0o_KoM9Kpwu<$8Yju~TqeAr<5t{=Bc?NAkZc*xeRQ z>)KjUrmsTs=cUEpLR-r)>d%kcC}1Xoc&htDVnO|@HOVV`Jfu4M@FS!P;uUPME1GeTfO`HNwA@ zolG!XBsaik2l&KS}i)0oJjp`u#LP8H3u`cSq_c1V3-K!)tFUy04J(XRP@ znNJUk8W(vTw7>3QkB1KzT~9HOgxQZsVExY4u@mISa5WNEE;ce-EM+tt>+pprkE%o_bpK@E6JQd>xr2b)3o2*e>>zqiImn(IHBYW9;sNw_aQ3DlFBzGg@ z$9iz*REilJ5E^~IfSd}g&!M^Qo`gu(LUCZg>|#!{1Ty}e#Fc**XctrN*Nk&3@K$=S z{A!2PKi|JBCKr)1d@GX1)$%_+K(OB#h2;0A+Nh6uY!!_+EZC(V^!0>@Yy2S1gdYc{ zaZ4IhN`|4Kwkypa{FeHkLh{=QUVJO}%g9a>9!?1!O`Va`n}~Gq{D1s9o#f_cC{l7m z+z_A5dUSYkTnq9brif2hVREL8_U`&}mV$GAI7J8`&uS1$!IsmB+^|gLk-JFYC30-1 z2YDcm!@kqtE5Gxsl2+8(yL zDxN}htqvR(kF-}p)nT? zvL>=Wg~0O*nz2m3)E^uCnk%mDma|b@GgI0Ndy>DX72U7YI#4OQ7l4 zs~b-e;}QvsU&{e`ae~d(y=;x_yo_=&`)`J?HS9rcAXk3asEv5EoTR-lGBSwHDm_#2 z0#d*6DBgs1P8=rkVX!KcnZIt=R7>vz^}d~HcfwifaT7NoI6v^&_0zAn2S83MH$c^! 
znHtMnqtDWcgX_9L@e~f^lnU8vtA^RkFH0;hZ!s%mU*p3;?tdKS&R-LFMvxK@{rj0j z##}uI7>7W;p&82SNbJz_3uNum-l1W%X+OO+klT09R~wHP=K`>4?Fn}Iwb z7cNM%+h7>BC%p7)9bCAdz_S97cPTUL(qwALW~de?(UsceEnt6^1G#1=GOi87)}*Az zd*Q+BDL2v99!P%pR?Fb!Ovr)zqeG-`*8^$InBxMWpkDg|Ugr;f|H9>PXvL!B?q5Fz zm*WER0N(D8p+m&9WGFwiZ(2(AspwrG{Q>Eo(O#Nzfxf_b1XSU@Zsmdne@Ok5?2*(x z=yOfUDq-{22kz58WeEOI0_{J!#b?sDi9fWmRv+Pfa%niQ>c9qaxJJ?PPZ1@8YDCouH%mzV zxcYuZUPRy*A$dsS@h=$tok6Y5K~OKnpO{N!gUn}aSfo^roKRI578nomYxN#Yp3mG> zts8E=xtLw4iqhv0|JL{m(X+_X_k`rpq}ZqkH%#M;T|H2b;Y0myMVT6b(&R1Tl}v_X zn-&XXd_O+pn(zqkaJ zjzvYhHzn#?4l&vj;omV{8Gw3RD|kgtjDm;{+GkkI-HkOXpJE{6%hmB0ro0q9mXV`g zX@qK^_*_&TB>%xfU+}mU|5E%L8IM9efs_joHVTrTycUjlMM~`)E&sgaQgxj16^vAt zC3yUH=U7}y&iL;*BSogf1v}r>Mh=oe-s+Y_;<}a0*1;$Yb3=n@Jg5I44)O<<3+%7Q zSq6;9hYBqbK{70)!Ve(7Pbu8rbd&c=FLGV9R?k;mPM*03IRc{gS8S6;ISsg1@#B6` z@@{^t84i;Fu&XhUR|FetZxo&JY@cdDPEGD?;6(4$u?~V>ESp`!NVLU;8JhtzC ziRTU5JdEPLqu7j#6~YL)KEim@zsWBiJEhu%2agD?{wR;`g7|m3JUqk5$bj)uFLy5G z&i4%?-fd`L>6lGvA~Cy`fraw zzUJKnJLCq>_O$1g;9dR!qIng)GRQA&pV)Bbh>mI0xZ0{e>HJNCvK$C<)xUn>XW<<) zb&X@r+_}ef&{FD09*`H>5!EXdzN4tfsAfeafVo8FNr2Qhy_TAIFk6&~)2>lVIMwPU z=EQU4pg!!RQqTEhwgO?|qOBC(K>_tJ56hTTQyIV%C8;i z>_hdd4eB(={mXf@bkRe*gN}_}M=h$A;IPJI0Eus@Xh}y~0kagN5y(QQeJ^*f*q=f2 ze>1PeL^bTS%D~^OUbef}Zkp4l^_^t8QA>oeM#*=2WVb4{==Y`SW`OPv&s=c(V zo6Iig&VU;zCtG2M)_?Auyf_D*r~ZklbF)$P)kk$7HK;+ow~`Y%f?KqBW=BY4ow}_` zpSE=q$l1dOW9f=zpK``@f74su-7$O@VGi=gs%5Wv2Qe&Q<2=_z7_!tXvm7Dy-KFV- zSD5WD`>3-5t*c{t30RIUR#0z0I1Q8RNeH{jLAk}3FY$v~{1$S3TqtWqLS38LBA@Q* zsbPoK+;p2(f%;7esfi)9--77L;hD`%Ug><(C+8ry?q_n2obL&a{z8lGCgk12a^-#q zazPsZZQ2BEf>L|ycP)|}dQ8ISko@hfEE_i-59u?GQ0?J08Alj7h7l7`|GXxHCBBGh zoUW6;sQ0Xp=*cPr7s%Ig=o$^yoZjXnT5Q-`-gq_k=tKMuPJo~K9STtqg&rrv(2uXW zKk{#pK>g!9c%E7zwz(xsgm<@YK9`3vBE2BTt5w32J+`KoRF1eMp2kl_HdpBz$dvqgJH}Le80mfl z8NUU^)mkN&b)*4-uDIftUo>%0y&&;v?cNP}sA;G8Z62><-$nx_;yZW9_+45Ga*ukB z7=J zGVc77_zuYF_vI1R-ly$xQ+MIN?L)=ZQJb3s`3ExPhT)$*ieZYL_Db8Y55!H_NMWKK1@OU_e39RYtuzutlG81dDbIq{1i6)B~bt2;Sp-(j!@uNwV(4**Czxy zB?1c|_l*%qrn%I%#BY+*#bGLIJ@`PL8`{k; 
zXa{qSb!J0@|WJ7zR|C&>3v-G1KEavEyY{0f}oyXj@sxQE=o-<3BVz4cT< zRk*!V4=h1Zst9v!p#H1YtU=3fn2&sw2_J&sMl`9K`b$6__0Dfl5R;f|HXf&iD!+7x z7!$?@NjKbK5%+dT}TzU*ueowA`o{pm|p;otpe8%{R<-QbGA7f@_HS3ho z*R4o+Nc~up%!I5QbX0KCxk_V0vmcs0PX_Vta|_4LO>E?0a3jPoOO1OHtk3Eo*H8c4 z`CTb70pDSV9ETrkG{L^!7{veIrEHrQP`zR9<+4;VEJgHMIrUz6+0u>6fUcDbLW z=Y4pCudO>Wp!pB%juPAFmrM2PH9BwWSX%e26vn~-Z(++6n*+s*SvT^J_u5URTK%Vc zNPIYB#EcOXz9Yk3-QC1UN3OkK+kwOv2ZYacjpm3`a5wp$lsQ(|R?Kd^p#5RLD~)~Q z)vgRIB+kH#*RGLA`kWx|n)JVaTRFt{{&|R_uYD zo+V~e%Y*#bER%nmp}L+|#G`K3pyE%|%~?0dkM{OotrI3WFxbY4FR82ju3BkX1^L(G z4~|bf){ivP=3;9){kfy>A|Uy3$F-a%o$&`E+VZq65?0apq{t=+48Ge%#677%EKTxFgKrn?{u}rjkOinsw$0vtGgd9GMv-N zEg-K`%8uGqOEJ`E?hIZ$iZq<1*-HVr5f!h{lw{D-#GS$h(vV9T`OdE_kTXqW7;Hc3 zo3&A=G>Iu0Ji862+3(nPzAF0{UcyKL^#q`lvfIT)%)^P#0o*olJFr?STk z8X3U_>+J_(zu;v4xbbnd8-ex}E+!{XwAvD{g-H+NkB=5!9JxUJ^IS;}gO{H2?jBJ& zttb4l{LkFvccA|1R2tGr|4+o{M5a07&LKLa*UAw8ntG_IHsFfuGUI4=;Ll@*c)yVMHkAJw>eP+trOH}sU`D;1`$Rpp19I;@Ef2gx*s&_b& zD;$#)7z6no9jV5?@;e19|FZA)#Gl`NhR2l!x#qo9Uxm{BcQKcH`~cLjW+fk7NPU}H zCX!MnTl7SD^E99E@{jAc313)Hzp67a@$AFa1dFXt(v539>TvD|4fxoij zjUf58YI1J{r!l{b^&fH>6Wx5%{Rxj#P_KOdT9Mw1ZQM(0Q*McLjp<1NvMR{aLXsvK z?IuQtjSS%;rrrcTM2JH2C%kWxfAU4#v@920;THMW9vOL*LgLd~k(EUV`s}qp`>44#&9cndKSeCCQWs`E)eqxpK&_e6}6=${po3E{uFZ8nR&00r6 zy_tAo;ZS{$01OSC!Pc6T+Wek5Wc;V^gri1Gk5o`oK1qHu>%CJhU=H!W8%HsmTxb2G zB2#D;V`P)Co-|MVK>LZ7`H5&nRHbu%8j3%t`M=$Hliz~;|797HEB|v}*|Ne=c!oY$ zrLwfuV)}7u<(D$;d(I>f|NBo3xUMeIUnZ{ohkuYqCc@!%P%$c(KK^Is8RYo__y6az zPBMYTRX(Xxy?;y`y0CrO*fisx)DMRK|J?8Yw64qibFv|Ie&=a2KT}TNOXSNl2iDL5 zB1r$}|D^qUyTKrQ$3>5zR?_Gx35s!nOEpzR!80+|U;jx1GLvBqD!0^6r^Ad6vbLg} z#sPa&|>m8d1d7)w1&&dC$Ora=qVl#bC0_;A!>$ocOBqf78TQ ziYYi_ft}~1`@5og^B-z^zqR6GfdBY^UWVm^KY0@#;>WZWQ5|cI z29q!Ij%|+~ruaK6-7Xf+Aui1QvbMj9>`f{UF}2*jrHhhg&G*VTaAs9>%o3EByvv8h zpM57htatOJb%mdBmDBYIOE0vKAdzNy)m$t29qdvfc9s~1Kjb>)wYa-kSAToO%jbeL zidY*v{{`i`F|oF8T4K-$cZFd(w3mI1(L>^7ljAUN7Lgc}W5w<4a03oo=xxg{Hd6li zzrGecyQA20ACo%B!4i3!_PyzlPM$Pon!Qi4S?cETGvCE)-&&83-sRSY4MW72GTp(i 
zF`Ztp`+n>iX1}7ySiJGnsd4fn4Pc{v7Jq=yAzF`S8DZ&P(o?r1wfh<&O=IagdJk@< z7^hThVdW#d_Qsvs7dRhOW7(-+Z<|anvM$+v5op#q8!WbD>tsX!pfuSY+R?Ecn9TM0Lr3QTc7RE5QmCHI;xMd_w$$cP!n}n$) zP;%A!VZMZN=lkS6>C4R5;NfA^@+Cs|KOZS*|5|upP`ewZHA;eOdwoC9-LWw^#QZpK>(C~zn-71Hy-a5uf?H@|HL`uV zjeyVe?yj`?1%?WZNM7?7NMg0PZ0WAlcRn2-No0$olmz>_KJE=@|82pbeJ1>*k==3O zqwFjx98Y|E$n&A!IA3#2%;K{yyZ0MWHfE2y#~BgS|9X*j--^&vU?kT2>3~OzlA#jC z3giP%f&_U#ru6KWiI`T92e0>pi9()F30j#}awPviHu`6SLZ%t8v(ulbLA|Fr^{60y zw?D#`2i~U(jL0Qvxu+m!kceDjDci%=TX5H&^ESkC=_8#6`R0h?pFwJ#OdpO$iLJE5 z`t`m>8j!!3t)g5lVh$oa$sBlt+NAlwJ#`6kLn#`?Zv3ZY%)99WD8WKS$&EUY!V{5z zm@?c;#s0{%3Jn|MUbbP%7}9u1A4`ymrJQpUa%`Eax1ftY`$pIZ+V4|x_?$x?lAGt# zIbVOF$trS&2D$&JUdh29-yDkhJTgo6JSpR|mU{6O)U$<~z3{&3lG2_rfaNHSBuO7` zECabr^U=OPlL~?1RXHm5yO1=-4}K8;CFi4*LZ`865{_@1mOjtw^2?AY7t~`2HZ8^> zy43Pc>DUREtly_{M8SdlRjab{do&lCLaS)fyB|@Vf%rF=!VDQ3LNTHH*%y8`V` z$)=Et&8;Wy610}{;O{eN@*^RGoa>7F%dcBh+2Wr+A151Y$Tg(E z`yO=A`khL@$dw%AsX>lN`G)g^r7lrDP>yEUAeksP9suA&APnFRZ#9Key@TVcC;T&jh&vezSuf#h1%Q(cXSP zZNJp{JB~V#(}!QorM!QiF@beriqLE$x7tGsY22Kc((;v35-reEC zxetVc`b*^B!wNQa<2|SBgu!{#g>ipjb5Cx9H`Of2BW5S%)xV|hyiXie zuy{V3!z?G)19EEVT*YAKVa-Z4uB z`H*=*TIgORt&|EKwnUGhv{WG*Ge4%}OEHdQ&!OgRJx)!(y zc%j{!l-(!2+Rip~2JKf-bCr*7vveSBJ!9i9+#YtO#)0^6JlrUScPI5_WHisoOgBte ziPc(#K)v%CySY{%TUal~^q(vA&GQX;ZHRyBrmY+p&cY{KH&evvA~g5k(szQy*IWNB z_%aXsopvFZVa^m3SDkrd$cyRR6Xzd$HS%;XuwR=lv>j!_mSot0$6t83WO%;5L{0iS zVW!IsUXW3;6cQg?eMU1hzat2@@R@o#dyy&BM>=PM`Z)AapCyY<2M2~tC&d849IM)2 z*dUkic>!bP(G&OC;&kiRtPR)c{iqtqMY?NHwwAD4t$(^(k)aFF?v)eXfE@8;Cb%?g zO<~c-Z;ouJe7^EqL_5g)+6+H+*ZxVL)}7=CE#T;1Azx_*`SC>1v4a#V#^(D5&a8ui zo)^B~s6f8FOfHb<)hMxwkJDP}h4@$1wtj8oO>6Q1jw8bMWAl4!8Q(kb__3QWs}s$hd8t_*(~cHUajq!5 zgVZPVGhDi}bXeES2r68s-g+fHiFP%hzBq#jpO@C&8fj`Rk=5$nq-x=-j)MK5ZTGc(L#|En0ihae%~+)z#S;5ldO+%TQ6zIXFQK>PL4gpdLNJHd3*F zqINZp$ys}1^Tqpj$&mPL9D`-NE7qSu+-;ljTvWB#!8&dZ)CUh^oEx9fZztrAU+cZ* z5m^m1^9Q*omNA`~NG?pRjw-%)<2FfXA%ZW+8@!Ng%aNK^7`c*$ zUTbhXBdxoAQwQWd*X8iSbU`6gRE2snQa!xppGRy!p88k*5=s1tWRs6{Pc3I{iu7m* 
zGV#kaFw7=hWGY_u3-+VgBf8&0Qss~r%Wm)*=uP#JfuZBAhaS%gInH+)i9!4RESX&s zT6|9YVm$Z_w#ulB&P~Q3FNvvfQS;X*y{wGNUwk$5RLva@;=dB?Zqsy11`O`Qv#uWC z1CN8si6QrVWO6w{yPgy%6lpnY6C`a6f2SUm(2)7O!%O1vR{C1D^> ze_@>-Z?ZIVpfi0;G-14v+5W)?f#B=&BrbYG2IKVd=bY;8qKmcUX^4$fV}FO!f%*#T9-L~2|y?HP@ zM#k%o_x%&`)1#Mx$>cZ4$3HTt**5pdq_kc%B`cSA*DI%*gB(o*-lMHiL+?9Tg>4RZ z`h*TM>>9}Z!{D`;IrWF|F}Y><(YRBz`SLPAuBbYj5{uX?HD6{}jMQJU^DIX!0pyF} zsW!MS;^lPCksCc#WqrvVXYKXE{eG@~9_Wjp9koWcuCL9m$T))|B+a7X#H0r|89m|)$2p)=wKJg4&V`rp*2laMGXU@#+ zI_0D>a35DRHMb^VjrKq;$@=}JDNl+)uukmt5UoonP!=bB&*XmG|L zE!boYL-PBQ5`H;5OsUsGx7A3aC1=;K4@DvI-IVo~GB}h*=8wefs-I$M{${fRq<$&y z4njj@7%4IOtV1I0S7k()DqBn+X!%m8I_X5YO&S`{C7dY_3OSl$R}~K|S?tw`%<>n36w2g-R5caM5qv zNg(~X4s@3`Q?|xJ4*^vMc%OVsoX9^OpuWvsOZi~DuOl9%s6CRqCb2O0r~A!DuKgH_4Oa(h}T03itlO&A|Uyx zfpW0@{Zwe!i1J+mwubh|O$4abQ$D6uHmnqY@V&G zd$0&RPYA6LC#;3&hl+pe`%B^QdAC}ve}-qD=+%D(@eh)s2^;M+!ui$SwvN;TNW(?n zKSBC8*gjv$@|`bPiz*O4{E!K5onvIu1dqRhDN8!p@THS~KpqDLmkhZ@h% z&Wro42Q~&Kjk=bc6RQ|Mg8UD6n9a~-Y6U*m^Z?#kUGaQ8<2A^AQ(Vd6yR(JA!MwNk zlaB~KDt46uIiu&ma2i|}-{-k7G!67egP+R?&p<9?bl9fwQSJvf+2MeI#DYf{3%dY$1ePCXtvcKr%t!{+Ji*yU z3bG+4{%=wD;!X*i#!6+Y=~2`<5NA zmUD47LW|2D6`U|r;iw7yqy%|#L6iO}zS`>iwH3Xr5iCnb!mKLD)4tvJlQCAn%yP%& zKQ@(&wUE@L2RW+(IhyLZwz-EL@>m^~hTw5a1?0szi$4EBIO7z~HVLh6dkHp2v5|Qi zsLx|u8T7F9`Q&Vs(j_vx>*GCH?F#ZcANWGjFpm5xEFSsZk0xDITGWvG*4#{K3SBjK zDg75ajdRv(=W}i+T2PO-?!3T6`=pI&OC#}Xf95YoG1z&KQ{f^g3m?p~J6`i1)|a-p z*50{7j*s(&D3bQuCxqlcqLve@tQLKN4TyhV^FZ#$sgk-c)IWDYc~$$$;(3Q8Xg~F{ z6=~4#C+$X2V?QkQXr3N7V?gW=aPODF4;2n)Pa0R*p1lnbo1%q0A1T_>zB=S6zakh+ ze&GB{JT+*%mIm$DA8jt-RgtbtRV_aj)1zi48BhHTa@g`v(Z`}cC5&+n14_r8G{V*P zzk~eYD^3nud2f-VnM-KH_Gt(v4>~0OI1WVX-`#&rTWuQW%RQQt^sOdU5Y+Q^e~gxs zRnnhFXD+orq8uV-DtZm_S=v~gbwfNthO0+w-V#jv;kS8lARj4T3vgLP>SE?J!TdFs z)oK^z6an%vk@+h&kv@ZEiS@8L^On1c^xIC5tN!&x9%5Vegf@xDwBxp&T0h1z`9Z#p z!+fXp$M4&mJ^kJsYM#rk>c~1 zybpQ$!7;j8hm!uYgxq={Z@|&78Mw9E_g)M0I^StSgRNYH@Ry;4v)yzzu85R7BkOw@ zIHtOf{6Re>zCh_N-7wDk$>uSu18>}nC{8($JH{S3wJ?Y>M87|jFSKs#%oN9h^jAop 
zIHQNL4c;hbNVem$a}u%k=R+p;kS=dOMz{~7zd78|VM!}$}3(X-(u(Eexw+WYMWWQS17Mt)itb#+rsUx@!zKg79l zAsxgvX*>Pvx@rIZRUqjJ>c6cxo5aq#5XkXYBb}HO$N!lx=mz=RV>sD*Wg+_(11X=V zwE;s;2QyTV%P^2SVI_vp>1&X89cseANBTxX26Db$o!R5*i$__EaA?j?htS&=t|0x7 zz`Ffc;wkk5?Y{531D?yp*&A0u`fn%Y5k%43-(rV%LWwZ@M-`n4+eMxh5%LGJUj%26`(K~m z|DKb5GJ>Mn>#yYHPoEiwTnBjkXqfcNwPOvpP(4d2_-wc+qe53m{WtTVpF_wQ7$77fy20>MMNfNwxDgZY%J(PPaOgE#av%VwD#Xe{%XJHoW3a08u)}(1#)v8LxwLr7UEvLj`LCx9rQ?v}oFIKE1%HS&H;J0L&rH(kfCeKU-K%QoM z`xM@bq3%7?6C4EQ?(ZVMd!;}gh4EM=|KPQkv1fnz3XyS)ydHWw$k&cy4%h!^!5O=v#%GSu9LIyqOGV}R*bs7kH{(=@xjV)x-K*AXg3-{DaZ~sp_0g{Rk&MoC z@CqC)TQ7TRgu{{h7$m-&M$Uw)a&BiRS*6YGZ%9kX@=|_C zDr;{J(q4m!%cVKIpv!*Nz+u)TK69GL$59M9|A}Ciq1p+o_m?Dh6vT%TJVC>=NuYf} z_*Xl0f~tjK^a*jLkLDHJZ3!Ux*B2_cfww5XJ#uI+5%Tu?n7*>+L9VY}s9QkR^dec9 z%&n}`5z~*yu+ep(eM`m~s}PgVy5nk<6!zO1!l_^R>OtQ3u+5!daX7gt{PfNHZj>4v zsk3vCvospnkxL;7*>Dd8k()T~)Vv6X137=|f=IwldobzI$hs)}+yLyCvkj2v7B1Gk zgDKd2*s%~gwDwKqN_Y*iKNmdkdG>Sr=58P-URe2>o8bp!c2G|^Ug%8Ca3}N5k89Q{ zo?D7OXmAzeZxu%C=Qn<0;BtyPNu?DQ=X7g9>gO``7v1gW64-uRnyup$V>jPT645}t zU23iWdu;QJ#Zbm4mu131{LJXyAm_DuA>mQgA6sF)p>S;+mt%hZQ5@vh+_J4T+J~kV zN33-4ksHdEf7&4Z!Qg65OxBS-cLvvk_bFScdE_*Pkow!)d`xh@cY@j?vY35GIqAXg zoa_Z?pL~+LH}$e3@nI)gPaAXd-caiVlK*AFc)1Lo|DuiLaE`~s&DuAA7K;Ju(Q^pm z_@7`dQK#uA_-|7nttIe6{69a-?FB;L5P$DeHq4PW+s`34sb4^S4c9)dsAY(8AnbVZ zsQ+So1Dp&V$nhf1|B!!ot4wSw`KxkhqH zCkf(5gPzhn1_^V1iI%fReId#Yt)Tu=MAs+(R5)FKLvlGW>puTUx$0w(f98BXBZ8aF zIwnxgEzodt1kYY~1M)*hg*jF>M!H8zbH};*#tJpK$;}{7_>C0YA-QC}bf5b+hA@ef zOpKTY7Xw0aI#8;$tIU_6UGVJN}>}L$tc7%+=GmAOTX$ymLqo9b9oFS6Qhb`zBtIU^iTcY6JL>y4IfXxPSIVH7iCMT`yi3T z$%ii+VkD2>r7bd1{hjBZ8gTvZiLWp*!6;7Gbe2!C*@*=5i58td!2kY=7)A&Gsb+F? 
zENKGV|DVf$PkcpKf)VF83yno8@jf3(BQ>Vss4v)C+frzznri#Q^%nh~8gTvZiLVH} z#Tf=yrOrC2dL4u?E+LG3ScWIACkof4NZtM!c{||#|6Kli;w$2t_kUy^zn&}Q9U$OJ z(tOaE(ID)&&GD~NI$Bym^fdQWNESi4&|?juNpxk zt~1Z85Xywg-18}twtw6X-p!6~9=HgjAaCc4?`66e3({sEtPKp;S{uSY{lwYtu4a2L zQtkh|xNBYM)VThCPJGq41Hh=e4WXrw$QIHd*Tr>JCeO&Ln3TlOM`OK^tTb~7TuLRe|UlLg* zbb)$mKh{q_PRfQu607hpiviS^wbxOZX+QKLT54o+4j_%=7h$cj*`C=+MKK2Dc(p9A zDf?S_LHlTXL3(@}LWgr|{(M=hlX^3a32cJ%J=Z~G0i z1a9CFewwx9y#~2TarW1VSpxr2)ezmvvQl-oTV_d+o4h+@OHU=fw5e6EK7EAc(MxCs z2`qwcbn4w<)dhkt-ABAvERM30FCc~8^wj}J^^1qv(U@iK0iEVBEp1N<(7poPWv&0l zo#!TYxZL1EqIM2S7{nP#6*;gW^;jAWB@!KCZE4-^y{tC?^|XW$-6gw>!TsfQ5y~6B z{V*kMkiu3J_W%VyASUrpC~Jbxgqy%7i1h=gr#zr(Ghq&CyLfl`TS}&8F61Exl9>JC z2`^1K$+aI&Obo{Pm{D|UIIaQe^)B^=u>bl@;+O1eTL#OypX+;Qki-&Y`R2G}^Hu*Z zH{2>YWOC#-)m9Nu?-bFSX|5XMc$>BFg8Al7Oo|WJ59E(sQ79?qQQ0r$84Y6Cv?K(= z(w>3*(U(3~PeBy=V0C}#^P3tEgr)IHkgLf(Hyro4vR*YyQke}}WKfw~GzWQfe2+Z4 z?KK|e&CE%gU%Ws3+$f~+q)lroT$%eK_gG`YkwyP!o2Kp`NMZWTrI<-5*?-fN4SeO@P! zw@YAT?b@l;-PC(MbTnbdb>&K>fgAz0!tU?~l{VQZiFiLLZBw90*#pS)L(8;^6IKpH z6mPhmynX-n=01M_t6aT%?};g88glAd{% zlq_G6$Eegtjv|Nj!yWY9It}uC{Lnw^1#%uCd3G&|f#)I7zQ3_`6T?RO*=#^AA<41F z{?Q~wwmvtmw2aVjh<+WC7{}IPb$t`P5IxYSxOYU@mj3YU4I-#75YPT9D3;s6@@=3w zbKiEyd3oj$$T^?6nJP>Nn(@{A(hVWFI!7)G7HkJz63D zyJJ;&|J%e$6!Rv{#hQikJqC;;ENK5-sixW`4bAf_Q;f{vaQ!Jdv)-o=?)mUFj4dJX zvHs+ldOg}FEyVIUkYj5fwRI1ciHG@i4aBR=m?6P7KngcZO?pQT`KDv-)(69oFADFP zr>hJ=y{>sGp+>jMD?37ikDX=el|PWtia|awGXJ)wdc;>F-zUTkoel%uv>VbG=vS-1 zC#DZ>eQlU<5;26H4)@~}a(!MR=&E=+wS;iT)b}^=4_HE!>xaj#A^z2dJ6pw(|80Cp2>h&1P_}kP#kV6+uk*`6XY(vM zkmBx*&ynJ+zLV-zBgk1YKG6H8&Rk-iSiy7QmDXb8FqVMajmYJIm|^$!R-93G207ma zF^>;YEZE{OVH+c9DO%ep+D4zW=FhRd=?3+kokanZg{LIX7RE zl&|w@XYF<<+u8hOmSJ)Lq_88(=%%8^|C715NBlZc2D$lU!MqyO=eFczu=?IpR%;^Y zXu>|r_MzW|y3TQ~)mFerT>|Cu0GGej@I@B6OjKkjh|X8 zxJX!TsPKB)60Y-V$Kx@`8S+NVnD~z>*RhPU4!`EsD_+##fc$Y~s04}&{=xgzD*{!;hs_z#;yzA9}hxBYqU*`4W{;Jd;DB*U!Zx^L5l`kURh0H3-_j zY|o8X-n0zKf}c81KUz+qZ~o*4^6{o#ravWOg^CTeI;orJ2lo10WFS{EwRbJLuk{tJ 
z&}PI>+PasS!XyIu2mY{HJ>;gM2@Mk5=^r@K_-iY-AkX3Ozh8C=(s^7Wo*q};S2I8F z47q;@TKkg9uA|FWGmn^miz$rDM^8|JdSeZhbCm{bLhoLXe zyn8Wo`n%FT{8;->GnyL6+w`KJG5>JcC3&UmX@%o$ZOeTN8DHD`-&J&!7`~UTgyv&? z#LS;Og`^iSc!k&+)s0qde~kT(#)|LYY_CHg`joy;{3Vz~BY}I@>@SKtcVzY{An}Xy z{TVlvV_5opq`VmhZn_jpoEfAUAvV)7Qf6%0`81aGq_1~n$+m)?5Ip~3{+1RvSF3TV z&WZll(c-m;wlYm1kL#nkMc>lod%88M8!q>AaPW~r3&=T@uQDbpQj`1`ZMQfAzENxI zCIo;SNs!+y$9DC!2xB8L0cYbL*Sk9xof6*b#F)&EA?TSnFKZ0)|dJAvTt4hfdv?!gIeK|^qNcXxM!yL-^!65JgU zB)9~<`JZvmKHYn~XYV`4OXyPC9|zPM&+&Zo{Ly>Jq-b~OCGYwjFze!}2J zaU1h^2INmmu1#GM=nxMKFSvx4`1ZpsX`uE8sgrcnP9dYd;Y=LzSka{G`0L^YP+!JM zf=9FENAQ~|EFMN9!&xQI734n{>FwN@x@nyl;C8tMuT5}~z9@pmA5VTV-D&ChHpb-; z@%i7$eh`%_f%-3I7FJ5vp#Xd&Z|t?IJ^OO&??fQ`&5K(THG-)*(9mTvlbmaf2}2(9 z!2RoP#@p=^J=8D z8R3zqMSlbGbj_ZVKs)E2ze{196naNN3I&^w138<~t$cayQ#Sg>8*(Z>$@|ct156;l zRnbwV<+v78#FRIG$Jndc{5$9t$Xg3irWYzbD)3%U!xbFFy+T<6#~<*;NSdmX2N^wL zObEu$AX1*rH)DbNdKG8r6-RGDvhAVpHyRwn0I%P~nk=k*Pf5tT#^4cj{gP);k7bnZ(T*#X^rD`_qV@hf2mOBJtn~Qhu>Ec*_Wj91k_6-*Nr<0@?v6S}6)e@ z!0Ql@|7>8>t1_mU9#xY`LL4aS9rJ;Z1RAJM3yE1r;F)#!fl^?*P@8A$zn=l>-{61l zlC|)JW$2l@X*46z$JFR}aDsfi4)l7l7^P~~KS_7}=f@Jxs0x}-BL z-VfM*yDHWx1^HJmj$z1^JrK$*k5Xs25Pui9EuRJXSDzii#Fgi`y8`#7-_T6=mX2XP zg8Y+&9EAaeINyTwaGs(+0>mxu0gs^it1TaSLs|S?7VWX}enr1VK0$~8wg10AIR>z? 
z*Zg+QomYT9x4qbUMFEOGxK0VPL$;5PDZy!sdvm7*N-cdN1|HuTn%c0lmgK&JnCPB2 zZ2P!n6dmaLXT-G(Ur8RkT>0u;7roeH62v@63Di5=5Yl!vWZe)Fa^EUaJtR2)Ne9Ij zx~E)RozS!tyhJB(PL9pBZ_>iWfO;3A5=it0OPt5O!5KPg{OnwbIs_ocdZ9Rk~vrCx~ zlOm{p3ia>NqpGa#QxtYa>f-N)wKNh0#qT^)!*&rH8MrNBY1uT&zEmy}$b$OE)wZle zJvs{W^hyE`k9N$xXJUFQ3`M?Udrxlr1 zI0k?D$qe-1d*17l!!@1;pNX(&X)3#2kA653AMp6D6p%`s_=fh{#V8L)JLjp7E22UB zC(L9=o=d7%9VB8T{NpVrce{lHX#7UTO_|HcE@S#TS=R1V{$^3n0{Fpe;TOx%>CKL5@WcsK`QN(A+TO;nq#r&T_PpA(<1PX9uaG>2 z&Q%WvFc{nn;&RKcOcU?@e1Y~4`u8nVqJ^B2qi^^5VnlyUA-sU%fAfJyou8VK%4{F@ zTIi!yLb|sOgn{~tFE2V8MWr9kGH`78@9B%S(KZEu{K<@-B&SHekDmhLJLX;wB77G9 zI*>cV;6KZg_4X(*ejTzXdXbb!n%@NS>_9Ir9jdgiJZg$^NscYoDE9=_s@+c8S`1~{l$P7wfj>A23{iWEfbL6 z6&w!4Hz|c|OtUe;;hj?0%ld8rIp)~11jN^?2rmoatO!-i0X?czO&~8+UwYBXfE+?{1Y0iBG%8E&s(Z7LiFM>f zZ3M^_Wvc6S)Sj)pV%%x|Bs=yPeqOr+@(Jkxj;gQbvEeE&@i;U)2<>gd7C_EZ&HYne z@01%_pMxkNaOqoE--{8DZ)NM>CwPqa>Al1$c6Hg0V<`~s0(oDifw#*yB=h7}3jH3z z>Eh&OuWBG)Y_ye7%GP?gIEtsSre)XvnY9M;ud9ny>SbU_|EzaHE9SDh-goYHj{)^} zRpk{1@6Uo;ym7?en6#Hif_~`Fh#}@p{MUi&Vl^QPs7-mMXrNi zZn-JX@K=RF@8vfjHyNd46~k4((6KpU!O3gtnCW3d1@gd$5G3g3OB45dIfhc1#dcJL z0Z{+vv)aDE;tOrZbj3fB-87|e+z*Eg)O*b_^Wk*HPeVC}#aRurdB$T_PXYNDSFeA5 zY_aLdFw%I0!n&5S}YC@*Coi~I?c5_PfaAp5$le&;k%f*UjxxpC;YgGVKla5x0=bQ0ts}}g&G{;=e@-4^qL^!|GwAw|8vf1B z9o`pTZ&V9gB44-@Zb&2p)K?Gt80v|NG~b|9{nRH8_W!_GY5?ShB%V`?^Hug(f#LpW zJD(dz*qpn7ylA>8NpCe+4izHmiK8lI)Z+KDEs(#|RkX@0kPSB}ro3w(px8&asQC=! 
z3S9W<@T5s+yS8pf0osUR0jtNkK)&odLr`Y3i)Z8Xg#E^d{9PbtT?~+y|3OQ=4HPfA zKN<$gI=Sy&e;0S+hdJ47qWv0lV4Ur{s~ELk%O)CQtw*L_0Ue0H3waVmJU##%v_SX zwD@L^LaFbN==IqPFUAJQ|Fd%IkW|dqIP0#C6HxnweD)TX7c{=bpDSnT;J-$eF!1Kd zkPMHenC9#Q+D}dNln_zxL~L(e=qZjmSR3-NR0eX+_40>E-`y_2eEThhow*;#64isrNM?x0_3#42-bugtK~Zhk*~I6^Jl9@TR`n^%MY4hyH~IH z5lr>tw#sv%b!vZ_1NAE!WY>KwrenT7r?ciNDSWe;vjv#w**&~49?^*S zk3jp5Gfoe8Mn5aCkBkPtHYT37+Nd7@`GLAFKXTC7;mS{90XSnXYqRXPus|N)+epKT z(D^zvH*~XE8L#=E4#OD8hs)P`Ze_)t(iZGw-s+M`_q3{l+7EGC1?6~u+fF;L$Ui4| z*#^S7L*YO@BU+q&&%D|yU*Z7ijSrpWJ+@8w>SfG3vMPlKB)cI)xYVNDa6|qW@9ZpC98mFSL5OV>MwqiM`?Al2UCVy1_q-V z$jqIyp#V7#@wA)fKD4194~_x(lnUa>24Mz}KZ>Y+jb%F`f-FhrdE~apwzZqV1@iey ziAQIG$5x2eunH6CxUNTXD2q;_8DH?2pif3Ip?7BXV9TF&Fz56uw)jstq&9Dvs!!$il6w+ zecLr~ku;s0*nji;ROOHM>_rK1|KomOc3qSD7NgfEFxXV#{2c95JB&cic+?a=f0DQCgbd+mpF@8^#jN4bXn3Zu$>|0j0$el?t4_7l``1 zv`|odDOyEAOZ5k*sjGm%)Wcv5$&hNK3l*dC+UofcicWd>qM{w*25x*k4_7Qhm~_>)k+Zsi-n1LsuR0 zT$g02u_GkCY#ksDpXL{Hj(8EOx5wb$^3++QQkb z#5)?G-qr6I6CZU$HQKK>%l-7b1tS}4CXkP9ds%l(kJB;WtH|_?@z9nmYJ+|s;I{um z8Sj7i?q6U1llXdTk3%xs43l+rvreYBq0WZPn6Lk@4gO2vpTt))oh42x)_$*_P6DO; z#G$fnwV5{mMfNZJPvWa1vg|&L*Eu{hn(r)M__l)m%Nyr^ZSY?T|0KRPUMx?g=U12G z=H@$c1~$y;pUtcN7umn?KZ&nHdd%WG=RKvm-aMmn*>f|J$s_##+Tgzw{z-gQ{TOdg z08gPiT=f11F-XBbTr3ULzy2o^|0KS43(3&!AG_#grlx<8CdQ!WR(5y)PZ}WmC-GI? z4t|`UeDI;Cr2XW}`*IX$#L)`iHvg&oPvWaN=}V@pmpH16L`;-%_VDjoJw#DpYMVwrLl6;oQkG3F?5`{HO9iiLcSJmqIq*OY;q8)>i7xEO84EiueAL z2FU(Ne9cjr7eBCIc*#S0uWPeqTZ3)(_x%%qzrTsE$=jp>wumXSTnOjQXkQB7C+kic z^%7l2ko;PV=uU!~lsn|4R8BVVmTrMc`S=I`RPu<=$^*5*SpED3UiZwqGj zkBp&l>yLycco@Q~^c@11^Rn{G_+$59q4VeU#E9R7|Ebc?jf&c7QShV1SczN^48<$m z#9x?XgZ{g~PoEF03k^eQ$C?#7@}6$RN_}#VU&&Wm6^pQ33&=#o{#y+6y5qsvqRmI? 
z@P0i(yzJ)8OjZ47cm)2o7pTjbLxG#0t%9x}+b1c0oC!zm+%Oz>2ujQFOwY&1OTr$C zw1|roc%rV27!Q>MQ4GAOd*Itn!R1^1oku`8dkKGmefdi)#~wmcN|yZNR!rZj8FjKO zy_y{579VmkNha)h~5ybnzg6B(9mKItuMj|yIB~8Mzr?8;L>WGU2u9T#r-4dWUC&|ePH)NLaSW{|A6W(pS zdR+@|X8j6BH8~MKStOMwOON{rzKsH>X}^Q{fJk+7b#DZQpi_XuoVL3q)}$fPRwlV> zpb=vI{R^4acfx`nkDg-?j23|d?@;kII7E(O9#?lrp0W|FVSMx}3o@^NRpuo^;_iN` zIojy?65J;VT?2id6Y`r$X9D%}Q=GHS<{qjE7nzPrmHml#HGG45(n0k zk+(gpu4F`x@I7(Y@yn!f$446MA_8R|y4CYp?&bwhM%kauxw`q`p%NU&naP;Ft>7Z^ zdoT@Ff?e@HQnS9c;cA8F2puklf_P=K*IJ(0JfsQrJzc9ja*(!fB8i?KJ>hj*55m=Z zD(D_vok8RQd-$Aysx3;Zl(yemfxqV|+gkZc=gpdR^&wu+q{bM(Y)Z(TFuX2cjoI}w z4yiwi&+2>caq{M)#XQvcRMuQ8{8vHuU-r+JWWrpn##cjf*+Cs_Zw<%E-NNDRuCJ_# zhEws5`R{&7uR+$aKH_Vp{ar6Oxqq@9bzumaJyLohXQ?9o+>VMjjGH;p71!cjj&Gqf z{NOQ?4xuM}T^(km6bl8Cm{crc$70Lsp>Cg21T3@Y+&L_q+t?K}E+Y%EPux>4?y`MN zq}97%GsEdEM&!PS%f($c>Hb(4;i3pw-;?#GXJ~dM0%IrX-m(N#KH4*RB_+%vamW?8 zhDM%FHus&eeQqB$3j5hUD3eWIsm~ZjY^ydzgXh{11%8;6`Dsp-Fvc1UQc)VJjhkI7 zk>VEnk2dUF(YqHgvJ;qYgaaYW36Xf_Kb@mj9qxE7kT3T?t4B^=*%G_q*EE6VH|gRNq{V}M-d(nWFssH?8I^yL zwTM+e1B~%CH9doDBIN)AJSCHFSy2cziK0GRGjtkqU}?n}rH{I;J5pG)kpCAfSSdyh z7EYY9eZ=cf@Aoh*G34(cUcD^@`R7geZk)`s$~AJ?H)%7=KRw!?`B!O*|m9PYP5)pTBGC>jViw)qTv$=Wd<%oPy)u<32e zKm(N<{W^zKad(r}WE+0FQJ-MNU}de7?lvb`!7?Sadau094+wfBH{8* zJ!kMf+$2<~v~$TkewdJIAzsYZ$}F} z#?3~)5d{v^o?7M=-%F2-et~=iW9<1zO+Cf2mJB=+U z>X%)COa3LRcz@$0C8EazxLieuxkU+wW3sf>a#_JXXw`@?dgn8QA4X%%JSExm+kZ&pLVL zeA!ILBJ12kXBp5FW516-xW7}vjff7axDj2q8QR-`_54~~I4^I#cdQAA#VT2(slWJa zP_ctzz@*OTSG_nE%Da6QnX_zRI!p_jbpapo>6a39^;kmm7Uko#)eD1r<5R5!a;b=G zO`t!-%5Qm%kc+1mv$wa8TLkRZ!uHM2Im#RZUTn7&l|xbxU!phR;We^nC74p-)|&F~ zNXavD34WP{eaXC4xA~3;tzWoZWoqQ4?k8ljFResA3!mAV99W6@aL}g|0-r?$|7dYv zXtEBOCAzHB;5sj@XiKrt!l#x@7!ZLG7aYqCf5ByyISCcJ3oGbL(;Ip{|7{~U&ff2v zwwKq!lJieic%AF}Ftuh1n?TZ3=oAtBes{dSiZh>xvxZx5s3`|%c)k@n?y1^@i?${9 zq)KE-GNjJkewtpo`JM~u4=RTYFzYVLO5S!B*#w22vkLOOMXzj zg6~)fENvGoni77Glrv9om)`1est5kGy&)n; z4`D>I$mVc{c9)LNe+qNn{85orRSPtkSclu0Wj>L^O+iHDND7bPrWLdE9N1Xdr!x#+ 
zHBffuyM|1|?W6yUNNmhr^LkdlAdkqERK0D|b;NT>f^XCoof)EY26JZka#jz{Em!7a zA1V&ZI$tFp{KeY%iU^f-!YkPKib8`M*69$nMJ%+vkP=jJWn@*zpG^;62^js>J8FRP ztwv1BaT1`u%%ep+O|e2$&eKe=&%Qr2z3zsLoIQuw{GjXS*NUlE&;PmQNvVkFcav`( z_Xql7WPaAE`mbxxqh+`7#IMlX6J21=iu4=Sb;A8)5gVcXs9&f;OVAx75@Y4@n`RN+^{3(+NOj!y_pFxYfw$yUlJdqk zvx&7Cp)qDcjM_rg>Wlq#YxI#jD5>Y`yWX6Wy`yb;5;v!y2M2JUu1Dpirz1>*WJSI* zIQpvgmw*CitkcRUC~%)=dY0xJm}=r9R70RZ556I)Ue&Xl%y13TWw=AWmH{Qk zyuG&&2(AvzqY*`EcVBK(?ygQ50(#2XNDJRQxzWe2WqfZ>cX(c*4KsoQ=ZHkOQjo4c z(ut|Ivl`P{zT=l8@BsDrsFsd;Dwj*>@@kj1r)%M`^|~HFUVDSg>&m1X!;-{#=nq}y zv_S;l59HhvYAK;Yx6#xUWG_2`B_<`uCx3ukX+i&>4K5>N)WvO{%gFsP{K6d+nBKbf zc}2v~10xyGmtAPPr~6AdSQMx?GhaG?*pq1CzZ=CBlJfZ?$fynqY^=HDLz~VhGo=pj zxT+l%UXe)zq2FA3&sNk-1NpG+zEY|w z2huW5_0nQ_piVNMfh&*?a!QvxMqKEie?fvjwlOWfwpj-~IFuJ5W&CD{%k|#)dHUrS z`o}&!iXfoATvS<;o%2u5d$Zxb{Ac&L=JP&SAouAWQn$e2<7QreX-#M5Hu~hl2MTPm z^?V-JaI*U3o+$2J9zi2O+STz3sQ(egaMjHl9xRB2?X!M}8-VWMYX{`?&^cdTo8^&v z`f6{>5_y04A|5vjsGV z6*)J_TyN6dSI~k(Q6jJP>OZr*#RAsvB$@m`bJq4)dyu!E%pr$4ZbS*XzAKH&Caz`G z2dPw2wydLso=Eh2K!MR@t=OC&=v*2%^-s}72=n|Y z)3bRSm<;4F;uD+C=PfAm|S zGNT=DDq9A=c#(jvFIJ-{#^Dh?CSa41){<)-W|JQ@ z@)EE__oYA&zVi5uqs1>xaBhB|@@I}Vo_}mV10~*RR-Ajm+|>_PH-@g)PM%s*Op<}F zkJor)n~90=dod6AJGMzCD}A5ZrhxU&^L*dsLckC~?j!Vgr3?{kVfOPQkTVx6@SPi~ z^g7BBx``Vl&X3ETeF5?kXNfcyA;n4kx{BzapJ~SE{vL)v9&8(Z9H-jZ(>x%E5xS$y zADFAH1mrD}8j7i9QWzXTD%{jH*2%}gD?kik9gC#dsRNxkPK7j#f^Qz7vN$LQ+@PBF zJfeD^d)O7zJhWS4*==qiTMNj)9{QESWvfx}hZi2q-=Pvy;BA5K@2_~t)>&dgG!3px z%HF+FUBWxbp!;Jzd(yjMFg@QiOP{}Kv`{@RzY_FdZ7da=uoyvrs3Yo+jeG~U`!T{= z7_feZ{0U5{5hyk7$&H^b?Ity*{CJ@KH_mUo_NUvLe86-XX|;{L&iuZ&Cxa$NJ2X1P_H<%;|Bk^>oGQz%-CT76{k{mXsEEq;7d*tg+tG_v zMkj{tSh_&_c6&;@KEZiQo?({q&iw)BVgNPGRBg8pvk=H}L?Y5*do++@@h{#HT6u=& z=GxM!`KJNOPfvY35oFp z>2;^kN?N@< z>HRW;VDO9>D^#anT+;v3#R1$Ppwxsmc%C?N=QK$YE@c0^dsa6RkcX;xXQJ}0vUAqz z=AnE#zqXxWm;>_E`KTOgsu!$Zs*1bMucSD;x~v+290o3PaT+@mfx_;560aH`XDNVH z7|7G8$b|+taa!n-^~NV==5jZhmkfYhCEUZSm3gdV`H$+uPb)dotPklzKu(4!!j{eT zX>dhu%xy1I_nUml6X^cD=#sS{1D>KgKrp+P~Pu 
ze3#o+qCVNlC`&IWF~f7SqFaa@8pW0)+hRaw9g!3~?+qd# zGi0fFgX7o$@GT3FciV}_W1|L4Tkyl`=_DC4RGpZE{Bxz)YuSp5z}%)`es9*D%UiKjA&`He ztGGA$-rwBcgJ6c3t5wxnXt|gSXdlx{q1e%#>9vsio5*Uagf2dz94jE#xj%k_Fn(#* zZ}5N&b3F>;6rG?2@^vVDz0kp=wy=YI?ohW)W?0j&p!UNtNyx5NL5|RXchKIJ0;8ee?K!_wZzZHU)4p0e_iYX6ed9VrOZyYi@= zi1ww=mE12&>wYMn>Po|O0rDHwi?mBAGR;X{*zc^CWBJzo9-%;fn;MGeyh|BNuMy7q zbJ*;zX(Sxfeik1(?s_C5l531QzWtD!l@pnBlMU2MHfRJRN2bKf@K>-u`W#>69>gsG zc^i!dQR!ByTYmSesiSHaD7h7XP~uo~Gc>MUmEhmW@z?7-d?KM$l_}~#{rXn@Py4m_ z@8dA41|QfJ*J9lxNP&Ecg0EXZu&ZXl{ZP;BtvZBWsa61xH>~nG%v^jDSQsMQu&ql* zvGrd{0dh*64O!ItD>kCT!=$(wyWw{WE1>bm(;nrrRuriYLaF$bx_o=?`wD{`pnh0q z2V-!&-{rjR^YpOxk0A~1Uo=3D5Rk}Hru|^M@AaqDiU!f`H&hd-|Bb@^tg}XvnTLBI zfaj}?L6#zo0Pp=ARIvc52cd5z7zNA)c5aRPJhTBUEm}2g3?1A;q$rmY+#kvxx`tU0L zcyO8y#A~XbP+|{KIbVNomTt&C%I+4DG1Ax;9Wp3B z;%WMw!*GZMF=!-IFX~aG@RUWH9B6-$lYqV+rxE*%V$Nbev1b1rWhUtUz!sJ)PJs*N zApDJTSB~|pSD>L+4%Ab!lo`Z5aAir2ofgk|cBE5EARhxc&(+s=I*c=JWjw=Ckud>e26E(5eo;DFr2B*p^wJ{6J?Q*SJC61wW0~;=kj=EX81!+fiZ>Jk?T@1O z+%0JAb{1q)u)}HQmLvMWzySHnx|wqH2(lg4cee^%;lxXg0zr^}8OTV}eaFIU2 zoA28>Q;?JhYJa*05jIi0(pn+)`|;o3vv{${n1If&^#`k9_vur35k}$HVkVQFx;Unj z!20)*uP?Wv50%rb#VigDi1k&szpDWG#9|(yl&Syi*rI$VF-97M%`;OgY*=YC{ z_~iL;Cv@fuRFCH>pyT5fmZkB0X;Nmd!I5Y#;fKL@6(a-MKUpQ}dqZ3_9`J>?5~4U% z(`#@7H2$()?0nH^9vfn&s!dpSb9pUiJb`Nt8OI9qMW1Ujdv(wb{qhyY+$8$o;WNd_eBL zj_jzIR@D-ZTW8r7Jh9XLv3UTd<4002OJjtxWD%o%%0{P6csNP#E}o^Vl5ifje_;TPkN)}$)|-D{`a~%^iNGSN z&X3>m8PqFy5(ODk_`7gR{ep{+^YlmN0^m`1V4J(vwSVhwGP(4boq0$g4%zt zcN}6SBW=63u{bMeib?+WrIMAv<7)`i?4uP33!;24y-%_f+p;lrYyjkx`7`2YTiP<; zduv;)o1|+R|GWqJUkDwE`80An7IYu7{J!e9RCq>j9Rl^a6%Cqkib-$fS8*8QM|{`p z7^z|V^KgAf2~J&Wp!@4P>9+W}4xD9<$dV+& zs<3xnrh+T5eyoa_c6mIuV{FLf4vh7oqoSm8kpCKgw;#M;YO1>18g^AaY#4SDd*%q# zA4@L^ES>Q^$?Tp6%0j4^x=kup0Xc%PgoI9@1gi;S^*9YCV@hfc-WMRJn&KGIXW6AO z`SrX`*T4dCfA0$N-v`camO?E0#O>G+C$srVYov{8K;sL=vghCsqq2KayAK2n&@!)7 zeBMw4?LW?Hsh>?yTNvZSq3j#s3OI(Rw*fiwa!hNx(c6>NPxUvhR4*^lXRgUW-qHAD zb52#1i_l*Y!KW;B3-OWY9LO7?@YN(&?~8h>tPl{Y@mBJNk3j8{kc?Q+PsWI=WYt(M8PRh75ST! 
z0r>m9aI~>0{BeV_%Bo@gdl%`&N)o95$*p{6pl5N(*Hh74q4c4VO*fe-79fY%NG7ma zg(-NqiRO|VtX!qErmYS-fApLTi_{|_Sjc`1S3d~6L|0neKIjinJ2C6-XF0=ckSDO{pqr@8Iu&EaJ8X48Vp;w6YXw$S`RiEV%X zlxcvHU|K}}aD@Zp+p@=7EJEvw^*veEbp#Bpo0{NNY-dQGroDGh(^$^LWBvGhU>op< zK;tW^Ghb>&;nx|(9=O>K+A2Niq^kr#`*ajO4j*+W>C(d#Riuh01EgX?)PY=QNSz~0 z*cS>LD-=&cpzK!Q<`(4twoa;Bn_Wxm%)mstZ|d+AcU~lbp1*dL{2{cFgTW)@N8u`$ zg!s(AF0}!)pU;vWZD$uag{>_iJ#bG->UHxB__wDIBl(EVu91gt@GF6j4p<-ue!UPX#o z*Vbzz*!|O4m!U)qccr3;dBQVn0*xZ2;bo2 z9ne7WHBD{{mB(AgnJ(@z2%L~QB(mrOP<+K1q31Y-GJ`DqUX|fT_IV}KkRm#8|7k>J zHDIvb<+^=qLC;@$;@&k(vj%cm_C-T$>orQBVp!NTF{8^s(x-1g9#E?m+bcrK`i0=< z2*vFa%X?SqZXg%9+&617wt8Q6*@K+>=MAdqW!g`?uggT9Ku$@QG*0L- zLgRgqcJ13-zcrLj!4BkGjE!+kxpM9(mix4=gIkYsp7jWctp#JCUkUH&$GkV+p_jkt{XCow+O1D6cywA8r9x=ow=Mevz<&IoP{GBbR|8vGU zX{RdXu|?8a{^q0KAXT~7+gMSp^V2L}+V z{QRvcj|nzG?H}JQ+wlyft6GrvbB_!O6r+35EU5iS^Yff;F#kk__M%AL{$76vR$1K+ zXdhL=?nPQQGL252IeTYpShn}-1k}C=o}#K*!yc^=*N2FIO-v(%5gh0R>hbI5B~K;r z*tBa7JtkxzjwEIFLGjZDhjQg@%|m|d-$U`)j&$kR6HK7WG)wi|MIwJ`TfoD?#<+`yHf zy`O<6E8a7`Z;BpQ^S=1jV*Un=)&Jf8|5|apgajv7@*8P;i%MSWRQZoolNQ(UHJ)_# z>@7UhPXzI>)rOjlhnld588;8h23Dy})g;n^ZxVg|@tW512a-T-*}vQWXJQ7FJOWIC zu+%_Iv>-8I+A3CGS0~S_5^5GJ4a8N6xvL;(Uj{8ySaK|(X@B#S<|Wx>@ohInLosjY zqbD1^pJ$-9@ZatKGjRg~)u!yka!S@oPkjHPgT3QD0*~7n!fVgUKsP7!v_@Wtipg$< ze8j*X>lFIgtx$vI22lld+B+_=w#*lpoN}1|ZWH+Vf30ZFZ;1sPrWG_OY1CdyR+3qW z9N*5Y+>kIUm!qkC@r51Eu_sKEgv@6%&#XGno~SNj@HJY_Sv!I-LFrjOb*v81-oKas znfL+wvxkz1fe1-Y1*Kq+{G7f^L2w1ePjXt)LmlGCam^NX@GOa3R$x=3A=l3t2Hsv+ z7k=C-fE6Z*OH*42Q#beDZ2~|4Gcg1?{rg?#xsf{I-7HP{{J&^1l}vJP3Uzwfk)G< zS_;Mtk!Vh;;6?`KNGoTZ=u*Jk8I&saT^0d$Siu-#ZX9P~hBy?R1L^XZ52{sTU(pwR zP^<~K@BftlnOFi@zRg4V)dAH)@&hUrclVtSe1{w-Ne7=7ZTAMO&95TJ$dm87Q2dFd z`Q5`+{^%DB1z98>?lm?Y_U8^{Ok(f;-6rt!KNC-2EC`=xODR`Nt|UKlyXvm0!`ztP zib@fT_~cim_%ffs7SjwUeq|#!yFW3WpJVG-QcSD(`|b~^WP9uLPu?e>wiIalf0zGG zT&Y(>;iSsGvg-NT_k9^FG^s($8TAy4@@f0d#XLvT*CbbMZMxe!Tw&@D+>As?9)@c11IEb}wX{&`W7s6t^4ynCS(r;H4+GTV)uv+TA(- zNd*6OzVp{FbC$I1Ovtgq1DZGxkF-ghNtd>X6tR;LQmmBiiJCKu& 
z5?|!q@8@1hq`jI;u6TP`H|_Hs?`tHs86y}gNVH;Wf$7K}gjiVj^22!&B6 zwhd0N4&1HO)V0Eb8hS4I9;%rU_2L#0krm#JwX9_%lT8|HkR{>T_x30hDqEl%x98r%GVOQ`KD-u z%0w4O6znq)M4rc@L`V#G@sIEa&=MywyAV#vr!e8a>$1Ff`w(o1DAIF8f*gWLndx8e z!k)TTA9x}6&Yrhk>QnISA?eQkbWv3EV;03h`5W zWYA0~dv{-kU|`0+1J^U6lm9GqSRQy4_9&`S&DVoOvQeo&8)C6|ZkddG2`iUmGZ(Fi zm?M^_8I8ZJ{HPcDGN< zJflL{n3P7PvwsYpW3>sDWQGnW_xZ)O_Y}ULO(!5Ik>xd4Z}WGFB)#t23v?#s@@RR$#GBtBvqxFafh7!)?u5sepCfsAX8`hEC)Ok8WHAvzRH5O;Ao$5DaOC_bqq(pdq| z_QL`RCpIE=B9=+n>;NegL=0Lqa#U$0S2`YESE6Q&&Diey3>toW^V6)yYL9HlABT)W zU$_c82g&5d@!3WQw-Bm|DwH>JA`|;qZaa}72ZudW_5@XkwD-iZ>#vO%vlZ~RF3f_6 zAibcql%e$D5w;4qdI}K<<6gz5=zfK4K^pAeh(gWrR}p>B<9%`n`zn!O<+pwg^{iV? z&DRabclWTH1U1wtUttCgiqV3wA=s{ek15-Xt$wdrq{Cvu&6qLiVre7Y6qTTB!7aSVbW3g~xkYv=6Nj9M0mo}HIrv9U?ORJ3 z4sO0+M>sRNyGSewFE4o`qKURVwBgK5XB)I#_$?9_i7ak|ZAB%5rY zB+yGhCT6t3V(`3gvRZSH^B{(l7Enws`^b*pw27FSeaj7horpkVdSId zB9<)SOEJ{LByDS&#I)08K5ex*{Yc1z+q#Jrv^9U_7$VuahoXY`p7dFbAvFGRL0nso znlob#N=_`8D}if2tJaJeA@DqXOz?IAZR}~-`GOr%KyRb~iX~5FBYZc~r|hDc=J1}P zXLkMjm$nwqaqLMJadD?Eh_X6#^};>l9in&a?lf=WtDLbOQ^@!hq-CkQX6>WupeqB`_!`G zCI{=6=AQr^5BY|d*X{8^FbSF?PV`BFFn+=zW^ieC)gx5~y#uqe3i^C+X#rgo_SgNC*AliU#*{><-xGogg)8?XQK1xB@6g zm1D%Xn-TWD|=QQ8d0vG2E8$JKzD4w;(o3PK%V+?zZCT~!o z+1YajEIWw&QiQ4y5ZAJK0=`c2KZKCuedVSZE^97?mN%mwtDSgqC_htH(*Jc5P6{9J z)et+Y#P0K~%-Y&F7@^x;oOK=*Tub%91k6g^s-`dpvgerdOJ2>A_kUma1vzXaoKNb% zefKRCiLvGv_Ux63psL5YB7^mI>oaD^7S!kamx(+0L#JOtogb%)JhL^^mZUwzH%xM= z6&uGW4&g)2zfsE)b)F^3`g|~+*>mIa2L(RIWYON3t0VX(zEd|-6<~j`5#;)RSbM9m zxSFU<8yaXJ0RjYfC%C%?g1bv_cY?c1(BSS6oM6F&y9M{)?(PKm^Ucxj_nK?|IhY(( zJ9O4Wnt3my2w4QfLIS_Nf|zrZzbzu58@X1>NnMqf2-djj*C zw+Eu+KpyvX{w2JIgP&3Jf|@tW4#hp_!Uf1xM9=?NoCy>@G^F^aQE7-IAt1v8xyK6s zLkN%bS^48Q#(N5RQf1z;UqG%$^;vVyFd3AE$D~gD8EL8O(|ao*r<$VQcedUr>l8#s zSTOhCvnlg{269rTdE!tQDz9bT^$I(x{($!;cnv^qGv_Sta=--ZZ{H{$oGYr@(QgA0 z_#Dq_DYFM}1Su73OA4#F&n+!YL2%1Q7w?IC$yl^+s(FoS6TiG|Z$^Ojtx@G}droZ5 zX!z&Cq@tTK2J(Ggft(oK=6+Su%22)wmO;u0Bb%%wYzN4PLmA(&cvv3yh@z4g+8K%U 
z$cIk>d06B2;x_9C&C6vYM>A*`&)iMQ6Ck(RVa0!)MB!-#tb$KlSj`_=C>qAFb9P7F0W*bBvmLnNU7&WJuQGQB0IT%k$6RzbjS-DZXf z$ZK@QspnD3l2DS(_T(aDuYRf>kpX$P*()Lq4lh`ByR|CHBwgxPhn*piuQ)+fT9}BY z-idx`*O(3>T__Um19HYT7gO3w_|f-+e@%$6`OVwbm>~l1cr`&bMfS&$eHA*zVHWQ! zZI!=q0rgC7i#3G8L!r$rbNvK2ngMUm2p}AfNg0}}-}1ncgmTBMUG(Hcp>#PPfcno- z5(NwP5)_yN@7km#T%+GNk#GY!Z`X=WjLU4}a~?wt{*}I{lAVDFkgtFGA?PD}w81PP z>tl7DZY317atY*uJ7F9F=`JJ|nf9&`A4(n>r)&v;+;FR-da@mkAJaFjGI;WK?BTT@ z!g0$vgiLqDvyh5H9>B!uhRmOK`}P*7cTbsh*uPt@+BZgu%$Mv+Y2RlK+=L_&Qqq-)tl7pM_uk{ob4p$d9q&-F7>_RCKj4IkkRT z1mP9BP6N3LoWIB1U+M%*w&nvw9&f?|=j9F9)k=UH*vVkru-C{la1)yTF}?`2O?!*w&x^*big7 z5T3{!vtVbnFew1_aai(x99iGggxuH@SSY{#_yMg9(SIxxl78w>x$ty*e=y9*3OMBDx=qlSWND3w5FF6F+lFJ?`-wTIwvJ+igT3hmV!|WY99mgO1jGzTH=#< z&#ct1bcKr3TIDnlc*@+y6Q)WFHj5cD$Vh&{eFK)Sek1rN%g0WHeuDg0{hpj0 zxHA-D{qxEC(^|k`JwjXiD1$_dKF`^2Q)HlCm#4o#vmTA{I~b%IB@mtpuQikjhc_1a0^93JC)iNb zl`@0H#}~L}bi`7@-cLaN%gpq&Ho<3b?iuGf36dx3Lm!hckZ+gWd`UAm?z=?o<=Qhi z3a{G8fq4FuXT6e2pN2NyIVE%jZ!9HLoS9kx^{o@tSZ^L!8rUx^;y53%ZZ-(*{eT=l zIpEK8U>4Luj-`Xv-YB!(Z5719Bxx>D?SdsW*^Ds3jM>TF4@v8Vx=rrQXfc-F`I#<5dadE=QKf`21K5h-`~44d1M8q|+5ofSf3_!hNkYw{r3i~gMLiPe4r8w}JN z?wTmHu4KpVvT}s}l<8SLH=w};ay>OVnDBM9E9Gd+mF7?GZdE3H5bfW`Juh)WVxuR0 z;j=iyR4;?pu?-PWkN+;t_cuDbeNd@mZamE}MUqJj~o_SCMIZKx%wE zy~GD{m8(LdDUUF^I3vb5%mM~esvq|CKpt$~IFTQCJbDq4kpFR)I4%gYLjlO+415;+ zo@V~k)bUoT)}g!;;sr?nxxFJ<@LcC|oZqh@2ThJqX*u;+i1~@EK*m4FaP+g^%&M5% z4#@eSJUR!cm!p-zoq7#x{HWd2+{k-rFSw}x2grG5qa&ITPrcK5W-8>w57E@_>aT%3 zZA}8Ye|>MwV-O1}29Y6rX7;fL$oD|OP{JS6pUr1+4P@B4f{%Ms5P%#OZ%kN|yV0k; zb|3#%J8WylS7Zyw{RU5=#PhHNhCja41hR~us1xV!0J;2G8IIZv_QaI!upYrOCjNtJ z9y^fh{1N0~gx75*`gm!9;QBgh$rWA)$V>fP5oWCyCW~^=C4tF401M z=Rzt+XD5))uLmjHfMA)$H(;qmY&!X zk!(4AxM6epj#b)$sqX=?e#ph%_eCQ*<>B^+*YLc!*OFU4;{)p93WXd<;T(+@2yvWmsgY9e`Uw{&bk z``eg;rWg+9Jwl(~wmnQ0JCB^bLiGPJu7;175|0Uic+c>QR2^b`?-=7$2JcHOs7%P);2{CE@jk(T<60f#qvX zL1X%y3M@FP)Y0CUD@;Z@-n~zIICu2TcR9zENP$l@JRLXb*gH=|{$9d|NtN zK?&4XNA9t`DhjOxm@gBA7mp*1AR3CHI3uF%%Pd 
zSw&e*K^J0w_@Y6;|FL`??k^5d?`&J$6jIvRZSDQ=JTu#5=I%hdann9}r1jBHVV)}iIZtidieJXm3wmN~aL*)|pS}A77|8uQuQ4sahC;Jf?8Z!I=mkm#(5H8Mbi3)!nU>=I$ z$aDr>J01SSJ`f|RdB~@__yQHkY4^nlyHZk9m6}33B@*{*@YXR|ft)(MtYcYF-{=B? z<1*o4hhy&p`v)M`+xt=Loib+b^v3{}tqm-ILrg*q6&FRRmnJ|O3#6nHJ) zHHGtSz(~f;+jzIov zmOJiunvYLs$AZ99d)EFTA0`i_ao74-75Bph(N><&BLAI{! zflEM6bhW@3-(OqUCzgrjuk(2)k|xFl$iIJqk;F8gvXcr9lWZ17qo5yaHUM$~TYguf zQhFYdo^YgUK5btu_op5puT+u7KCpto^eq6Cn6i1>1;7-u<@2gAm9S z+C%D*nTp{CQHH)LG;$l5c>rG?G!1dDUiRBuktwe{qk_Xl{mBzts1#t zEq@H;^?r6)1e(3n!S*7oA^~Gx_7g53#y{81D4Lbmpf4Sc=#vMyTzKFz$Q!ZY`RgaP zG#p~J{1~kNc`BAcb@UMP_t%1}!u{{|=S_d=UrSY_wyG>urhw)Dc;U^_ZE041z*lmn z8Ba`v{fHC;o z8AqB1QT{EFf`hSD)W;@AW*r*&L+lk#3wYS~454ULxNw?(L!zu|r2b z_aXeIct3bQ5;K59w8F2Oub1QRy_K$<{pYDf8)7c|ul+Y+JRD%_e5=5CJ42ShI@0h{ zb{+d^?IhL6oW5k=@X;&ETQoyZMnRYl2SVMVRBwK!+$cF47B;0X55ug;H8QpOl$`Z zIAnL0prk(hAD<6WT3HX2MJM78V!LxV-vBFAZr5}%R6k`b_Qi07S{UtmrDX@)#PTOP z6y8ky=ao#Q$_dtewLhN*^Lk-sYeqiM^mI!8x6Z&k(sgS)G*N~!GwG8VZ^owo`U2-W zbyZgAPt&A_KG-kbp-^VNP!t1Z*v&PpE;VxK-&Tas_#a=^PAKYn5khSX!W63^`U24Q z|D=&;Sn=R5qgU=RP^E+T7-OwPr#znD{6O3HOe2YzA1q_pqnw3zxXm<~66I+>6#M2SNBgo*m4}xWWjw!~SC)8%$ zbJ(F8?P>V{{=xwrefO(o{g{bH#$u!c`CeKE^+N}xs1o@Xvr7C#puPX5QG(U-z|TUZ zN}O537BK~+vW^4uy97=9g0KE=w+l;OGa}l4!9=d71#!t-H+>4Lli0DtUeAu3)Fdeq zA}3(=z0No``mar39;Md*5@gr(XY1y<8~@kZgkxc`t#<_`VKZ4<_QCV0$*B0g6<9D| z!UJ^qWyG4(`d9`Zdk%%o#?4#zw7BWCP?b^*Vl51`{Xgkr3$fk%YmGzU1aoeo5ZstYH~g5bZkbsoPTNMcZSXX1#b+o zCk?&kIGVZ);PBG$IGW$W;^o;J=@7}|55iM3q<^G4lkgO-b6`C#nu(K3fr-<;Jo%47Y-z%#y~?ma#W41L2I z!3n4x$yKdwvcxa3Ma}VNi46^>!^35qSA6am+`2>;j@f>Rr#3 z(h+u2m~(m>aG(n;{%4(dGQa8x<`_|jk|AxZC7ocbL1Nh@jmg{LseqoQVOMXrqyyilql-Ul$63uQ zAk963=%$}S_Ay@b_+wQ0l5mtP+#W%<6w*-keT}TQpbCVyVOjXXrZ^4ejFka|ZratD zU`#wWxe2U$$KE2K2VYrCI3l|FQ0QlpMdN}eJ83e;1p;Hl3kFW*LZ z2y%`elqDw8sXYm9XmAb(zvbF3m~4Fb1*L#MXP-(q)RMGpzRi2^XE*4X`v8j(+rKbE zl&RW)8`LWOw%ST7&2o45C>5#GW|U`97w zs|fR_us1t!AvL~hZ(q~Z?BO@Op6KCwGBmQ^3b$OumD_5f9<#Wnp7VA)w}8HjTZ0<| z($JaS&I$kK%nHmbes{uWixJ~ev?0qdNkxAVTW($KN`*(OTRB*nsPP%dv-S@CtQ$Y%wOguR^ 
zsKY5@4741m8WJdWVOoPdC0h(?65P=KnqUT8BKiW(ut!s^+>fgShYp)bp{fh0WazZ{cd+R87d4^yZ@e|(2NfKtrP zu0RN!K=xa=JpJw3sfL(vr?u)E1QpKoo-r0Wq?wJ}y=|BNauIevDo5fc^3>{XQ+|_~ zL=L5oUeFNe=kW@2Y5tVNl}g6iIj@eZHe=Yu&*JYo+EK!q!$d5w&KsZM);w=_%7;{nFgR z%O_+Lg=6<^X0aC`m;77u$M2F-GrQ~0jNM7gzbmmdjoal^5Z%EJYB{?Nl^XsTL4=HAHZE`NjAU&odbphV~HJ4%HdcZ2VpvcO)O z?>#X=Av_8}U+;5D#9mL*>`#l|{0d2{8?7vO&Vi1K$nd)8nTOfU^U4I9Ug8&s7_+oZNv*K&i6X)vn{k*ozRo}0IRrsjsnv9yocd9zm)aWjz%XVSTvR&l9!Q*(K( zDwW>lW$Fr3sX{u&XrLZ9-eq5znHs;P&WK;vO+0WJH)SI`+6z#`S$Alwb z{(NLO)7wZtxN~Xpx*)8k1gB@)>_c)v_XjXeTx%6boCI_IO-FHn+VOdm?9R5LSZdcR zPx3|Ft>;oqq_HFjZ|j#LvioHIrtH^`?=O53u4-$mdyp2E~-s71E<-%&(bKW1wc53V3kTo_t7r=K?9!B|{v^_IN zG7)%!UaZC5)MS~d^k%6Hv$e}x@>srRWGKfgzp;=SpSMYYFSdL+9Q6ec2{uC8cD(s2 z?mw?9f_*0h&D$+I6I_c1_Jpg_hTm?;q`u}H$MedVs;oM2Fkorw$edSm+0zdL6LwR= zo8KH?!K8}FGNH!2hx(Rp)Y~nGEXf=l_FOv(L#@x=Ihg($g`k;STssaO7@jhWP||&5 zYUj$v6nFmzdZY5f@SZcC-dZl&nd^pOpanK_dH2cnj|_*Q|G}_7)XqLSsE$tMd(6Gn zrotnFg}_x*s6d$w%qEC*#=XA>+V#Fp&T$SZXf}11u-Jotp6}4!!%$*|$p$H=X7GX$ z+WvR1aztS5ufxAm1G%%J>y3Mp`zpJSIE_NJm*<{gaP0(sO(Z={y?nEbHV3Y^b;o$B z<}cQVIvo$hepjf>@S;-P#^SN+%h~pd?Sb5!o4gjf+?_9rMYDeT^eh_Da346Q!*dUo!M*4-fXzWvL9g_e`0R ze=0{((JJzeQ!3|@B;rk;v~|?wZU$!rVW5qnq__ zIugvMGsZj9==nHu`!Qemq~Ns&oq>`JwES)}-M%7`eyl zEDn9}f$qIpdHP4JD^EFLHs(=?z)Q~%Q@QJOd7%nhQugc|iz{qoFT@&FD_(pN&-w#x ziP>RsN8e*LTcHhvW5czGp-`fZL^E7e?*Q(v@fSigafrZZwIN)^OwLbTPJc6&PIK9rFqiIMA6z)tK1&Oq)Mw z&#&U(=EaGsy8yJmWvN1}n>X<<@@CRs^7FnvA&vDrkjLFQoO(0B!>8)6nK#u}x|?We zLIl2tPy5tct^F;1P3-G7-F*~c<*1o}`mgR~^R`y9MDOq0rvml8Vs7ntd4Rmi2l1U% zK-hEq>{kc3Z(G*ajH#c1{OcP8HFSZ<(SHXFpWxoMb}o=&LIeh#(Re8AA7D@2?IbH^ z{Cl&t@mC?79W)@S(JycKxhNa(9R?M|XapIp3xW3aPe_o9Y>$-;Hu2WL7S{V`WvIA7 zj*DqMH3*A-c!=gv4a!N{`;)B(5%_LIB0`O)i$n;8dwxDS?@x<=hdd3`kL;wMNnouQ z>0M*(6+|sD*k$Q{2Ooi4mnO#`OOosg7Jlhreq+9|IhTqeZF0u?a( zP8=QU=4I%!QJB-80y)-p&w^nbNA2$SD)!d9UmMp+gB`L zk~Lu*kROu>IJ*U$vSGPgk}q?r@AJhMPXf6>&5ErGUhindh=Vo1ic%M)UEn5=(>F;G z&kSu4(Mn?^=D;gx-}21D1G%@^PQg=EN%(ZbBs2xZd|GH_%>j_B;TGs%RhU|=xm7x? 
zDQ>f^ZR*bfxg*7gB~DZlMhV1dx3=CCvy4lFuR#99IBTyGJ9a>K;_#Ba(?~(?c)|_j zyS*c_&53BgOx@r9p3pRfDKO-R2&@#KnpOm|o+S>dicU&hptiea&_NuW;|AlRe+k&3 zHRa@VfO6r5)(9P{ z8pzv(=>MpLpPM{yIOAwX1{FPJEK+HLJt5lnhLIvE+7Tm#`K0V27kW1TKr1f9!Kzen zK~QC3s!#E2Z2g&-a$98+y)Ur*!!TonM)i!DAhT^7oW9kHdk^FcBawJ0N>WG;Z$j7>Dz)lYqUOkf++2r#%E*}5#}MV>N0o5y9a+)^ z0g#iA#Uko2e`g8#;6KYFf>{AB7=Up6SMR&wKjStj+6q4omUoNm_q#N80rlZBJM6N) ze>~3<8kXpjgGF&}wTywhf`ixzU(;pVZ{gUT4Ql53&IzJ_4yQR$+47O63@1pC}9!L`XpdKD*(&$0?FN)k}Zgpd!Wz(R?HE2=V+0 z#YPh!#HeuUVHg}1M2Uj-rwt(jTVsgQ4%HaJ|qb!ddFFSqsN5lH%s z7M*AV`9U9a#bqBMX`$y;gl3=!o>bgBh+sqBgFBjw>YSTaqc}+I6W&pm6_8Uew-Ih|?~}?anQ^Q&6xC#3JVQ8JRYIsUS_d&0;lNH& ztb?g7{@GUvKt0Jt*0&7Gk@4CDI2(~uuAWZ{!w}=E03{mKxa!@~A^uSj zsIN+ww^*`nlU-Y}FgmG9ltvI6_zmQanIJz{_TjhWteeRR^V-g|v220A)APy!f-p-_)Nt`IXX<}I!5v{!cKH3Hiv|rhe?)jdef2A>wG(Io$ zMBY9#31WQV`uqzd*ntdFZ}G><)KFhf=w$`s_hEk)Fm1J`3+q~t>4~&%Yrc+3fk> zL$^v$SP6Qtn=wTFO@;>Sf(Sd^G`HD2sbyoChC^l8fcE>MeLY}`f2`K+;OgYUM5JHS z9A5(YHwI#F3JndcTk)l4)pCQy_gFveHP0%!N?kY;wtp z4uS3sdU7yOpL_0Oeu+Q?TIH|sPiM8m-uqf32jrY2A*NJ+62+CfuvskvR)0bTl|l3m z&3FbFmin(o?i(Lu9x3k@pw+rkf%+oSj|J%Pq&m0q6`Y?fOFXnLBq4%5vwpdMk7oIP z+izspjKBWn>3P;E1?pe!Z1$(NzmTd|-{o^Ia(DIS|Z1lw^uD(6T z732ATfIQEiZ+}-GBu!hxI%8%$u+E`$1hKxK{gF&%=HD#nIhKe8wd#tugNru+)YHoa z-Nl0OC5e3cSpv$O-oFSGlmNLe7Ea?Xi27sR{8CN^HNSGFrUAtGHyVcry}a*DpnQH= zQQ6jUL!NK_0Mxf!IB$20d)+Voc*7Kwx=HTgin-Cd3l*sbLvYDfTt9-|bQlj+310kV`2YneG>G|44!W26?*uWo4jGKsqgTmC zTp1UL@ykWKK(&~@Zy$&2;a!bY2m`Mo^0}eP0oLUM zHN}4~b1|cMMx2UOf~dclP<`gFFq8!&^(G06as-A>-9(7>T`l#;+12;gBa2UA%moi) z^YFOGAT)UsDY_l%EL{as_TaoPbp=L@iif?& zsw#_cZSHfo?R&oDwIj4k%@7Yu z=4y^yp#J?j=FcqmsO<6ZwSr=uN*R*P5?&x*Zv>4s)~ay|!z0&C!$qvb)$BsF@8bs4 z1?3Xox zcfW^jnZA{|4iIctPpARvqhifRN64GRI?BC6$oQ`caOEbPfc$2bOb#}rn~hQVn}5~S zYt9qWYZQ>5Ta?u*?$vTmyT&PPyyVPz%6V1;d4B%Fbj~W}cbJS@>T_lN;8>IU3LyW~ zdp1IL+4`dJmf==u%~!gN5w10ccmMXC@ z3a4J!euIv!y`4v=Rv(~!oUI6V9uw(LVOzVI2mu8&;+QwEK%UIq2~uS@2=MMacGKTT zG%rB$qX6==*m-$s{36@W4bndgG>-D?2WxbIJe8G|C@nT+?W5 
zo&ot6Z;p`jNd!zKzMF4!>E>2>PSX(M4;vb#JPJJan?{@Vj%O+^z3~4WY-IgyU;FQ> zN$O-ZY`u~o9|_8qSqT5<2|7(;(*&G#q-Y(pwAyHXtT!#h{L||33U%E>&x7cll@-(= zUVJr+48g61f-|@xlWE=A1P)mi=*$h&bgbcIJv^LLLCXXyr6mBrQtn}9sWk75koP6gIRTqzxTh0*cZB2B?=Vp)y@#x?R3tS!rB?-FzGkwubn_8~>AU zDN;hlJ=o&Ab4C1s}NgW>S9-5%!VnU2@J8PWFXLfAaepD3V*TlCOhU6 zlj|f`lI`vUkXM8%YGLgMMCBpI z7o2*hXNfEFbSLhbfu9kbi8Fpf5dI;TIZZ(@wpg8tcKm?7UwX->y?uIM`N|f)Kjc3+ z*;{Q;eKUQ^mgRuyhVbu@_E)19Ry*)Nt~N62(iSzzVm?CDzp7YsXp*7$NRH$rG8g|H zhK!mjBhbFSo#_{axK1~Uky+Q)EOe?gkmn+hr@a*M5=e^;OuD-7F<^6D z1spL(P<13Y)2*xFk|!A!=bwQ3?gZs0zvL2X6eQhca4-=4ANbk-O&Ve@{V#{cYcB%(cGSKo zU7y1ohntv=eNdo2!(nSd`*m1xNifx!myaex2z)tN(m(x2%n@o~`O@8% zeNAAo%OAOMf+z>1lL-wMI@}I!!FyO2I#-8=Di{e;ixDCY2vb>d zL$!)2w9_~zfqfig!C}pc1i2ZhI=l6j;Tj)*TJ{y%Bx zFXcgS?|K8MB=&ai#(YJ8bbTs`xZYU$iD|7SZQ=0+J32=JTPndoDh z`(9ekq@RC>a<_)!U^>nb zB3WjYKKW|0cjo0thn$nRcx0(As0Q<>ogkxmkiheFjf^;;=tLo^fVRQvqdW5Bj(2+z z9b&8i+Wwz3baqM#^eY7s7l*6M#Gv*)rmNbgUrJI_!REWtN}Hm# z!9)zr841;GLR;0;DCYSvUo*Ck(#v~Y{%aGMhi(ZK1v^T=8k6L|3MT$Wg8KWh)ogM- z1j_|hn%o{wQgeif4`hrXxkh|mVs%D+Fw$1}?kG7-db%xnV9Ndab6n-{JkZ{M)6lH} z(r~R`*sHw>Na+W$;~haMW%FS}=&5r;tXDkGgzqkztl=bUCt1PL&I9}cT;+#$k=F5x z8jUSWaa}}%M`NERQU7Zbn1`N-sDmext3O(mSu4q!j(z{D)}8Mho811T1vYn>0zCj* zS_@j+4>9g6wfY-fvOC&F_HQy&X7&Dyf`u|QLT6;<&~2c-|E8hm6p5fa0^#5+z%~wg z4A*}V#Mc-~-(ZHG8E4*QiGNZXS&4^Zsnfa~+~Xtp5@=o3Zv1E8ILW)yu){);3diOW zG??>Wo4`EuU2hb0-6@AH>do5h0dMQI#Uc?|Rc=-E)wlWg`N6(X?(b}&LUL!~b+M%u zgHdFH)2~8_#9;VnFy!s2#Ly5t# z|91yBDgGb4@s|V&dfjoWCEd+wqmb_f=W^>VcJ$s#S(r2x8NKJ|gMzM?}Zoj}zV~6ZJN8Cyaj;=EWOdI0VFTSBUsw0-kCl9jC4k`Bb2%kw+?`!KpkH z-UeuD7lKvIB}KSk!WaTlv^qZi$aVWV)MomxOlt8doxQ;ad=uLtcA`GeM=H*-EhE9g zJl0w#c}-LN0bO7Gf;>7p$7#B3y(_JuFY|li?@f)GHT@RwJ%>l%p9HWfX-&_Vu3MJn zS8%z)`s>}{@3=Fig&XSgVXs2-RRm1B&%6D_zDq`z9fgEMdbAZ; z>fwU91S@EQ2HU<0E+KXoG}ICV>7aV!sbXzYWHJhW2u?IYlkE%=B+;0{JeG}|u+l3+C)npDb3qYWwN{fj;YEW=_o0^X;a zpFdPf8#k@0GhpT<8R+vTg6=NTESVUJB^-SYir=VpEywf* zYl4avHlD&io-!bu+^?m+niqkC@Xp`jwkeLR4}}z83BhD3o(&ozz4@>;8^ht*%1Sr6 
zP?Ub~#8T?Pf~c)`jR&84+A^f<@~ul*oCDwQ1HJqCP?J>dR%;E8WkkSlx=Q%fWzeti zA1FLXO!IXMNxbE!R2^2lMXbwh*g6^)@uJ|@ngW=_3|>6XG0UB=%v(XKRqMRW52X)C zN5?0>BECV5xAvRBs}wl&&dhMrM!eCUZyJKh7Gq|5pBJe`6hI6Yrur&l^@*9;v!nRL zOk&b~vC<;5`wiQKX==(|!Pz#nhPXKk&XqI6dDJEE)pma>igdc*B9G6Z6S&NKsBo0@z&-6U z?dWwMs%Mcu?~x)Vsr?ree@pRj!xr}SK z;;Nf5k@2@L0b3!h!Z4k>(8@O?otTF;sB3!$4b5zz`ORnrhBLRgc?78XJ^V}?*k{qA z1x1Wlco?^1^r7v?OEr5z&>axuOu zh|U2sf`u+zcz`3n3hk>vY0OiFyfZSkSc$-hqgi6vvrbIGB>l^;pK6$A2sSdeRDdvO z6#)+0&*XGi>%j*chljfQz6YF=w5Pp80(M*9XTLe8;FVm`QY0=F;r9JQoqTYN6<7*` zHHo;64ML=-r^nV6h?sZwd>kdoa9x1C`x4uzxrg_shA@N^pZk z@3LK(;n)<4cLI?|)f6gk8t(k`xEK75R@hpJ*a{#04A0kf06oO2`E6foHkN6AFz9`$ z2i;UuvufIBwdaJM5?2Ba^}}~3feZTxFIni%dN4Q(7z!zlN(Jlhv$)6`PpwGmI?rA! zN;iLW*v-wue}`kx8D=e$EG0$1?QWvJQ=;-%|F9d@hq9|rfzY>s-VX=IaKyo0`K7b* z1S@-Le7!ms4zJF3j(700Mt76$!Zi#U%D%KqR98xZUOD@Xcy@AgFoxv!$H7QvSgR2M zQh1o}Z9~%77AU)Zj$MRHHW@R&2@U^ZTUksNFNBuoqmTV4} zTW4Y&6>-m6;7A<;mV~+IFC(__(foVwC@^%GplpBOhr)ThHmyeJ9f|xn zKIyB&dJJWP)xeQ@{R!_}A#oJ`YoE|40iNuH{NGt*p$PP_55?SgaJ|8;f7O)KBS#5G{C>8PFr?8#40)ldyNpA^w6&{+*AMjcf zCIq&psH1;_f4^ILZGv?Jri3MsLQGx`X@Otm(R$qS=|tp*^7gx&X+xV!c^c zl34|3V#u`iOWJiG%&bP(IO2xWJx}rC@p~_F1GcieN}GzO9u^X zI6k0!bt(2!I8eX)MZ+(#^Q?@|*Pp)2z1e=xjQ@%jkjHbc{nQ@z;w(I=3cB1<|E$WKdGW@GAZ-|?2eYk<$I&Zb#>2SLC8p`pGS zz8N7ga{WbgvQ|2BU))?^`@YF;XYK4o=T{xN&ZkoYY=51RVJ!T`*{VcOL#i1Mck6_3IfU^!5KycfrizbDe@xb* z%X?pyi=rD1*nZtLAZ3i;lgBQA%9b(|bVQ z0Bld9sZ;odW$8fXz*c^A-*71H?EDgtC!+{gfIX88PMLdE?^NWQK>qL*!p4P-K+$Pa z#a>U9vdV%uPNg19E~iLf`=GHaabK)#a9}2kqGri74OhYh5hi{Q9Ei#csykNm$?Gz@ ziY?@*iMj~F_zAnpMYmTKwu9@ZE=w_DrE|;*1)@{Nq3nvbq z0rsDA_?hCSGDTO$4E8$`FF8@+TxMJ#-yi8ZjvLd*MWWSMZc*nvsPWn|7|36e#!xNf zXB(H#n0Lez*L;z;9DNVS+nvRm+L${Zo@KBUBbQYDd@Egr1jy4iSsRPzaf_yT@UacJ z#wqt{(>w(7@t-=m`f&u91=Z=fn&;JTzUuWvSU61cxGH-iBb=9!Xy|OzXv>-DqY?eU z_PUgtnO7oSiof1E$A60Nx#SbYiFqLZab~3I#s-;=2iBv}2#A`{v){Tj1i6~VsFxWNpfDgh9UUhA|IUu2m8|d;3W)$<}`lZ zhhNShOqROY<7){jVsLcaxrW6m>VckE9OhV?`PAEs|AkZ}(WVgUK5 zI?od9B+|zpdW;jtZq&A>--AEX0zUfkst(t?>e0_{^{3yHE>@Ps-dqLHAXa~_9*2%P 
z_eW3MG_Q?9V@xFd?>Z7Yv&(P&K&CMkw|Ac`WwYPjtOcQX@V;GAX1?IM5T|)=&eyt z(9e6SQ2SpJh%L6r2J-kF20B)oan(V2Z(;ZXgB%o#%ol;Y^a9>X@%UE;t^`t~lO79{ zHPRY4fPC5K&5DmW*N!v?T)h&yV#|1W_Ao+zp;;3J8^kz5nVQnE%#n22c+?O64^aW1 zA?8b=lQLos;{lGZeIRKu_2;J$Hiit7ZH4lCMBU)4I4b)Y_ZB;3_#nc<&%ML4F~c^C ziZ~rFrDz|C5Wa0&&IG6j^%LR+zO77evA#^EpRUjOQTM5=A&{4>CXyIL++du?LOc;51T!8HZs*>G!vW+#Il7hX{-wpS>B%)um|59Brb1ID*n zx|7xiiRoztd5xcsjUvRi_JZigLkV-M40-}=8jqSjudJ020o%{kMsucJ=0CF}xfqL| zA8#U%pQjJxTN+tdA|H^>DU}`($Kzwv?s(yjaR2iWD&N-k+UA^TS-*W-e|ctJVRIeW zJ}OOm8h%%Og)~no#HqJs`hWW21Q}g={HmlAXWRyYBE!+_-Td|CaaGg{`>i zR}`n2@v1Cl6-BV$ z*nv}2DZ!Gvy%XO#Jly%I)VeV>uzl-ChWjku5?`RgoPkFePxM6$E-wIis^(zXzoKYZuOwF8`{b;Tq{I?Dn? zuU!&0L|7P=vgb-%&dO39$WkYqw)lL_iy{}{_@2u=D0$fE%!d@C@=q}5*^dmo2NCr9 zOWeVuc6=CLvXu4g{Hv+R@l<(nVE_G?VY->_gzb%kxX)Jmpz?)1_Ym>}rK2d(}28+5e(KAKc!*sS;JRSY$kDE?-0VmeKB{=g`%o+y5Z8;3OCZG-)&TDAS^ud z)RT}CQ+K%9P~H*094C05?`1#2V#{^qx5qv#^--A_$7P`t)ohBM^2Gu6e`LG!p82Kr zLwT#zW|}XLUr}0PM%Xyil$?KX?JfRGul`1E&D{5m*Cr=U1KV4fx-Wscu99ErY;lRn zl5`okQ}PnX3waLGm)Jl8N@F`pR>n$`ykCIQxOAR}9^|F=o2ElnQoLbp@<6^Zis#9^^&vFM z_?l!4+S-geF@rE5f9c8mL52>Q5Z2>zT|Ek04Nd3J4*~hEkyNaNs##pt8sWo=y=*M^ zUYhd<@wB`9^%6k=5M!Q}pZ|iBp37G5M!-f&9{qv2xRJHL=vU6mwD00)$&n znGx<^PPhUKnCGzc4^?eNa??;XUz_L-0k*H0D-AI%<>te3YvtXdGkSd!zuXGQd+4y; z%Hbg43*Y7{-JUJdjKS7^3*?Xa+=@LUPr$Z4o9&jr_2rJ8sZBSKpAaPHIrCo8o^>=t zB;aXa#Jwo-4?sS_h4jl)oGkOo&yJ85zh&0G#G}4IzB-90($}}{h)s}n$elGK^mlW5 zIzWCb?Sd&s_@<39hkdd24Q@VM-ckhrR!9WD6)JeybF%tH*#Xt=`Fu_LN??2O+E2ce z;6~9-49_@bsf~dYTqzbHk0vuzcci1__Eqamwh!p_I^czjPe9&nAdlp|&3oIe@{;94 zgI2@miINcRU!Ao1b6$1XTn#!Q3801Zqc#z_kAUqTGXx&dTvL!BFW6qr;t!t;Npj&R`Z z3Y&SeP0B932!_Q4dt-iJ`}K=2WP*;mcYUxn!@rWH6SpQ^atp}!6S;$KG8)V(2j&wT zZ*-QbC-$WU@?pvIYIiJ2sWp!`QCuyaAM{=C-~;lX!?Pl)Jt5FD=ko5hO{tb)R`Vj{ zw@C~5oyXl?=1vi97_Gwp^HMdhzB91B-GU7Ubgoe&DyHhRd4@!+Z(1k9@k=Kx+#ee! 
zu+==O<2e8N%d&^zD#HEyzNy5s=SJf1grzO#=Mv<;TvflD<6|YPiZf}D=k(`M|l2iln>xnsEo@)mz>KVx}KwB zW7BQ{Y+rqs{)9@BfAFFvs_y&Ny7d3OfEY2&7nv6;`v$OOyi%p#f&*yem zFE)KJ8IeMV)lYsI;ufL%EEx-IpIHO{R}IG1=_gE&rpX_=NDB^Xp8@ho@C{$D+(~tc zdqN;Hiw=tEkh>c|{u!ZPui`3IFlMSDh5to+qua_Q28t0T( z+(Jw-i?laLS)J)B#?2G8XNo_BIt~l?mZarbrC8t_<5ZgPK?wFAEBmuUSnSGS)P;*7 z&&mBGPOQ&SH_cammesJ{i12seJ~Bw|g29;_F*ite&<~rgse`k~ByyYobE57GQ12`0 zhfEMdF(f|w32~6vfs4WQTCSf$n}SRI?K(caohls8F38sc>zK-vbl$02f!J|tjzk^z zU{qGaYgYxWHjNX02)&{@J;uYPJJ6Z<>ZdX!66a-a401NjW;;GKyQQ?$2d%xFl6=dF z!L=6`bm#bO&Ur$8F=_DVyOQU{(w6a4&xuge@l(~xJ|JXkmRX)s!k^H3ibU&A=#*(L zSm2r-;lqhR-^OdM=J_<58dNOIHXaHCs*sJas*&PO5IxbWJd>avK60x!*;RNt^*D$1 zaExv4UO!8>-J1j1-~ZgNmeDerw(MSbZdFkB)!1yRm)(Bz+%!Pf=T>1$tsMm{ZwfS* zNI>dmPP=@AFHAh|Xw0%g+JY_#%#j7UUe5eU#uCsHqT)QFM>fDviJBwC%PT z%aatHGaPfV=6S z%f5ww%ZG~>oOH{@^x-I2@{mW74q9VS-NBv*XPF|7U_WNYOev;A$ffq`e~JdiznY4D zx4rc38@{FLTP|yX{EvJjWno8f+c!h>O^$G&qZj8WAI29_DH1*?es%7`)9Ul*p4aji zDGgM4_kOJB+@li+h^$9qjXOEWIW(<=$>=UfyOnTVju+OY7-kh`;rk-1%Y0)Jf+O1~ zOSDMuXHL_0KQ4xh!0nKwRn8qEHH=$L>K%ayxhzoiUow2-6qWCl`iRA9+rA?f?>b`f zPA!QFh$PrWrXS1g}h(B(mykSdtwx#N8o`64@XN;uN=CE>&=H_4{FwP z4*H{~-OWkzJuD#>!++nLrIzkZamy7a4tp#486V6s9B75qLH*3)zdHT;9iciP_sSzz zy+8N1pK_R7qq%_q|2B#bJ8Vus{cw}jYt7!dr4nD>IA!bkxn>91BC1EM7vi@+D%^%) zUVb(B#q_ASgaQ5FR*wzUd8R(-iA^R0Os@d3d9s`Z$OhUg40=DVn>>U5n3iJ(^pc7}NXv~sOW8%6$CjjJ; z$Ob@c9?1vD@0B5q`I^B98R@vvCu;VdoL(_s?hDDLL6Nf9vIrS5Dt>Pt;Jpw60v1Ck z0trFGEpdw`B6Y7fK{Q$;NQjIS0FOMx$ProI!$VQ*KjfUJQivO!e)a)8m@1~ zZ9cvBbi@+oQJL?@eKqJp)_U8LXp-Hd2VFjPPWs7B4QvjO`+(<_}{^+qm?TI(7&-Y;a{US6 z_ui6CV^k`wBpny#VfDTprmm^yVrjWc2B8%u5;q%FQMiB}1&?sptQ569eu&=avN+(Q zEQgsV5YPVHfdkf6$S!oSay4~ob%`Uoj4|3N26`@w?A(4^`ko71G@4RoL`S#ZrBEJ~Z!(^LmRkw&aBxO-~ z$i-FSc|pD-G~NAJr2U-Msp@E!Q>yf^t+dDK*-bcV;wqA>ReTF`0WvmoF@tqb*dkYH z2F+A@G?zz7Cb_!c^{$NY#FOS9tO={kCagLHD`A2;Jwp5i>UXd%6y?v;J-oQ_@M7%j z!g74PMB0jL{l-Pe`YjA~e{^WDiNwxdO=5qYbszHoLhSdqv-26Iz!CL+Wx;6Mv>^Mzz)2FD2eKvNu`aZAHo zZl8tX#(f~7A8t9vK6$e3Yztjce3+)`iXIIAcEMT9>d^emRhJ9lr+Y=6hP8craJerG 
z#oIWK`CC3&4?6tKE}xYnF}g7WR;`!%4G%qh62^G{;EYE=E>eKAwqG_2*B$b6~;*a=0MP&pkxC4fF-H z3n?w=I$i`5m)vsu^9Dk*uislQyzt$U*IF^3-kkQl#o*pVrPRaUe&7n%It_nA;Dfu5 zIGRA93MsMr!xTV^Jn1OiJ|8)CAW%8Fgjl`<+HG?v6v*$EF;++6788VU#$9M|BRX9c zCc1I#zH=7NXO)!A%iF%jWZJ`xR3R{ub)RitL*2}XlXN7w{M=)o%oZ=RJUqhr!C14p z?_@yEdiF(sEUtsk1$|}$Vz2@(*aTL7glO`4LKL2xa@N03^Fop(1SR(zf6hQ1RZKTL z+O3u#$0G2aZMS&{W%lMe4pmO@RPa^cVzg&;q|^2o2dK8HPyqs06FBIK7q049a z>Xq7MY4+8jv!;<%gI#OR^Q!AN(Ed8?(&=Jy@@!MYoFXIckV2eUFyEe^Asi!vra$z3 zi4abA(Iz)TsD;CU#Rp_bBa0g2uCmJFC?o{!17Z@zEDH7;Ab)d zBUT5f>_w5%N6=nv?Jj?Mm|bi*c6mk={z^Tj`GI%!9Zr}OQ(0H5hK0jw)Iy`8?bi#j z{n205r55@8@g?yOEZ!N&w@YGa{ROUQT7`b*lO_E2ozo{t>1mFDRc}02z0C#x9~5!C zFrt~`CgSaxC9)!$V`CbRp+>}=41C6ESt(6)us2&x&)2ihes^-YA<~lt$t5v|6wQZC zax!XMNV`)}igEN&PYJ?|9Kg{(&JuQ5b+tY5D(>=moincqU#iv`+II9rvMOTD)Cq*G zX|FWCg`rJ_--W4!bu7pc>&NAh^My~q7xLw#q4Qogu|sUBYwMQ0-%W*QSQ71g>UY>i zUmcKh;6cB@X?9f?zOx?OT&rd*275fG&Lauq>4M#DLUUp=PVQAd~+&mK-Ld zwdDi+Oc>`pVYL|7nlU-!K!A2#DRp(Z1dn;*xXlMhqr2DGHEzfTSO5d!cgW()Wq*up zQ$f35UWR`jg?p~Uo{_P@O>v`e!$W$aMpRQM8h|jd{ z7P@#r3e_N&w(4vPWZfRaJJav$A^I~XM&qIns>GCc(P^-LN2=^~`n>Zxwf|IC20jQ1 zzl64Yeq*5RIXVA&3A|+{Eh^;}bv9Yst@mDxExZHOX_AWoe50flXT(W({XNSU6o zG8MQoxbLDQ?C2Kr&C^$}Le@(#d=I8sf9uuJLM;fi6S802=Cd)95oT`aPg8YZ+6HDa z^j2QzQUp_fJRUkyAz8zzSKL0G$%;r7mcHa}z!}9{0 z41O^6%@UjCjg+HeSJ&ID63zV%W^Bk03@tvaEGx=Rd`6-H`2^OB!|;U+%zZQbAT!m6 z0ADU``WRvDb1jR@&PpvLd0fzfcorG`cy8!JupU!e1mXhc9@9V|dL$SS!Uh)h%3y=N z7!cux0!w>kp}M^o5RrxfOM7Lvy1f_>DL}z@jR0%!mHWi@VnAege;y2m0&DM)!Ke1; zL7)*@4Sj044Vt^EHcjNA+H{|IN~EPGOS+8h<$LiHhNIZg`ETlja+o{1(`IaVi9c#F zmG>weGSm0%w7KkfGdP*MX0DvZ_?n|`IJIi_<@w8Ek3Nj65_0Af@39F8 z5L(U33Cmcd9$PO9T#I>vSA4hXg7SMoAzscx(Aw&dC&&mEN zvAF+aUj)@A%d@Hb_rl}P8tGoWp4+|;B&g;tg+Q<eKkpI|>8C5L+asHVeE zbiw`+Ug73?h)+QL*U4X>=O<&-_cnn*(`$wVO7{{!5cKTQM^Ak5&o#kpo4cBO@99#w zj295-YAe=bC2m0Px19B=IKo$T#JxBNMwy>tVdeJbOV{js7{!R;=Yd4@^V_1_-Ent7 z3_J$U-osylU)}&O_+s&HeKpA!vQZIJe$mrU)sqChUbV=MS?_>Xw@jxJv=ZzWw#3cx z!v?q@k>qff`a><9h`9wrw#ETcn)_UIK>id_(uE-YY30ox5rg54i#cA{sR(o0z`7XD 
zYbUvCf0e6$ugQda4cG>W3`2qep>#%$Ksg{#YJIzKh~d$lwP`aEMxg~?s%Vy|KjDFO ze`^dN5ER(vPvxDc`&rkfKSiJ5y1Z6<$|NVV{%GJw*D?_tldIzN8D#!SpG5qtWH^XYIyRTAl3u+4; z3ei#}D7PGYkuyd^f;SM`e}+7-ao8Vr;B>xSTY08kx4lM_*z{IG(U*fZOpewdt4vYc zTNO+2xxv5A^$-7T)DxC%R!_jEQ1<&U;kxce!#NlVeoxd7Lm2(!8cx?Y7uR-Aw%o&$ z6Ba>8C{Xdh{xE)L4F5*dg{MTIp9l_p&WfJTBj5B?I$oCwE{Gl@i8)pNa`y7%4H!G| zSg3{fbs?@T#~$TG6Mswj>T_bF6*(m6l?rrIpKX6eedfQ7x&m#mXi(onpcnVZOt@}) zFhl@`SlmGrQM@dt%MJ~aSaq2hscF@9$Z|2*nVFne3BORAr6 z-hWn`kvP%5B-Z3TNxFUU_vR*r#DWCbQTZ!klR99uL!Qp2(=HG89#ruc$zy2W8z2DdB6O(Q8x$Wog{u4 z5A@<5nF-f@4~7W95Q}?Cd!ioZ&L<{1;S*{va938^$sMEA$@Ctu6)GNB7vp!v@Xw>( z1RKcUip~_g%Rrlu=%YIufUA4j`x(0C-O{ehvQFpKJa53R(h<6R%qg`4yYo1!@s@*J zpwWlBqqF7i#+@=H`mzx4GwPrI+odt)W&V_^T4#P_`$ho4S`Umv-EmZ(<@iR} zt130Qw}d?F`jkrWGvVh475}p)T>70Q@P8xf84jMW?+v@jU#;Ji4C;A(!=|$`*X>i% zgTgbC^pO<75-_7igLv7j+WJ6#kvo@49Bf$lpY`6#E7v6vIX-1s_8j&j>Zh^))2RQ} zQx6j^6v4}T3-Iym``Ht9R8MB8cwle8HHLp4^(I&=J8`>2LqMQEW4xDRv0mpyu@1Ur zyAO?f+}1pM!Hv5x3vJKF{FiKHS|6V9;}@E#dgJ@zalE;0^)Y4Yao1b5gFm9qyZxU= z9dzC{(P8w!{yUPHaJ_+EMk2$IU_fYZ)Xfb)XID6(&ChOt$Vb0q3fba!0~;Jh!2|35 z))@Ycs9)iJCyKdsFx_+7kD8{-g4>SC(+<1jJqc;8NPMg7bS_N7_(*0hc^paagY#;k zD;6MQADNeVE9>!sy1BL?{x>LpM*Z`D8g&etXPc^656&TYad&|U*Jn4hs|?#!-d)@i zb=VOLvoRG=5*FGydZjyqp%gNGpMZ@}@xZ#jHHLp9>JU!K9^=_SeTTb28|i9yl#E^s z_OHjeR+eO*b;E|-XoNXNmVkt=B|Q;T5;cj|e>WLtA8l7r!M;4@z-@9(WCflb{gutF z?nE7pRQNxchyShZ2u!#L`@Vr5|1QIjU_fY3)Q^UURa~oib)tXaaJl?IUNELpTC0P~)iCAr( z!WeXVJE3e{U6{`MzM>G!c;+)3Ryo;>E%_@I=Em64R(X|2qDCk?eMT3dvIPqQ$%8k|%<6=0IL?YFNRQ5R(&)2T*J$02u%b z-X+TD$(m&Z?g9~tCTdSd5P-b}gl+75X_VV#43M<>E$A;dps4&u_Jt^CT=L5jU3+)t z36z%{WGSrgauJ1oC<-10jmkX4_}Lqr$9|wb*YTdrX_k%kN!caJGAdvP-RV!`$OA_t zk3K#7?50P)?WU?pHjqamqg-x>AKc9_ypRS-x1kHfgVV3pGA54;oKkxkkjoON{pr|j z0(Ym9CG6;!?68ts$t^NWlIPEHzF;s}9#$W3L(|aQ$d%@YGU|gs2kbTuSd)J{wlPhz zksjO7$^+>C=$OX3N%D}h^bV47{(#`e^w5t2asic3#E>`p3q?c0BgS-U`04GdFmrcH zgS#Woj$8$RLM(PlKVM(*0(!9@2aH#nKeW0 zOKm_Ni3~-8{Q>l6=L9PMiC?fznt~Bgx+@SC)>7jXmNavIy*vDCpTD6W`p-K5Gyh;6 
zy&xBlO*~&nGH`x@H?e=~aKsZ}|J^cuGN$4xq|;)Ry8elaGqiUbP-UK|jxV5kWU>Je zL=`_^-4520faeHvQ9$;GjP^r9E*Kz(|3Xo0K?Ffyf7Sf@eV<{dfpKY@R#{n^jV8b~ zO_Wvg3y?=5Ly=&A0R36~QTbQ+C7Z93r6HIZKX>7*nAUpJij~hF7=yCv-_Q?5sdV28 zf99X0b0=gcJ&3s83WTm}LBPOT>_$sl|2ykH50Gf@PFEe@m|8J@)D`x7o?pei7v{uFop1POAx7VIOHP43|)SE zc-1&(k$}^$j>%nA5+;z8{4}c%ECD$)mt7xm*g-BTH-DOW?(pZwc^^IuToHx$Vz+MN z12*|;^4E7=L6UFq0-FX8v|4d7@?c@4YFf!s1Rf){Ck{+whweBjBwmHrPxi}okK%04 zcUK-fr4H(dJhn1}=XZrvq$^Oe6{eDrdHhTleYkt)y_69y6$QibILyR$g4|6bnLAc* z=HbTzgl_}Fga0itLsw-)nwSM*I;f4kwmMM>Zj70=MtF#|>Ykl%;(x#;)Z!AddBMz} z_sLahr0)6&*8LQbNr?FeVhu3m=ZARWtxgC!Wx0{PvtP~!eSI|8py4{Y;VV9kf-eW+-#NL4qMR^`x^pIHNtM<(wa zI;lZaAE)i~LhA2(L0s%}xMLD-3+54?O;r0_<%1v$ z$pQ-aYM=_KmS^wzkQwt8gAh;ggew1v{fl0?lo8GjBgW`7O0+tM-FNHlt?-*nYm7HzQf0IVTJ31?-2H` z{B~ss&xy&yn&^ca@4C zl=AJajM@B>T$^U2W$SjKX2zRyCCPI>RcYJ7%KbNH@Xr6lc_04Cq zS&y%0(u{ww#_od>pMP(~f_vf^%#v*Rm_W0J8t$?hlj&RxS+%M0(^P6%4aEj^;IH>Jq&MWvAO%Y#>F z!IUE;HZfVT#X>z;5bop4Ib=yjG$QQ3==U2;Xf@|UvtHtsME{PA$9h@u8G{abM@7{- zui!?1uAh(JidEUZ?Lnw=TM$H|t7AsX`SAd!!;6+oe5XWCZu>XBBpR1xiVty?Q|T0= ze1Bj+1E|%)_M_qN2mG^(GwSyPP&&XVM+@uz4A%**LJmJ6*Jf~oZ{Wn4ZLzHriyW-B z@`eSd{IlQqK-6mC`|6@81Uo{0HuK?)U`Xrx_nQAiv||(<}3lgxUV* zcYMfWo)@wF`wt2g`~Tt{|GS%j7X0ZQKNhk9Y>rH(2ZDd|j^7M>@<5fok>oDR;qlq# zuy#|1WiUsPt>7p7!u57HGptoGObv5H$L8`#H{H{g+F=>$FYWWUH<&a<>7rXRj&cY# z$-u3!S+~O4x#VX@#2k|B6C}ar&x?%8I$8wb;Ni&cAMnLWrT6P48!Fwoswc$axtb{8 zM0I0Oo}Ho5-s({#k{-X?d3_^LwZ%gGRu1f{60G%0`6XLT?8>5CF$jBY>bgQV$?E)7 z>;h+T#|dY6kMJA={4K{|2|%7>pe;BDIZ=h8$T85y8vF#PfsRBX|CM7*&lWq5o~p`I zo1Vl7Of-fxj>T!WegYJ*?0ZlZwiF(`EGEJE6 zQ|N9=a>8|g%`qas726!KZaZpG@h)dRzh2jG^E8aiq>1Hok^Dj*h3-4mgbpvf z8B;mw2w`0!{@$ylT$*2Z35BfRpxB9BXi`k-du&FH)if!BL(66PSKet#gA&o8=^T_E zzf4ZSqBZ_=@TsF6b1<_On-}gsyIBjOZJs`m-$8Kt`terj#Kj~GXLTF>Nq-nQksvEq z?uw%IsWZ5)h836OX7njrESg9P!(O@9D1f%$=7g>Xdq3K-=kKQvV7He$_9Sc%QrFtw zuYG^4#e{#vpUv&|qmvusc8y8=_9gCl7l~oUuY%*_o^i5FTGcKrRyf;UPMRyOn>rFT zM@XNq@M0U~8Tcb2BdGF&6BlAR7n>?ByI-tKH}#N8dN+S7N@$zb78{Ju9(Byi_8;aC 
z^HG9S8cL$B>uEMZk}K*gyc7}9r=TXF7Y4e{{Nve@C|>D6`D_EON^4zmR-9_#m-*cB z@MM50|CGvY+-tWN(+#Q@F}JGMQQcpEcnX<7t%k6HaCa5>{S&@9;~#ub-#<}0VE^8+ z;M4DD3ma&&2X@YE3?74+YDyh?PEqMMEXH_2_a812KEw*=ga5vqUtS#IN-Gk@^hhuu zgbpkqk)cR100cGw6p)AzdL$qKLY(OY5Pym3IP&c^e-)5^ACfCVio-!n8G*qW!?3%* z;3-eOJCacV!!G^>4`0}~IebmToadK3oG?*K*_Yq1Eu8b)v~{_>Z=HR{_p5fhe+O}Q zJyiVe`umin|Sxc*c zZD8G0@z++A9HnL6$P;^t(%NP7831{Jj5Pj#YTr&4VnFdG)rV{8VHTUkwpvUlo67ns z)nNI9aF0`$Yb!aML}A*Od~dkFsT5diA*=@z^Ej^B5fM@Bo@PlO>PcFRxo0my@`|?==d1Uznkl!nVSN38cumvVniiL-uxEZ+xd~@dC z{eVq^xJCfrxbm-N zf&evtip2w}9xoyuHmtbE9^!d)xPCs3BIhT&SNew+R&+uJyx_fBe4UuNDPes21WkB$rpV@=je$rXg6e(}vMK<+2Pr6hI!WVbDl&9Yc@%WPYt)PTHl~ z79HQ%h+wXt`hHs)L6yHztly#t4C;QXZ(};J-X^X$&RoqVFQ}1VT_|(X?v>6(It8vA z(&8XKL0NmK47#GR+h-%9;(7yko_Ug7SnJTOeJvrhS^zUfPc_Lz)p&LZq%JD*xEsQ;CX-3j!d9wj$hvecf58{Ijo()M_#N@$m7(lZk)u`4`5&s+{-%zs~G) zab5mooLADKc2lN%S>^r1mnZIIl21$6qVmta)>EtPd)I-F|M{k4qL z#%Yk)>7r;lm~hwuJfYKT_Bx-1aML{{pY_e&L*<`+^;4_G?Z?B%e_}r#C5L+aC|CFQ z?}L34K0o|&1M=Lv@B0L?X*bddV(35If2w0_@nq7b;p;@uFF)|TdLMpCej`u~mmWoX z10Zi!V7_HsvKe$Rzkw;cf^FIvEq0$IRL0A&k!B9q9*O)%_We)ae~QXKe`EhCD*t?& zZls)jx8RiKcMtR00;Q~{wyWm~UO8S32mkpM^0=U&I}jMNgOx)@>3Ir}eqJ0{+5?wD zD^4vyi=~s*a^`I0tJ~Y~C%A|Ww42~>$28ihkzX9sXiQUQj;VJO?c0!*LEPCA}b$>P_6Ainu8ash>;8h2>rFT z6Kv|7ujg>E|J8z+pw+f>YPWw;p(D!^Qk-@{Z5v^tVVINrhg-Ti_I{dzvNzpNa#Z?;cC6W5@sY4xH>CQf_%R7q~zqL>6;U!!Qangh(f#*g}+GnbtpQQ?pQ7p;_ zr?srbyvSeFgy~ir-f-~27rPggKY3qgE*1TWc$ppdc}M{&GNRfmov}DpWbO-+&1|#_iqGmBC{-gk4=|SHze(IS?2HHc$=qi^>sbiF`4cC+ZKb{Y6jdOQgFra&3E3ZJ1%l`jgj`!%j;%6XzB9w-bDMc z`cks^z1EzZIJ-=1&Jzz-XD3AiI=#hE7G7o=u!8*8xJ z0uD+^F?MBhvnF*fv%{D>3|1sR)p} z@TYn7KSg8`7!@yutkq8p)wKT4ntw-vd|HJ5NxN$TlV+7hR(Hj#o78+IH%@x$-2df1 zuWx$6b$_D&Z`6eV+n`EXQ1L)r-()nC=a-dG?en1D+YW#bDnkLKJ>u>dpt7<5(Q1i? 
zX+Kj++HJEpP=6Mk;>=N?(gUjv;fqEm7o0**2(#)J%oxAHRRZ!w3G*X!GUaJX;AI$H zt01;?PSAZIk4itV?swS`2?l@=LIl8nSA>WGYigyiGM>D|Ds$A~(~{p@oY++MN)M3V zD?<$SVhG6BE7*?@vhh2(S{s?$I+#1K8`|0ZsQKFh!cBHEz=^8hbxBeN!xU+R>H|vu z@(7gkSSWf*zoeLi>P08a{d7R4Qh!B5%yMrq8n-9A04pH#5s=?4o2ca+*^T{fc_#tU zYD~&}p`LibD54|dUxscOe(qCVRJZr=<(`pw`kPh*;oK09dC#VO*udC@=Wnd8W%ZTnRCZC)x- zZ5?P()9ra$!Q<4%G5BhBWwwS7^P!L0f&32{{m9dhL!fkKIIy!KP}hARq3XbcD-v7j zjn7sy-%!ol@}68YXtqoqiW2F4{~TsZPA8ovFSGm>?~K}ucQIo{!7sDTRNpIy(%ExX zUSqL`E3#gle53GJl&L7?+|`v-hf}w;Bnt1)o;T^*F-Qf7bulPO@;eTV_4Tz!NQ^+pEX2+Zc5gOrF&oi!_kjS|u**b@F zi4TwHEw_u(y=u?garzm17~4vej8Sm7Y=?_xEeG9#kNca2*1`FfH5Vi8OoBvv5%}y5 z=>~-8+glNkp17FaAk2trd;1&hqBYke=$E|;+*)h{(QbOCH$y>{;B?&y@GldvuTg@p z0||cp-sdB>5BHTqxDq0%OOFHtLKEl{x(IhGRBMq%%{N`6T%(&#_8d^u91XLhwb82u zD%>k$R{*gveRVJX)={AbA9F5gtJQmbi26 z848Urq&$+N>AERaP3xpdt9e4vDo*zG*o@f6<|A0J0T}JEj>4Ix$JVV$n)Llx(~p*& z986cf+~|fjR%6knTwI1Hz>6CgbKVgSn*k&B7Cln*FAzlr zygLS{GT;*fdfuc*3kHi6Ra(KW3O=+qg!KRF>g)GE&45wq2ln+{HbjB}p#QrvU}WR} z|9S?DF&+s81Pmym54k;l0h1__0fNYU1mt(i@C^9&ZtQoqus4ko_*nK89lnThE=_Nnd;wTw#8O2K=2zNE3#|EFsC} zv!)qb+YyghmY8*4*D1mD_|I?%yszDSslfkWA}ZR`e$Dzl_ah^IJ=(XF0X*AZ_u|0~ zM*Eg;etoAiSw{gsIZ%`cw@UmzsCHIx<*$2;Bw<$O=pBDMLeD!r7c=dOShko&%)D!BVF{B3l1^%_s z`PIpkj?Gs$80t$k=8Dnk3!tJ*r%9M&;y!mw=JsjlT(-C2f?iM?Z2G@A5e7 zW5-Mv_7$&Ig1MBH{!wH2N(kQu8V}*Ol@JUHail9Df#4K%ha^6UN=W1L?m&b!-I9nL z@yH9KAM5i@YWAD9bI|x_pvnM8{u%}HZ!i=&4$nvi+VWxxP04@7{~nHNz6m^nLeJm7 zRDX9PP(P{{&H;6)C@0>GEvNtx1Awu!=-7q&iyW6}+W;it~a4YcPzN9BBJ{h+=W6l^iJ zu*~a~o4QY?dmF6q@y+CnB43q${omq#Z>aa*=mkPt#rg9T5;F3`gGdF46qlLbDpKk% z5dJUkx~qC(rB0w_tK->XT!LttC(~{?em+D|bJr5GuCy^!`B&Ai-+85jNlTKMr_p(? 
z-R@WRH`#FxrxP&?6SWb1c0Q=u7BOI4|X%L7B z8Tb1l`vIQU(^*FbypaZ=qcuis91tJpr39-g`E>h0M8*SPcR1VKCVyicls zk##2xwH^U>zDNG{>jcd(zhlxYAJ(R^Jf#@@f>Z)-J-TRscQ#yg6a1k`PBm1+9!7qK zUXC@>J>c0ntXAu^!7V5bK_UOBqa9lMfYKAjv+xHkw8agyIq#00=Ku^ZHKebQ{O?<1 z-y;AK`1QUz9A&{fOu^QBV)Y$3N;7nO*gs*Pt^eSI6Q_wD{;K^~96qUiWRQxa0B7^sTYYep4f2l=!jQXZMM00$s$GYOIaJzljP-R8P`uz*r^VS%K z_PurM+B#3|O0&m}`q8Jhk0D%nv1!Ur$H5gPc z`u%w#iu8Rkpe?@`Bf2nmeYBGa6j*haETruyOVE(E3;j2X`=3%s`qx&@rtz>oH|!d+ z&@g88XpJeSdEbYjokLbdnPkLwnRiy&(mEAmgzHl@ZwFSBy}l|XQ6q1KzS|`1D(JeB zq|9{rCw`ZCwb%9Wcy|_FC$0w2k5t;w_=`99{>G3;P`&>|DFLc*Y4&Nge`T0wKa2KY z)vL+%Y@w*M1J|(_or>6}|M2~&#Y^v}!&#$+oj{TST5s?8D`!9YxB#v@Vs@2AfCKyV z1L6J0u+M((Zw(JHj>sgEvD8abo!2?8(8J?Vnn0 z-)RB(`vE8SZI6mX{eA!jDivg*#HShpO?#p~F2~L#G&p4xhA$p5vAuk#KYFq}-~AdY z|Lm)uS}lG*9zOn4`|&6_)Z<4-r6cD~iLUQCacEy|-NRc87yMgksct-3&zZ4ejJbHT zRc;WKfA-Z+t(LGK4}w;nTH<~@eEj_T@hCaejkTxhR0 zTVxvU*aPOw!ro6S3C54R=iO2HXJ50a)spw);p2zzQ27TR)Z<4P|GT%D%4S4QDm3&_ zLapq}@43CX>(2wbSN2kC?C_urs0EdO_O+f`EoDC*K7R52c$6IK@$b9m@2mbpbR}Ng zb=L^jGx67stxVlJk7LGo0yM#-ZuWSXv39x)m4EiNky>rv6;Js15%yi~s|y*4di*Hk zk4)`PjP=R)1vDZQ3ux%wpLAblH2=p7XasS_81)3FP`uIfb9l(5|8N2AyBmRaAzOd~ z@&H*?rNA%i8GJDp#fKiq|L+3YFBZ^5D zTNT)=FbW&vz@%u|7l*BgyoO##edKaBz9^a&qCWNv3+?H1I~}-}cV3(^-I67YST5fT@HMF@LihN^SUH78~D|u zd;_iI+pp%LJ-7{p&-ob`Kn=vyc%USPApG+NAn2B!3OD@A5DYrxt>NvkduHcr8A3rC z;fs_Zg^)#Psu-fd+k*@YKmrD!4_|il8=#-YHw1~oJK!}Kp@tgU&$#dt8-k?HLyd1{ zwCTY`J-GPwr~BYy09+i4KfwSOBhKsX*tBDx%6Iz=K{|$@XIgq7y&c%>2W)2u*45H8 z5K}b-c|=ASLTGjh&`7k14|;~6Fhj8Ck4DKq8kHNMLr z&W<7@$|$ljDx(M~Bs*n9R`!;NWac4+B1KlRS9VcU_NWlDH~k)->eW-8o{!Y){rP^r zzwi6`2j`x1@44sB$GzvCdu~WDLP9+_uDc#ahr0H6j2@iIO%G#PUE5uCy$HHY?ij(! 
zdT@SSmSFiAhGG z2Y1oXgWu4DA5zx_qw2wLdb)dI=)q5`YlEDBaCiGhqEwJe_-2VJzDopr&tUbwfyq@v z>$A*cO!4|%M}{ceY4noJ4TD_@Lt^bKmr5nE9MuaV0dIO0fcg- zR+`oO^5CfvN)iRxiAOwgweHNXPRk_LpA7bUg4uHY2_P>!Cl%_3ABAJ$1n|(1Jii+l zFrwr%HFqhHTE80C=o2`Tn9Z9Y{Jy!n=%Vy~>FT*Qs@BgiG6+rNd@DOmvx$kzFdZ>J z>p3jXF0(5|`M&>H?-fJ>z)MrHPPV0TO&8kMds2@T4^I>A>ck=KxtKj$K_T$r9jHD; zya(03zR-Z*Z9!?NI+@z4?LIzvL{ zv#KPS{Ylt{W4mtIRn+reCIrO+%?j7u%x>d$?Hx0=)yUrHri8O&GGz}sTTWpbpjI!D z=O+ulBL4Iiq#^{i;ajC1W_Tr`n3%g#WFs+rsV?zx`f~JP=Fe8fa5tMe0Mo_DV>hGG zJq>jqt1Q>6br)h8W*&p`8qAMdC!VkRiJlWEqO`jyZK(la|W7$b12CIoX9O@{S1cw z%@$`czjegZYmEF-$#wx%8wj0Tiho^>PRq0UgUAFvoLn%))nlIJTmJQOWuN8q20 zoPIOB@r)(T_G9;(Rq-3V&Zp~BoC>?ovx83nBnbWu?6Vf;Q2UfVrLA(TbkI#;vz0sC8X=` z2d#w5UR_r;C3w$@1Lih?0Z!(szFjtt4f077x&C zJ4i|5&v!1dKX&XE9${0)?RqO)fI;4gZ)w5(LQ{eHt8<-Hue!|{T*}QN%nzRO5>7Qc z?*&Q)ZtqCEaF$upVD5S;VS_i?_6ysT(AwOJK1iUmo4Sq3mD`6qkl=LJ6U6npN+5!G zPP?h};IXKk)Hh#bWmTUbc3Sidirt7 zBvn5#dMY?}r6tLg{2%iz1&=);C7?~_|NZ7$CWC4hzrA7_c+^8;kVRw=A#6gyxcefk z!2o|H<~$Hoh}`(QRulHV8_9olb>?zs(0hsyK|`MnQv`C{H5h!i*AE$osxu(dQ4I}b z`eqnvmiv2~OborE;m!H}z=$S}mmNm^QTq`E1_OSAFxYnM5H#PKL{5(a-Y_7c>KJOk zqTv6hD9NhKyZYMC=^Ac2eLb0{5{A*mjAa~kXgJbM-@HPeXY}HZGj$%P-6mVG`3rcD z?`EU==%q`~`FW4BCNIq-fc^Fb)He_@MRUwv%}F{yAwisIX)k~E?0zTDihWH>CZhS? 
zscM=|g{A;osadivJEu`_!Boyofh4`m`?HVlnD!qOJ3PI}=}`i1eb!coREWhwQEu1= zCRL8rtFCbEX~+8cXL8dDc1H|c6XoLGeMVmoc&8BaTJO2T&hENjFOIO0Ctne`sjkJy z5K3b|d3PXx_?b3cYY|}kAlA=rh4M~i5ce^v-Mn&1IFU8aU$N_4{NSf`@I5QIiumPL z_U~OfF*=%jK%=@NJqN@ zX;oEKbMLB~^09BD2Va-H+9%+!X9aKoumgg%?Z_Ac0wrIo(epdsTG%VL(3Jwm;!3J2 z31=@}J3<04Ij{Tr2eoXiF(FPabOcrkDy4qUQl{r$4%QNQQ;71n^n9O%9a-K|=?xGq zm4tOkM3$DfTLTe=R{2zt?@kk6-fys~0LTgYxN6#DOtaxqp5?fnxHNB)X0>1Y0i3q7 zPj-;4(FdGZt}b9)cDuj+O}(rehRTtPYd(5|>mLqX{;8N>0lz8b5v(SLU(vsW0pxHS znx08W znF!#z{ViqBRFHW>$;5-14#6-<<~&tEb*auED}6_NQ8q_7t*JWih4x&b&}^;ydS%TP zoO&+PU@qqKrmlz@$FxdAZQ|&uf%RtX_CpmwVD|X9X*SlbII3W|@wXD$rxevDIqo(d ztM>|Hsf%;@2Atk8!SgmkMN1UpBIA(I>?IoiZgDjyjh(C2BPzKTirip+2<5F2Vp#r$ z^0o?SJG`$@&NIb+^6bf{v;HjD!Iv|xbriWht7=%!&<*3b`NOaapu-A1!l|sX(#K zLACBcZj^sueDbPFW*+la@Pq)-C=D+23(pGG;+`>;x-yPFG}o$e5^%ri@+u}-djW&) z3K4K1x0uOgIdB!mlecGnd}-LSSo4OY(B9dKTWQTi^)J@;wP2%sci2@rDg`*vU7@0hFSNz3>95shLTJp4WruW9O0RJNZb<0G$AI?qkzc=FB6*KCuTOK5r!_E}7u3taxJJw|{~Z+l>$8vUJ-x5e0S~#pWCTDA0e+&lbvGjO7ixGQP+)CEyl6yx>Y1>qA=*5opk?R23Z^ zf(C6?imrTHF#q*%;n!$T^k3tn38S}xgMsxYcYj0uY}caH+Xx4;LZZUZ9>GPh(oK7(oMsU#|Q{m|MTw3 z;1bA{vKfZ!BIBTT6nK|!9&l&!4x`%Jz4sKc>Htp%)dF(*jWDFrVJ-TGEy*9;v@6+| z=cW*(iU6a&9JOBXCo9*VAPk1e^&B$&R~S?gNd1cYCA?wLf@)EM93N`Of*ki>fH%kn z#c%tv;bQ%@yEag@fKIMo+ZRg4%!orty2RhkuMj_-n&NuvC+wrNL9zC?&MKhzqbQ3W z3LFw@V-sy}jIObPyCN}zy^p06SXdU6o+qI?{iwoRRq~SZnnoMV4;w&3TqCg}(@~ZG zFP&9jBJ+oG*A|6Q9AabF8K+~ZL+wqy9AK^7z>J*!G5LGH4`t(B#nZZJq+A|Oe$SH| zkm;LY_yB`f6k5~WU;E%@9T@R2!r1v`K zmdpgFYVUBVZnzkWA})Z8Lp8@B(|?7xtH%HL=a63|`WZ&-Mde4S$DI;Cbf}3@vR!s_ zf7>}@*oYfSR>S}0y~Z%ztq%Ziwm(mW-h44GtbFE)Jw|-`BR&&)_ay9VPR*HG)%j~H z5OA~3fLNE}bCjSw<3r(02G#R1I)k@!bJ8479F-Mav%C+ z&na_+^zO1}lG9z>bR4{d0WR}~1?%NU3-N*Ib-HBc6Xw$^-Q~{)n__uJZsGbx_VkC| ztD}E#h0}JDKtZ8bf2#@ld--so%88RarOP)Lq_yw9*D9HjnQ0iQO8!0pg5-&@%Z`n? 
zvYA6r_R(lFpi_-QM+fbn<F6ADz6EM+h$A zVwQ;Ib7hCnLe&pi8R=93bab%%3h3yNH0bhgSjeH`r_e1Ud9VwkjJ}Sm{UfW|z_%09 zdFKhncGcq?BcmtOFoUWeR6PH|(W%D&1ET=I@}EF~{(Tg5`Oz+L)-tj>aD&jhO3%EX z?zlsyV`QR2sI{TOK-kQQqLOR7VsD>^svoq%(y5}IcEIv0qfHM*gf2f86pKoqd3IgN zON5GdvnBf+W|R6pTKcJ`e!8X}T=dZX%IG(!`a#Q|PBj4?9V|bTX9#r3VX%Lp{JIxs zzqOa(y|Co+7!gSsB3_l|I?dAjEXc)SFZ>$4YA96wpcRl#H4z;hEWbKBIwTFc{E+h> z`aBrMK$ZL2Q!;GpW|^2OH&L2pf@a-+^^^>Tp2W(_dihi6qyfz_H~5`JrFQsLfBS0~ zfO8MH;_gRdUdFc3`hAaLE1r;52N#~Ysp3zmvwC75$Ac#h zuT+G@geel?l`qVxd>8T>dQz{V%O9j|*wUbg%Y*`ljM7C8psF8a`bHRo$}EWlYBX+3 zzW?p!q)_!!FPtc)t~fAbh<~UtI)JEZzUt)$W0H;ahX_H{&-#HkV@KSvb7HLON>&0U z7vi5z)hC>+z9)6oQe{$glw*-v7I1cUbo2@7zomBhP4&pRCCs-GU$gv^+1bs=$y^wm zb3x_F`kQ4P-)*J;Q*V|DrZ>!IRNg{S(0@?iprrpNfEs|R6eiTk{=w@01Azckqanuy zZM?P29C$bE^Ec8Fp>4_ce|x9?54~Ad2@LdWHTKX3=3zhbN${FjuuK=Ft#;mR;Qfkp zQfJ^GHw$1guOw0vK1-;oI_Nq#aC>6DERJIK+1}UJk~*j`E1V94ZLz`E>A(Ht59Jr# zPMeHY#~0rZztxltUPid&twuc@J7_#7@D7jRX=t~7rANB+lv4?zwGnVs-9O~Tq-au} zfLe-UErTq)W2zz+@f1^(anYL9jQvMdkg0zRsQk5B--Z~9LtEO4o; zs#~S6Wgs*JIoO}$tQt?(0DdrB`RmzQMrw!E{eyk>_g;8kbS$w37tkuW-`3Z=Sdn^# zd+$sN%}jpOd*7>XvuDIB0ZJG_W(O#*#k@H=Lf+KiFjEU@`4_+);(>$^Go1USHl~ojTyPFI2Ro=GGtc2BSrV8dX!Jq$)1(td*KyY_;S`evmu)MY>a%#C z(DJwsd!i#y`s2lf`vtStg39-#5NTX~KcXJp;b80-I`@KaCcd+rar_0=J3#gRMTX*s z99kdGuntHS$+JA!S+>K=i^~1zsS{#n7e1^V0fTyw^{1{N^&pi%O}RR^ZnvGLQB`zL z=y>Fe(LE`2TyR_N8Q8fqQd%D{>C( zcHjf^dp4iBZb*a0Hcq0y^ zU2AUhI>LIY(UBVCfygobs14T%mTs>R-Ql-w>fsu`m-kq?+05UAn~jSI zTG^aUy*(8~-Y=YGVmsHhpa)3q4`Qc%&cZPf^ITq>I*EBnR|oTWaZzyywZl%Hz}3`5$0B{6ywGA#j(32}YpF*Wb>u#+ z4gA5TM|n=j4zayR{4i|jUS52nwNPvAm2_~B;v!0!6CQ@@@dgxHMO4pPY|$n{2W!$R zyMXVL%KJ=W;nENe?cNXXZra)Z*fFf&DK7VGLD9XQ?-l_?P+0!678->nEZM2d zD&hisvAeZsdzB1I&N1U8Dy;c;0jF)oHF-VO)Sz3u;qvRtXTPlm#e1r^Z)woz16VPL zG2W2G+#Y-~Bb|2QVoSN=lEZ~3iM(Atlwx<^5#74~amffE=qAN+OKy50~ocQKbaW8IP0WRsqXf9g3;9MelEE0mc!<%?&T(4z;;$_VO&;;(_);rcHS2gD;0Sy5Jd7EP|>0^In;iS&3aTi+*Bw- znr%#S#+|xvxnK@pSHnh(jo;{MWp8d0$K2a7`k{8pu~r-B)wCBsmx9J2 z3#+6%+Sm#Q2>=bZ6mIVuH3RoV#mC=A2sH35IWboiva3?&{+7nuR%4NBP$vWAk%ltU8v 
zT3%7(Ul{I}c_61p0YmMs*2i{FDD&rG;x|1Lx=x-%x+T=g(J>b*eKDy|PqDc=l1tqI z?6avn4e6_&J`vqxEYI^cjY$dTu}}t~=J}H*buB)R))w0cb6jJ@hw-<}ai1#(GpXGV zxqpj$_mvif;v}1|ANRvf$`#!%&!{v}4j2WruUvDrY!-bz?)vzCf$OmEWJsfh!12%z zX?~3X0?MY3z~S_D0Y1Wk7{8d+x3;i0g?pxjE3X;xqx&(TBZ4$2J-sga^cYMKbO1da z_8ZogT+0wVHj$yUZ&37247Wz0x3sVjz{?}i}nr{+4*njPKI9^hj0`%8yV_7s(XIK4I}Gf!RjExR^P0kZ!7ZV@4UVs*8fua6MtV91@f z2ejY6@#|jjDl&QkLZx0@wc^OhbI23-T?|ec;GjH-hTA4(1l^V5hW|>hSxhN*!@WLiz zZ-lequFKfzO@7Y3b;&cWK1*RK%d|{9j?{AC-C6k41^8okLSTJC9%Hn{_4wa$Q5cU@{1F&-Z0%m7kSZp2daLC5gN@a%RtGE^A$UGV)re3d|66`?K!3 z0(AR>O6mHK+n*{R&XD+kW5*}420KYXs*e&d>VCBo)Byjdm;BXn6a5q^z-2Ri*A#ok z*Q2M4YXz8B7ya+#bUX)oD#VgL&qpnEuGv9@MrnvEZ|>eCf4XB|H%hAl?BCMDn%r_< zld;z?N#P#T<(lAWHm%(gl+#iDMS<523(%<-rAgFc8s!yY3r}`cEZ2C{?!@&{Et_!m z4PI*Tkri|eijvj?W&eef#?26eA$-S)b)e--Wv$4OsveQz$-KH1nH9fprKd9f(mrgG zV*bYB#~XKEu`s0Em=R8+PFnTGJ29uF5+~YaWS5o?Ub02^gPMh47<-Oo<2i~h>hqH1 zG~MY`#AbgoDm2rK^Li3^fcY9j;69Z71iIW|bgFAA_bq>*eT3$g#NQ!6+ebj{yC8DG z6a&91!V7T0UEg&s586=cT!?+XlGseB=3`WZjawu8X{AYw!Isx8xhuO?*rHvC^sO%mQ*GNnt4F8yc zx#*Bef={en;>5jXL6))CH_C&q4t}|E0N{-Xj9XOdEwp*aEKus4$0_{zJ}cQSpH^Ny zyi-wk%+|Wzn!XY)jL>P+nQ!qP`@p{bCS-v(!?>&nfiyfWd`&(xd*8P6cm@;2uEJO; zE;6}Nn{$+nt4cb@6BYmhOY#LSZH;?a_oNs(A0E-J#*X5s;yc$;&ofBtS){lhi!I8>;Snh4m3nHcci$J z&MX=aH4wfp-Dy-Er}f;um)X{FQssBLNe|TgN#${tM#HepP5yjgM{w_XT$UkBk95DZ zU=j;F&KNgrrN5kCrzp%ZCzr%x52h9f7@NOU!V|#??@Om(w|^bLb5)$~64mC`K*m58 z5o5afd|UHV4qjl-^I+V1uTu8YKF&KCGo~$6r}tjp$K4Dy|03tM38qthunC8R0O!|p zNWtxc(DUnGF5%!6pitm8WBqt^ZJY6%r|iG&`fF#84(!WZ;@cmm>>Y<`;g4RcUMjja zLvqq4^n7D~N9v=+p8A|UOLB>|b54NSndoa4toeF}0cHjK>9-*#gW`7HrrA}<_)$kV zs+4f;g9WbG+=|vW8PFRsiP3|HZr;#wefIj@YBE)Q;v;q0Y73z$2n8B{s*C?vdzcmTOoSbP)+?U{^s$8N_-#kC&%K| z(iLOO6ly-pp9N%SPc|_!ub^4?>o<3KA+ry>%D}{{k{a#miAam!d9Up*tt3ZYEtYiu^%Xak&I$hmXObb5Ayl>{zd8)}}i7eMAq`{6-(gxOjOd{=CIl(^U%B z)s}WVuiYVI>5OWevR0Oy&4IP5N!DamE$^&E>_oNPC{>PK^tik4%O1B`3U9ZgMmT8K zUobNB>1L6PQmmnG19N(B4TRE9->g!Yen!YG&GYEl6-$ z;eQ(kU4CpRUaO>q`ipc6v|iVyF4HK@b&SuD!oC~Uc(DJ8KMSDRM zEWa_@^iV|T@Znd@q6}|ww 
zrrfvESc-|5ansLxrPf$&^}|-?rdU9%h^T`q`3*hOQ(o86Ja@t>aZaWsr%ZFN+hYju zT8JIgy8x)CjghSMkg~K>JgXoKirr!Mh%JY@AH&eW+Xe*WCoC?ABnZ$)IL2@?6oHvZaSx5SynMX`|SjILym1k=MK;8%zh|2 zkad5iWAR{w#_E^&K>FOq;{*NLW5RUL42A&7oB3(`^@)_E#xipABSp>fj@0$I$~#a- z`eW@))$Q~JGg>pl;6!Y2{%eN8z3uJ9#?#wx-;le-8Vp7MH9nd!`coB*Q)xYE^&-gW zw`=BroE8QAH)a?Z-ire|+A7nd$b7deAEL|CAM&o}u6Nk#B< zi*K1_#gRI3fJZd`VkdvEV7JOKhwe;l{Py;qdz^uDp)DP{1av&{;Gz_Jd;xouVSPHs z>UKw|RCdkB_S{#22jw7A7r8t?Ve%7Hb9sk8Xdiz36So=q4GTV~X+UB#S7Zz(1byQ) zHfedsYG6nxrUQpz7XE5-r~fgQ6Uf|tfpI)|3l@LD{TlpB`7v~n8Jf3^j8z_b$|qWV zJz)8qk?#8%D;Z=u3fL6|ZX2lWeFmWF?++{(w;?MG_6Pj`N9zabGs#~jlAK9>KBiOO zOh>kMRfUbvwgaL9p!yd?^1tf;u>2!@ry(+c>Of4$=|Aq*=e*!taQhW+ux$E+2i@|^ z!Jm-nC}39=g+ z38zcJ2N=$grbjo;3$Aut8Jl~lqZQzVKQkJIN7wbFJT)i}aT@fOZ-lT)sL#w4&>%iR z91i^TY)fHk42{;A7e8?M8K8W(kGYlq3m*0Li2ILs^uxam z?CDc^&#kia`3pX$8OAle#oBe7X2jXlZ(p~mC3GU0TslhbQhM(8;@ekrJBW_41iD9s zHZ~U(+hT}w0@6O7yAzaSbgN};C4Z8T<*a}E{MNN`tlV*5X?8GMgq^PmBH!Pz z^Q{EFDzU!_#R;Y9W6}{n+46Mvbbq<=O=y~o%mS3x& zibSCE%X|$<){;RPx7s@?d%y*z-2&XkoiHA;#DvEjKxr7{&8GJJPE=h!91Hpd^H?v4 z9xeevhnh4X(r^4Ip~!37uWM|_d+2(8gn7vMZo_=$cg%wjO@Yk+Uxu;B_7MA{#?&$Oyi7}(VW0i(gfChb94o%pMKH`!#nyB}6|lQxB33!5eCh5Uwe!j012XAfS2*sjp(olxlvvY`VU;jW7SXQEol`lKD}xwLV$H;W;GzW ze|#n&h~DYSVWQC2cHm^y_%dkfw)DKd%LzR@1s$Bpy22Zw+44->H#;~;#6dgZwOeZI zH(Sc)PuMIUUB<$dKdadb5J{PHcHV~9IaNd9Y8A6Om&MoYW zO8lso(6u@2hx4nr*Xj=fA{MBOsw}WV0^ewgT*@lGmhkoRr2~?^*g1AKz?q|Nm};Z< zMe2Rd*v!B^_|D{6-7ifqWFJ~-$nvIAcL%4OHLDTs^#wlWiK{3V_&zu|u9aXfL}4}Z z2kM3rv_*rVpEAsxzPX}L4@KRc)yVel`NLqe%S&&dB-oPvy_&bAuM?P%=?okPmlsb3 z_e*Fnl8JsI&jgO#2#R_Zb}`vFj=KKYhn`PMJ1WJlv#?3f&T^3j zQUk=!J&*XFP`z4$x;x!R;z^BUcEfxj*4@Hd_ppU<39{!*rOhvmVobm!fG&iI^R z!bcyW&~*L-+Vump*&j}N5@@n3E`80sdFRVfqVM{fy$FdhTV+unQ0M=@a*bvmd{dP} zAfWU!P;?0P$Ycop&B5FQm1Y~BhuW@w1UWMl@Zacf$QCnIdQMUWY}oH?S3blTJ$HrM zNI*zw)r%&xqRHktOF7xh^0#-rVMgOOADQcW9tZT>XE|x6b~t%tEt`HkyFw^_=|baR zuAU#$ttY$H`Cy+kzw181hfEa-| z`CFKXM!NCma%gR&vn+`@!;zxLJ*;YnMi&1sM@M1IeZLdYO3%|L74SGj;y 
zWaI`I!%?Ts_RN|=5vm_8evI!fjbH&9R73<0!@)F@R}7rae*U7`zEPURoiHkT^NGEj z9cT2|fXs$VTt8#3L*SDL)`%wEYpavdFQs^oNWeCXbaz#650ho??201kJ}eDZ_3kH6 zfm6b}V-_9rgC2i%lF)cB6zx(qV>uw>R%!~PH00lz3;fD?t8fpLeF#l$N;U85jT+0K zXzGH&mM5{Pwoz`&hOJEdeQrf7(+a#{bilXplV)<=8UIPJB|f!Y;jV{j+UWfT-qpqI zO!=*g_^}J2fRQq8o!PoIP=*gg- z)b~w^wgL|^RYwdZX_OO=RMpB2w~OW$)CI$jWsAC^vY^ga(PluWnvRYRo}XHxqeIf5 z&rb=Ucm#yfGf1iPN6g8^i9EYkOwEXGNI|&BMZ-X{H z6cM`oSWqmgV;XvFMg*(Q1o141P7av?`0Z_W|;!KV0m^D}t_`PcMNXnzg2ym!5P=0eG{c-eN^o6Y9lrB)}I7u=QdCw7~WJJYK{)el&~_h7ozh)PDt>6X6FI;QR5`cegm>GGXbJleSS>t{)5@kA=(z zDmLje$Yz1>qgC8K?T6Mv4V0c~V~I9}c5K3I4*@m8pjrSEB0H!V4627ieFC9OEb$=Z z!-t?leSruO3z~jojKqeaR9ZIKvSQVw;+!~DWv$10-&pHizJ#ne;JyGvOu5c9^1{E; zX;9y*=%P^=wKZXUc4$wI@9llkD~>H};LUTA1xzA}??;|bbWmzUT=U_H2Zar*kY(P^ z1sdWEccPB=vj)iNQNTSYa7ZW&38cUufEWP!L>MLL=`aao`X5|if$Gy+)3I=m-qyaO zkDNaeECu*0+X^(Ce}IHT32s>QqX?i(^c@KvNgqQ;2Wh$xbjeS>5d>U0<8*@Hj7oF1$IIEFn(^aq3R9zFOKK7tKGz0l zAu>RW3E!`&lDmj^q&2i%(Yjb4oWII@R{PPU*%b%r^2QTMnPI>s4s&rz@8G;@y$arF z_%cp({reaRx964Wr2f%I&9%;enFP%Ty2B5xD|visr-Fc(ny^!>p^(8Cjl9e$5DO;ib;)3zb=swH5ys zNZPfp<+K&Z5Ik06Uu#hJ3}ci&7=6K4i?8c|+Qc6(!&do406w-&bYzmv+{Zg5ltH0POmeK&HI$q01V(U39gqwB9?}&17B21pMl*V4cmW zsklqg^bFuF+N+brvU@qG*@s&&$t$popuUHD*Z8rpQYVEwj}EVWltAB)h>qqC`hGmJ zEPv}z`;i2i#ESh&&-R(C%GXqE^!=!QNYWB3M!IL1$m{uMj`vB0iOO<$4cGex%$Vx= z3K-}CQYwLm^KOxR*RQu9h#Qbp=68f!dmB^?3AGo#Fgw(|)-vF|9a==ofai`@IOhi! 
z*^W@*q!f4ExO;T2*Qe3Wd&fa}w(##at-p@cUAFu@=CK?5++?R3M-Y#c^xjz)cY-(r zm5Z&+7NG#A7xB`2v53~2=DBp@r!}VZ0|l%eT{ozX)ntL0ECql&3)}+>xclkWzj)Yv zL0f$Ba4P=X$OkMGhE`eK5;xja1URD#fHBeiw*Dflr;a;<)t|~U=~%^1(R7C1b1Q=MnNv&Aeo#ha+YoM`|7YZk8($pzLOm05 zwB)NL|ML>Y-L~5h1~Td&@OOn|iZ;Ub>ZhoTO|sUt;&ILM*{(rf;{MVCJ#J+7|B3#O zSQZq?_?GvbZ8IP6#$0Z<563RcT|No>T@iV{Y!5f4 zYy1K`H1e)3*=PLYps3(ZzPiQf1`poF3O@K@KlF6R{&7EvhK&|`}6ZLnkdbcz^$_MZx||tZK|A&2#JlsO`(5vV%fwU2K(Ra z#|V=O?=egU?aqN)iN&gHZ5Q&N`k_Cug>BA%8-`d2nGpk5>232v`gd)%1qO~* zE!$%j%(mzIQ!$v!Ni3cf0@#z7^GIp;`q=Gh9ro*5jF`k5)gE#*^&t&{iZ&d{eZ=2(e$pkmposo;h|_L zI|Ba=r{sTfPKqo;obAW%H>=_|%v;`YMULW2IzNN6F}y>i$1 z>qP;oD9Z7AhCG_F3!E7{)tf{chiX8kKP_H8Q0|3Y6M1NyPyCYm&mc`LOZHya3__gq z)FQ9^Wo=GI6Bf$b6KjXHolUvHaHHUcN+6wFAC-V4z|4=UQ*epqk=3OvVZ)AhCy#Qt zoKPvSP}mnrLSFpz>YeX)DqM&jirGr>|GJ$D+LP>`ZSWKNRgd?RvJOGfe~phO3^D6* z-S8yF0FNPl{-n@xrAZP0*iS3bm20vA&zWk-T_Eq~qV8+~HI z(kBo1dJ?6#lYb`ptTkZRV+atBR8LF1q=*~1*3REdoiaNif*bKvt<8SetK!5<^_nQK zY@k|*3en@YVM~VU2nl61;TKcH;A<==ocpd8`m%iP@e$q~xfusr^ABU!F}%GO;&|wZ zTWC(!{2expHR{Ta{-;{uj@lG zXFj5|m-U6e9+fd)gmR`2uks%+*o9K;pU5AtSKLmB-h&f5>=2#fzF_>GrjxkTv_4gxT+H+!$P%pSb=OMU zFqp-U486&S`<|lqn9h#H6M=NhIiA`-r9*1<@M~L#?|u)*qCXMyJUkX`l7e88l-7pG=ca_{C?zm54DO$8#8BVlSj z$hmD-K17!fOVz$TxtmyrJ-hib<1w4vFU5r}5j4~?$GCDfd0gMm4d{revPNC6J{ZgU zC5pfh`~3&!At6$;_Pwz?Ps<$@tv`k=!FJ#tsQV3gaW0}&JB1f@6{sW5?mQdq4xf&l zxpfbBQ1Jp);o^Jgzr6o2``G9t1I3xIcawm8&Q_YU$%G*>JVIC_N--9fj^O6ZzC~Xc zDC1BVmSVGrWZykAB#5=T4H5GQTQmZR|8OP*K@4)c4naN zOSALXpG8E!$W#5Hcgdc7hTB!+_TwZ_UeF@~>-r(g%zKm?ULP`Nl_hH^1uqPjW<~xc zL>>K5@E%Ugw8SVCnPu2hdMZ=~cxxhBW+g4~bokSOu9t#^I>!M@{KAt??Ud|FyKZL{ zUf=KY`8q!ger^2N^KxEK>9aZA>-D^2O>XxRzc5rLY$)paX2#Loo&FqW6d$W9UmGHt z{%EQsi%w!DOn(c+2bCq~qb=uQ=g0EPT6gB!u6OELz4s`YmCSvjO)KkkFBt%2uk7@- zEKh93qo(Y8Q`#`bJK=^uZ22i%@^*o;S47g9CSc9Bm;hn$=z&3`zlALYq-iYd`)jFr zvaCKmap}8)61Xr*@_F$un4CXGJIMcBwGRKvDw}Nv;4qiuby|G*#bYczfcZ<#WCFk7$bvVnP2PUX?RHGDqR2)^Z0pvqBjq7>UOvHghVJc zVi~4)YFanZOOQq-=eex8=UjV&0ap*vZh!Fv1L-8s##B>hE1W^2ce%mxk#5he*dE=kNsDTpH=pR@NrXSFs{fAP=!6cCR 
zZg<@YriI!MqWiew9_;MfgPi{c7=M2TcGzW_4LB45#86FrMtW6e_pt7R>DxQF+yOt5 z>&#*|;)<qh zU8i#OgfEV}di?h~6^e@fiGo@&Lw!(jf3iXe{ZCw{ss!*H3_f)T*}axbqh?Vyl@!_a ziaYHI+ahbrn@_i7bv;`EMLBVjT>sis(d-W;bC(-(6A2{B@ee1l8oz2ib7s}h1eE=! zZN;rWv8|xz+$ECxxEj%gFV7fDq#rYjPLyX3=gH2M-EDJf>MK^5dRCcM2xpzCA|T+r zV#_FArf(i$LVGC=?qZlAch#p;Ps9Hp+~$2QX%@cXZLykWJ# zV;;#7{C?0}o(;bPBL_|U4=V!(&KP+=lS4tX}I4@J%7&9s}vk_>psB})|D(V%qR_(yt&QZ^Tl5MJ)d9z$`17t zDe)6T389;=))GjdO!W^EL-!vLHSoXLC)n?p+98?(b}9=*0gQf z%=ONBQT5DcghrozG)G4Yx%Toa9Rg^Q76<&S@811#rW)uqn0(x!iy1%NdAPr=E{>&} z(~}956|`|hJPPH3Au7AYx~zt-s}F6dFfA`3mL$G}YVmSsp07+|iC9g!8^+r`Ha^#w z(smv%2Xo{V{^>SAcd^jo!Qnj}q;^+*RW&0+K1eoqxusk-J5#(z=Zbu3B$x|0l^|Zh zb`OmHgQ?`leI|rBJ0(f4Vj012)atznza=Tbc*0n3j$9HWhv?&@FvY9X#(?3y(X0?t z67sZ~6AH0fkMDAr$K$Fu+G^C>dQU#?^=(-f2Vt|~UX&a5fk~BP^{Ojed)l#n{+Zmg zg541V*F?Fvcc0PL4h$ZO5NEGl{Av8kiHXa0g?*{=ZsY+@xEErHO$MyJ%kRYv9%%p$ za0tk(T8DRL^HiLx;FYms(`(V1Ioj_8-#N84yK#gCuo+?n`CASP*|Q3&6w)11 zPEKi0-i@;?Ki$U>vFa1yZz3h**T_{#2w)c8*T`#UD=DF7{PGpvede@R zXiU4Ll)z;T=GO(x5j@1ygmb8G3;i$Vzwt0Rlmf;TeKkW(I&dcR={Fw9qKKQgL%zNU z1^y?B;P_Tb44lY#hZ3OMTIFZte;)#UzO4BJ`qLlu!|Yp^Z_7NP^xIJMEg5enK`PR+(OOt56@i=Y(coKfSEClwR6_~4_Qj%~ z8;7=}Yc=5QDy{s~ghy4+s|V3jX=e_7)UlQs_`U}qSl<_7h~xd&rNY*Y_WzGP00#rP zR6#dSZ0JJSE%kdhIZEBtKQNSaVma+DB~W@DWC_T9`gXf0t@ynZ$pd1(eW$vWtd6|Q zeY4Z|fV)7Vf6#7u(1h>q0$~{0cN0Du0+hN0ALjrk3wZ;(zqjBs^zwHH?FP60ffjsF zS0OOBPK}`GqNig*&~X9u>0uD*(WF*XdrCb2wjWK5H4+?xE`grD@pJlL3qJI<^?$ns z!dc0psHEM6%hCiR0U5eC>AUaJ)S)jiWdDLl{ulk9E%lv`lj&E84E}O_W4NN{zj3+v zUkg6;we?>MKJ@kT-}wW5{h*-gqQLzgYRApVwl^$E5Rw1Y@8^Fl_|Vt(fBXOD`ax{x z4Ei;>4*F_Y9`8xOfaR+zX`Pw4Pkkk8`cY(!_Lqr%D&T;CjPH3BE;9eat0A9TS6HZp z)ZBF(Fige`rVbZkX9|IQL7mz$gw81E_7-lPXow~t+!8$5wJy$4`5BqhBiCp&4pC5Y zj9e)5)=#S0akD6K;gVN+vF$VS;2TSmuo}3(xD{ z->gR16S{*ctn>#oy^`dAu6ZQ(yN5uJ!i!3&$N2DU2^;Af zzuMm-&W=6U>7P!ZNh;sHYj*zxaC$sNuW;Wz&LcBC>Z@?JbOpylL`_1c>fSyj8R8&k z;Q|-J6bo>&^=G}|7tf(quH$H_T1J81?|Vr<~JDX{TDjoWQ+LoPT}**38DRj5SfzgC#lX5Wkc%s|fn;ECxu z`BU$t@k%WW1@_BC$pF?q@7==7r#po|g{&}<+zW7gkaz0jKFOpQJ%LiCxRh`31?41c 
z4~)eiPwbgqm>0g}DH|aVY~?3mo@$$)On1#!7KBL@IO<@wIPbD#{>&n7HR-ZeSY^8F zA3B_I&yqLH&RLL&aMBOOW? ze`NOjr)w+kL-)VCRkm=fKKq$OQ#xTphy0ETRV?xl3G7%R(r0+dIIc5%(OidIG#^_n zytmG1vPpTj@0<=ETvXrgYTN`HA4$r2zuj4`N0-cKSz^rsr{5Q!!g(E2JZ`(w7MJJb z`~4@vGYAygE%%vA&ZqL#hE_G7>MrRx{q^XTVuoG9iW(a2I2T80Lup7yKF8_$^C{R^ zPP2ZX#&Yy}E_q%^<v&5y#u8qX+P7bF12jVl={!T^u;JJgPYNhVkuy!o3FZ zsZV!Lnc{M(Ia!=9H-0dzk#i$5p6*e{v{TRHbF=YH4quX@zbf6#dw!`cLFN(YY!2|p zy3kRV^+I;!AD7xD7}i45$BG&kit{4$fR1@cq*Spko>6@oBbt@Ga4cU2N zirWEI!MA#r`M$;tb;KMUZp#Q%{?cn=&(7ClEBNSCzx{+(5f|Aq&7RW)b_$G>`yUmk zOxoL>7f!DfT9!7u;u|i@aLr@v{8@9UOUa(IfnzHLoU@13h&5hBYvv1o)vy2ZlJ?lB zs%{<^<~d(o_Z$0M=?}qVIU8yz?5=z;*})??EIv-{oVw6jrh}i7xkGwd;@l8xkLXm| zr!HJ_K7lkQQz=Q(J3~(Kuc39>nqkn^5=2&+SWo-^Y@qe%*0$=(bx9%c4Ar-kuR6gdOG}p7k87RKO0(bYY?iIv`P+Bd7WQ^csZqwv zcBRP8^ejZ2;fQutdozY#X*24?eSTa%bwd30^SFl>%3vo*m0ksVWlZ4}Wu>Tn*8CWr ztUfWrHMZ+paGTx%&Z69}<6@okiIV|~ZQ_b@!ln-41*0$DUVIiau01er_lVuSW2Z}P zOHj>7fu5M@^JiK(ZG2jC6nI}8*=Q;$a;GBr?B*z5JY}4wzxpEgyF87pD``^-uTvbn@&x;*j>ij_VsvO4#-&YIYrZvf3PCl0w(N}Qw?Jm+=Va~h)f);V_lAAThh^O<*qK_TrVZK`v zA=}`qv2=B%t57*rj+SJlZcdik*K8!b%S3dHhvhUMNgD=xAHi}?gJD%#7DuY-3GPo_ zBwXViFCRul<-*0ay4qkq&cmB)`?&|8RI6YL1?fk=HYjJ z(_|x`@;RhsI2^0wBBdjvBa>;`zbyGw9@_m}7oJVUOB87=k(Ki8YPFSO-|L+_8Td2y z`iF+iP_Q`d;A-rocp|gk{-GoPOl^*U{Hv@)&dwy0T9GiP2CZ_@MdLT`6{D46c^2Ay zUk1^?n*Y-N*rm(8QfCj2dnVQV^yZ^Yfk9*)DcC5gSZ)_xj#eW^u|%WS-6YByyxhvI{@^Wmg1Q^<0&e%ftvb{I2@M z*9)_)0fOP0{R%H4>_WcvNY{V3UydWd6H?B_jrmom>Fd(j`fnDOTD~@Dy>9+i$2ruc zeyO)w3RXlcl;PKSy5bfrB)>mL>JV1KkokTKQNj;fdm&v>!6t3p8z zp*_MvGuK5vr1)}qJ8_-AW~bryygxhAv1**wDU`w6{lLg4jaQj3tnS8xtM{Gj3R zDOt-pqy5ANC*d!$Xm~xlAM`YiiCxq!!ii5P+fVy-*#-aWSqsf{o~88hn;Kf@uW8Du zdbJ81T)dgA;da$;B3(xD3T~vI_bHcI{l#p>xyAJTQ{l1~Itz$6;)+DBQ<_IzsaYDH z5vgp#Bv51x)?czUeUvP2#N+ZMYw1DXN22@x9{@@~wZH1+jflPm7P^Mt(WUGt%GFpe zz;8B`wWU&KcG$8h{YsnapR6F22QDF>^X61*nR9Dk>D{T!6(KqfMrgE!Csvd8+o^X! 
z-4DN?PG_3W8+cvO+SAN!xJ^q69lexU1;VkPd{)!lN_+IeC}}}F2W{-eLmNb^z$4K~ zpp9LC_R}bBt3!5{OIEZE9STskAHx21+K}MI1`p5PM(Aqpk&u4&1u(-&234LNu7@(PIMtD|i*xuj4C>TBzeUl+4W5aZLXgm^~Yn&u{e=68N^6p(|Ui#>BbND%?^u z=IB?c^AvDD#(2XsI=n3Ff+z|iU&?2T+9#Yi`n*T-W=oD=$*ISA}DF1uGr zLg!8bzH=T`#@%<)z^r)Ky^o|~^TxulZR5Yd?#jqGq}JW;rFdDBL>IYu5(Bi=IaYm_Q?-CCZ=}`)Jg7d! zRg$W(Bk4d+Iirg+J=E!Bix{Q+qRY#TT>ICDFl>(J*s`&KoF@Q1l4yIDD2HD7Q~_tIX&)rkU2+OGRWEJnbX)-Nuu!ZhE;NTODd z*6Q_A5!Tfe!p3T+Pk0oCQI!rd_|yR=p9<_+HaD^O@G=FhLT1lpq(~$#4VC8#oL6m=FyML>!=sRAt3bEMn|P5APGhwK z_X2<2ppoF~+&&UkbanefwHdZ2*W(h5&TtuiI@bOnxjo!m^HpXxj9jH|yUvWpOIvBJ zcck5Qc3Q%58Oob1KF9Emx%6AmQ;wOnH0R9P>d4=mS@ZqbfSB}|HJ_fIp1!`mo}S*H z%&h0>g*(My9h_Y4F}|t#Ht``Y*ckIvA6v5}amLB6(Lu@ak^1u&`?w?~`%H9ZUFp;I z*a|T>z*1qEII#NY+?(5nTwff|+ReOmlHy-e7cl>*3TQkZ-4*BUj?RD=Y*?N_7@84rtRn1h8jwV&%PZzg|1&CAo&g?xCobO=-FX&jk z$vY^jKFrYRY!MBwy7b(F%HoT!6z*TWmPY}L5fe3@+&T2pMtBkc_PrB7kZB~%RsY5} z%zrZ=bbhut{a+9bW_YGzEVn;uZ*Xkv$vSk+w$osRT%6$z!b{Cmzjrk=4Pa|!gbZS> zv_Ea(roz{cZp!bMeUiV&*Mf&}^|}CMHqSMVXwYI@i_?w7-l|E!*L|JZO~S3SDSl7C zMCC~5RW=*A*!?%7!BG~c4_q?Rre;d4c3m1|+d~g@2?{cvi?#8VA3(|PQy}I_!eRb_ z&2z77KkhYM#&fbxKI_;*b6e#-+mf&5Q$9&33N0L&1?&BlS#X~9gU(22t-2$1(sv$4 zIOM%NCd^$$f@KVNgM$w2R7tP9E-Wo$f8ix@$#T`MBZI|f4sG=?ZOVhS_TAo^{=q^m z{-Asv$sp|DOA#$MtfPuZCMH1Uip-dE+W3bN}*VJ#0aa7)9L`p6xbys?=#dEg#?q)D|d%Yuh zTG9Un*0j-(cSqfo%GP(_HUM}H*E6<6! 
z`j9QA%MuKx%~O#p-KmeVQky!91K@e%0T|h1OWrvdbL%F zsjbU1f46-@!d`fuap6XxruRpR967A19w)$kR-;(c0!yOzWb7O`#w*IrEt}yBKfby_ zZu*;7`Fi^Qg2f@8-8lI5;5`93TBt8b)(|}MT;7@2c8^Mg4)dk61kDZBlxhFM%M*|2 zecJ7=5qvBPoD%wocw}SzX`h3+SE?_BmoG&0`48_2;%RtwlIDQ+IZFkFR7oUK6$rl& z8G4yL`o0UO<|o z805*=uA(Ho%b;+6UgAMme74VinJ)8^h^h1QUr({I4s_mDQGa0iExR<4W~YPFr6a+I z)n5rd^p!71*rIMW<4)|!iqJ25=ys~+R22w&mTnO5XT*W-aG{89)5hSQM%)z*LHQ5q z0>z1PB`XiXK}^L9b&@i4!ifiFu6|Mtf*A4qSFVvTZP4FBc_+HTtnP~95INwq4Qx7dbTtkBeQ8I;=ZwAiV0Ub$X=qzxex;+LNU)Jo@H=!9)W?W1_K< zk&$tbITU1Os0Wz^>jx1{h~@@CV~8*fiwTaOrK{@^>EU4=7-1M6WM&@W5F3PLhI8atZ$5FH}joE`m%*l4rxNTX-5M|y^9(H?}xRg^je*aS1kXi7CtNJ z#QgA31HEJ^J%hBl8ikgTW$~V=A_u40qD%AJuH9dtAEz&+MOLt#zr1i93pU1OfqL?l zt7_DCYUJg4E#0~9giS5Y&qZujxKX`XQqWyEhGFFC;CsFO6!uwVVo?5ZuY`B&3tO|? z^VFtYzw9=58Uy45m>?JI6hU3Ic)QZ8%{uoP{h3c@23(4*!`i+dbco!NTzT}tg>ifM zC4J@PClP(eS%XEAWAzc#mdgEAlu0VLJ;1f~3}?G~9u1W{^&(7HF45EU+fu?EToH9; z6b?=md>{D856#Ack$8YY6gnoisl7OW92>r=%xmbv$9{!FPD%}j107~!VeM8?4X)O> z4|#_w9FP@rihY(psU09y#^k!Gk=A$a+|Dq{2be~=uug|A{i>TGpxW{JECdb+BHC`r{i&Bn$eE_<&lys zETWi3_6+R?1XKIQuI$MBpZS63}3pqq}odaKh+JAbb8$3N=z{{_prh_Qa400G&((fDPGnB7hRb;xQxw z$iA{;bN#BLRuv$U(pkuP>7%V<*zT>jk-vLPgJPFnW zlqW%z01S>VyYfhO0zg1eP;?9dq)3v8073>wNTWz*WAFqtMVO2v0th`E5kTk?0k8p! z#}BJS0LUx^0Z#&&0HEpLE)viLMJk9I`Awn7(lI&N!|fsPQV~P|QIIw?#vg?UAn>eS zMeNO>4u9W-AxYCDH)2Q`k~FCt34Y?;3smMg$PuL;%rA1Q5?@L;z927IvBa zS%Jm>qXZE^PGwcp!2YUE1ds|W9X@06G@3UNK!jrP6d%mj3hC@jbBO>_m;KF!OrsG2 zB#O;cEFMoL3ri3IWIT=tAVUZMg$N*xuy_JU2I&|gfb?MH!{P~w6cJXoYF4(jWFmkt zVN2W0($@uxCxZlZI$O{_9GQ+`B~=mu+xA!;y|cP^m-!c?XNn#!v_d1V!pwX(E7*qAucSt3eNn4*{fr*%$&k9n8fL z(8GYDim><$Dm&W~b~Yk_Y9lDFmZS`~j_QNK6F>qY69Hzj`j7<|8!>1`7>NjQ@eu%X z0?_^Cn<0b%;cs>jWg-lL0yqL-OlXrM1;k>e?G=H5oj6j!&Y5LFA`lS!^{Y69*o`9v zl&^Dd5r+^}I8wmnW!OjJ5TY4J3V50RvPK+23gSosU4xIa#3AGy90Uqr@xv0uA;df! 
zDPY~6t9Wq;;ff;#Y`R})Ee;{VaioA9w&YxK2$A`%fJ%AXBM}H_!3}>EhrnVSDPSH= zXNoukUc!+A0z8!z#38T)M+%66Df;3NLiAf%aOlcDaR?N~kpj2}Tae-qXoVvM@J;FUQ((lsZS*gdccnEdvYyR|XIwXhwA^-mG`FF8WS+#SC zV(|bT0xy0~t7N6^!{Q-sjp4Mm?`aQ5(pC?r4SY}g^i2ou1H(EX`2L{-V&+JGggPtr zGZqi==zh&_o=u13O@7Gl4Dmc;*M#tfcs_s42mMoS9K@^oH8t%YNFjE9ueIn~%Xv=@ z%dPmH#$eUIf|Yh{IPJmrw8m^Yq|n7m>ljY!{hs#WTg&-uhjlEa{^rGmqx;TMT* zpUi6TBnV}Q!$2ql3dDZ_iyv_$1OhjY3d8vk7Q08$wlM@f0wdKH_KbWN)%3 zodAM#0vaSyMIh839EFY~C`wYm>|x9HH3WiC_b8BnHWp7n5dbQ?UOM}e0HDVa9DQ|= z5SKiKJwWIf(yz0Q5+ETVu5~zql}8%yg%tSrF(3g2LR>UDgkCj5s6qTFwLx5mAc3V= z{I65RNf6gV90uZgK!&&)*%{fK#Y5aO6Hta*gbpc5b0`CGt5T_u0O_lThqz}!+(vBi z6iCpVbs=RU;$!HL(!n3Z)2LJ`BzR*){%$Ij%8sYe!|^mLQf80judbd1ai5?t;jC9^Z!y}!S{Ev~&duXhY z&b#54BOT(c7;X>mWeNonaU6~=A>I~<_a2*~R4ODA{OxKt#M?Q1b(pw(1{3HKK8>G= z%Qu%sqtPJI1~xO&_NzJz#izh*RR2n1}!In%w zgAiYCPBtA<;re=kOhqDDSXg=Dzmt!^MF433IUW9pOB`Gwu!u1nHbeZ5?4FqLfaE_l zfllB*OovnoIXi*Bip=IQ8#S>eDd|1w5Mna_t4i^zSqE z-QVo7pdI!1ev&q-%Mx6L!$5)=omsP!-Elf{=LX=q(m%`+bY2m`p&0 zDcV1`7)1amza}kxRMPqoK$bg8{}U5Lp?D*6Fc>o32Qw^<_J`8IuhPf_#D8fVj-qke zqb|=9*^bMFVb!If1E%YWH)-wr5O8saSzvCEx(A4U$3?Pj}N59m6d=W1s zEcl!F5)#&A&+0~Fk}zokOcG9}L({c5nDqb4!X+d^q)`6h=*2UJV-WHLrx7A|CV;8P zRXQ|%GiS8Q3KRZlnElbH`r1i^;U1BY_#OJAAdn`dxALI*L5SzY>U_Hr3%_%DUR^*J zItR77PRq|UY>7T=q5z53{5T`uH9V0JeZ&gSSQ86K^d&n~6K}`jA+Z?~z*J0;4ykEy zy6mr!9_NDpwBZH+(1e7T_*uHT$;rt&q0vF%Ay8bPPC#@d$AbU2Fd86$kXR}Wk_^S- zL3V`5xz-4YZNWie`H*}q7SF1gPNI0SgKb4=Nmg9Vu2ljOyNV;Tum&jzk_pw6L;|Py!Oy!I4M=kdDm8VAAyT4GfKpO-#+qA!0yaQ1Cz2$N4k3WyoFx z{&X@Vu8f1kl^``IEFO=_#SnmWBqY{^!;?3%aQOX92olE+&(cOQ7!qHt&M=~1S0%;i zR4OEOhV`w80F3mRc=H7MO#Jpg`-~ltOCUz!GbDWsYo&bSmpmA2ZOh=(Kva271HAr46}Sgp>+uu523uu2fDJS`}m z76gERrh#)Ii3%2*vN3q}90LhSA}0Wo#OI%2B6+arG>B(6me3|UMlehsD_G9PU?52X zt5I|eK6m4{I|w8KK*yv`b4R|kWcEDRx4ARQtTV7OTyf-)8>%oQc?tL19ArrHEDn-9 z^Oc!77=jgfYxNZijC$&e%y2TAsQFU(#{#|NuqbVZ*ZE@gll z`9+4LX5b(x_3zn*+1UfQKDM*-;&H=Y$&i!>4wAxr&&bQp2&oIRvXOACFJwq62?t3f ze$RwrXCef$^6bJ90$EwM;2^2|?^zJTS@>C5j^T!Hrf{xBOfc-6ra1wcOjDvm>iamu 
z&S{3jlOG9aTKq%=h0~!KVm}BX6M&Hb7MiwZL~Pwe#Ge02?Eh*!I9fMOC?J%+^9@qc z*E?;%X!Xq&`F`Wl%H!+8DQWQA2m3zd@8GA5gpD$`WJqQz7Ecz3WZb{rK4RZsG^S%9 z8HfzY%*NtJ?t`K!G^D5mB(nks$t;ItxL7_8fMl|0lynf1@xkgPp9oq)~&A(>~a6n8QtD~!d{ zK#o=nYd|I=S*m(MvI%4wB&)=lE{|yZt(CGH$dK%EHk7gjKSL>dhz!YQ{1Zyq%Q#53 zb_7b0&_4i|Gad$9PUM>+keuysPO>;8 z_mBRUgUFB^NB@%uR@8Ks)^aOxklcz9t>xCHQ!+qE&J&Bz!C(kzNUn%|;af8>IrMMt zgFv8Vuq9OCAi2u#5^iKrRt-zoI4q%cqlgW_0$1hNk&&5IV8^7zY^Cwi~ zkFzhxhyH}h*Ks?+%@UCO5QWWGGDs#1kHnx95h)2sK>-IT$PWjh>`3Kn6s4dKN%ym8 z0x4KfD3E+F7XOt(9OWe-g@rgsVZm^5NFjK*8v0jX%Pv46Q%zac3j8j56Qn?=K??J* zct}B=m7Tq~Kn7_v8px7)6bC7kLh{vYnP+K~bVwl{i~qhIg2EcHqL4xp4pO)e$)CgG z@nmC2;U$g29-<1AABF^p^^cUkiiNwPAP!O#7{OgpW)&rK824N@?uuF?7dEoFq&SCt zfyE_O&k)i<;~!#p#f3OXaRDU5&(c*fjGbBG;&A<&vU4#xKlZ!gS{$U9^}YUrpZZ<# z;K+sV{r)F=T_DAQAGf+d=&3jeJq1!cF|yAk&Cb)t)Ifh08j6XDi%x=;pcA6e0f}*O zK~V|l=;Wv%0zd&h)8fY1DaRTkp2IzijwL8j*`_la1EH5wAhZO_byGl&O*B6kOEgS| z(19aX=_ieX&;ihrB|&lMuy}M-bOJgddRU`~peVL}ap>r{Z&?G;!K@!kmi(xZ5|Uss zqLI=R3WVPGdySL~$Pl{spBnwF$u}LoV#KM*?=e!^M9}`O5ln%U%*W8^A{kQ3{HI1? 
zQAyB}u)wdnIN5$@#YkPsAYs}_UCM$KNa-qT`Z9W5%6eo-spl8{5f~U36d#XH4T_8Y zh5>|5f`(hq(t_O|V>3i~BSGW4M%yTmvgz+zuUt)rlrw(Z*Ys_KL&~jRUAFQ|ayq1Z zh3&GHnH(;g^nMrhhXsY3|8U5OGP+EOb;*@>i0D#iJ8K zgNEDwwZ+45jB8A&q!ZLeTC75+Kq?x)Z?Q@l8B$sL&pt?qgQDVtgW}M^anX_Ju&99O zxVWHz1oW^*zr+70jZ~!w(?>K?Ri;2HXMV4dsy!J}<^PRF3DFz~1qOZ{6Te01M~w~; zRKIIfMuAk_#?a_K8B*Q)U89j+`)Qbl1`YQi$C#KdK|qhxH62ZXR9k;PCZ^kxq3OJ% z+bEXNWY_x7@i(1Jkp5oRS_(AXWsJH?$~I74o=>R*N2Bz9P)*1^(o6+2jk)ZP)$R50V~j{$-e6-;LbY^p zGiH(@_4nWE`m!@HEG9VKS6LMOR*Z|>+_-e*(TewZm1Ug+Ia1kEsR6s@7i-admM_Da z2F6D(39tcpMFGPq5*&1#^0A_$XKwPVEA`jcbs5;i*Bz5xVCsL*W~Fqhk76%uoqYIE zW)9N5;yRRPh>A%GEOIH$PjuRQ-a(JjkS<+Vege^Vj0!p%@_gUnPU+Z?dp)AGYqtHK zt3A2fqg|S0`wo7|1M^(Hp4~FIBYNnAMf3a!3rSzfVbiW|rO+*(3dk1I zes~&jYJGKs>-PTWgMw+D6`w^{E7bH0Re`YNjor@tJs)o_yew8C3iIu`r+DJ2#7lvK z4Xu?Qifp(tV4iC^+r8x*d6Yt=kiAU98X?N-QrqbSi8V+H$u4+SPFnvkC6!EA5}mYBmFmrh9U}1c^~~<+Y7G89{~J;65+Vd-M6>E)l6qMK8^63qR7(*zr0h5mR;E#tNjWdTH~o zzY}QV$de)%Q?aH_F$`4|zW?>!dP~YFepym*b@6{o=z}{ zzIEt2cDd``^o`}tZ#G{)ac6+|qQUiCaB{zVLi!^8=d>4hWwjfw7@wbE;SrhTHkN!e zB4^hf)yQpE3uad9PqP*Lye!kpR%UHsQ?8udA72#9xa@mOwodRJy7R;{nXa;YmIh${(fd!S4 zA1gjwHKX@}+nzl-u*W=|V&jgzJ8xCKPrXn--{#=(k>#1C4|ux6__f`v=@AL{q3H#*@eOvheWXn ze3?!Cj@UTmaf^)WJ2m1JInHyYl-1_yY)@>>G#`@I-nnw~@>|}63Wt&Pq;gxnMf;Bb z(AU2>abc7(0k&H?;y(EKI#z+ORL`vSx1Ft)$0qlBT|V!5%DqA&Ma^c<9GCU)%Nw>S ziornxMI~Gf0XIm_&&w^jS~SiJw;xUyYCa(@t=J-(@x*f&TVZOIP@{(JW3H;Lg@$tyu42KttPA_ZoWt!>DKe zmE?4N^O;^+@0?FdK^>A@uu|O6s^1u7xDmCaRi6yMLO2P2qSJ z+H#~}fxkJwr%7%XVG^E&rsk;Uuimt2oz+I1`}PCP&pI*WpX1c0m@0&z{jeEUIwy+Ti}9cc!`)#k{k1B{ots z{cgigUxhq8TxKwB@8FUOMt(BqdWKD%euDQcSFXj= zr+lW`YRx>hVz2zI2e3rEeRJfaO{Wx&8?`ZmFimZ!swi_aN4;(*#Dx$CqopHH z04F{#*_)c2C2$wrlkBg0<-*!MUx>eWBKNlGzLr5JbnSWnkLPsH$38rN;GOO$?UlcI zIh_gLXFrj4ejYt`*aTYAc8E6RiL++Q6n{tHj?tMk^!dg!qV zgm0NB_Q>W&@mua>T|K=M zMDs_RRIv>?;dS0}6`xi!km&#uNrqQzy^PCzx%1NT>9DrW?o$24`W~}rMG=jA`@LMd zpPhCc_t;p@L>e-XdQ7A?NaOn0Za@`q3&~AlIzIg{yMVS|0ZVSppq%-rN?Jr>ATElO zqkcFi-c3xbG|jhQb(F2Z-bXijcwa57J2F-*XlQP#XKwzV9y5YaZ7m`FaE9jGgbgQK 
zT-6Rsp^ly1HQm8N?YPtD3B!8ybahl|EW8b%spLqnEE8`wpK#S&AU4II3a4ctP< zf9$~z9eC3{|MJ>)E>}82=dl|$BFA(Gdd}|(QxUmq;CF}J?5gV}m(YqVm4vgG+5kBQ zlJXuw74ium1-g}yWiT-?vAQ+Y(%s7I@QDmrkuy8%4&BI3MRpzJnLE>6-mIFz6$&u9 z0%5buK=#U6hC9|(@5`z^=e1k!A^kAV394sneE)~Y0@>-~9)~NLT$2Br#}*m>^<#_l zO!W=^^w^>p$LMhPfaCy!IGoF3Q=3RzGgte_#{`i4KtVk~l|i+u&4ho9Kxi zTa;5=fmIvmJAzy@s|Pj==v2YKb1L%`cz-_q*6fs3$&q2<2Wr8XJ*Kk)BQA;_kfb5( z7_$~%%{xhy1|lE9MC7ulC&Y222DgUDXw*~n zH@8om2o9fbp8l{c{Tf23;rT}IDLu}g=nj2hH+4H`#W2gh^YcO%`OV)Da)$MP99y^sL36?)InV)^~Jk^3Rq}W z|D9!V_L2^17gnVu4k5OrJnlKsX%f+|T)9rnc8UxGsS7ZXTJTW@+QGeE;>$Zny_$~T z^-Ad3^PY*r_MhJ!@0(F|-y&_?rk2D+Jo;fNuluK^ytnV)8ij?YP|6RhUrNjF4s4PR zf&T}W@-&X})?~enc97gFKa~1$vJ>dqBAmT4A_X#0>bR%oM?as2YUmudIN56c`q>=XKm_QZ;1^dB{E-n7!shZr58h0f`p2WLrH zy40y1;wh<+V;*frt=RI#JI2+|s)qJ)Ecq^6YKx99P28F*QGzztn7Oac>UH(Q)*+1F zIc5c)8UC2Rn&{`tZ)V2#ZRSca3fzW@KD)r~+XLv%RfVdZ)vS zWmYVfSNE4SJhGU6YR{#&^VPS{8#u8rA70vhX0bxlhWORVCl1Q{eX7`hgHe9kk!)=; zbMGxo_}i-tL2mdEuR=(+sHbb&~Zd-=S17gk$xrvGXc zZxo#O7YGI6ro{$786|z^j)i<4g`HMacypJDX^)b=5ImX;wxrzj@%0i=&Tas`ZNAtGXf@>;b&2Kr z!}{dwJPilh&Eg5=ccO=cHY@-KL#kq<`i$h}Yi=%_(a!)ync!3^C~Cu+qUlq?{+qet zhfZr9%vloAAE>fbRM0+9RVg8vRKEKTo|yBW4q-6BZt21`V_AR;n2Fe+&`XECg4Z^_ z${#FvAWwR5viQxcZgvE|ZPEMrZi|Y!`eTl)qNKImcrTdUb!TU?{C4TjQ+jUYoq}b= zdE-9GbW;(%{O%Vdt%AT6fu<&fW~4ag&_JM1JNtt#|f#PU{u&o%;0BWSq@iG4F->*~UX5 z>MNe5?cc&5wygX7wK1rsIgw?e|M0Y4@lwgf7oMUXch>6NoGmGe5YNrMK24)wiJNxR zyXfzil&z_!HXq;3u?%N4u5%=L=!s^N zfUodnbnVOC39%oV9$jiyNy@XAI2`?diSK`ST5s->fbEy4Lnc82*QU~!f=K7F+SZo{~UuR&?^l446@OyQENy0h%C~paBbC zZju!hDRga=R|CRsne>{uPTghHbpD*< zSq$(P=cWJP)q;DgKp0$OVY&7*uV{OAjyPiN{dz_npW1zLspUJu(F-NK(I??VNADJe zAg~H*YNp>E_-^?6BfKWBydFGVmw#aURHxOY_W8i|E7eZ5cl_@U zb}wO>lME)}A;+nnAjipkC{pu(?Nm?O*4;~V_eDnQ8=So=*jKxOcwpcX@K^u-{;8gU zL4jtFsgV&;&n(Eu%*Y%v*3&Zz4mQ+-f((Pq0*%MLyfsOudd}O+e3X5p4jri+HSNXH zRsF3!H6k_n5<8xM+`${5uqvL*=@W6CRYbQ9&4Ii=$Lc~sN_#cAL@zQq;S{B6s3m6{ zv$DY__3M3JJV3v9CaEMW>f3xElOgAFr07{@zh*5W=gfSV7`4h0l;M4Q|D)G(6BU 
zsH9;Nrj1332(&Y-cJs|0xmRudSMF6`E7-8sd>S&)aBIV6MQ$o-XW_?7RUrJ4Pq3S= zcOh#XPugQq{@npH`H{gME2>5IsW>V;JRgz_hrdrLc9vd+h0+zr(<+xM zm3(sR-faJQsX9kMYcZaHmix>~k*1hQ2)M3dWt|fq3yXmJ6+f;hA!_y`Yfp|5aOwOP z0$K?5A&bcC{Yaj?Pk2ENbahH&$m4sVL#tP}wBZjYM=yg3K`-;4S_#eQJ5<)FfJ#1{ zGSAj!vqSSuGiAqJ)f;c`81-d7qXe|bSrQKUlYnoDq<^`&J5RtGjU`^rEvaXNMbSL} zm-zmN2cwhsNp-v{yuQ|8of+45{`)7c(apkzAicAs&B=_%RwvtFP5kUF=UOvljBnSV zKYHl%NEOJ=T#tS^g2d-gZpREr`6ASG-ob&eb(d7qLS6CeO7iEn-q9`+M(JG&lRdtwU%9Y z`@ns^)LGIP-KM(Wa}uBZHta83%vJyFX$^;km^H4q7lYgHADRSw7mLLWCClemoI?}^ z&YXu*nr@xv{4cN&^()j9p4%_F6IBJgK9tmrUR65nwD{&1U}t8Ro3ap~09$QZ)~Y!F z;f}lfg_y{WtEc+p2B2%pT~63-fZqq?L3|tpHXGNP&53gfdXw?ec2UNhfE}?@r*HBz zm$WGoudto4mFM zA6nITEC(yAt__%R&jwEucdQBhgibSOkMJB8P|fxevnj=ZZq9o=#GiQ!?>vI{Tt;o1H3P|rI} zPUbUxUQ`Oq6^%BF)VNY#Q000-tGuJFk}_(HHp&YZd7j=#_jx}F0CUB@oMv|kj(;-n zycW0++w9kVrtn`tU>D;Qo(${rMeEitT=V#|uQ($vnD;=79^q1TD&b6*2m+3al@nYz zt+(c7xJb<>3&FSu-AqCT>_tY{a3i}JNwB#A`hOgGZVK> zbX;e}FfvG|ND+YEO|%J^QC>uw`)SP0{Bxi~*9{RNYNuT|25x6Tj^lYp-u;qi8!*~zfA zK2oRts7=i#rVR66V1Xre9a&Hp5Eu98tfz>N&jRX#S28Uxp73apc3(LrvI8W+!N*l@-`Cn>PZt z&7W?D?49M^s1wYG_;2Dn%I4r_pP3A?B(XJwfQB3L2;u?kk>gf1&Y#2VZbWPo+E%p< zPDr`Ec-zgIZHJrXayM8NNzu4^U1#{r^vh1t?jhgyc))@0+;QZ)G$6*cSQyzfu78&?yoVQEJ7T3JX`%brAZ7EOmpA&fqhqK5192T-BCfz%@7j6pX)Jz?@8qS5 z=mR()=#KvCxYvq)g4-031BF+|y%@WOiCFf(xf{{$FE7S&=^N{rn*8ZvteHVn@KSHR zuuwh6RC8amC?gL{kV~Rdid`}>*f%;wf1bICSJDz~mn1vCi7v)|Zhm&d?~ABL&r~Bs z_W>@hu1%h^=V~slR?*(XCEa1S-f~brzvS>dAMwLRoA2{=fX(#cxcqR0p;*pJ36-bD zzTE-PS+3YvhLW3NpQ3a?e^Jsxvb8%c`+YtOChYiSmo7?qKj@7jC!F< zvCY$NKMR|O<6l|TRd(vw<#0ye@Z(MQxN;IDnRh#Y>2qFD>MtJVdj>aW-`u{&v~+n# zTzEv>xY@k+>+@2x=J)GdSeU3!dU`9=Wp?B&KOR11-_M6;Ss>swGLcZ}xCO4SVsNNH3n8{C>4Q0z6)TTz|O?5r@Fq1#8#bLo1X42%p;D%N!@oZGjH&ckFEq=Ry&FfQ( z9-b|F1;@F)$XRXnOxr{WmN|&qo??0Qirb0z{zlvNz1Q&_+j64%GVS9v%xb28enK-#hkuyI@A< z{Q0KVctfi3%%+-sIaqt`tr?jcELV+M_G>kx%kIBO59rYvH9an7hqIe&>Ti1pfA`&} ziPIxzHb^~&#wuS$g6sAf?Px7_<5?c^*6;3KXg%tV&IhR@2LP)Di*pV>QsaRm1}>&w znetdA^zuFVy|xcoe_6-$ 
zcP4mw>*Ys5pLOfAYUiomy%Og&*VJn{di^7cv+0VLB`jN-)3>O6 z0iuuHU!?E*E-3YaBoa^s!k}KCgLXB>EnN9Z#O&xIVN}bYBKP6vUo`JOa#>58Uj{EV z;$h@R?uzf&PXjUP=c)&3wQ5;~68J zdm@`bCSeOc&yvIl$i&3t&*Cw2@A>m>ob96%LjvLxox&5HQk;BK{FlzdEOl~^4Z%2J zJc(iEF(K}r;Qvp+?HWn|E z0z+P&mwNMbMV}q5P+nu9jqM$@c!T^A30P(v@t9b=U|xFv-0J?>sp87zR0sHD!j%~k zSKf#1AmaJYww{D%IvO|yUcRT^W}#A7xN@_X=oeiJ8%9HVwhu~hf}zJY(R2|dz@aFu?a3NI>`uU z?&%lnHc>t+eaIoLMT=@|tEQW;s8iedh%*FV&? zZLR-IpL>b$B!m)DYu5r;5kZ0JxR$+&1XZkwIM#8DP-4!1VXa?!*T*&G@{Cu9+Rbi% z#&lj$7QD8^XVa(WXLp9|)zPRGgcav)5{|FUJ1Rq$A5g!>bMfvhU2+`dLEVl8A#*JC zat@B{<1zUw`*>Q!1*N9>itK*n+F@q50D(U!^&qB-1UIJ%&&;+u$CDF&$-S%@u&^BhJx1|NYuu{9-GGnf0N8w7 z`%D%wd?h9WU|HGvP0vqxKi9PFJ=vXjV17&UocsR*fRAcU@7oRuAH4pI`+}66Q=r2O zo6HrCZDN%g+iqX7#U{bND+Vx0{A;#k4Q{JRS#>4RW&5C0Zg~bW`|jZtA7YDQI3}rP zKTJ~BjIw&<=7Lw41bo*`QQdmZ5Pv72@O7HU^M=I6$lop=O&H(*aFV(iW%cY}AOGDh zFXL>q8w2V*7lM%MK6kKqZh_@Vr)MQFWVz-|TGOX2VNr*}Fy@oz4O`QDw1xNk@0di{ zJ}Q4;JnBuj(N^d7d)Mrq1bq4FudkT4UrO4UdGn}8I-YCB=lG}pO?-d3ItRkIJcD~> zv+s2*c(NZwJuE!PbkytmV> zc<$^E<~pIS5+k15kkfN#|F=B%lMB_V(EWoH46t73LDQ)iO*3)+DiVCTVT0T)tKA(j zZ}~TgS0B50uz0{I*!)4=+b3lUP3rbSux4q>r%JA_h`}M3kb_*#=1+CCPQk>D?d{J4@PqvPu%j4QlPsjoYD zHA^+9wE0%vUH{Uu5PpAnR@JUseG4wDVQ$3^xCX7J1iP!gRqQ#EiD|9m@p2$}j(RU( zv?a%Ds|PMl0&Qv-vK1$9@~)iWi{3xz!;q+YSLylJtvLy9X+S_ywYgi~2smG*tkQeA z%%bn+Y%W5KbS^g$QCBw|<`+P#t~%G@TjDtLVSs1n)*J6!H`N^pd32-pv1-WB1O8EO zNsngDlgNb2lYp;FMH5(f>2%s6Ov>_|#(8s1X~C#}fi;(W(Ve{ivCHhzsV7s4wGDfp z@7_0<7(BDJJ)xf4o)1!n!CRq<+M)TSPl~pDx{vH9SAlaY^n$T5l8GrK@tZmF6MgW2 zr*m*fg)|8OpIp$>yjNSIX|V$BdAZ(W#ci_dh5iNM-d%X^JNX75HA#f4c0tt)SY`cyuArLl;8I_K4V|A@7tgv*_UjQrG=;vCXt@U5=xY%(qrG!$l zv=GuFOR}{|O0+McNFv++JTpV;-RJ%Nd|%(+>#tYiJag_n_uO;OJ=?uAQUes~PrkpU z`!uhRtM))yr+?_NyOO%9vD*4?wKHs+y&G*(g_k|B*K7VNUYF*1<<$b)_QnM|Ph}fj z`b?sqC>~pPU}w@LUB2rQoe^u@FJFK1xPM$H^10p*vm0a8{nrF;bblG$-_)Rw$Q3R- zV!Na*{EiKtL;bG(?xju(T<{!cedbRjJxGydm+VQ-7;5hbHV{QfYfj9+X8&}>?yU-Z z<+m)ZEfh=F++=fL0-@>FR#V8{_xDOVrI3m_&S?vBQ%@zY+@8GhntnjuDN)`xH5TnY 
zxn>ugCDkLk?C<*6{*-C97kRd+CgcImY;vsUaPjdGGd{I_=5|7_9yXQP>zqvwT+n{E zxOj4b^KqrMLL3_!^7Kiz+PQe)0}9`2(uzIKt&_H4)s8$dRr{*aep1-Rz50kP>(vKM z`LvDq=bGQmFi=Srp1X&ILt!fM4BLVd#9@4n9+z(VCJxIFhDH{zoh3_8Oq$r?W3E>5 z&!K!!m(<-yjFmT89dpV-QukQ2g4^|8c>8=dAGuiyHxC^WBbO!@kUAXnP82A7(pH7a!@ z-Osz4)SE?5hF)3z_?1eP)7GVW7EiM0++89*uz1_|&Y~|X94|Ki2u>?(?AkIs_px^S zmW-u(+h4jLi+5Q#9%lDtL+f`{Qz0Ieoy%(vZPuKmZu7SsSYO(0L6y(fHUDyr`Y7vO zWmR{ICdUcW*Wa~H+t;OziF!V4abNfPS$buTKydlRoZ6GYk5_h|(b;^{;fRc|%ZX)S zTH?VEeTeE@VL2s7EV3_*m)W(NpKf`?|1tUe}7UHhpgvy1DcUzcjs{Oe);Jjbw zP&@g4u3E7715cYMuZ(_6pQ9apUd!U+H{z`tUTn}il4a7QfecO z7?;qo!ShN+M%fnD6`W??3Y9AwI+`b|-|AQ2eKUS9aAQ8cQcCDsN`K9Io-S7J{?+zB z7G8+1O&Z)N&{WzaBT9}_ia+6T>(qvOiy|du-rXJixMe7*RrtGR{Na+2?)RE0hd!6p z``IJAuapef)^Xbf>+?%!Z24?(NGj{5XT^~BlPeEz(!w{=Eh;A_YO-g9q@R+)qCrj zJ0`o!gNg@gesXl|lD_aT$JA|4pedzHr_)dF&AYN6Io9NWm8DVT9)4G(AF3vPJLjh^ zSMPEEHm_S(d;s5ViK{ZN7fZ;u>!NHtuU2g>P}&l?qav&DFvoih)!c^VTH+xB9IY>n zpWa*)w6)Z}`1`n@o7BLj(T3e$_}3e$97$UA;`Afld3Fw5L-WcvxKGtt%2%(b5SG{- zedy9wo4Z}<7U#6)%d6#;a}jWYWUn`dLwU#VR~el*Shl8LdQ3oea**=$-X&k|DekE& z4F?=kDtubL8LwvZJo@~l{gQifnl)z?9Xov zmFdT2J*UFPB}z&UJ*;JK;jZwxQ*4m=btpSfz`nkh{gJeU<y3YnKnpy>pFKT|RJmLvRM~Q!}UV;pAI*EWqEJ-Q%|Ud$%FHceQ2C_q*V5tvN3;k zxRFPkFlL@lR6FgzAH$V6R+8h$4iU^^qm(rA0MrHnz~8lXzJ0{JErR= zE;fbQC(QYpYc0iIo6+1l{9T}Ez09BhQPfvu@W_UiQv)Y5hqtgfe!6isukV4g>pS#y z{~WxXoaWArx{+!)DH7KiWg&rWNh=*?aSc&u_yDhd+U4_mSDaIQ;~}v9&2T8^cH+Cm4~{N#S{2z^ z!}TdtVYxPZA(3KYjo**urLlG-R`%hNK_fxg4*^k6{Za2&72|cNBza8kc$R?E*TY6d z$_j<+?dFaJJ=?F|_hgmzWJXru>65FCkL;R~TzRix7p>wLv122A?#^EC>XJk==gHRF z5AW$MY?&hu-=WpzwRil;&M#Nzws1dw`YqZ2gob^>rkc#6kkv0b{BA8@ICPb>VM9^5 z?@I@&e$HNgL9c4dkGxeKI>!o2+*4E~>QFGQBS+Vtw<)X|Rq7u4D6@L8Tg#ha!|!!5 zRoAE5?%v7e+ba7DpqtvjcZ=Z5K$u(BjK5#05bWJ3>Z6swW4(H`thr_{ldJ14) zQ~3K2{rh_1@1z2wac+&w@4ugZhA%Ux|EUIe4#)6}FE__ye^+=n^O^p&oYT)uI2`@Y zx4|F^gjW}f;}hwdZy#2pQp!9$M7w`=$~XhPt&JAxBeRM z42Eyrb^P-B@O7!rh9_g3*f`j@Z?SUO#KxUb%~H#aQc55Z#Fh^lS+{ zAav|>b7qod`vW|;L|6QY?(bB|yNlboaiplE2IqY}Z8s(M-ru_9qte60kC7DXBFETX 
z>#^E{(F*qZO-ok1&7N$3YVltG)UkV?0+NWlPJq$wOIaj z4Owf=&QmL0E)hAO_4fO+r*aM6J?o3NXxuGqd}m>|{$kJi4f&?YtsV~wADumRh3Ctt zwWmj-Qo!D>H&r#KzS?YA8L_m>&mn317Gd?K@{*NpJLHV^BZi$9+aCMlNfHNf9m_+K zi>YbdN{1Ybe0}r3I!*d`P7n{1o-d)SSg6N(<00wvR=>H8dJlZgZ@aX~%hTr5$6~S@ z)jg^BM!HE$?%k_JIGUY|pgao~pF4T}c)990%O}b0<0|;&X?1r33CU`vLug}-`MAoUGG=cb?Th;+eF#A z%>#=XCYD{`U7sA#-s<(R%gukg@?5Eh91EQ%RnHY}L<m&Ao^VLctf!&0MmacbPVm1;qa1iSi9A@=kjf}!hg1ZdlDIMs?5@vO zx}SKv%IEcKrzGd>siwW=*ZmfnU(GaM^s?^-ud$u#QC^`rk2>0S9qQC9zxLim>QJNo zqeFrvF>+5uZYG>t*I!gA{3s~NqrZ{0VAqv1`V#jQgD!{%$Jj4h*vfXbxh<7kyTzT$ z&@Ezkvdl+j>B0Ga1r^8r-4BIsv(S9N*(d#K(c^g51)9Ydub(%$JeQTcsx+wlOhV@1 z!9*i`TrJ0)L&7XI1)`?n`iWj#hudUxwJZG3o>{%Y0=<1Lw{~}3zH1{())E}9=Ow2` zfARK*6=~_&e$O7BLhyKQN~XM-^mcp6cT18_*9-kXO$$V?Z_%&b{Gf(J+l2b@X zWODdx{=sw?x%EG-rZRK7-&l4^I9_T#IhooTUMIIbf7!039xso@KXcy|rgrvR#RV-j zc^u^lYsCK7`O8P%9etT(>b`KlhRA`qOAB&}xV$4(6WK)8bA8j>XE$jx_U?5L(kmEH zG0dJpSyEOyuugP&mXU3j=7zTF-TN9zHUgxxTP;$P>_yM)8P9O5d{s%-3K23B!s88a$o3YIKWu{)~Q~7Zp%JmZOTrSrT zH!1HBG>rT5TJV8fXrU`I}F2>Pf!OhskYn>CGdiy_N ziF`}me5BGTGSB&}@k?)u*9{q^-%Eza+F3=eyCWOSPvkljBN~)pd+GDFj%fN6SLeAe z#+gb@rG78Z#oT5O80@-`(xo2TSABGib@f|A z+D?9@67xObjvEYRe_a15s6D#e{v5IR0>5g#s48_@IeU?+)%&WS9nz6{+v(9X zl5j$JgAl%v)Bn@2?haKYvyqS3B}T16dx zmG6H_zA&(SA9z8$J|3M^_V-R+eKusQ(xo%nd+g9ibqp7x5>o=j#cen{HZ)Y9zl6qgJla3o{3c2%y z%d}~`>;9dWJzh7;=ssk7{q+6nMR_rSuQhWIZs_UrnJh5BYb$+mdrSdKOPyjNvO4Ki z;PnVIsgk4H#PaTq3EiJuzWVz<$7Y#q`=TkRXLVa0uiVCG`7%43=H_d7X?OL_K`r*L zmwR3%YF&M%75vPr`HQB; zcA@^7j*}r=u7_{%!*?Lon@6l*ZLPT3{gyPv-S%}VeSKNtB_U z#Xr>P9l9-^bu@m%pqhHkj;kl@8geyodCOY`B)NIW=y!)4pEGxbZYiHSdO-)>8s%Vb z#BI=P5HJ#X3dwk0BdW1pJ#JpDW9PPtc?yLKmS+EWuW8lS&5_l*y3uI-lY(kTzRl%) zyUW+U@3ME>Vtk6N=rM6KZ-YtlwT1&#eDANUXy3IamFnldk@Lhu<;ySAWjSgzBWm0X zQ^b6f%WRap*u$@0dGKs_jSTypg&Wnz%S9%()Jddf=Q;zRLPd^Krw89V_k!0Nn+Ugqybq2BKWcMNY@f=xI zGnIg*)lB$bp4^wv?eKH$k2LD{CyAf)KIWPvHoJ{B9?jLXYAUe&`7!lM$%l7K2rKrt zeXetR8euYC+BU>B(boK+Kjy*C_a8NHT#~2~QwqvCCdT(-EZrucN@C@wMw-QgQI^hx zvY&&N2yLyX)#l4zP6!SU;XA8txW4f2eer3juZdjfa`N)I1>9Lvkq5m$_n3rB4g1z7 
zZtUc}SL1o$f%-G&oE6G*9`Zj)dR{lQz^uCJJ!fvdXjN68r0?y1HRI7xn_C+N?t5hK zxkV}y-B6wUvi=jFXRE!|H}0WH`>%JyC#)lWl;xZIBz~=Z{)R7i@$F9?hNjxYhXGXl z#GBxrbS~DP5@`qg;|*h$DL@_&aEtx@(T^(?y)V4#dXm3lyf5eYeaxqDg4Hf_e)uZU zI8Ez?5$gR6zV#^~!fRy=ar61!kBgnn3_dFM8+%017JQiaFnV!8zs-#;RZk2gB3jY{ zHk;;pw2l50HTUc)@!n%mBY$>n$4R|9E7Pc3Vyyfk1EKLDo|b!MCZ(M7UytosA{Hh+ zZtSm_>SK7+>SIEvuvx)&Vx&sJ*`U*nquc{_bA`U9&+Bj6(EshxL*j_f=7<{MbT-Mm zKYP%R*>kO{C(m!Yv;6L&R~#93hiVVi7QZ*-$(gqxI*s$fm6EB7974>^;tJ0FGW=7n z2?wsLUTDcPj(wq!u)Oxf+d@)Ir%E);n`dIrkEy{CtVohpCE zU2L4Qp?P(u=?7_@2D5#wWl!aMcHX(9w&0M)jqv8M0o{{Li!Lc%FM5>mP4<3l#az{k zc~4B;-=+k`Pknlg&So_MhT)U-sU)6eM@44MAc}dNcv0U-c+@RiewJ?^eTC;OW@P|?HFc!C5iOSFU?@ufrX*PS> zUUccrhYzLIxt)vlJ&x$vRN1`$yEWUuC#liOLMyRXb6>yPb1AZX$=oHQ%WqKb1ig5} zcA)T1okXO#lY5TMA-1A5+4?L?OdrsiU3`bbotyUfr5`XE$m?t07`V9FPqj(T^6jfR zeH#v&ti4w+J~XOP+vlzRS$IJ9eACX4qHoMg{8`3M30qVJDohmd7+h#(7w|bt)Hnb5 zB+xdzLfRua9=^`=f|4n!S;lhV0(IHLiSyt0kIG#=8viVl9*}o?EQQ?uGc3Sax$@}<2D;uxGc05|radY7*-FrtJM#pJB!Pf70 zR>mK&`I=0oJtls={fhU6OBc6Pb;~h3*AV3mk4@kBetvjRU}R~Z$aR5B_FT~Q4T`Sk#uQI>TwMFIQzd_BQNYI@nR`Q9UdbH%5H4$Cs`yOqQC0(W z`_jGjMoR0xJ(k_xy6e-EV|#4)QdbA}pAKE*deC}b*4H9IfhCA=_t{0|o*PtBc(#XT zZ5Tu+3~ptmRePLzBH(KH?U+Sn_1NZ(y%uWqg4{=VsA6RuyrD%$`rq4bj2QQC4AL|# ze~@aUbtC)J)EJIdZIQY`!rq_N@M4bZ+y=$_+6njsW&dYk7GDyI!{00uNiE^E?2V?J zrLbf1ckO+!mH8E#C*9tJEMM|r^r-lHpRO`JJCBR|m&YxwjOxzW`uRwV+SaY}PVGzC zo}4kcer~>|wy&q*$#)0VRPnz!=rE*3=}ha`E;+b8E7K`y>(tr(YbcrbUbb&a(FWjfYXUsRn)9JOwU6u8i3KUvedYG6XJgj-(VIzsel zpM37OCQD7}{^*qZlEFcbmLfqU?Bmp{EqQZyydBKxZ95b9`Jm%2+a>)QgMv4)N4)7U zj{5w((5Wtz@A83{A_5YZJ-51TiG0NS=w#K4&aOy@buXfC7I#n*oBO85ui*EgwthiP zrnZiW`=73`BV<c0c4y*h>AePlVf_TiyhTm8IAk^oWhb$q_$y8U*b5T7X(?edG4^ zW8<-b&!cZd(^SXY-#4i*+3};qOzc2N#h%tZ>Cd05Jq@1<{e&ARP57x`qk^Ao%`jl? 
zvCOQqj9sJNV!Pb%$JX^J3*Qda9K823Y3-}~XH;J7-|FA|{1CeWEllgcswSdIxA@z1 zxr{2K&Wrrl%@2MZuWk#l3UDW?<@)aRXrx715tgw@!K<6 zZE(bxYf@3m&)=rJ>)KnpOT9x*a{Jv@4V~k5g9T{N{#Q;s!jffO0sMte9yt~kxXnLv zW35+NO5!&;j{Fa)kE3>%HTALDR28k5%*!70+r1PgdNE#UFl_&~h|@=llbuQ3^Ut{( zRoOmYv_kiOSe4b66^m7Ch3gl$e=B}sbyMa0gA+R2w?7-JzYzRP`e1d+*_Cd!W~q3SM?dv&4=C`E1k6o@1(Lr>VND zs(6Qp<{k6T77_OQB{t9LZ?SxL-hAn%%AG7Dm)WV@IYErVU)W}+I2;uplby;L;}H~l z?FWmC;eM8tEt|A&eaeX>l`lQVe!LQ^v`zm6bbj!kdg12EVfae=TR{E|g{ZIQ)40F> zPER8N+i~Ko9*#TC4ZQWqFntq9y2k4^uck7yaIr)B(5gW-aa^_|vVYg)jxfBQtc3uN z>RHaLJm05k57nJZZ|)yi!)_SDGRhmyPT75ScD)jhi%OmPb5J%y0FYBwqWge`gV05YX4~Z=u3C~_O}b`9@xZaH&!oG9!v8r_2ss9TgdIb<$~g- zm3rxlc~*SUC3EeZ(n{wq@D5n&9l!tb%BRWmOrN|gZ|RSmB-dwsTl}JxcwO#U0l%Ey z_I3A`Yla`Tc=p2X#V3Vbu3OGN51Y4sflJsPL3f=Ma~(ap?`LtZAueJWUvH(fbk~s} z!J{90kBJ#QOKv-CcY4L6D~U$+d%XM4SC2=2-Tz~kW5(rSuZzC#%sYx3K<^Mi7M1Zs8!4tk$?D+@5k!gPj@|B_8q6Zx!Ta0mXXGu@y+Tnw=vC<6?fvpTE+eA zx;hGy+?5r&*9%|%VAQ*9e`Z@-$(qeqUT+Z`3ijr@`HJmxy*H=7`3O1ua67f=_~EpT zGnW$Ovq-SWCxzA=W$XQfK+>?tidrvLJrv0NX` zQXjW zoL7(bMeElVo<4t7?|RzYq;I!!S=QKyJ~6LYOr+R-GaBFZ!9&xlw@s?{QXO{1s%~-{+j6RL>X(Y{YQJFa(J(ciHXhPW`P~Eht9mlacUkRSLTEXi zweP@<4Kkk|_kDk?WudjULXo-%Cwnpbe5p#2>Cbh$ie5Uuy>C0Z{f5Ym+YeWBXq?co z(bkFOJ2CmAae^!Khd|L-6<7TGj$oR>+m3#MXNvD9C$)H&!`)L`lpQ_BKB8iS{PITB z!LZTjVnfZ7Pk34`rX46s5e#3EuUp4zzUT{O7=mnf>gMP;w(GQ%ZXBM_ouAR+VaHQH&+TYT+eq$7ubm2;ZhW|fhz>^6R;mulHDrt6 zl&!tu_Ko&g&cAOSP7-iR`x4WyA_%>>;xCvR1Fy}Q9IY4Aedj?cCJmYWT%rNRUT*W6PRE0 z{cc2Phvy=_vmZ9;J}h|d5Lu<(Ej>X?dr}^!0vq??M8SHm%G~I2TJ)sxYBjR*xdnFv zbtLaeH}6WI@Q?eR+VFKTE`H#izT15EAy-#*&orMZdf%*@ zcjug7-j}W)ZC^)9YiiS-@8`;VkfB+AiG06Mz{Q(7*3jLU6&&}3`&@wd~ zf=@2XEDezJX_FwWDLsSO<<7TudpW15A~kK7o6y~Uo|Lckz40F{wFiZ~g=>#2JDGla z|6Y&tJI?wn^d~$zJ24WjY1QDS^LYJj$JT`^K3-irESKOKdv0^=RijDb&-<)Dag_R^ zaNAp!d9m)Le(hTlFKyj9XvM*bD;ExGsy=?}M|J8ggZY=XpStY!)%*U(i+7FEG$(DZ zTzx9hfC%EcMgloYtXfSjW?xKPc7Ls?9Exx-eegAeLF{@&jocmdVJnr0paN*V__phAuxl8J;Ua-;alaHk)q+c(} z-H7}cJRZJg=goza`x_8RoJ8*t72j_Tuag=*=EvzqooJW!Y$vL%yzPwc&#RXyF@4YR 
z!5aTaO|x)8{r!07^={_l&JRRFrDZPlK3-5qdKOz)o4rAsh4gm#-K$KcjaO^?lp;0- zw%)1;x*NFoNWQ@XYqkyR)NWVMZrR}KJX{9vPn6dm7`iy8`q1-db2dub?Zc0GkK8ry zoK$(aOvp!Dn$&U5R!`v+u5m?Dj(+DJspYG4&Yl)CEbFv1*2}#0#A(Ws!yA|WIJLD! zy1?tBs;6?gXuX;kI(BaPopkFri`>k`sVn4k<2ZXhC$={DQNJv|oe@1OE%tHML*7;5 z^~O$eug0#=<&kDfQKUY>HJCMDIrhF^<+YO0l3G)>k9)M|z&97H-21q3-e&8xo=c|j zoF9wlZye8W(K#9Zs4|o9a+3owJ?GR_WlxI9^2-nAj~6^=e~tTak2BM+=*v&dp0W9Q z4~%mzO2ciLy@Ee>uAR3u^e4BIpso#DcI6#?k=*Zb?I#4w!`1_wvFT4 zd7U5G?#t>*3f$b6=WJOJ@vh{aJ>wb}4P$ZS9MWT{`T#>0CFD8fhvQ zY#Vg=zU{mGiL6DVEUll@a!m#@6ijkibWe+O(=2&#J7bOyQSNgR?>L1tsTsyJ8%>;f zMm%#y?nH*#((cT1XD%swj_g69fh!X!(k5k{0ejd=^`7(}UVATcVZy}S2!c+8BH@_r z>GC=Eg~}RMMxI+{S6fB2-y$Jcadf;n=vWS|UxDuxBI}~FqY~l4EsLAoq{D`X-zL}=T%y#t-thD} z@3z%UQT$G5WWJQ!x9a`h=K5%hkj6*CL^TS^Cy-{(*J6)#Deh63-&qq}m3l3p4%6s}`>$oJa1{vF?1d*5`hzAPlOWo4gy9xUVTtw@c) z>6g4NKKyg?;7{&@wYzWJOErG%{3?6o>$}(YPwT1gSpBjLzLgQzJEbo+FfMq@Ac z-TFE6V!sONuH&=Vb>(jNi%Vi_uWw|_+49AlzxjGqmtX0bwhZHi7cP$h;*3G0oP6yi$9|2dCg9dunRyAnrVNQy?pN-q5}) zfm5B0gL{vh9$pa~7x8K5g>9}i&Dt5dW{)GXjaTh6*cmSLdA#Dn3#0w^N5rf4oaZ}s_?}5-#m;fDo22rnV+B`j za%#k)6Ih((2MSUqJCBUiTRC1l`-P_ z3oZnN#pIm~-gv(L*tyqR7Vi#h(;oP({htqUZckv^8?1I3T-vMMSLgp$f3xhBlwQ4w zeiI=jQSyxi&v$G(av+FFXU@JAWyPsgiom&*Np#w(3CPuBd$jewmYMETgX5 z*+rt<{Izh=Y@v7YEmXT3RV30iR|$lKSU4dvBxeK<|FUIRrD$aFQ1P)Vgn^x_p0LCj z`PF_2eBrh6s+|Na$pw)mIn$EFL)*4SyE^5Yqc>h{)>r9%t6)lrd@Vt9(I<&{;c#w5 z1V@^1cR(yijwA}6;)F<&ogytm!U8s--eI8xuL#1jty@vTs(`>i-;n7UBLor^t{Egb zQ5;E8cv^}R!bWxSwsdl~MS5gtR6H;VL(`_&6h~PTsva3pHz;=1O}9lQ}0 zKe@UsKmSx(L*{R}is0kp5fOMULMI7VeJ)Mi}q6MrV zi~p7jtU1O7^!EUQFbyTa5bIgf_y^xh32$O($Xxti*k;lEooyEDKiGE17vEuM%IsMHYQC>JUA38`ufHk&@z?x(-E!hQ;azUh-Y6yL##HV}H zlEpzSK)bLJT2dT9T8;?83+M8KYD{)aa!Pj4O9B+zcbWs8?#G!1PZ$k=CARN$)G&f$ zJ|MErz>^>i!Tr)GFC4opa6`u!0fspYlgi)QA-~!&+4-}b7moKhQ04x$!>DA&+_23? 
z6(>v3Qe0qll3+2Ir*oy8Vcx?+s;$)Dem;TX9fOX8!0 z@=#jO)E(OKDQWbF-IZR+7r`_EOP3p`E?v^YrhOWtBm$JCq{QKCAZI$$fFq+7>!LIG z6DE&mE6f={PF->43MIolAVAhhNuuc>>WqFk`OS=Fl~Kmv=@%Y-_;!!4gSszE}80tKr4m>q6yR(u#99p z)rlN~4b3!?w6tH^4sZ-GlM?NOu+UQQG^Zqj%fHTKmMF2cAp#RE@rH_72ck|1@tl}Dh~k8a zItgKe=Dc#y4nc50Nt=o}5?&{44LHqc*aIdAIx7}3tn{78Nd*2l;Xl_)`j9jd#fgL{ z;9(eX7@h)K!U=N$oD?ELpy0`*MN~sP&B5J)25t!%mjN$9b|eczgKUgYHjEbtga~*% z!={X2R*?p+q-kp%5CLQidf=Ho9$-Bl{Cf{<9wYE)W~S%C#SV0t3+ez{fU`tLljrP5Mm zQ=nZW85+VqqB+0=7i>2wNQSF}2ut;cEhE#M7<*-HSV%ySU#JPe-**=w&^O30%%2bt z6dJy1Q-F7XZ&29pdj$lWoP=q!_iCFx+re)lI48ssVFO~AtA**3WoZ;~5(EZ(L?n_T zo&+)=Pz1@W@Qwy?b}yxwUJgk#%q%ESFw4tL2A@dAOfH##C`98Cc3CnWA;bt`I1i!& z2^!MhrQm@!nM{9wACW?KfxBMN1oQ{|J@((#o$UCVDsi6%S#qBSiGV5rQ~}cBz7z!( zi0g-|JNQs;Jxnt|yQo4i6W9mbgTPD_Hu6)VLo^ld2nIq5VU3~5$YK^ZnkFL=?E=_+ z&<__q&9qdEIDEkZjXg-=EBv?M$&3b^E90WPB{81Fn*#Mo%OBb{y> zO$Hee2hwC0Yyw~n@HDav9-Q>=0vDPQ9Ylg8y4bj$VS4fm=)rD6aPCyb@R>w18ShBN zoD;THdB~V_5L^--T17hehHMQ84Gjnmg5^iOy@SKSSb}++{(<@6sE*L<1sM?onG@nL z68l}XmSn$-o5%*UsuU~|Gm zj8TS&6d^?6!7}iglMxGyek_wdb9+O8f=4Xy422_6Sjb`&3R{$@1UAuGvz@I}x-*gi zB?8K+(2h5iAr+Xn6rKh?e&s^YPu>d%&KSXcn`Ff5Y>Na@)}yf621t)A$$MKiMMnt5 zKPQsBFSJod0AvC1;a&q%ArzMlV7p=L5P)^@ zoEWGBV8b5QE&!JU*eKQz3gD*cz8L$WD0d>D3gy25OWbDwEM22v4B)%zR!R45oMDo? 
z0d-npq8mV4CJ3$gF;M-oFMvt_=4j-JDCd9?B>-Oo=rY0vQsHC;5aECP2|(rP z4wggu`Y>1~pfC_R%mdIWN^=f?D=;uE&>uiQ0PSxPy8(=y?u)T6in6x>YLV-KY5>0i zXet_>53K~JTSh8f8tf zvwV-46vJom@hDpq9iIqzekoUX9D^&&3c=JJP&R0hM{iMywa{f!C}ZLgyfcDxW<^=w zGZAgm9YIcP-Sm#1u#Q`p_^;D=jPMkOU+f2XA@F*boY|S`1af|lvMbR0W2_5#GV$8e z{W1KKX*}c*AcC229tA>3F=9=am?27>4rEirA>Ji0iWI-Z$bNjf zJMhYWp5DC?x(jnN$yd$B-^1``5bvPEIZXWX+4$aRUQrQICVqSto`ZLWzQ_&+oEMUnHKRVhZmhJCSs8eCO&jFp7M*W2_`;e8qblBa-5#Q zi^AOgAvH)KLH#hXk+;KE^eb8LnZc8va9 zLeqHG_pk*ayFqQ+8PyJuA+{a446sg1=EIXT%pIE+*YfF37~X|GC?*zJ$dKrGrYCkl zxPO@tlud_;{WX6bl#4$9#n?8Honqo=<_~+Ci$4Fw*glZ!W8&}39{nLUI(^aPC7Jj) zGk6|Uei`%4(U~{nQEo2!lr8~Ld1nTmTWT7Q$$_ap+|#uxOb*;;^iG{vCn~ek>X}Ye zDE9^oPv=zrC=(wvjpwGI-1{)R1-4`5VJ2Wu8Ftsgmf1Ly*%%Dm%=Y6ho~8iH%Y9>d z1c+v#7z&9hBV&%5sTt5xplFap!-@t>qeKtqG1H3h&#VPm9Locp49j{`JjG>&5L!&nI?xrZRA7I<>jZ&+xLG-NSBRw&10?` z4>QK2yAz)87)|=RD1K$oBOp;;6%*M2%gU5e3I$rbXexlBZ7<63) zl{SWz!YUa5u;{!q>kqO~Vw!PY>db_yP~K$vgxi1!(>Wwj#2Ma&b9!~DgcMq;I0cq9 z$%$SIVy-N2(+r{ah$P+JXZaslK|!F4v8)eOS1_GjzDuKJ40m! 
zQeCX}MxqL#d>4rn8Y`k84$T-zBh3MXg5XfT9tt`)71W1sf(T4_>)pR;jJ(3CnQRzS zj4~x8McBUKn6AdrCkcU#IM5)Y{K{l>?hPQpuR*1v^I)sf6F8LL1f9=@y}c2m{HwIU zhDZ5bX{Z&1Dl;RX{QhKgzA|*hxQcRLV6%4MfpGd_vYo?Vk!|8Dsn6&N|Yg#4CzNa7{S6G=XQ?q`g5adXV43y6anCSE=c&H{hAn?z z{*HSxmVU<|7I*_d?IDs;0WdJp*t2`m3`!JEZ%~0!*lRdcAP<4bAV@ru>**C16>~=g zs)!_1076N4K}fOCDvr?#>Qe~rAbFNQ_*FwDC5mGuTv`(NgLp9eRB;#%6$E=gi36fy zsNjmXD~o<6}oF}__l3<5lHdxtVKCm35uaYbN||V4!!qQ$qx4{F3D$->)mbS-8LY} zzf%C+a&SmHLjMK(m@>WJ$l`jUsL(1R{MTbx3ZZpTQBkOhHQiLuw>&Bo03jbtiz0{$ z<^PojemW0<+!`%S8VTxA{2EP%ijd6TSwI9RVEceX1&mgKTEXhmrp+SWnZ_(4;z$s= zp+e7MP%LJqr8-0dLuLmVzm}<1-*1|NOnl3S0H~sm_UV< zXnLp$q=Nr)Dln^-E`|yp`RnZdm!}eRZ9;`lQYffO3noTjSNJ@`;EM46b%cd<`9-b{ z4kMu9VgA7(0lR&Duyc=4-!MY(Hs265EI5P^La*g+*1c9?tY1>0Usfi8oRdL(Ftv-q z3^!)b!B(qa@)1%0BLdc=$DF9h_P;W~II|5`EWWx(_nM&oHJYJeg`jqspmyn^%Vmh; zA1f&d=M#F!RMt(oH-*mqE)^!6oeg4u-n25w?hDyK`-vjnHQ;7=tL z86=`2gTE=MDv2>;1dpm_VyldK=Q*>T6N!ok8~E$kMVtho4OYckz?qjHOvteGiB(b81Z*%Y`+2kMr$?t_Cdu0DX7|ztGgqk zn2U<-VfKe&$3LwjiUkmKT~wcg4K@UANDwH{qbOJ=tm5(?evzPf{u_E)&6sZ^WG6;= zgq=Z72O>!n8iB%yM53Iq0E-@pBr`l9JrZFqH^K@L3W8u#VZ$I22C-vMHU@EE(6paI zZenk7LV?~ni5ZB{Q1P(;J`xeno{2=n`xudk`0am0BI1xg{dfa5X7 zsS9qJVTw2^F&|qEZ{>%D)7;cP^P>EKeLu@PsPJ2*^YtZQ$!~8oS z1PBvpsKh-m=1Wyw-3=&$?x-a0_pzB?l7+6}P|iRG|4?ikB2Gpn1){(;fYT;p>Vw6n zn2r$;7R9HU$LM3lS$Nhz4GR)-}v|F|cd0j&1#vv~3xas;7ew&CY^;9*L|zAwG0ogNz-3;P0< z-jBuc=yEuP!%Q1o7&*lVtuS9NofSj>LDkn`P6?IHrxzGN`qE`&y*T)xmvkXUo-Jxn zRFZRCG7Xh}N{eDdjLZuKSgV=x2$i5m4_U}?nE-1o!#TS(B!eJp5-OvN%E-r}D@=eL zjXkaq#>yNc#zSI_$}9rDu?Q*!z7q*HsLYXHd{X4k9cS?TNAzBQ^chUy@f|~~PHE*$xpS#X z8lK|ieXlEz9W>I{0j_HN`hhdj|EbD?%B~*;9X&jwIa{ z?8L$~ETZxACu|J}3ZqACTYQ5;5sTlY1R{_vgrFJ|!K`V11Ko$O&O4#DNicFsg+!7kqwI3!&&RQ!a#}IixG7Jmj`m zl|z2<@2ki*E6BkDX3cXTMFe4;RRKcfJ(zvrxbil^mPD-jX5g?P9pqA&E{d6<;b{O zL$5ec=&=?wFd>q`v%&nZy#-Y#JdKq`Miu4}!C_-}Y;X*9QRoCh1=`WkQJAAt@PsGN z-(88qHq4dS(2E*yednL$vDu}K-|8Bu_G>IlgE)tNDF#&l3!qR%rw#!CaD|84ESQf_ zc<{?0D_Fp+8Ea0R);xN)sxSt<$C!x#fioI@P#wq>>DwN}Am)rkC{R>RLJ7*ut&9aH 
zqRyDW2)h{`hOh*|N6~E+LoW1J%%9@$&Q^jb!Jmi{yf7tXm@_QqB%y>5GP)w3;XMg( zh~V&>YB1vzW+X0nDnDOoZo%CAe0r9)6bmMOn2|UFu{fccit5NI2lPek$QAK@h(wJ>ac7Uq1)vQfM7>4PfiY`>< z<`qK~K?I7vvvwsj;FNZTGc}MeeNjkQ+=wb3q%k5H5U}FY-!}|XDzbEw8MzAVBwSY_ zYz+?aC4~9IJwNOYW+=VtfhB{#Wh|)TJBB0Gg@cV>#S6x^nK`h8CqLB6L{T|Gc~Z&sPxF(>dJ@7$rvN1^%Ae{Bw& zSHgWeCg*iPk!k4R*Z|w1bIt$tt{tlU6NdOnAL7?VJ9Mrf5&r+{z>HgV=v+g_CI-)< zd>51+5}COPf6>|h^$k0E`67j3=^z1erU-%u2HFK@z#mHvx=V4{eIP+@*l3+*GUNJW0&0F$N{1vxM?4={wG$MooD#R z{d7Q^!9`5Cx`&7J2ND#89k6_Co;%ng8x?S!z!WyVX+(V}Bco)-05CEeX3^#{EgBA; zcbI}=N5v3;(scwogP1Yg;;iVr+jL6v1YW%UM{fFCEot5;5uNuOf->G@U;v&l2K=5r zKSjT!kIXvzU^-wu7!~%zeEGi?OcLw zrFV$_*AAH3R#9YnKQQ(VBveJ~SL!!^FDQPh=n*)%pgZPiNnZa~vo)($3dJEJs&a@C zi(pHQh@wh^uwE)hsc`I$f%LiHQI)fpW3ySxrB}{iEt4E5V(0?6SBslbcsRo#p{h_F zffoux<>C1g(LoPY?T(?}I-1!{qS)R-7x2(Gg!&u1jsMsX;M5mL!bMgpO5YRAy-I@3 zN>4SRhYu#Cuh zGAg&&r_dNbQgEy*0S8VJRF{Kr!plqN5Ji0sL?5dD#nUSNYL-! z!ivOTr&h2ZnUaVSr%l_4tBiXgG%9wvAA!^x>>gt^>7O7$)&4m4VTLjP;|ECSPT>EC z&yHZW0FENz5DE+tMD}oV9@0rhse}wt!_3mc?;t-cYy{6kUwB5H6zd0Dc_yDD!+AVN zC|iciMi&$&-syTNwlcINx~mcc>R_d4=)!MUy__jha9j%y5Sd;Nge?U{ssCn3dLbUh zr2HB)g~~WNrjN-Ol3BuqG5V*+fZ@P)Vb;YvIIa1gP67FU_Nf|F-3O{DkO4pyMGRGM zB*HyJRDB1Hkv5>}BpTGHz}~}!Q1x1DQ~o+fXKITmJ?IS;c29ry{CLK?E-txrcR4p3 z*{AMVh}Z8m0#ri^EQ$IftnS62sR6fm{%0}-5NtnUSgGIhrTe1+U(f`-snSxR=q?u}8H7TW; z7}89Pwabhz>0U$7-vtFjGF6WXx~Itwht!(FKz;TFPb{q^Pd9@o1iUH^Ua&|gI(Gd4 z7NVKKIU`vV)kHCVz(PO}w8RVxf*=Mmqd36_53$Sb=BOqdfFlUhv9O9e989kJEB{Qh zASRpce*9^MN1RmpdUq)|XfB@3*E zgo}rlIaH?@8X?s&dd7Z9iKmJ~^+zJgB?-(DIBV>KlSz(LxaE)pAMXnFZM9VT8j`cf zgYbCuRHo6B#oU3aFXJ^Ez8Ak?%^uL0DILNG2tlQ1xn@>WdnYy>?_zJxXJ801OA7kI zym_Ms3Iy-Wy4p0Yak1hMJibfX<>iT7Zf|4V|DFLx;WnAKcjTj|c*M19taAClt$C7|Q`QD@e?o z5bVQPn4fWioaO&(n10VPmTeS+Qpjc5VzAd7K{&UW`n=DB- z?bjyR+}(R;&tuNaoS8W@1E3mhG)vv?53tmTrB|}lc5vP>gP+U8ADFrZBaw`VO@-(q z^)Za)v3_jQ)T2FYQVLFS&BAxTCe2p5m|Zq?SeTW)gE_g*H`S|!1$@e(8I*g)T(Ey# zm&Y>+nX80apfG|>81DPHSSc_>&NLL$kA*iqWb1C}=a@k*Z z-jFdh3)Q!K2!bS>N-@`I4W|JJ68bOwX&JfF%1naFR?+MY+%d^kKXmM8yN1qv{-(q4 
zYHQwG?W|kB;&8Kf`PrWjd!)oa0!H@KtJe$P51X0=mM0w2Sp>$;re52&i63Z`eG%pA zo!&NX9B?U5eo7Hy7SLnVV2jWqn!Q<1=6n}wZ4TL+@7Pdt^!*e6SXXoK+M5g4jSYU$ zX&bfgpTC(nw#GSZ{|kS7;s?#-sVDkiQ}>IrWz*YbFM`0NvImV00R~ggawP8>Yhf-71KfTlpZChRqZ?SZf`JX(YHRy-ojJ7`Bl z7cxYfViaro{t=%1xG5QLI~;`~Cp-}I0A2fLVaFfDBNV+olq%iMd7kZ||>Uz26ov=OlE z`U&-k^}1Qvzh7sLTi|-@`R$WRKe~Q+@x)boGu;P2e&JtrC%$*2^uf$zhs^%v#wblN z#>UIofr*mcPWZP4?b&SF2fa#zD}?*Apswzg`jyDmtE+FQzn-nCch<6o2ASYIxpoD2 zf7Uy;U$UjDQ++Kqwg>nQSv}bicvsHS6T!zbt|VO3$Vj`X{u1XhQnyJ*h;v-sCcThG zPr40)TDUx-EaZA0tc39|1MQjfVrqyl8Zq|+-erux1(j(?nv&n44FZM(>BM&WE z&L)&~5IPpr!t`gL7Lu(pE*FGMIlnqZP%U8cAMpbtKuv7+7<~9UCtka&-91 zw~z09XWx{!-e~PuW!s+E@a0E^p5KmKS9S6JMA%yhb07`i0_oQ?Tf92Zn6V|O3x%rH z+saSLzOWL&Bp=f~w zsS1T=Y9YmhvXP}A^9%J5qBPTJhs}+@7-50b@?9+RB~VH*pejn6HzHJ%OwK*Mt0n=2 zA7(cD>5O(1%ar*)EGw+1A3o@pFTUKMzRw9IJtp%u7E0JhKz8KXbNuNths(K zPVqKEnJ&ktTEC8>E1ZNhTCr#`39bQv^(f2C3xz(bNhs+Mc@_K+A!{qtCp1dix;X%o zhpL9NC2)WVk7~176F|-{%X)##a|Bo@EDPxyJgmtgRm~3Lgq*LGNk+Da9<(LlD_@8^ znj6E^SQg?bWUesFPU)U1%(4}x2^~iJE+|4FUPzt_tMhic(5u)K{?-by=n`Qq(W7)St!BUX{ z(5kpe&x&dJK+Mbt6y4$&k=nsgltcuqHvYFS(nJq(Y~n%K-G*4Ah~j2Bqh&u3JNVp+ zfNg&JN^u+hpa4F@;OoG};ttYGbLNPqSWb15zFuM3NcjTywzP>q8V-6^)nJ%+e7k@) z&?EF;8ZMfK#fZ$5*QlL~1xz*gJXh0$+=lA04UT?tN0vZ88Ge<`hbvc~^LZ`7} zCD86CqTK+EN(8(HkUH5LndDw9zAVWk=X7lPc|o@%YYPQ%N{?o_6@I)d?VRW7kE6;* zYQACz?dtl4nsGrg;MM_d9ft#F_yVn7TC1x&I652c1aZA3S69 z4*w9JPw()L$L#xL;6B~MX$Q7!9>i}~kI8v z2c5%9`TGTg;XapzfEi1apVAn%!IjE9 z8$w1oCljnu2>THBLK5qqn;?W+Hn)TJ=||2xpawz_5+a_b#dv~l=6NNGYAhH&9~Pg1 z5f46N6e=z;lQ|rv(FyNL5^TW)0qKn;*aEr!T?89Tt%OecQ`K-I zjkZAMe}>$k59}b((ePz1z!56ZF)P^GWI%it*g>)FULvK&C@mDgROYP13Z81zkr+Zy z?OWTncw5`OEigf&NRo20;63pLta`!8o~zDx39DoYD@=;5SVG%I_u3L&1xrNs#`%=J zal=L1I){u|xbR}f;d>Tsolkia9ufvHyaW#2>e!UvG}OaMVugsE=e()}XhkGt5gQcB zta$9AcrZ{i9n)<3MO~cVeHcB8f6y5*Uaau`STXI2is^2(Gx`EfiULCru%fYkyjbBk zpuYh$5vt9Cb1ICVG*+kPj4jQ=ie~zIsmwy&Vo{id6;;CP6SW$Nn~HSwq4Q0E6}4-u z$PCWokrzQ$bUC_oLKQMDggQNn6}^m(;lU|-wC^e`=!+2Evj__-I^9bR7FKi!bE6V0 
ztayz7KdQh2#JJTnDTY|gVwwL)yR+r34nST;SG$;uW|GW3R=aquc*0kEnr6lZ)z)&%Xw-CPZIuej{RBa9vs0l*( z5k+cH1V>dIih;8)&U~618mSW_pb+{B;srMyhgQ&T>Zb7d3ffKGboyUGyQ!PP=XnG$ zZKZBj(k9qci<&%LrOkk9csVLq$&<9x0v!k@g9;9}@?c2g;=4FT&@C(J^y9@D0&La` zCzgC`tqq-^w|L!VlpTpQr%U}O5X*%cif};z4k_tSjCH6Nq>*D#K6M<3fHYxTcyAt@ zscFzXaK;YXg_8W#fmn+WeAk;Hi>g~Y`4&3@hZ zSIq{{4bGCCgf1N0rzdDCcC!(X zN7(dlg1Qo7CCA8TX+Tr;vCBg%6rk zWFMjlT(g9vC7TSYQEWCLd{c1ntD_8>6vgq@KXQQ(fH zn~mIb!Y1Uzz8fN}mnnX2-g-!A)M0}BW~ImZ;!bz(D=Rw*B%KuUd=TV0NlBH~ zj@p?qf^uuajePy7ArCb)!-=ylerwBS!VCQngCS3*I1g(IQV=$vwBg{g)O{#)$Q$+G zxF571Ag-r4s00(n~VUM1Py0{uvteMV?z{bbjc!OjK8{#-G1%2yBWxV!Rr zbIzALm2cmT=ruztTc1f@y|ZrFO-oi*U6$nhG5U6c?YM7sfl=*pwYVnV z{qe1{lRtm{@X4`DpY5oc{fDUs7XId)#D1JcGb%rQ2p#dOU5i+bj|pWj;?-x&u-V(uitxm z^R|8C60X}j-|;|X$%u}LL-Hrhp0{VY|IsNs4ra99{M4^^zxCkGqWf!Z$liWulJ8XF zshbBH%9SaB(14r1Njj_hhHP4}uk+}fTPoAe+wUx>RxjIg@Wm{5jNn7 zyys^pb`P60^zP&L{p6t!7wl}?Fn+_#E0YfxCyvbh^jDAHRogLt)#SZ<=Z_yfd*zvp zXBWKorulgN$mL7d+6L_$Idnq#__^0yQOsvEZ}A|0o&U(2_QtoBfwvDi_Vn)_JiU9O z_R~p~OE2ZmW=YtJIIuf53fS2awlWUvUmm}Nw>w9|`s2WE4~llLm9SNDVACEJuwDr} zKMw5LZ34Dj!dAzDJz6YaeG;}N4(#|;0XtX1E{FqraJj(4JPErn4(yE;qTLD!yC@Fq z`P&6-rG&jM4(!A-0qd8rY8=>Q0|jiAgk^DHt2c_d&6lu? 
Date: Sun, 19 May 2024 08:36:24 -0400 Subject: [PATCH 565/700] refactor: Replace JwtSecret with alloy's version of it (#8299) Co-authored-by: Matthias Seitz --- Cargo.lock | 7 +- crates/node-core/src/args/rpc_server.rs | 2 +- crates/node-core/src/utils.rs | 4 +- crates/rpc/rpc-builder/src/auth.rs | 8 +- crates/rpc/rpc-builder/src/lib.rs | 8 +- crates/rpc/rpc-layer/Cargo.toml | 7 +- crates/rpc/rpc-layer/src/auth_client_layer.rs | 2 +- crates/rpc/rpc-layer/src/auth_layer.rs | 6 +- crates/rpc/rpc-layer/src/jwt_secret.rs | 425 ------------------ crates/rpc/rpc-layer/src/jwt_validator.rs | 2 +- crates/rpc/rpc-layer/src/lib.rs | 5 +- 11 files changed, 21 insertions(+), 455 deletions(-) delete mode 100644 crates/rpc/rpc-layer/src/jwt_secret.rs diff --git a/Cargo.lock b/Cargo.lock index 7097fb1e769f4..f482c94ce8c2c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7783,19 +7783,14 @@ dependencies = [ name = "reth-rpc-layer" version = "0.2.0-beta.7" dependencies = [ + "alloy-rpc-types-engine", "assert_matches", "http 0.2.12", "http-body 0.4.6", "hyper 0.14.28", "jsonrpsee", - "jsonwebtoken 8.3.0", "pin-project", - "rand 0.8.5", - "reth-fs-util", - "reth-primitives", - "serde", "tempfile", - "thiserror", "tokio", "tower", "tracing", diff --git a/crates/node-core/src/args/rpc_server.rs b/crates/node-core/src/args/rpc_server.rs index c464463171779..f67ef6acb74fd 100644 --- a/crates/node-core/src/args/rpc_server.rs +++ b/crates/node-core/src/args/rpc_server.rs @@ -493,7 +493,7 @@ impl RethRpcConfig for RpcServerArgs { } fn rpc_secret_key(&self) -> Option { - self.rpc_jwtsecret.clone() + self.rpc_jwtsecret } } diff --git a/crates/node-core/src/utils.rs b/crates/node-core/src/utils.rs index 84a3bef7be58e..f9b4ff599ca6c 100644 --- a/crates/node-core/src/utils.rs +++ b/crates/node-core/src/utils.rs @@ -12,7 +12,7 @@ use reth_interfaces::p2p::{ use reth_network::NetworkManager; use reth_primitives::{BlockHashOrNumber, ChainSpec, HeadersDirection, SealedBlock, SealedHeader}; use 
reth_provider::BlockReader; -use reth_rpc_layer::{JwtError, JwtSecret}; +use reth_rpc_types::engine::{JwtError, JwtSecret}; use std::{ env::VarError, path::{Path, PathBuf}, @@ -33,7 +33,7 @@ pub fn get_or_create_jwt_secret_from_path(path: &Path) -> Result jsonrpsee::http_client::HttpClient> { // Create a middleware that adds a new JWT token to every request. - let secret_layer = AuthClientLayer::new(self.secret.clone()); + let secret_layer = AuthClientLayer::new(self.secret); let middleware = tower::ServiceBuilder::default().layer(secret_layer); jsonrpsee::http_client::HttpClientBuilder::default() .set_http_middleware(middleware) diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 036127fdf28df..c8976f03cf6a7 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -1630,7 +1630,7 @@ impl RpcServerConfig { /// Creates the [AuthLayer] if any fn maybe_jwt_layer(&self) -> Option> { - self.jwt_secret.clone().map(|secret| AuthLayer::new(JwtAuthValidator::new(secret))) + self.jwt_secret.map(|secret| AuthLayer::new(JwtAuthValidator::new(secret))) } /// Builds the ws and http server(s). @@ -1701,7 +1701,7 @@ impl RpcServerConfig { http_local_addr: Some(addr), ws_local_addr: Some(addr), server: WsHttpServers::SamePort(server), - jwt_secret: self.jwt_secret.clone(), + jwt_secret: self.jwt_secret, }) } @@ -1760,7 +1760,7 @@ impl RpcServerConfig { http_local_addr, ws_local_addr, server: WsHttpServers::DifferentPort { http: http_server, ws: ws_server }, - jwt_secret: self.jwt_secret.clone(), + jwt_secret: self.jwt_secret, }) } @@ -2062,7 +2062,7 @@ impl RpcServer { } /// Return the JwtSecret of the server pub fn jwt(&self) -> Option { - self.ws_http.jwt_secret.clone() + self.ws_http.jwt_secret } /// Returns the [`SocketAddr`] of the ws server if started. 
diff --git a/crates/rpc/rpc-layer/Cargo.toml b/crates/rpc/rpc-layer/Cargo.toml index 546770f94377e..b08bb21e76c72 100644 --- a/crates/rpc/rpc-layer/Cargo.toml +++ b/crates/rpc/rpc-layer/Cargo.toml @@ -11,19 +11,14 @@ repository.workspace = true workspace = true [dependencies] -reth-primitives.workspace = true -reth-fs-util.workspace = true +alloy-rpc-types-engine.workspace = true http.workspace = true hyper.workspace = true tower.workspace = true http-body.workspace = true pin-project.workspace = true -jsonwebtoken = "8" -rand.workspace = true -serde.workspace = true -thiserror.workspace = true tracing.workspace = true [dev-dependencies] diff --git a/crates/rpc/rpc-layer/src/auth_client_layer.rs b/crates/rpc/rpc-layer/src/auth_client_layer.rs index 4c845796ede2d..1b57608a7c3aa 100644 --- a/crates/rpc/rpc-layer/src/auth_client_layer.rs +++ b/crates/rpc/rpc-layer/src/auth_client_layer.rs @@ -24,7 +24,7 @@ impl Layer for AuthClientLayer { type Service = AuthClientService; fn layer(&self, inner: S) -> Self::Service { - AuthClientService::new(self.secret.clone(), inner) + AuthClientService::new(self.secret, inner) } } diff --git a/crates/rpc/rpc-layer/src/auth_layer.rs b/crates/rpc/rpc-layer/src/auth_layer.rs index 4803d2c987392..bbf353f84db47 100644 --- a/crates/rpc/rpc-layer/src/auth_layer.rs +++ b/crates/rpc/rpc-layer/src/auth_layer.rs @@ -155,6 +155,9 @@ where #[cfg(test)] mod tests { + use super::*; + use crate::JwtAuthValidator; + use alloy_rpc_types_engine::{Claims, JwtError, JwtSecret}; use http::{header, Method, Request, StatusCode}; use hyper::{body, Body}; use jsonrpsee::{ @@ -166,9 +169,6 @@ mod tests { time::{SystemTime, UNIX_EPOCH}, }; - use super::AuthLayer; - use crate::{jwt_secret::Claims, JwtAuthValidator, JwtError, JwtSecret}; - const AUTH_PORT: u32 = 8551; const AUTH_ADDR: &str = "0.0.0.0"; const SECRET: &str = "f79ae8046bc11c9927afe911db7143c51a806c4a537cc08e0d37140b0192f430"; diff --git a/crates/rpc/rpc-layer/src/jwt_secret.rs 
b/crates/rpc/rpc-layer/src/jwt_secret.rs deleted file mode 100644 index b3d536078e71b..0000000000000 --- a/crates/rpc/rpc-layer/src/jwt_secret.rs +++ /dev/null @@ -1,425 +0,0 @@ -use jsonwebtoken::{decode, errors::ErrorKind, Algorithm, DecodingKey, Validation}; -use rand::Rng; -use reth_fs_util::{self as fs, FsPathError}; -use reth_primitives::hex::{self, encode as hex_encode}; -use serde::{Deserialize, Serialize}; -use std::{ - path::Path, - str::FromStr, - time::{Duration, SystemTime, UNIX_EPOCH}, -}; -use thiserror::Error; - -/// Errors returned by the [`JwtSecret`] -#[derive(Error, Debug)] -pub enum JwtError { - /// An error encountered while decoding the hexadecimal string for the JWT secret. - #[error(transparent)] - JwtSecretHexDecodeError(#[from] hex::FromHexError), - - /// The JWT key length provided is invalid, expecting a specific length. - #[error("JWT key is expected to have a length of {0} digits. {1} digits key provided")] - InvalidLength(usize, usize), - - /// The signature algorithm used in the JWT is not supported. Only HS256 is supported. - #[error("unsupported signature algorithm. Only HS256 is supported")] - UnsupportedSignatureAlgorithm, - - /// The provided signature in the JWT is invalid. - #[error("provided signature is invalid")] - InvalidSignature, - - /// The "iat" (issued-at) claim in the JWT is not within the allowed ±60 seconds from the - /// current time. - #[error("IAT (issued-at) claim is not within ±60 seconds from the current time")] - InvalidIssuanceTimestamp, - - /// The Authorization header is missing or invalid in the context of JWT validation. - #[error("Authorization header is missing or invalid")] - MissingOrInvalidAuthorizationHeader, - - /// An error occurred during JWT decoding. - #[error("JWT decoding error: {0}")] - JwtDecodingError(String), - - /// An error related to file system path handling encountered during JWT operations. 
- #[error(transparent)] - JwtFsPathError(#[from] FsPathError), - - /// An I/O error occurred during JWT operations. - #[error(transparent)] - IOError(#[from] std::io::Error), -} - -/// Length of the hex-encoded 256 bit secret key. -/// A 256-bit encoded string in Rust has a length of 64 digits because each digit represents 4 bits -/// of data. In hexadecimal representation, each digit can have 16 possible values (0-9 and A-F), so -/// 4 bits can be represented using a single hex digit. Therefore, to represent a 256-bit string, -/// we need 64 hexadecimal digits (256 bits ÷ 4 bits per digit = 64 digits). -const JWT_SECRET_LEN: usize = 64; - -/// The JWT `iat` (issued-at) claim cannot exceed +-60 seconds from the current time. -const JWT_MAX_IAT_DIFF: Duration = Duration::from_secs(60); - -/// The execution layer client MUST support at least the following alg HMAC + SHA256 (HS256) -const JWT_SIGNATURE_ALGO: Algorithm = Algorithm::HS256; - -/// Value-object holding a reference to a hex-encoded 256-bit secret key. -/// A JWT secret key is used to secure JWT-based authentication. The secret key is -/// a shared secret between the server and the client and is used to calculate a digital signature -/// for the JWT, which is included in the JWT along with its payload. -/// -/// See also: [Secret key - Engine API specs](https://github.com/ethereum/execution-apis/blob/main/src/engine/authentication.md#key-distribution) -#[derive(Clone, PartialEq, Eq)] -pub struct JwtSecret([u8; 32]); - -impl JwtSecret { - /// Creates an instance of [`JwtSecret`]. - /// - /// Returns an error if one of the following applies: - /// - `hex` is not a valid hexadecimal string - /// - `hex` argument length is less than `JWT_SECRET_LEN` - /// - /// This strips the leading `0x`, if any. 
- pub fn from_hex>(hex: S) -> Result { - let hex: &str = hex.as_ref().trim().trim_start_matches("0x"); - if hex.len() != JWT_SECRET_LEN { - Err(JwtError::InvalidLength(JWT_SECRET_LEN, hex.len())) - } else { - let hex_bytes = hex::decode(hex)?; - // is 32bytes, see length check - let bytes = hex_bytes.try_into().expect("is expected len"); - Ok(JwtSecret(bytes)) - } - } - - /// Tries to load a [`JwtSecret`] from the specified file path. - /// I/O or secret validation errors might occur during read operations in the form of - /// a [`JwtError`]. - pub fn from_file(fpath: &Path) -> Result { - let hex = fs::read_to_string(fpath)?; - let secret = JwtSecret::from_hex(hex)?; - Ok(secret) - } - - /// Creates a random [`JwtSecret`] and tries to store it at the specified path. I/O errors might - /// occur during write operations in the form of a [`JwtError`] - pub fn try_create(fpath: &Path) -> Result { - if let Some(dir) = fpath.parent() { - // Create parent directory - fs::create_dir_all(dir)? - } - - let secret = JwtSecret::random(); - let bytes = &secret.0; - let hex = hex::encode(bytes); - fs::write(fpath, hex)?; - Ok(secret) - } - - /// Validates a JWT token along the following rules: - /// - The JWT signature is valid. - /// - The JWT is signed with the `HMAC + SHA256 (HS256)` algorithm. - /// - The JWT `iat` (issued-at) claim is a timestamp within +-60 seconds from the current time. - /// - /// See also: [JWT Claims - Engine API specs](https://github.com/ethereum/execution-apis/blob/main/src/engine/authentication.md#jwt-claims) - pub fn validate(&self, jwt: String) -> Result<(), JwtError> { - let mut validation = Validation::new(JWT_SIGNATURE_ALGO); - // ensure that the JWT has an `iat` claim - validation.set_required_spec_claims(&["iat"]); - let bytes = &self.0; - - match decode::(&jwt, &DecodingKey::from_secret(bytes), &validation) { - Ok(token) => { - if !token.claims.is_within_time_window() { - Err(JwtError::InvalidIssuanceTimestamp)? 
- } - } - Err(err) => match *err.kind() { - ErrorKind::InvalidSignature => Err(JwtError::InvalidSignature)?, - ErrorKind::InvalidAlgorithm => Err(JwtError::UnsupportedSignatureAlgorithm)?, - _ => { - let detail = format!("{err}"); - Err(JwtError::JwtDecodingError(detail))? - } - }, - }; - - Ok(()) - } - - /// Generates a random [`JwtSecret`] containing a hex-encoded 256 bit secret key. - pub fn random() -> Self { - let random_bytes: [u8; 32] = rand::thread_rng().gen(); - let secret = hex_encode(random_bytes); - JwtSecret::from_hex(secret).unwrap() - } - - /// Encode the header and claims given and sign the payload using the algorithm from the header - /// and the key. - /// - /// ```rust - /// use reth_rpc_layer::{Claims, JwtSecret}; - /// - /// let my_claims = Claims { iat: 0, exp: None }; - /// let secret = JwtSecret::random(); - /// let token = secret.encode(&my_claims).unwrap(); - /// ``` - pub fn encode(&self, claims: &Claims) -> Result { - let bytes = &self.0; - let key = jsonwebtoken::EncodingKey::from_secret(bytes); - let algo = jsonwebtoken::Header::new(Algorithm::HS256); - jsonwebtoken::encode(&algo, claims, &key) - } -} - -impl std::fmt::Debug for JwtSecret { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_tuple("JwtSecretHash").field(&"{{}}").finish() - } -} - -impl FromStr for JwtSecret { - type Err = JwtError; - - fn from_str(s: &str) -> Result { - JwtSecret::from_hex(s) - } -} - -/// Claims in JWT are used to represent a set of information about an entity. -/// Claims are essentially key-value pairs that are encoded as JSON objects and included in the -/// payload of a JWT. They are used to transmit information such as the identity of the entity, the -/// time the JWT was issued, and the expiration time of the JWT, among others. -/// -/// The Engine API spec requires that just the `iat` (issued-at) claim is provided. -/// It ignores claims that are optional or additional for this specification. 
-#[derive(Debug, Serialize, Deserialize)] -pub struct Claims { - /// The "iat" value MUST be a number containing a NumericDate value. - /// According to the RFC A NumericDate represents the number of seconds since - /// the UNIX_EPOCH. - /// - [`RFC-7519 - Spec`](https://www.rfc-editor.org/rfc/rfc7519#section-4.1.6) - /// - [`RFC-7519 - Notations`](https://www.rfc-editor.org/rfc/rfc7519#section-2) - pub iat: u64, - /// Expiration, if any - pub exp: Option, -} - -impl Claims { - fn is_within_time_window(&self) -> bool { - let now = SystemTime::now(); - let now_secs = now.duration_since(UNIX_EPOCH).unwrap().as_secs(); - now_secs.abs_diff(self.iat) <= JWT_MAX_IAT_DIFF.as_secs() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use assert_matches::assert_matches; - use jsonwebtoken::{encode, EncodingKey, Header}; - use tempfile::tempdir; - - #[test] - fn from_hex() { - let key = "f79ae8046bc11c9927afe911db7143c51a806c4a537cc08e0d37140b0192f430"; - let secret: Result = JwtSecret::from_hex(key); - assert!(secret.is_ok()); - - let secret: Result = JwtSecret::from_hex(key); - assert!(secret.is_ok()); - } - - #[test] - fn original_key_integrity_across_transformations() { - let original = "f79ae8046bc11c9927afe911db7143c51a806c4a537cc08e0d37140b0192f430"; - let secret = JwtSecret::from_hex(original).unwrap(); - let bytes = &secret.0; - let computed = hex_encode(bytes); - assert_eq!(original, computed); - } - - #[test] - fn secret_has_64_hex_digits() { - let expected_len = 64; - let secret = JwtSecret::random(); - let hex = hex::encode(secret.0); - assert_eq!(hex.len(), expected_len); - } - - #[test] - fn creation_ok_hex_string_with_0x() { - let hex: String = - "0x7365637265747365637265747365637265747365637265747365637265747365".into(); - let result = JwtSecret::from_hex(hex); - assert!(result.is_ok()); - } - - #[test] - fn creation_error_wrong_len() { - let hex = "f79ae8046"; - let result = JwtSecret::from_hex(hex); - assert!(matches!(result, 
Err(JwtError::InvalidLength(_, _)))); - } - - #[test] - fn creation_error_wrong_hex_string() { - let hex: String = "This__________Is__________Not_______An____Hex_____________String".into(); - let result = JwtSecret::from_hex(hex); - assert!(matches!(result, Err(JwtError::JwtSecretHexDecodeError(_)))); - } - - #[test] - fn validation_ok() { - let secret = JwtSecret::random(); - let claims = Claims { iat: to_u64(SystemTime::now()), exp: Some(10000000000) }; - let jwt: String = secret.encode(&claims).unwrap(); - - let result = secret.validate(jwt); - - assert!(matches!(result, Ok(()))); - } - - #[test] - fn validation_error_iat_out_of_window() { - let secret = JwtSecret::random(); - - // Check past 'iat' claim more than 60 secs - let offset = Duration::from_secs(JWT_MAX_IAT_DIFF.as_secs() + 1); - let out_of_window_time = SystemTime::now().checked_sub(offset).unwrap(); - let claims = Claims { iat: to_u64(out_of_window_time), exp: Some(10000000000) }; - let jwt: String = secret.encode(&claims).unwrap(); - - let result = secret.validate(jwt); - - assert!(matches!(result, Err(JwtError::InvalidIssuanceTimestamp))); - - // Check future 'iat' claim more than 60 secs - let offset = Duration::from_secs(JWT_MAX_IAT_DIFF.as_secs() + 1); - let out_of_window_time = SystemTime::now().checked_add(offset).unwrap(); - let claims = Claims { iat: to_u64(out_of_window_time), exp: Some(10000000000) }; - let jwt: String = secret.encode(&claims).unwrap(); - - let result = secret.validate(jwt); - - assert!(matches!(result, Err(JwtError::InvalidIssuanceTimestamp))); - } - - #[test] - fn validation_error_wrong_signature() { - let secret_1 = JwtSecret::random(); - let claims = Claims { iat: to_u64(SystemTime::now()), exp: Some(10000000000) }; - let jwt: String = secret_1.encode(&claims).unwrap(); - - // A different secret will generate a different signature. 
- let secret_2 = JwtSecret::random(); - let result = secret_2.validate(jwt); - assert!(matches!(result, Err(JwtError::InvalidSignature))); - } - - #[test] - fn validation_error_unsupported_algorithm() { - let secret = JwtSecret::random(); - let bytes = &secret.0; - - let key = EncodingKey::from_secret(bytes); - let unsupported_algo = Header::new(Algorithm::HS384); - - let claims = Claims { iat: to_u64(SystemTime::now()), exp: Some(10000000000) }; - let jwt: String = encode(&unsupported_algo, &claims, &key).unwrap(); - let result = secret.validate(jwt); - - assert!(matches!(result, Err(JwtError::UnsupportedSignatureAlgorithm))); - } - - #[test] - fn valid_without_exp_claim() { - let secret = JwtSecret::random(); - - let claims = Claims { iat: to_u64(SystemTime::now()), exp: None }; - let jwt: String = secret.encode(&claims).unwrap(); - - let result = secret.validate(jwt); - - assert!(matches!(result, Ok(()))); - } - - #[test] - fn ephemeral_secret_created() { - let fpath: &Path = Path::new("secret0.hex"); - assert!(not_exists(fpath)); - JwtSecret::try_create(fpath).expect("A secret file should be created"); - assert!(exists(fpath)); - delete(fpath); - } - - #[test] - fn valid_secret_provided() { - let fpath = Path::new("secret1.hex"); - assert!(not_exists(fpath)); - - let secret = JwtSecret::random(); - write(fpath, &hex(&secret)); - - match JwtSecret::from_file(fpath) { - Ok(gen_secret) => { - delete(fpath); - assert_eq!(hex(&gen_secret), hex(&secret)); - } - Err(_) => { - delete(fpath); - } - } - } - - #[test] - fn invalid_hex_provided() { - let fpath = Path::new("secret2.hex"); - write(fpath, "invalid hex"); - let result = JwtSecret::from_file(fpath); - assert!(result.is_err()); - delete(fpath); - } - - #[test] - fn provided_file_not_exists() { - let fpath = Path::new("secret3.hex"); - let result = JwtSecret::from_file(fpath); - assert_matches!(result, - Err(JwtError::JwtFsPathError(FsPathError::Read { source: _, path })) if path == fpath.to_path_buf() - ); - 
assert!(!exists(fpath)); - } - - #[test] - fn provided_file_is_a_directory() { - let dir = tempdir().unwrap(); - let result = JwtSecret::from_file(dir.path()); - assert_matches!(result, Err(JwtError::JwtFsPathError(FsPathError::Read { source: _, path })) if path == dir.into_path()); - } - - fn hex(secret: &JwtSecret) -> String { - hex::encode(secret.0) - } - - fn delete(path: &Path) { - std::fs::remove_file(path).unwrap(); - } - - fn write(path: &Path, s: &str) { - std::fs::write(path, s).unwrap(); - } - - fn not_exists(path: &Path) -> bool { - !exists(path) - } - - fn exists(path: &Path) -> bool { - std::fs::metadata(path).is_ok() - } - - fn to_u64(time: SystemTime) -> u64 { - time.duration_since(UNIX_EPOCH).unwrap().as_secs() - } -} diff --git a/crates/rpc/rpc-layer/src/jwt_validator.rs b/crates/rpc/rpc-layer/src/jwt_validator.rs index 0f5124f9ac95d..5bed6135da9cd 100644 --- a/crates/rpc/rpc-layer/src/jwt_validator.rs +++ b/crates/rpc/rpc-layer/src/jwt_validator.rs @@ -26,7 +26,7 @@ impl AuthValidator for JwtAuthValidator { fn validate(&self, headers: &HeaderMap) -> Result<(), Response> { match get_bearer(headers) { - Some(jwt) => match self.secret.validate(jwt) { + Some(jwt) => match self.secret.validate(&jwt) { Ok(_) => Ok(()), Err(e) => { error!(target: "engine::jwt-validator", "Invalid JWT: {e}"); diff --git a/crates/rpc/rpc-layer/src/lib.rs b/crates/rpc/rpc-layer/src/lib.rs index dbe0700964194..a379461dfb031 100644 --- a/crates/rpc/rpc-layer/src/lib.rs +++ b/crates/rpc/rpc-layer/src/lib.rs @@ -12,12 +12,13 @@ use http::{HeaderMap, Response}; mod auth_client_layer; mod auth_layer; -mod jwt_secret; mod jwt_validator; +// Export alloy JWT types +pub use alloy_rpc_types_engine::{Claims, JwtError, JwtSecret}; + pub use auth_client_layer::{secret_to_bearer_header, AuthClientLayer, AuthClientService}; pub use auth_layer::AuthLayer; -pub use jwt_secret::{Claims, JwtError, JwtSecret}; pub use jwt_validator::JwtAuthValidator; /// General purpose trait to validate Http 
Authorization headers. It's supposed to be integrated as From de79f2657cdd5fd3b0fe9a3182e3765ade10a834 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 20 May 2024 09:49:03 +0200 Subject: [PATCH 566/700] chore: integrate discv5 config builder in networkconfig builder (#7856) Co-authored-by: Emilia Hane --- Cargo.lock | 1 + bin/reth/Cargo.toml | 1 + bin/reth/src/commands/p2p/mod.rs | 86 +++++++++++++--------------- crates/net/discv5/src/config.rs | 6 +- crates/net/network/src/config.rs | 81 +++++++++++++------------- crates/node-core/src/args/network.rs | 52 +++++++++++++---- crates/node-core/src/node_config.rs | 85 +++++++++------------------ 7 files changed, 155 insertions(+), 157 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f482c94ce8c2c..511b7da62fe1e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6376,6 +6376,7 @@ dependencies = [ "reth-consensus-common", "reth-db", "reth-discv4", + "reth-discv5", "reth-downloaders", "reth-ethereum-payload-builder", "reth-evm", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index b1d9b1638efa8..b95140aadfa4a 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -44,6 +44,7 @@ reth-payload-builder.workspace = true reth-payload-validator.workspace = true reth-basic-payload-builder.workspace = true reth-discv4.workspace = true +reth-discv5.workspace = true reth-static-file = { workspace = true } reth-trie = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true diff --git a/bin/reth/src/commands/p2p/mod.rs b/bin/reth/src/commands/p2p/mod.rs index c3ad0231b030a..8ad8fadf1d34f 100644 --- a/bin/reth/src/commands/p2p/mod.rs +++ b/bin/reth/src/commands/p2p/mod.rs @@ -4,7 +4,7 @@ use crate::{ args::{ get_secret_key, utils::{chain_help, chain_spec_value_parser, hash_or_num_value_parser, SUPPORTED_CHAINS}, - DatabaseArgs, DiscoveryArgs, + DatabaseArgs, DiscoveryArgs, NetworkArgs, }, dirs::{DataDirPath, MaybePlatformPath}, utils::get_single_header, @@ -14,12 +14,11 @@ use 
clap::{Parser, Subcommand}; use discv5::ListenConfig; use reth_config::Config; use reth_db::create_db; -use reth_discv4::NatResolver; use reth_interfaces::p2p::bodies::client::BodiesClient; -use reth_primitives::{BlockHashOrNumber, ChainSpec, NodeRecord}; +use reth_primitives::{BlockHashOrNumber, ChainSpec}; use reth_provider::ProviderFactory; use std::{ - net::{SocketAddrV4, SocketAddrV6}, + net::{IpAddr, SocketAddrV4, SocketAddrV6}, path::PathBuf, sync::Arc, }; @@ -53,31 +52,14 @@ pub struct Command { #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)] datadir: MaybePlatformPath, - /// Secret key to use for this node. - /// - /// This also will deterministically set the peer ID. - #[arg(long, value_name = "PATH")] - p2p_secret_key: Option, - /// Disable the discovery service. #[command(flatten)] - pub discovery: DiscoveryArgs, - - /// Target trusted peer - #[arg(long)] - trusted_peer: Option, - - /// Connect only to trusted peers - #[arg(long)] - trusted_only: bool, + pub network: NetworkArgs, /// The number of retries per request #[arg(long, default_value = "5")] retries: usize, - #[arg(long, default_value = "any")] - nat: NatResolver, - #[command(flatten)] db: DatabaseArgs, @@ -113,65 +95,75 @@ impl Command { let mut config: Config = confy::load_path(&config_path).unwrap_or_default(); - if let Some(peer) = self.trusted_peer { + for &peer in &self.network.trusted_peers { config.peers.trusted_nodes.insert(peer); } - if config.peers.trusted_nodes.is_empty() && self.trusted_only { + if config.peers.trusted_nodes.is_empty() && self.network.trusted_only { eyre::bail!("No trusted nodes. 
Set trusted peer with `--trusted-peer ` or set `--trusted-only` to `false`") } - config.peers.trusted_nodes_only = self.trusted_only; + config.peers.trusted_nodes_only = self.network.trusted_only; let default_secret_key_path = data_dir.p2p_secret(); - let secret_key_path = self.p2p_secret_key.clone().unwrap_or(default_secret_key_path); + let secret_key_path = + self.network.p2p_secret_key.clone().unwrap_or(default_secret_key_path); let p2p_secret_key = get_secret_key(&secret_key_path)?; + let rlpx_socket = (self.network.addr, self.network.port).into(); + let boot_nodes = self.chain.bootnodes().unwrap_or_default(); let mut network_config_builder = config - .network_config(self.nat, None, p2p_secret_key) + .network_config(self.network.nat, None, p2p_secret_key) .chain_spec(self.chain.clone()) .disable_discv4_discovery_if(self.chain.chain.is_optimism()) - .boot_nodes(self.chain.bootnodes().unwrap_or_default()); + .boot_nodes(boot_nodes.clone()); - network_config_builder = self.discovery.apply_to_builder(network_config_builder); - - let mut network_config = network_config_builder.build(Arc::new(ProviderFactory::new( - noop_db, - self.chain.clone(), - data_dir.static_files(), - )?)); - - if !self.discovery.disable_discovery && - (self.discovery.enable_discv5_discovery || - network_config.chain_spec.chain.is_optimism()) - { - network_config = network_config.discovery_v5_with_config_builder(|builder| { + network_config_builder = self + .network + .discovery + .apply_to_builder(network_config_builder, rlpx_socket) + .map_discv5_config_builder(|builder| { let DiscoveryArgs { - discv5_addr: discv5_addr_ipv4, + discv5_addr, discv5_addr_ipv6, - discv5_port: discv5_port_ipv4, + discv5_port, discv5_port_ipv6, discv5_lookup_interval, discv5_bootstrap_lookup_interval, discv5_bootstrap_lookup_countdown, .. 
- } = self.discovery; + } = self.network.discovery; + + // Use rlpx address if none given + let discv5_addr_ipv4 = discv5_addr.or(match self.network.addr { + IpAddr::V4(ip) => Some(ip), + IpAddr::V6(_) => None, + }); + let discv5_addr_ipv6 = discv5_addr_ipv6.or(match self.network.addr { + IpAddr::V4(_) => None, + IpAddr::V6(ip) => Some(ip), + }); builder .discv5_config( discv5::ConfigBuilder::new(ListenConfig::from_two_sockets( - discv5_addr_ipv4.map(|addr| SocketAddrV4::new(addr, discv5_port_ipv4)), + discv5_addr_ipv4.map(|addr| SocketAddrV4::new(addr, discv5_port)), discv5_addr_ipv6 .map(|addr| SocketAddrV6::new(addr, discv5_port_ipv6, 0, 0)), )) .build(), ) + .add_unsigned_boot_nodes(boot_nodes.into_iter()) .lookup_interval(discv5_lookup_interval) .bootstrap_lookup_interval(discv5_bootstrap_lookup_interval) .bootstrap_lookup_countdown(discv5_bootstrap_lookup_countdown) - .build() }); - } + + let network_config = network_config_builder.build(Arc::new(ProviderFactory::new( + noop_db, + self.chain.clone(), + data_dir.static_files(), + )?)); let network = network_config.start_network().await?; let fetch_client = network.fetch_client().await?; diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 2a246d3d57d0c..5d4d2dfaded31 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -250,7 +250,7 @@ impl ConfigBuilder { } /// Config used to bootstrap [`discv5::Discv5`]. -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct Config { /// Config used by [`discv5::Discv5`]. Contains the [`ListenConfig`], with the discovery listen /// socket. @@ -296,9 +296,7 @@ impl Config { discovered_peer_filter: None, } } -} -impl Config { /// Returns the discovery (UDP) socket contained in the [`discv5::Config`]. Returns the IPv6 /// socket, if both IPv4 and v6 are configured. This socket will be advertised to peers in the /// local [`Enr`](discv5::enr::Enr). 
@@ -416,7 +414,7 @@ pub fn discv5_sockets_wrt_rlpx_addr( /// A boot node can be added either as a string in either 'enode' URL scheme or serialized from /// [`Enr`](discv5::Enr) type. -#[derive(Debug, PartialEq, Eq, Hash, Display)] +#[derive(Clone, Debug, PartialEq, Eq, Hash, Display)] pub enum BootNode { /// An unsigned node record. #[display(fmt = "{_0}")] diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index c2a7b32389dcd..4bd1dab8835e2 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -104,43 +104,6 @@ impl NetworkConfig { self } - /// Sets the config to use for the discovery v5 protocol, with help of the - /// [`reth_discv5::ConfigBuilder`]. - /// ``` - /// use reth_network::NetworkConfigBuilder; - /// use secp256k1::{rand::thread_rng, SecretKey}; - /// - /// let sk = SecretKey::new(&mut thread_rng()); - /// let network_config = NetworkConfigBuilder::new(sk).build(()); - /// let fork_id = network_config.status.forkid; - /// let network_config = network_config - /// .discovery_v5_with_config_builder(|builder| builder.fork(b"eth", fork_id).build()); - /// ``` - - pub fn discovery_v5_with_config_builder( - self, - f: impl FnOnce(reth_discv5::ConfigBuilder) -> reth_discv5::Config, - ) -> Self { - let network_stack_id = NetworkStackId::id(&self.chain_spec); - let fork_id = self.chain_spec.latest_fork_id(); - let boot_nodes = self.boot_nodes.clone(); - - let mut builder = reth_discv5::Config::builder(self.listener_addr) - .add_unsigned_boot_nodes(boot_nodes.into_iter()); - - if let Some(id) = network_stack_id { - builder = builder.fork(id, fork_id) - } - - self.set_discovery_v5(f(builder)) - } - - /// Sets the config to use for the discovery v5 protocol. - pub fn set_discovery_v5(mut self, discv5_config: reth_discv5::Config) -> Self { - self.discovery_v5_config = Some(discv5_config); - self - } - /// Sets the address for the incoming RLPx connection listener. 
pub fn set_listener_addr(mut self, listener_addr: SocketAddr) -> Self { self.listener_addr = listener_addr; @@ -180,6 +143,9 @@ pub struct NetworkConfigBuilder { dns_discovery_config: Option, /// How to set up discovery version 4. discovery_v4_builder: Option, + /// How to set up discovery version 5. + #[serde(skip)] + discovery_v5_builder: Option, /// All boot nodes to start network discovery with. boot_nodes: HashSet, /// Address to use for discovery @@ -222,6 +188,7 @@ impl NetworkConfigBuilder { secret_key, dns_discovery_config: Some(Default::default()), discovery_v4_builder: Some(Default::default()), + discovery_v5_builder: None, boot_nodes: Default::default(), discovery_addr: None, listener_addr: None, @@ -348,12 +315,17 @@ impl NetworkConfigBuilder { } /// Sets the discv4 config to use. - // pub fn discovery(mut self, builder: Discv4ConfigBuilder) -> Self { self.discovery_v4_builder = Some(builder); self } + /// Sets the discv5 config to use. + pub fn discovery_v5(mut self, builder: reth_discv5::ConfigBuilder) -> Self { + self.discovery_v5_builder = Some(builder); + self + } + /// Sets the dns discovery config to use. pub fn dns_discovery(mut self, config: DnsDiscoveryConfig) -> Self { self.dns_discovery_config = Some(config); @@ -420,6 +392,36 @@ impl NetworkConfigBuilder { } } + /// Calls a closure on [`reth_discv5::ConfigBuilder`], if discv5 discovery is enabled and the + /// builder has been set. 
+ /// ``` + /// use reth_network::NetworkConfigBuilder; + /// use reth_primitives::MAINNET; + /// use reth_provider::test_utils::NoopProvider; + /// use secp256k1::{rand::thread_rng, SecretKey}; + /// + /// let sk = SecretKey::new(&mut thread_rng()); + /// let fork_id = MAINNET.latest_fork_id(); + /// let network_config = NetworkConfigBuilder::new(sk) + /// .map_discv5_config_builder(|builder| builder.fork(b"eth", fork_id)) + /// .build(NoopProvider::default()); + /// ``` + pub fn map_discv5_config_builder( + mut self, + f: impl FnOnce(reth_discv5::ConfigBuilder) -> reth_discv5::ConfigBuilder, + ) -> Self { + if let Some(mut builder) = self.discovery_v5_builder { + if let Some(network_stack_id) = NetworkStackId::id(&self.chain_spec) { + let fork_id = self.chain_spec.latest_fork_id(); + builder = builder.fork(network_stack_id, fork_id); + } + + self.discovery_v5_builder = Some(f(builder)); + } + + self + } + /// Adds a new additional protocol to the RLPx sub-protocol list. pub fn add_rlpx_sub_protocol(mut self, protocol: impl IntoRlpxSubProtocol) -> Self { self.extra_protocols.push(protocol); @@ -458,6 +460,7 @@ impl NetworkConfigBuilder { secret_key, mut dns_discovery_config, discovery_v4_builder, + discovery_v5_builder, boot_nodes, discovery_addr, listener_addr, @@ -511,7 +514,7 @@ impl NetworkConfigBuilder { boot_nodes, dns_discovery_config, discovery_v4_config: discovery_v4_builder.map(|builder| builder.build()), - discovery_v5_config: None, + discovery_v5_config: discovery_v5_builder.map(|builder| builder.build()), discovery_v4_addr: discovery_addr.unwrap_or(DEFAULT_DISCOVERY_ADDRESS), listener_addr, peers_config: peers_config.unwrap_or_default(), diff --git a/crates/node-core/src/args/network.rs b/crates/node-core/src/args/network.rs index 8202739bc90d7..0c808637a34bc 100644 --- a/crates/node-core/src/args/network.rs +++ b/crates/node-core/src/args/network.rs @@ -20,7 +20,7 @@ use reth_network::{ use reth_primitives::{mainnet_nodes, ChainSpec, NodeRecord}; use 
secp256k1::SecretKey; use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr}, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, ops::Not, path::PathBuf, sync::Arc, @@ -119,7 +119,10 @@ impl NetworkArgs { secret_key: SecretKey, default_peers_file: PathBuf, ) -> NetworkConfigBuilder { - let chain_bootnodes = chain_spec.bootnodes().unwrap_or_else(mainnet_nodes); + let boot_nodes = self + .bootnodes + .clone() + .unwrap_or_else(|| chain_spec.bootnodes().unwrap_or_else(mainnet_nodes)); let peers_file = self.peers_file.clone().unwrap_or(default_peers_file); // Configure peer connections @@ -136,7 +139,6 @@ impl NetworkArgs { self.soft_limit_byte_size_pooled_transactions_response_on_pack_request, ), }; - // Configure basic network stack let mut network_config_builder = config .network_config(self.nat, self.persistent_peers_file(peers_file), secret_key) @@ -144,8 +146,8 @@ impl NetworkArgs { SessionsConfig::default().with_upscaled_event_buffer(peers_config.max_peers()), ) .peer_config(peers_config) - .boot_nodes(self.bootnodes.clone().unwrap_or(chain_bootnodes)) - .chain_spec(chain_spec) + .boot_nodes(boot_nodes.clone()) + .chain_spec(chain_spec.clone()) .transactions_manager_config(transactions_manager_config); // Configure node identity @@ -154,7 +156,29 @@ impl NetworkArgs { HelloMessageWithProtocols::builder(peer_id).client_version(&self.identity).build(), ); - self.discovery.apply_to_builder(network_config_builder) + let rlpx_socket = (self.addr, self.port).into(); + network_config_builder = + self.discovery.apply_to_builder(network_config_builder, rlpx_socket); + + if chain_spec.is_optimism() && !self.discovery.disable_discovery { + network_config_builder = + network_config_builder.discovery_v5(reth_discv5::Config::builder(rlpx_socket)); + } + + network_config_builder.map_discv5_config_builder(|builder| { + let DiscoveryArgs { + discv5_lookup_interval, + discv5_bootstrap_lookup_interval, + discv5_bootstrap_lookup_countdown, + .. 
+ } = self.discovery; + + builder + .add_unsigned_boot_nodes(boot_nodes.into_iter()) + .lookup_interval(discv5_lookup_interval) + .bootstrap_lookup_interval(discv5_bootstrap_lookup_interval) + .bootstrap_lookup_countdown(discv5_bootstrap_lookup_countdown) + }) } /// If `no_persist_peers` is false then this returns the path to the persistent peers file path. @@ -228,11 +252,13 @@ pub struct DiscoveryArgs { #[arg(id = "discovery.port", long = "discovery.port", value_name = "DISCOVERY_PORT", default_value_t = DEFAULT_DISCOVERY_PORT)] pub port: u16, - /// The UDP IPv4 address to use for devp2p peer discovery version 5. + /// The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by RLPx + /// address, if it's also IPv4. #[arg(id = "discovery.v5.addr", long = "discovery.v5.addr", value_name = "DISCOVERY_V5_ADDR", default_value = None)] pub discv5_addr: Option, - /// The UDP IPv6 address to use for devp2p peer discovery version 5. + /// The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by RLPx + /// address, if it's also IPv6. 
#[arg(id = "discovery.v5.addr.ipv6", long = "discovery.v5.addr.ipv6", value_name = "DISCOVERY_V5_ADDR_IPV6", default_value = None)] pub discv5_addr_ipv6: Option, @@ -270,6 +296,7 @@ impl DiscoveryArgs { pub fn apply_to_builder( &self, mut network_config_builder: NetworkConfigBuilder, + rlpx_tcp_socket: SocketAddr, ) -> NetworkConfigBuilder { if self.disable_discovery || self.disable_dns_discovery { network_config_builder = network_config_builder.disable_dns_discovery(); @@ -279,6 +306,11 @@ impl DiscoveryArgs { network_config_builder = network_config_builder.disable_discv4_discovery(); } + if !self.disable_discovery && self.enable_discv5_discovery { + network_config_builder = + network_config_builder.discovery_v5(reth_discv5::Config::builder(rlpx_tcp_socket)); + } + network_config_builder } @@ -295,8 +327,8 @@ impl Default for DiscoveryArgs { Self { disable_discovery: false, disable_dns_discovery: false, - disable_discv4_discovery: cfg!(feature = "optimism"), - enable_discv5_discovery: cfg!(feature = "optimism"), + disable_discv4_discovery: false, + enable_discv5_discovery: false, addr: DEFAULT_DISCOVERY_ADDR, port: DEFAULT_DISCOVERY_PORT, discv5_addr: None, diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 5cb28c87307a0..52333c1471420 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -27,7 +27,7 @@ use reth_provider::{ use reth_tasks::TaskExecutor; use secp256k1::SecretKey; use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, + net::{IpAddr, SocketAddr, SocketAddrV4, SocketAddrV6}, path::PathBuf, sync::Arc, }; @@ -456,8 +456,7 @@ impl NodeConfig { secret_key: SecretKey, default_peers_path: PathBuf, ) -> NetworkConfig { - let cfg_builder = self - .network + self.network .network_config(config, self.chain.clone(), secret_key, default_peers_path) .with_task_executor(Box::new(executor)) .set_head(head) @@ -471,39 +470,30 @@ impl NodeConfig { 
self.network.discovery.addr, // set discovery port based on instance number self.network.discovery.port + self.instance - 1, - )); - - let config = cfg_builder.build(client); - - if self.network.discovery.disable_discovery || - !self.network.discovery.enable_discv5_discovery && - !config.chain_spec.chain.is_optimism() - { - return config - } - - let rlpx_addr = config.listener_addr().ip(); - // work around since discv5 config builder can't be integrated into network config builder - // due to unsatisfied trait bounds - config.discovery_v5_with_config_builder(|builder| { - let DiscoveryArgs { - discv5_addr, - discv5_addr_ipv6, - discv5_port, - discv5_port_ipv6, - discv5_lookup_interval, - discv5_bootstrap_lookup_interval, - discv5_bootstrap_lookup_countdown, - .. - } = self.network.discovery; - - let discv5_addr_ipv4 = discv5_addr.or_else(|| ipv4(rlpx_addr)); - let discv5_addr_ipv6 = discv5_addr_ipv6.or_else(|| ipv6(rlpx_addr)); - let discv5_port_ipv4 = discv5_port + self.instance - 1; - let discv5_port_ipv6 = discv5_port_ipv6 + self.instance - 1; - - builder - .discv5_config( + )) + .map_discv5_config_builder(|builder| { + let DiscoveryArgs { + discv5_addr, + discv5_addr_ipv6, + discv5_port, + discv5_port_ipv6, + .. 
+ } = self.network.discovery; + + // Use rlpx address if none given + let discv5_addr_ipv4 = discv5_addr.or(match self.network.addr { + IpAddr::V4(ip) => Some(ip), + IpAddr::V6(_) => None, + }); + let discv5_addr_ipv6 = discv5_addr_ipv6.or(match self.network.addr { + IpAddr::V4(_) => None, + IpAddr::V6(ip) => Some(ip), + }); + + let discv5_port_ipv4 = discv5_port + self.instance - 1; + let discv5_port_ipv6 = discv5_port_ipv6 + self.instance - 1; + + builder.discv5_config( discv5::ConfigBuilder::new(ListenConfig::from_two_sockets( discv5_addr_ipv4.map(|addr| SocketAddrV4::new(addr, discv5_port_ipv4)), discv5_addr_ipv6 @@ -511,11 +501,8 @@ impl NodeConfig { )) .build(), ) - .lookup_interval(discv5_lookup_interval) - .bootstrap_lookup_interval(discv5_bootstrap_lookup_interval) - .bootstrap_lookup_countdown(discv5_bootstrap_lookup_countdown) - .build() - }) + }) + .build(client) } /// Change rpc port numbers based on the instance number, using the inner @@ -551,19 +538,3 @@ impl Default for NodeConfig { } } } - -/// Returns the address if this is an [`Ipv4Addr`]. -pub fn ipv4(ip: IpAddr) -> Option { - match ip { - IpAddr::V4(ip) => Some(ip), - IpAddr::V6(_) => None, - } -} - -/// Returns the address if this is an [`Ipv6Addr`]. 
-pub fn ipv6(ip: IpAddr) -> Option { - match ip { - IpAddr::V4(_) => None, - IpAddr::V6(ip) => Some(ip), - } -} From 383693762b9261cf9f6676451dbd6be86ab47fb0 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 20 May 2024 16:21:57 +0200 Subject: [PATCH 567/700] fix(txpool): local transaction configuration (#8323) --- crates/ethereum/node/src/node.rs | 4 +++- crates/transaction-pool/src/validate/eth.rs | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 87bc54d15a0ce..026e39cf09f5c 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -109,10 +109,12 @@ where async fn build_pool(self, ctx: &BuilderContext) -> eyre::Result { let data_dir = ctx.data_dir(); + let pool_config = ctx.pool_config(); let blob_store = DiskFileBlobStore::open(data_dir.blobstore(), Default::default())?; let validator = TransactionValidationTaskExecutor::eth_builder(ctx.chain_spec()) .with_head_timestamp(ctx.head().timestamp) .kzg_settings(ctx.kzg_settings()?) + .with_local_transactions_config(pool_config.local_transactions_config.clone()) .with_additional_tasks(1) .build_with_tasks( ctx.provider().clone(), @@ -121,7 +123,7 @@ where ); let transaction_pool = - reth_transaction_pool::Pool::eth_pool(validator, blob_store, ctx.pool_config()); + reth_transaction_pool::Pool::eth_pool(validator, blob_store, pool_config); info!(target: "reth::cli", "Transaction pool initialized"); let transactions_path = data_dir.txpool_transactions(); diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index abee8a5850901..5c0a2db5ad068 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -479,7 +479,7 @@ impl EthTransactionValidatorBuilder { } /// Whether to allow exemptions for local transaction exemptions. 
- pub fn set_local_transactions_config( + pub fn with_local_transactions_config( mut self, local_transactions_config: LocalTransactionConfig, ) -> Self { From 5943c4707d3a3a7debd6bd0f4ab27d3f5cd657a5 Mon Sep 17 00:00:00 2001 From: Victor Shih Date: Mon, 20 May 2024 15:05:17 -0400 Subject: [PATCH 568/700] feat: add additional message type metrics to EthRequestHandlerMetrics (#8319) Co-authored-by: Victor Shih --- crates/net/network/src/eth_requests.rs | 10 +++++++--- crates/net/network/src/metrics.rs | 14 ++++++++++---- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index 3268ff8987afe..7cca3799570ae 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -144,7 +144,7 @@ where request: GetBlockHeaders, response: oneshot::Sender>, ) { - self.metrics.received_headers_requests.increment(1); + self.metrics.eth_headers_requests_received_total.increment(1); let headers = self.get_headers_response(request); let _ = response.send(Ok(BlockHeaders(headers))); } @@ -155,7 +155,7 @@ where request: GetBlockBodies, response: oneshot::Sender>, ) { - self.metrics.received_bodies_requests.increment(1); + self.metrics.eth_bodies_requests_received_total.increment(1); let mut bodies = Vec::new(); let mut total_bytes = 0; @@ -192,6 +192,8 @@ where request: GetReceipts, response: oneshot::Sender>, ) { + self.metrics.eth_receipts_requests_received_total.increment(1); + let mut receipts = Vec::new(); let mut total_bytes = 0; @@ -249,7 +251,9 @@ where IncomingEthRequest::GetBlockBodies { peer_id, request, response } => { this.on_bodies_request(peer_id, request, response) } - IncomingEthRequest::GetNodeData { .. } => {} + IncomingEthRequest::GetNodeData { .. 
} => { + this.metrics.eth_node_data_requests_received_total.increment(1); + } IncomingEthRequest::GetReceipts { peer_id, request, response } => { this.on_receipts_request(peer_id, request, response) } diff --git a/crates/net/network/src/metrics.rs b/crates/net/network/src/metrics.rs index d976560ab8eac..989dc431b64b1 100644 --- a/crates/net/network/src/metrics.rs +++ b/crates/net/network/src/metrics.rs @@ -310,11 +310,17 @@ impl DisconnectMetrics { #[derive(Metrics)] #[metrics(scope = "network")] pub struct EthRequestHandlerMetrics { - /// Number of received headers requests - pub(crate) received_headers_requests: Counter, + /// Number of GetBlockHeaders requests received + pub(crate) eth_headers_requests_received_total: Counter, - /// Number of received bodies requests - pub(crate) received_bodies_requests: Counter, + /// Number of GetReceipts requests received + pub(crate) eth_receipts_requests_received_total: Counter, + + /// Number of GetBlockBodies requests received + pub(crate) eth_bodies_requests_received_total: Counter, + + /// Number of GetNodeData requests received + pub(crate) eth_node_data_requests_received_total: Counter, } /// Eth67 announcement metrics, track entries by TxType From adbe1ba9b1a25b022322bd878649c2bf6e098d54 Mon Sep 17 00:00:00 2001 From: Vid Kersic <38610409+Vid201@users.noreply.github.com> Date: Tue, 21 May 2024 10:31:08 +0200 Subject: [PATCH 569/700] chore: use with_chain_id for CfgEnv (#8327) --- crates/ethereum/engine-primitives/src/payload.rs | 3 +-- crates/optimism/payload/src/payload.rs | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 55a97c96ddc51..ed3f484b8d0c0 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -220,8 +220,7 @@ impl PayloadBuilderAttributes for EthPayloadBuilderAttributes { parent: &Header, ) -> (CfgEnvWithHandlerCfg, 
BlockEnv) { // configure evm env based on parent block - let mut cfg = CfgEnv::default(); - cfg.chain_id = chain_spec.chain().id(); + let cfg = CfgEnv::default().with_chain_id(chain_spec.chain().id()); // ensure we're not missing any timestamp based hardforks let spec_id = revm_spec_by_timestamp_after_merge(chain_spec, self.timestamp()); diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 9cd47ef4256f9..182dadfed9b2e 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -105,8 +105,7 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { parent: &Header, ) -> (CfgEnvWithHandlerCfg, BlockEnv) { // configure evm env based on parent block - let mut cfg = CfgEnv::default(); - cfg.chain_id = chain_spec.chain().id(); + let cfg = CfgEnv::default().with_chain_id(chain_spec.chain().id()); // ensure we're not missing any timestamp based hardforks let spec_id = revm_spec_by_timestamp_after_merge(chain_spec, self.timestamp()); From 5100ddd28e3ae0396aa97633d6f700d77aefb50f Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 21 May 2024 10:45:23 +0200 Subject: [PATCH 570/700] fix: prevent CREATE tx for EIP-4844 types (#8291) Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com> --- crates/consensus/common/src/validation.rs | 3 +- crates/primitives/src/alloy_compat.rs | 3 +- crates/primitives/src/revm/env.rs | 5 +-- crates/primitives/src/transaction/eip4844.rs | 33 +++++++++++++++---- crates/primitives/src/transaction/mod.rs | 29 ++++++++++++---- .../rpc-types-compat/src/transaction/mod.rs | 6 ++-- .../rpc-types-compat/src/transaction/typed.rs | 3 +- .../rpc-types/src/eth/transaction/typed.rs | 6 ++-- crates/rpc/rpc/src/eth/api/transactions.rs | 8 +++-- .../storage/codecs/derive/src/compact/mod.rs | 12 +++++-- crates/storage/codecs/src/lib.rs | 18 ++++++++++ crates/transaction-pool/src/test_utils/gen.rs | 6 +++- 
.../transaction-pool/src/test_utils/mock.rs | 19 ++++++++--- crates/transaction-pool/src/traits.rs | 6 ++-- crates/transaction-pool/src/validate/eth.rs | 2 +- 15 files changed, 120 insertions(+), 39 deletions(-) diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index bf94937209cd2..40c36bfa51c28 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -284,7 +284,8 @@ mod tests { max_priority_fee_per_gas: 0x28f000fff, max_fee_per_blob_gas: 0x7, gas_limit: 10, - to: Address::default().into(), + placeholder: Some(()), + to: Address::default(), value: U256::from(3_u64), input: Bytes::from(vec![1, 2]), access_list: Default::default(), diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index 5e2bff8173cae..be8144e901200 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -182,7 +182,8 @@ impl TryFrom for Transaction { .gas .try_into() .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TxKind::Create, TxKind::Call), + placeholder: tx.to.map(|_| ()), + to: tx.to.unwrap_or_default(), value: tx.value, access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, input: tx.input, diff --git a/crates/primitives/src/revm/env.rs b/crates/primitives/src/revm/env.rs index f519abc058231..0c16f5482f8d1 100644 --- a/crates/primitives/src/revm/env.rs +++ b/crates/primitives/src/revm/env.rs @@ -272,10 +272,7 @@ where tx_env.gas_limit = tx.gas_limit; tx_env.gas_price = U256::from(tx.max_fee_per_gas); tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); - tx_env.transact_to = match tx.to { - TxKind::Call(to) => TransactTo::Call(to), - TxKind::Create => TransactTo::create(), - }; + tx_env.transact_to = TransactTo::Call(tx.to); tx_env.value = tx.value; tx_env.data = tx.input.clone(); tx_env.chain_id = Some(tx.chain_id); diff --git 
a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs index f2130ce50eb9f..885e457b8bb55 100644 --- a/crates/primitives/src/transaction/eip4844.rs +++ b/crates/primitives/src/transaction/eip4844.rs @@ -1,10 +1,10 @@ use super::access_list::AccessList; use crate::{ - constants::eip4844::DATA_GAS_PER_BLOB, keccak256, Bytes, ChainId, Signature, TxKind, TxType, + constants::eip4844::DATA_GAS_PER_BLOB, keccak256, Address, Bytes, ChainId, Signature, TxType, B256, U256, }; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; -use reth_codecs::{main_codec, Compact}; +use reth_codecs::{main_codec, Compact, CompactPlaceholder}; use std::mem; #[cfg(feature = "c-kzg")] @@ -46,9 +46,12 @@ pub struct TxEip4844 { /// /// This is also known as `GasTipCap` pub max_priority_fee_per_gas: u128, - /// The 160-bit address of the message call’s recipient or, for a contract creation - /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. - pub to: TxKind, + /// TODO(debt): this should be removed if we break the DB. + /// Makes sure that the Compact bitflag struct has one bit after the above field: + /// + pub placeholder: Option, + /// The 160-bit address of the message call’s recipient. + pub to: Address, /// A scalar value equal to the number of Wei to /// be transferred to the message call’s recipient or, /// in the case of contract creation, as an endowment @@ -148,6 +151,7 @@ impl TxEip4844 { max_priority_fee_per_gas: Decodable::decode(buf)?, max_fee_per_gas: Decodable::decode(buf)?, gas_limit: Decodable::decode(buf)?, + placeholder: Some(()), to: Decodable::decode(buf)?, value: Decodable::decode(buf)?, input: Decodable::decode(buf)?, @@ -195,7 +199,7 @@ impl TxEip4844 { mem::size_of::() + // gas_limit mem::size_of::() + // max_fee_per_gas mem::size_of::() + // max_priority_fee_per_gas - self.to.size() + // to + mem::size_of::
() + // to mem::size_of::() + // value self.access_list.size() + // access_list self.input.len() + // input @@ -272,3 +276,20 @@ impl TxEip4844 { keccak256(&buf) } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::{address, bytes}; + + #[test] + fn backwards_compatible_txkind_test() { + // TxEip4844 encoded with TxKind on to field + // holesky tx hash: <0xa3b1668225bf0fbfdd6c19aa6fd071fa4ff5d09a607c67ccd458b97735f745ac> + let tx = bytes!("224348a100426844cb2dc6c0b2d05e003b9aca0079c9109b764609df928d16fc4a91e9081f7e87db09310001019101fb28118ceccaabca22a47e35b9c3f12eb2dcb25e5c543d5b75e6cd841f0a05328d26ef16e8450000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000052000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000000200000000000000000000000007b399987d24fc5951f3e94a4cb16e87414bf22290000000000000000000000001670090000000000000000000000000000010001302e31382e302d64657600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000420000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000009e640a6aadf4f664cf467b795c31332f44acbe6c000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000002c00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006614c2d1000000000000000000000000000000000000000000000000000000000014012c0000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000064000000000000000000000000000000000000000000000000000000000000093100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000093100000000000000000000000000000000000000000000000000000000000003e800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000041f06fd78f4dcdf089263524731620941747b9b93fd8f631557e25b23845a78b685bd82f9d36bce2f4cc812b6e5191df52479d349089461ffe76e9f2fa2848a0fe1b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000410819f04aba17677807c61ae72afdddf7737f26931ecfa8af05b7c669808b36a2587e32c90bb0ed2100266dd7797c80121a109a2b0fe941ca5a580e438988cac81c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"); + let (tx, _) = TxEip4844::from_compact(&tx, tx.len()); + assert_eq!(tx.to, address!("79C9109b764609df928d16fC4a91e9081F7e87DB")); + assert_eq!(tx.placeholder, Some(())); + assert_eq!(tx.input, 
bytes!("ef16e8450000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000052000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000000200000000000000000000000007b399987d24fc5951f3e94a4cb16e87414bf22290000000000000000000000001670090000000000000000000000000000010001302e31382e302d64657600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000420000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000009e640a6aadf4f664cf467b795c31332f44acbe6c000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000002c00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006614c2d1000000000000000000000000000000000000000000000000000000000014012c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000064000000000000000000000000000000000000000000000000000000000000093100000000000000000000000000000000000000000000000000000000000000c80000000000000000000000000000000000000000000000000000000000000931
00000000000000000000000000000000000000000000000000000000000003e800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000041f06fd78f4dcdf089263524731620941747b9b93fd8f631557e25b23845a78b685bd82f9d36bce2f4cc812b6e5191df52479d349089461ffe76e9f2fa2848a0fe1b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000410819f04aba17677807c61ae72afdddf7737f26931ecfa8af05b7c669808b36a2587e32c90bb0ed2100266dd7797c80121a109a2b0fe941ca5a580e438988cac81c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")); + } +} diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index c441a32774111..fb5abf08c0ff3 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -77,7 +77,7 @@ pub const MIN_LENGTH_EIP2930_TX_ENCODED: usize = 14; /// Minimum length of a rlp-encoded eip1559 transaction. pub const MIN_LENGTH_EIP1559_TX_ENCODED: usize = 15; /// Minimum length of a rlp-encoded eip4844 transaction. -pub const MIN_LENGTH_EIP4844_TX_ENCODED: usize = 17; +pub const MIN_LENGTH_EIP4844_TX_ENCODED: usize = 37; /// Minimum length of a rlp-encoded deposit transaction. #[cfg(feature = "optimism")] pub const MIN_LENGTH_DEPOSIT_TX_ENCODED: usize = 65; @@ -174,14 +174,14 @@ impl Transaction { /// Gets the transaction's [`TxKind`], which is the address of the recipient or /// [`TxKind::Create`] if the transaction is a contract creation. - pub fn kind(&self) -> &TxKind { + pub fn kind(&self) -> TxKind { match self { Transaction::Legacy(TxLegacy { to, .. }) | Transaction::Eip2930(TxEip2930 { to, .. }) | - Transaction::Eip1559(TxEip1559 { to, .. }) | - Transaction::Eip4844(TxEip4844 { to, .. }) => to, + Transaction::Eip1559(TxEip1559 { to, .. }) => *to, + Transaction::Eip4844(TxEip4844 { to, .. 
}) => TxKind::Call(*to), #[cfg(feature = "optimism")] - Transaction::Deposit(TxDeposit { to, .. }) => to, + Transaction::Deposit(TxDeposit { to, .. }) => *to, } } @@ -1462,6 +1462,11 @@ impl proptest::arbitrary::Arbitrary for TransactionSigned { .then(Signature::optimism_deposit_tx_signature) .unwrap_or(sig); + if let Transaction::Eip4844(ref mut tx_eip_4844) = transaction { + tx_eip_4844.placeholder = + if tx_eip_4844.to != Address::default() { Some(()) } else { None }; + } + let mut tx = TransactionSigned { hash: Default::default(), signature: sig, transaction }; tx.hash = tx.recalculate_hash(); @@ -2035,7 +2040,10 @@ mod tests { ); let encoded = alloy_rlp::encode(signed_tx); - assert_eq!(hex!("9003ce8080808080808080c080c0808080"), encoded[..]); + assert_eq!( + hex!("a403e280808080809400000000000000000000000000000000000000008080c080c0808080"), + encoded[..] + ); assert_eq!(MIN_LENGTH_EIP4844_TX_ENCODED, encoded.len()); TransactionSigned::decode(&mut &encoded[..]).unwrap(); @@ -2150,4 +2158,13 @@ mod tests { let written_bytes = tx_signed_no_hash.to_compact(&mut buff); from_compact_zstd_unaware(&buff, written_bytes); } + + #[test] + fn create_txs_disallowed_for_eip4844() { + let data = + [3, 208, 128, 128, 123, 128, 120, 128, 129, 129, 128, 192, 129, 129, 192, 128, 128, 9]; + let res = TransactionSigned::decode_enveloped(&mut &data[..]); + + assert!(res.is_err()); + } } diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index 6a35429c53d61..bb945ce8faf57 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -1,7 +1,7 @@ //! Compatibility functions for rpc `Transaction` type. 
use alloy_rpc_types::request::{TransactionInput, TransactionRequest}; -use reth_primitives::{BlockNumber, TransactionSignedEcRecovered, TxKind, TxType, B256}; +use reth_primitives::{Address, BlockNumber, TransactionSignedEcRecovered, TxKind, TxType, B256}; use reth_rpc_types::Transaction; use signature::from_primitive_signature; pub use typed::*; @@ -42,9 +42,9 @@ fn fill( let signer = tx.signer(); let signed_tx = tx.into_signed(); - let to = match signed_tx.kind() { + let to: Option
= match signed_tx.kind() { TxKind::Create => None, - TxKind::Call(to) => Some(*to), + TxKind::Call(to) => Some(Address(*to)), }; #[allow(unreachable_patterns)] diff --git a/crates/rpc/rpc-types-compat/src/transaction/typed.rs b/crates/rpc/rpc-types-compat/src/transaction/typed.rs index b119a0956c4ba..21e492218a4a4 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/typed.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/typed.rs @@ -47,7 +47,8 @@ pub fn to_primitive_transaction( gas_limit: tx.gas_limit.to(), max_fee_per_gas: tx.max_fee_per_gas.to(), max_priority_fee_per_gas: tx.max_priority_fee_per_gas.to(), - to: tx.kind, + placeholder: Some(()), + to: tx.to, value: tx.value, access_list: tx.access_list, blob_versioned_hashes: tx.blob_versioned_hashes, diff --git a/crates/rpc/rpc-types/src/eth/transaction/typed.rs b/crates/rpc/rpc-types/src/eth/transaction/typed.rs index 6526bc2b6cc10..e3d70d3548612 100644 --- a/crates/rpc/rpc-types/src/eth/transaction/typed.rs +++ b/crates/rpc/rpc-types/src/eth/transaction/typed.rs @@ -2,7 +2,7 @@ //! transaction deserialized from the json input of an RPC call. Depending on what fields are set, //! it can be converted into the container type [`TypedTransactionRequest`]. 
-use alloy_primitives::{Bytes, TxKind, B256, U256}; +use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use alloy_rpc_types::{AccessList, BlobTransactionSidecar}; /// Container type for various Ethereum transaction requests @@ -100,8 +100,8 @@ pub struct EIP4844TransactionRequest { pub max_fee_per_gas: U256, /// The gas limit for the transaction pub gas_limit: U256, - /// The kind of transaction (e.g., Call, Create) - pub kind: TxKind, + /// The recipient of the transaction + pub to: Address, /// The value of the transaction pub value: U256, /// The input data for the transaction diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 721cef3db1f8a..dc01dc12c3859 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -979,7 +979,11 @@ where gas_limit: U256::from(gas.unwrap_or_default()), value: value.unwrap_or_default(), input: data.into_input().unwrap_or_default(), - kind: to.unwrap_or(RpcTransactionKind::Create), + #[allow(clippy::manual_unwrap_or_default)] // clippy is suggesting here unwrap_or_default + to: match to { + Some(RpcTransactionKind::Call(to)) => to, + _ => Address::default(), + }, access_list: access_list.unwrap_or_default(), // eip-4844 specific. 
@@ -1802,7 +1806,7 @@ pub(crate) fn build_transaction_receipt_with_block_receipts( res_receipt.contract_address = Some(from.create(transaction.transaction.nonce())); } Call(addr) => { - res_receipt.to = Some(*addr); + res_receipt.to = Some(Address(*addr)); } } diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index e67adb6fd99a1..d4732256e9c37 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -145,8 +145,16 @@ fn should_use_alt_impl(ftype: &String, segment: &syn::PathSegment) -> bool { if let (Some(path), 1) = (arg_path.path.segments.first(), arg_path.path.segments.len()) { - if ["B256", "Address", "Address", "Bloom", "TxHash", "BlockHash"] - .contains(&path.ident.to_string().as_str()) + if [ + "B256", + "Address", + "Address", + "Bloom", + "TxHash", + "BlockHash", + "CompactPlaceholder", + ] + .contains(&path.ident.to_string().as_str()) { return true } diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 907fee440d8e5..9dcef12730da2 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -70,6 +70,24 @@ pub trait Compact: Sized { } } +/// To be used with `Option` to place or replace one bit on the bitflag struct. +pub type CompactPlaceholder = (); + +impl Compact for CompactPlaceholder { + #[inline] + fn to_compact(self, _: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + 0 + } + + #[inline] + fn from_compact(buf: &[u8], _: usize) -> (Self, &[u8]) { + ((), buf) + } +} + macro_rules! 
impl_uint_compact { ($($name:tt),+) => { $( diff --git a/crates/transaction-pool/src/test_utils/gen.rs b/crates/transaction-pool/src/test_utils/gen.rs index 5c335e5d6edab..e5c99ec03ea32 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -184,7 +184,11 @@ impl TransactionBuilder { gas_limit: self.gas_limit, max_fee_per_gas: self.max_fee_per_gas, max_priority_fee_per_gas: self.max_priority_fee_per_gas, - to: self.to, + placeholder: None, + to: match self.to { + TxKind::Call(to) => to, + TxKind::Create => Address::default(), + }, value: self.value, access_list: self.access_list, input: self.input, diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 948c47109a1a7..dded89a0ed6f5 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -186,8 +186,10 @@ pub enum MockTransaction { max_fee_per_blob_gas: u128, /// The gas limit for the transaction. gas_limit: u64, + /// Placeholder for backwards compatibility. + placeholder: Option<()>, /// The transaction's destination. - to: TxKind, + to: Address, /// The value of the transaction. value: U256, /// The access list associated with the transaction. @@ -276,7 +278,8 @@ impl MockTransaction { max_priority_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, max_fee_per_blob_gas: DATA_GAS_PER_BLOB as u128, gas_limit: 0, - to: Address::random().into(), + placeholder: Some(()), + to: Address::random(), value: Default::default(), input: Bytes::new(), access_list: Default::default(), @@ -684,12 +687,12 @@ impl PoolTransaction for MockTransaction { } /// Returns the transaction kind associated with the transaction. - fn kind(&self) -> &TxKind { + fn kind(&self) -> TxKind { match self { MockTransaction::Legacy { to, .. } | MockTransaction::Eip1559 { to, .. } | - MockTransaction::Eip4844 { to, .. } | - MockTransaction::Eip2930 { to, .. 
} => to, + MockTransaction::Eip2930 { to, .. } => *to, + MockTransaction::Eip4844 { to, .. } => TxKind::Call(*to), } } @@ -851,6 +854,7 @@ impl TryFromRecoveredTransaction for MockTransaction { gas_limit, max_fee_per_gas, max_priority_fee_per_gas, + placeholder, to, value, input, @@ -866,6 +870,7 @@ impl TryFromRecoveredTransaction for MockTransaction { max_priority_fee_per_gas, max_fee_per_blob_gas, gas_limit, + placeholder, to, value, input, @@ -977,12 +982,14 @@ impl From for Transaction { input, sidecar, size: _, + placeholder, } => Self::Eip4844(TxEip4844 { chain_id, nonce, gas_limit, max_fee_per_gas, max_priority_fee_per_gas, + placeholder, to, value, access_list, @@ -1081,6 +1088,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { max_fee_per_blob_gas, access_list, blob_versioned_hashes: _, + placeholder, }) => MockTransaction::Eip4844 { chain_id: *chain_id, sender, @@ -1090,6 +1098,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { max_priority_fee_per_gas: *max_priority_fee_per_gas, max_fee_per_blob_gas: *max_fee_per_blob_gas, gas_limit: *gas_limit, + placeholder: *placeholder, to: *to, value: *value, input: input.clone(), diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index ca91b00daf537..fba43e899c20a 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -813,12 +813,12 @@ pub trait PoolTransaction: /// Returns the transaction's [`TxKind`], which is the address of the recipient or /// [`TxKind::Create`] if the transaction is a contract creation. - fn kind(&self) -> &TxKind; + fn kind(&self) -> TxKind; /// Returns the recipient of the transaction if it is not a [TxKind::Create] /// transaction. fn to(&self) -> Option
{ - (*self.kind()).to().copied() + self.kind().to().copied() } /// Returns the input data of this transaction. @@ -1063,7 +1063,7 @@ impl PoolTransaction for EthPooledTransaction { /// Returns the transaction's [`TxKind`], which is the address of the recipient or /// [`TxKind::Create`] if the transaction is a contract creation. - fn kind(&self) -> &TxKind { + fn kind(&self) -> TxKind { self.transaction.kind() } diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 5c0a2db5ad068..057154d2fe6f4 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -708,7 +708,7 @@ pub fn ensure_intrinsic_gas( if transaction.gas_limit() < calculate_intrinsic_gas_after_merge( transaction.input(), - transaction.kind(), + &transaction.kind(), &access_list, is_shanghai, ) From 54cd8cf9b005fda80d18dd18f98f7836e7b98f75 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 21 May 2024 10:59:17 +0200 Subject: [PATCH 571/700] fix: Chain::split returns NoSplitPending for block number > tip (#8285) --- crates/storage/provider/src/chain.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index 9b9c66d4bc6b4..2ff70bc4add74 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -289,7 +289,10 @@ impl Chain { block_number } ChainSplitTarget::Number(block_number) => { - if block_number >= chain_tip { + if block_number > chain_tip { + return ChainSplit::NoSplitPending(self) + } + if block_number == chain_tip { return ChainSplit::NoSplitCanonical(self) } if block_number < *self.blocks.first_entry().expect("chain is never empty").key() { @@ -588,7 +591,7 @@ mod tests { // split at higher number assert_eq!( chain.clone().split(ChainSplitTarget::Number(10)), - ChainSplit::NoSplitCanonical(chain.clone()) + ChainSplit::NoSplitPending(chain.clone()) ); // 
split at lower number From 23738e6db567f1f688db2e990a6276c48d784977 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 May 2024 11:21:50 +0200 Subject: [PATCH 572/700] fix(cli): tx fetcher args help (#8297) Co-authored-by: Oliver Nordbjerg --- crates/node-core/src/args/network.rs | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/crates/node-core/src/args/network.rs b/crates/node-core/src/args/network.rs index 0c808637a34bc..350e7c4a1b1a4 100644 --- a/crates/node-core/src/args/network.rs +++ b/crates/node-core/src/args/network.rs @@ -90,19 +90,24 @@ pub struct NetworkArgs { #[arg(long)] pub max_inbound_peers: Option, - /// Soft limit for the byte size of a `PooledTransactions` response on assembling a - /// `GetPooledTransactions` request. Spec'd at 2 MiB. - /// - /// . - #[arg(long = "pooled-tx-response-soft-limit", value_name = "BYTES", default_value_t = SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, help = "Sets the soft limit for the byte size of pooled transactions response. Specified at 2 MiB by default. This is a spec'd value that should only be set for experimental purposes on a testnet.",long_help = None)] + /// Experimental, for usage in research. Sets the max accumulated byte size of transactions + /// to pack in one response. + /// Spec'd at 2MiB. + #[arg(long = "pooled-tx-response-soft-limit", value_name = "BYTES", default_value_t = SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, verbatim_doc_comment)] pub soft_limit_byte_size_pooled_transactions_response: usize, - /// Default soft limit for the byte size of a `PooledTransactions` response on assembling a - /// `GetPooledTransactions` request. This defaults to less - /// than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when - /// assembling a `PooledTransactions` response. Default - /// is 128 KiB. 
- #[arg(long = "pooled-tx-pack-soft-limit", value_name = "BYTES", default_value_t = DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ,help = "Sets the soft limit for the byte size of a single pooled transactions response when packing multiple responses into a single packet for a `GetPooledTransactions` request. Specified at 128 Kib by default.",long_help = None)] + /// Experimental, for usage in research. Sets the max accumulated byte size of transactions to + /// request in one request. + /// + /// Since RLPx protocol version 68, the byte size of a transaction is shared as metadata in a + /// transaction announcement (see RLPx specs). This allows a node to request a specific size + /// response. + /// + /// By default, nodes request only 128 KiB worth of transactions, but should a peer request + /// more, up to 2 MiB, a node will answer with more than 128 KiB. + /// + /// Default is 128 KiB. + #[arg(long = "pooled-tx-pack-soft-limit", value_name = "BYTES", default_value_t = DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, verbatim_doc_comment)] pub soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize, } From 043b4a9cda8d738283b414897335c1863bfd34c2 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 May 2024 11:49:43 +0200 Subject: [PATCH 573/700] fix(op): fix logs (#8329) --- bin/reth/src/commands/import_op.rs | 7 ++++--- bin/reth/src/commands/import_receipts.rs | 4 ++-- crates/net/downloaders/src/file_client.rs | 2 +- crates/net/downloaders/src/receipt_file_client.rs | 2 +- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/bin/reth/src/commands/import_op.rs b/bin/reth/src/commands/import_op.rs index 5362b45b08b9d..577fc5de3d25c 100644 --- a/bin/reth/src/commands/import_op.rs +++ b/bin/reth/src/commands/import_op.rs @@ -121,7 +121,7 @@ impl ImportOpCommand { info!(target: "reth::cli", "Chain file chunk read"); total_decoded_blocks += 
file_client.headers_len(); - total_decoded_txns += file_client.bodies_len(); + total_decoded_txns += file_client.total_transactions(); for (block_number, body) in file_client.bodies_iter_mut() { body.transactions.retain(|tx| { @@ -172,16 +172,17 @@ impl ImportOpCommand { let provider = provider_factory.provider()?; - let total_imported_blocks = provider.tx_ref().entries::()?; + let total_imported_blocks = provider.tx_ref().entries::()?; let total_imported_txns = provider.tx_ref().entries::()?; if total_decoded_blocks != total_imported_blocks || - total_decoded_txns != total_imported_txns + total_decoded_txns != total_imported_txns + total_filtered_out_dup_txns { error!(target: "reth::cli", total_decoded_blocks, total_imported_blocks, total_decoded_txns, + total_filtered_out_dup_txns, total_imported_txns, "Chain was partially imported" ); diff --git a/bin/reth/src/commands/import_receipts.rs b/bin/reth/src/commands/import_receipts.rs index e6aae327af58b..018ff132b94b4 100644 --- a/bin/reth/src/commands/import_receipts.rs +++ b/bin/reth/src/commands/import_receipts.rs @@ -19,7 +19,7 @@ use reth_provider::{ BundleStateWithReceipts, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StateWriter, StaticFileProviderFactory, StaticFileWriter, }; -use tracing::{debug, error, info}; +use tracing::{debug, error, info, trace}; use std::{path::PathBuf, sync::Arc}; @@ -89,7 +89,7 @@ impl ImportReceiptsCommand { for stage in StageId::ALL { let checkpoint = provider.get_stage_checkpoint(stage)?; - debug!(target: "reth::cli", + trace!(target: "reth::cli", ?stage, ?checkpoint, "Read stage checkpoints from db" diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 85fac46428225..7ce222d57401a 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -172,7 +172,7 @@ impl FileClient { /// Returns the current number of transactions in the client. 
pub fn total_transactions(&self) -> usize { - self.bodies.iter().flat_map(|(_, body)| &body.transactions).count() + self.bodies.iter().fold(0, |acc, (_, body)| acc + body.transactions.len()) } } diff --git a/crates/net/downloaders/src/receipt_file_client.rs b/crates/net/downloaders/src/receipt_file_client.rs index b6291d0a3dc48..0eaa4ff1b7763 100644 --- a/crates/net/downloaders/src/receipt_file_client.rs +++ b/crates/net/downloaders/src/receipt_file_client.rs @@ -43,7 +43,7 @@ impl FromReader for ReceiptFileClient { trace!(target: "downloaders::file", target_num_bytes=num_bytes, capacity=stream.read_buffer().capacity(), - coded=?HackReceiptFileCodec, + codec=?HackReceiptFileCodec, "init decode stream" ); From 0b8ab1e083c6d83a45bb4f30f83caf50d06a9f45 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 21 May 2024 12:15:43 +0200 Subject: [PATCH 574/700] fix: remove some `expect` usage in blockchain_tree (#8278) --- crates/blockchain-tree/src/blockchain_tree.rs | 81 ++++++++++++++----- 1 file changed, 61 insertions(+), 20 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 689994471200c..2a0bfb8bae652 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -280,7 +280,10 @@ where /// * `BundleState` changes that happened at the asked `block_hash` /// * `BTreeMap` list of past pending and canonical hashes, That are /// needed for evm `BLOCKHASH` opcode. - /// Return none if block unknown. + /// Return none if: + /// * block unknown. + /// * chain_id not present in state. + /// * there are no parent hashes stored. 
pub fn post_state_data(&self, block_hash: BlockHash) -> Option { trace!(target: "blockchain_tree", ?block_hash, "Searching for post state data"); @@ -290,14 +293,22 @@ where if let Some(chain_id) = self.block_indices().get_blocks_chain_id(&block_hash) { trace!(target: "blockchain_tree", ?block_hash, "Constructing post state data based on non-canonical chain"); // get block state - let chain = self.state.chains.get(&chain_id).expect("Chain should be present"); + let Some(chain) = self.state.chains.get(&chain_id) else { + debug!(target: "blockchain_tree", ?chain_id, "Chain with ID not present"); + return None; + }; let block_number = chain.block_number(block_hash)?; let state = chain.state_at_block(block_number)?; // get parent hashes let mut parent_block_hashes = self.all_chain_hashes(chain_id); let first_pending_block_number = - *parent_block_hashes.first_key_value().expect("There is at least one block hash").0; + if let Some(key_value) = parent_block_hashes.first_key_value() { + *key_value.0 + } else { + debug!(target: "blockchain_tree", ?chain_id, "No blockhashes stored"); + return None; + }; let canonical_chain = canonical_chain .iter() .filter(|&(key, _)| key < first_pending_block_number) @@ -600,13 +611,17 @@ where while let Some(block) = dependent_block.pop_back() { // Get chain of dependent block. - let chain_id = - self.block_indices().get_blocks_chain_id(&block).expect("Block should be in tree"); + let Some(chain_id) = self.block_indices().get_blocks_chain_id(&block) else { + debug!(target: "blockchain_tree", ?block, "Block not in tree"); + return Default::default(); + }; // Find all blocks that fork from this chain. 
- for chain_block in - self.state.chains.get(&chain_id).expect("Chain should be in tree").blocks().values() - { + let Some(chain) = self.state.chains.get(&chain_id) else { + debug!(target: "blockchain_tree", ?chain_id, "Chain not in tree"); + return Default::default(); + }; + for chain_block in chain.blocks().values() { if let Some(forks) = self.block_indices().fork_to_child().get(&chain_block.hash()) { // If there are sub forks append them for processing. dependent_block.extend(forks); @@ -623,6 +638,8 @@ where /// This method searches for any chain that depended on this block being part of the canonical /// chain. Each dependent chain's state is then updated with state entries removed from the /// plain state during the unwind. + /// Returns the result of inserting the chain or None if any of the dependent chains is not + /// in the tree. fn insert_unwound_chain(&mut self, chain: AppendableChain) -> Option { // iterate over all blocks in chain and find any fork blocks that are in tree. for (number, block) in chain.blocks().iter() { @@ -637,8 +654,10 @@ where // prepend state to all chains that fork from this block. 
for chain_id in chains_to_bump { - let chain = - self.state.chains.get_mut(&chain_id).expect("Chain should be in tree"); + let Some(chain) = self.state.chains.get_mut(&chain_id) else { + debug!(target: "blockchain_tree", ?chain_id, "Chain not in tree"); + return None; + }; debug!(target: "blockchain_tree", unwound_block= ?block.num_hash(), @@ -723,12 +742,16 @@ where /// /// if it is canonical or extends the canonical chain, return [BlockAttachment::Canonical] /// if it does not extend the canonical chain, return [BlockAttachment::HistoricalFork] + /// if the block is not in the tree or its chain id is not valid, return None #[track_caller] fn is_block_inside_chain(&self, block: &BlockNumHash) -> Option { // check if block known and is already in the tree if let Some(chain_id) = self.block_indices().get_blocks_chain_id(&block.hash) { // find the canonical fork of this chain - let canonical_fork = self.canonical_fork(chain_id).expect("Chain id is valid"); + let Some(canonical_fork) = self.canonical_fork(chain_id) else { + debug!(target: "blockchain_tree", chain_id=?chain_id, block=?block.hash, "Chain id not valid"); + return None; + }; // if the block's chain extends canonical chain return if canonical_fork == self.block_indices().canonical_tip() { Some(BlockAttachment::Canonical) @@ -1050,9 +1073,14 @@ where }; // we are splitting chain at the block hash that we want to make canonical - let canonical = self - .remove_and_split_chain(chain_id, ChainSplitTarget::Hash(block_hash)) - .expect("to be present"); + let Some(canonical) = + self.remove_and_split_chain(chain_id, ChainSplitTarget::Hash(block_hash)) + else { + debug!(target: "blockchain_tree", ?block_hash, ?chain_id, "Chain not present"); + return Err(CanonicalError::from(BlockchainTreeError::BlockSideChainIdConsistency { + chain_id: chain_id.into(), + })); + }; trace!(target: "blockchain_tree", chain = ?canonical, "Found chain to make canonical"); 
durations_recorder.record_relative(MakeCanonicalAction::SplitChain); @@ -1062,9 +1090,14 @@ where // loop while fork blocks are found in Tree. while let Some(chain_id) = self.block_indices().get_blocks_chain_id(&fork_block.hash) { // canonical chain is lower part of the chain. - let canonical = self - .remove_and_split_chain(chain_id, ChainSplitTarget::Number(fork_block.number)) - .expect("fork is present"); + let Some(canonical) = + self.remove_and_split_chain(chain_id, ChainSplitTarget::Number(fork_block.number)) + else { + debug!(target: "blockchain_tree", ?fork_block, ?chain_id, "Fork not present"); + return Err(CanonicalError::from( + BlockchainTreeError::BlockSideChainIdConsistency { chain_id: chain_id.into() }, + )); + }; fork_block = canonical.fork_block(); chains_to_promote.push(canonical); } @@ -1072,13 +1105,21 @@ where let old_tip = self.block_indices().canonical_tip(); // Merge all chains into one chain. - let mut new_canon_chain = chains_to_promote.pop().expect("There is at least one block"); + let Some(mut new_canon_chain) = chains_to_promote.pop() else { + debug!(target: "blockchain_tree", "No blocks in the chain to make canonical"); + return Err(CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain { + block_hash: fork_block.hash, + })); + }; trace!(target: "blockchain_tree", ?new_canon_chain, "Merging chains"); let mut chain_appended = false; for chain in chains_to_promote.into_iter().rev() { - chain_appended = true; trace!(target: "blockchain_tree", ?chain, "Appending chain"); - new_canon_chain.append_chain(chain).expect("We have just build the chain."); + let block_hash = chain.fork_block().hash; + new_canon_chain.append_chain(chain).map_err(|_| { + CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain { block_hash }) + })?; + chain_appended = true; } durations_recorder.record_relative(MakeCanonicalAction::MergeAllChains); From 6b14cbc5e75b12efcbf5e7085ad6ac25a82f4ee3 Mon Sep 17 00:00:00 2001 From: Federico Gimenez 
Date: Tue, 21 May 2024 13:01:44 +0200 Subject: [PATCH 575/700] fix: reject trailing bytes when decoding transactions (#8296) --- crates/primitives/src/transaction/mod.rs | 41 ++++++++++++++++++++---- 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index fb5abf08c0ff3..2201b5f0d42a3 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1248,7 +1248,7 @@ impl TransactionSigned { /// Decodes en enveloped EIP-2718 typed transaction. /// - /// This should be used _only_ be used internally in general transaction decoding methods, + /// This should _only_ be used internally in general transaction decoding methods, /// which have already ensured that the input is a typed transaction with the following format: /// `tx-type || rlp(tx-data)` /// @@ -1324,18 +1324,27 @@ impl TransactionSigned { /// /// For EIP-2718 typed transactions, the format is encoded as the type of the transaction /// followed by the rlp of the transaction: `type || rlp(tx-data)`. - pub fn decode_enveloped(data: &mut &[u8]) -> alloy_rlp::Result { - if data.is_empty() { + /// + /// Both for legacy and EIP-2718 transactions, an error will be returned if there is an excess + /// of bytes in input data. + pub fn decode_enveloped(input_data: &mut &[u8]) -> alloy_rlp::Result { + if input_data.is_empty() { return Err(RlpError::InputTooShort) } // Check if the tx is a list - if data[0] >= EMPTY_LIST_CODE { + let output_data = if input_data[0] >= EMPTY_LIST_CODE { // decode as legacy transaction - TransactionSigned::decode_rlp_legacy_transaction(data) + TransactionSigned::decode_rlp_legacy_transaction(input_data)? } else { - TransactionSigned::decode_enveloped_typed_transaction(data) + TransactionSigned::decode_enveloped_typed_transaction(input_data)? 
+ }; + + if !input_data.is_empty() { + return Err(RlpError::UnexpectedLength); } + + Ok(output_data) } /// Returns the length without an RLP header - this is used for eth/68 sizes. @@ -2167,4 +2176,24 @@ mod tests { assert!(res.is_err()); } + + #[test] + fn decode_envelope_fails_on_trailing_bytes_legacy() { + let data = [201, 3, 56, 56, 128, 43, 36, 27, 128, 3, 192]; + + let result = TransactionSigned::decode_enveloped(&mut data.as_ref()); + + assert!(result.is_err()); + assert_eq!(result, Err(RlpError::UnexpectedLength)); + } + + #[test] + fn decode_envelope_fails_on_trailing_bytes_eip2718() { + let data = hex!("02f872018307910d808507204d2cb1827d0094388c818ca8b9251b393131c08a736a67ccb19297880320d04823e2701c80c001a0cf024f4815304df2867a1a74e9d2707b6abda0337d2d54a4438d453f4160f190a07ac0e6b3bc9395b5b9c8b9e6d77204a236577a5b18467b9175c01de4faa208d900"); + + let result = TransactionSigned::decode_enveloped(&mut data.as_ref()); + + assert!(result.is_err()); + assert_eq!(result, Err(RlpError::UnexpectedLength)); + } } From c97963b3547347c4e190998cf0b1a2b1e95c0b3b Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 May 2024 13:17:18 +0200 Subject: [PATCH 576/700] fix(op): skip tx root validation for filtered out dup txns (#8316) --- bin/reth/src/commands/import_op.rs | 58 ++--------------------- crates/consensus/common/src/validation.rs | 7 ++- crates/net/downloaders/src/file_client.rs | 8 ++-- crates/primitives/src/lib.rs | 1 + crates/primitives/src/op_mainnet.rs | 52 ++++++++++++++++++++ 5 files changed, 66 insertions(+), 60 deletions(-) create mode 100644 crates/primitives/src/op_mainnet.rs diff --git a/bin/reth/src/commands/import_op.rs b/bin/reth/src/commands/import_op.rs index 577fc5de3d25c..5576a1077bb03 100644 --- a/bin/reth/src/commands/import_op.rs +++ b/bin/reth/src/commands/import_op.rs @@ -21,7 +21,7 @@ use reth_downloaders::file_client::{ use reth_node_core::init::init_genesis; -use reth_primitives::{hex, stage::StageId, PruneModes, TxHash}; +use 
reth_primitives::{op_mainnet::is_dup_tx, stage::StageId, PruneModes}; use reth_provider::{ProviderFactory, StageCheckpointReader, StaticFileProviderFactory}; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; @@ -124,8 +124,8 @@ impl ImportOpCommand { total_decoded_txns += file_client.total_transactions(); for (block_number, body) in file_client.bodies_iter_mut() { - body.transactions.retain(|tx| { - if is_duplicate(tx.hash, *block_number) { + body.transactions.retain(|_| { + if is_dup_tx(block_number) { total_filtered_out_dup_txns += 1; return false } @@ -197,55 +197,3 @@ impl ImportOpCommand { Ok(()) } } - -/// A transaction that has been replayed in chain below Bedrock. -#[derive(Debug)] -pub struct ReplayedTx { - tx_hash: TxHash, - original_block: u64, -} - -impl ReplayedTx { - /// Returns a new instance. - pub const fn new(tx_hash: TxHash, original_block: u64) -> Self { - Self { tx_hash, original_block } - } -} - -/// Transaction 0x9ed8..9cb9, first seen in block 985. -pub const TX_BLOCK_985: ReplayedTx = ReplayedTx::new( - TxHash::new(hex!("9ed8f713b2cc6439657db52dcd2fdb9cc944915428f3c6e2a7703e242b259cb9")), - 985, -); - -/// Transaction 0x86f8..76e5, first seen in block 123 322. -pub const TX_BLOCK_123_322: ReplayedTx = ReplayedTx::new( - TxHash::new(hex!("c033250c5a45f9d104fc28640071a776d146d48403cf5e95ed0015c712e26cb6")), - 123_322, -); - -/// Transaction 0x86f8..76e5, first seen in block 1 133 328. -pub const TX_BLOCK_1_133_328: ReplayedTx = ReplayedTx::new( - TxHash::new(hex!("86f8c77cfa2b439e9b4e92a10f6c17b99fce1220edf4001e4158b57f41c576e5")), - 1_133_328, -); - -/// Transaction 0x3cc2..cd4e, first seen in block 1 244 152. -pub const TX_BLOCK_1_244_152: ReplayedTx = ReplayedTx::new( - TxHash::new(hex!("3cc27e7cc8b7a9380b2b2f6c224ea5ef06ade62a6af564a9dd0bcca92131cd4e")), - 1_244_152, -); - -/// List of original occurrences of all duplicate transactions below Bedrock. 
-pub const TX_DUP_ORIGINALS: [ReplayedTx; 4] = - [TX_BLOCK_985, TX_BLOCK_123_322, TX_BLOCK_1_133_328, TX_BLOCK_1_244_152]; - -/// Returns `true` if transaction is the second or third appearance of the transaction. -pub fn is_duplicate(tx_hash: TxHash, block_number: u64) -> bool { - for ReplayedTx { tx_hash: dup_tx_hash, original_block } in TX_DUP_ORIGINALS { - if tx_hash == dup_tx_hash && block_number != original_block { - return true - } - } - false -} diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 40c36bfa51c28..8a3d9588e9246 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -6,6 +6,7 @@ use reth_primitives::{ eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, MAXIMUM_EXTRA_DATA_SIZE, }, + op_mainnet::is_dup_tx, ChainSpec, GotExpected, Hardfork, Header, SealedBlock, SealedHeader, }; @@ -73,8 +74,10 @@ pub fn validate_block_standalone( } // Check transaction root - if let Err(error) = block.ensure_transaction_root_valid() { - return Err(ConsensusError::BodyTransactionRootDiff(error.into())) + if !chain_spec.is_optimism_mainnet() || !is_dup_tx(block.number) { + if let Err(error) = block.ensure_transaction_root_valid() { + return Err(ConsensusError::BodyTransactionRootDiff(error.into())) + } } // EIP-4895: Beacon chain push withdrawals as operations diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 7ce222d57401a..f79b8744fe5b9 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -164,10 +164,12 @@ impl FileClient { } /// Returns a mutable iterator over bodies in the client. - pub fn bodies_iter_mut(&mut self) -> impl Iterator { + /// + /// Panics, if file client headers and bodies are not mapping 1-1. 
+ pub fn bodies_iter_mut(&mut self) -> impl Iterator { let bodies = &mut self.bodies; - let headers = &self.headers; - headers.keys().zip(bodies.values_mut()) + let numbers = &self.hash_to_number; + bodies.iter_mut().map(|(hash, body)| (numbers[hash], body)) } /// Returns the current number of transactions in the client. diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 1dd2562e9d63d..3c57158f1a3c4 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -35,6 +35,7 @@ mod header; mod integer_list; mod log; mod net; +pub mod op_mainnet; pub mod proofs; mod prune; mod receipt; diff --git a/crates/primitives/src/op_mainnet.rs b/crates/primitives/src/op_mainnet.rs new file mode 100644 index 0000000000000..c60504e92b4c3 --- /dev/null +++ b/crates/primitives/src/op_mainnet.rs @@ -0,0 +1,52 @@ +//! Helpers for working with replayed OP mainnet OVM transactions (in blocks below Bedrock). + +/// Transaction 0x9ed8f713b2cc6439657db52dcd2fdb9cc944915428f3c6e2a7703e242b259cb9 in block 985, +/// replayed in blocks: +/// +/// 19 022 +/// 45 036 +pub const TX_BLOCK_985: [u64; 2] = [19_022, 45_036]; + +/// Transaction 0xc033250c5a45f9d104fc28640071a776d146d48403cf5e95ed0015c712e26cb6 in block +/// 123 322, replayed in block: +/// +/// 123 542 +pub const TX_BLOCK_123_322: u64 = 123_542; + +/// Transaction 0x86f8c77cfa2b439e9b4e92a10f6c17b99fce1220edf4001e4158b57f41c576e5 in block +/// 1 133 328, replayed in blocks: +/// +/// 1 135 391 +/// 1 144 468 +pub const TX_BLOCK_1_133_328: [u64; 2] = [1_135_391, 1_144_468]; + +/// Transaction 0x3cc27e7cc8b7a9380b2b2f6c224ea5ef06ade62a6af564a9dd0bcca92131cd4e in block +/// 1 244 152, replayed in block: +/// +/// 1 272 994 +pub const TX_BLOCK_1_244_152: u64 = 1_272_994; + +/// The six blocks with replayed transactions. 
+pub const BLOCK_NUMS_REPLAYED_TX: [u64; 6] = [ + TX_BLOCK_985[0], + TX_BLOCK_985[1], + TX_BLOCK_123_322, + TX_BLOCK_1_133_328[0], + TX_BLOCK_1_133_328[1], + TX_BLOCK_1_244_152, +]; + +/// Returns `true` if transaction is the second or third appearance of the transaction. The blocks +/// with replayed transaction happen to only contain the single transaction. +pub fn is_dup_tx(block_number: u64) -> bool { + if block_number > BLOCK_NUMS_REPLAYED_TX[5] { + return false + } + + // these blocks just have one transaction! + if BLOCK_NUMS_REPLAYED_TX.contains(&block_number) { + return true + } + + false +} From 106d44930788f8c31b79b754ab1fcb9d767ff405 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 21 May 2024 16:24:15 +0200 Subject: [PATCH 577/700] fix: disambiguate use of next when validating ForkId (#8320) --- crates/ethereum-forks/src/forkid.rs | 92 ++++++++++++++++++++++++++--- 1 file changed, 84 insertions(+), 8 deletions(-) diff --git a/crates/ethereum-forks/src/forkid.rs b/crates/ethereum-forks/src/forkid.rs index ee4edb8bdf8f6..b0aba0d5abdc7 100644 --- a/crates/ethereum-forks/src/forkid.rs +++ b/crates/ethereum-forks/src/forkid.rs @@ -21,6 +21,7 @@ use std::{ use thiserror::Error; const CRC_32_IEEE: Crc = Crc::::new(&CRC_32_ISO_HDLC); +const TIMESTAMP_BEFORE_ETHEREUM_MAINNET: u64 = 1_300_000_000; /// `CRC32` hash of all previous forks starting from genesis block. #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] @@ -313,16 +314,25 @@ impl ForkFilter { return Ok(()) } - // We check if this fork is time-based or block number-based - // NOTE: This is a bit hacky but I'm unsure how else we can figure out when to use - // timestamp vs when to use block number.. 
- let head_block_or_time = match self.cache.epoch_start { - ForkFilterKey::Block(_) => self.head.number, - ForkFilterKey::Time(_) => self.head.timestamp, + let is_incompatible = if self.head.number < TIMESTAMP_BEFORE_ETHEREUM_MAINNET { + // When the block number is less than an old timestamp before Ethereum mainnet, + // we check if this fork is time-based or block number-based by estimating that, + // if fork_id.next is bigger than the old timestamp, we are dealing with a + // timestamp, otherwise with a block. + (fork_id.next > TIMESTAMP_BEFORE_ETHEREUM_MAINNET && + self.head.timestamp >= fork_id.next) || + (fork_id.next <= TIMESTAMP_BEFORE_ETHEREUM_MAINNET && + self.head.number >= fork_id.next) + } else { + // Extra safety check to future-proof for when Ethereum has over a billion blocks. + let head_block_or_time = match self.cache.epoch_start { + ForkFilterKey::Block(_) => self.head.number, + ForkFilterKey::Time(_) => self.head.timestamp, + }; + head_block_or_time >= fork_id.next }; - //... compare local head to FORK_NEXT. - return if head_block_or_time >= fork_id.next { + return if is_incompatible { // 1a) A remotely announced but remotely not passed block is already passed locally, // disconnect, since the chains are incompatible. Err(ValidationError::LocalIncompatibleOrStale { @@ -588,6 +598,72 @@ mod tests { filter.validate(remote), Err(ValidationError::LocalIncompatibleOrStale { local: filter.current(), remote }) ); + + // Block far in the future (block number bigger than TIMESTAMP_BEFORE_ETHEREUM_MAINNET), not + // compatible. 
+ filter + .set_head(Head { number: TIMESTAMP_BEFORE_ETHEREUM_MAINNET + 1, ..Default::default() }); + let remote = ForkId { + hash: ForkHash(hex!("668db0af")), + next: TIMESTAMP_BEFORE_ETHEREUM_MAINNET + 1, + }; + assert_eq!( + filter.validate(remote), + Err(ValidationError::LocalIncompatibleOrStale { local: filter.current(), remote }) + ); + + // Block far in the future (block number bigger than TIMESTAMP_BEFORE_ETHEREUM_MAINNET), + // compatible. + filter + .set_head(Head { number: TIMESTAMP_BEFORE_ETHEREUM_MAINNET + 1, ..Default::default() }); + let remote = ForkId { + hash: ForkHash(hex!("668db0af")), + next: TIMESTAMP_BEFORE_ETHEREUM_MAINNET + 2, + }; + assert_eq!(filter.validate(remote), Ok(())); + + // block number smaller than TIMESTAMP_BEFORE_ETHEREUM_MAINNET and + // fork_id.next > TIMESTAMP_BEFORE_ETHEREUM_MAINNET && self.head.timestamp >= fork_id.next, + // not compatible. + filter.set_head(Head { + number: TIMESTAMP_BEFORE_ETHEREUM_MAINNET - 1, + timestamp: TIMESTAMP_BEFORE_ETHEREUM_MAINNET + 2, + ..Default::default() + }); + let remote = ForkId { + hash: ForkHash(hex!("668db0af")), + next: TIMESTAMP_BEFORE_ETHEREUM_MAINNET + 1, + }; + assert_eq!( + filter.validate(remote), + Err(ValidationError::LocalIncompatibleOrStale { local: filter.current(), remote }) + ); + + // block number smaller than TIMESTAMP_BEFORE_ETHEREUM_MAINNET and + // fork_id.next <= TIMESTAMP_BEFORE_ETHEREUM_MAINNET && self.head.number >= fork_id.next, + // not compatible. 
+ filter + .set_head(Head { number: TIMESTAMP_BEFORE_ETHEREUM_MAINNET - 1, ..Default::default() }); + let remote = ForkId { + hash: ForkHash(hex!("668db0af")), + next: TIMESTAMP_BEFORE_ETHEREUM_MAINNET - 2, + }; + assert_eq!( + filter.validate(remote), + Err(ValidationError::LocalIncompatibleOrStale { local: filter.current(), remote }) + ); + + // block number smaller than TIMESTAMP_BEFORE_ETHEREUM_MAINNET and + // !((fork_id.next > TIMESTAMP_BEFORE_ETHEREUM_MAINNET && self.head.timestamp >= + // fork_id.next) || (fork_id.next <= TIMESTAMP_BEFORE_ETHEREUM_MAINNET && self.head.number + // >= fork_id.next)), compatible. + filter + .set_head(Head { number: TIMESTAMP_BEFORE_ETHEREUM_MAINNET - 2, ..Default::default() }); + let remote = ForkId { + hash: ForkHash(hex!("668db0af")), + next: TIMESTAMP_BEFORE_ETHEREUM_MAINNET - 1, + }; + assert_eq!(filter.validate(remote), Ok(())); } #[test] From 97156a8e7a98c0bb51ab11d5646c6fa169580541 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 21 May 2024 10:24:31 -0400 Subject: [PATCH 578/700] feat: add actions lint to pull request workflow (#8183) --- .github/workflows/lint-actions.yml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 .github/workflows/lint-actions.yml diff --git a/.github/workflows/lint-actions.yml b/.github/workflows/lint-actions.yml new file mode 100644 index 0000000000000..d60885ad54d49 --- /dev/null +++ b/.github/workflows/lint-actions.yml @@ -0,0 +1,19 @@ +name: Lint GitHub Actions workflows +on: + pull_request: + merge_group: + push: + branches: [main] + +jobs: + actionlint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Download actionlint + id: get_actionlint + run: bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash) + shell: bash + - name: Check workflow files + run: SHELLCHECK_OPTS="-S error" ${{ steps.get_actionlint.outputs.executable }} -color + shell: 
bash From 8a02e6e0e1b3fdeecf658570b421907710a51c2b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 21 May 2024 16:49:13 +0200 Subject: [PATCH 579/700] chore: rm leftover test assets (#8328) --- .../test_data/call_tracer/default.json | 21 ---------- .../test_data/call_tracer/legacy.json | 19 --------- .../test_data/call_tracer/only_top_call.json | 10 ----- .../test_data/call_tracer/with_log.json | 20 --------- .../test_data/default/structlogs_01.json | 1 - .../test_data/pre_state_tracer/default.json | 20 --------- .../test_data/pre_state_tracer/diff_mode.json | 41 ------------------- .../test_data/pre_state_tracer/legacy.json | 25 ----------- 8 files changed, 157 deletions(-) delete mode 100644 crates/rpc/rpc-types/test_data/call_tracer/default.json delete mode 100644 crates/rpc/rpc-types/test_data/call_tracer/legacy.json delete mode 100644 crates/rpc/rpc-types/test_data/call_tracer/only_top_call.json delete mode 100644 crates/rpc/rpc-types/test_data/call_tracer/with_log.json delete mode 100644 crates/rpc/rpc-types/test_data/default/structlogs_01.json delete mode 100644 crates/rpc/rpc-types/test_data/pre_state_tracer/default.json delete mode 100644 crates/rpc/rpc-types/test_data/pre_state_tracer/diff_mode.json delete mode 100644 crates/rpc/rpc-types/test_data/pre_state_tracer/legacy.json diff --git a/crates/rpc/rpc-types/test_data/call_tracer/default.json b/crates/rpc/rpc-types/test_data/call_tracer/default.json deleted file mode 100644 index 553b2a39795da..0000000000000 --- a/crates/rpc/rpc-types/test_data/call_tracer/default.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "calls": [ - { - "from": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", - "gas": "0x6d05", - "gasUsed": "0x0", - "input": "0x", - "to": "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", - "type": "CALL", - "value": "0x6f05b59d3b20000" - } - ], - "from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb", - "gas": "0x10738", - "gasUsed": "0x9751", - "input": 
"0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", - "output": "0x0000000000000000000000000000000000000000000000000000000000000001", - "to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", - "type": "CALL", - "value": "0x0" -} diff --git a/crates/rpc/rpc-types/test_data/call_tracer/legacy.json b/crates/rpc/rpc-types/test_data/call_tracer/legacy.json deleted file mode 100644 index b89e7ae86c4b4..0000000000000 --- a/crates/rpc/rpc-types/test_data/call_tracer/legacy.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "calls": [ - { - "from": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", - "input": "0x", - "to": "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", - "type": "CALL", - "value": "0x6f05b59d3b20000" - } - ], - "from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb", - "gas": "0x10738", - "gasUsed": "0x9751", - "input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", - "output": "0x0000000000000000000000000000000000000000000000000000000000000001", - "to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", - "type": "CALL", - "value": "0x0" -} diff --git a/crates/rpc/rpc-types/test_data/call_tracer/only_top_call.json b/crates/rpc/rpc-types/test_data/call_tracer/only_top_call.json deleted file mode 100644 index 327bb427874b1..0000000000000 --- a/crates/rpc/rpc-types/test_data/call_tracer/only_top_call.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "from": "0x4f5777744b500616697cb655dcb02ee6cd51deb5", - "gas": "0x2dced", - "gasUsed": "0x1a9e5", - "to": "0x200edd17f30485a8735878661960cd7a9a95733f", - "input": "0xba51a6df0000000000000000000000000000000000000000000000000000000000000000", - "output": "0xba51a6df00000000000000000000000000000000000000000000000000000000", - "value": "0x8ac7230489e80000", - "type": "CALL" -} diff --git a/crates/rpc/rpc-types/test_data/call_tracer/with_log.json b/crates/rpc/rpc-types/test_data/call_tracer/with_log.json deleted file mode 100644 index 2528bbc048489..0000000000000 --- 
a/crates/rpc/rpc-types/test_data/call_tracer/with_log.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "from": "0xd1220a0cf47c7b9be7a2e6ba89f429762e7b9adb", - "gas": "0x1f36d", - "gasUsed": "0xc6a5", - "to": "0xf4eced2f682ce333f96f2d8966c613ded8fc95dd", - "input": "0xa9059cbb000000000000000000000000dbf03b407c01e7cd3cbea99509d93f8dddc8c6fb0000000000000000000000000000000000000000000000000000000000989680", - "logs": [ - { - "address": "0xf4eced2f682ce333f96f2d8966c613ded8fc95dd", - "topics": [ - "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", - "0x000000000000000000000000d1220a0cf47c7b9be7a2e6ba89f429762e7b9adb", - "0x000000000000000000000000dbf03b407c01e7cd3cbea99509d93f8dddc8c6fb" - ], - "data": "0x0000000000000000000000000000000000000000000000000000000000989680" - } - ], - "value": "0x0", - "type": "CALL" -} diff --git a/crates/rpc/rpc-types/test_data/default/structlogs_01.json b/crates/rpc/rpc-types/test_data/default/structlogs_01.json deleted file mode 100644 index 1812c5d3e337d..0000000000000 --- a/crates/rpc/rpc-types/test_data/default/structlogs_01.json +++ /dev/null @@ -1 +0,0 @@ 
-{"structLogs":[{"pc":0,"op":"PUSH1","gas":24595,"gasCost":3,"depth":1,"stack":[],"memory":[]},{"pc":2,"op":"PUSH1","gas":24592,"gasCost":3,"depth":1,"stack":["0x80"],"memory":[]},{"pc":4,"op":"MSTORE","gas":24589,"gasCost":12,"depth":1,"stack":["0x80","0x40"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000"]},{"pc":5,"op":"CALLVALUE","gas":24577,"gasCost":2,"depth":1,"stack":[],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":6,"op":"DUP1","gas":24575,"gasCost":3,"depth":1,"stack":["0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":7,"op":"ISZERO","gas":24572,"gasCost":3,"depth":1,"stack":["0x0","0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8,"op":"PUSH2","gas":24569,"gasCost":3,"depth":1,"stack":["0x0","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":11,"op":"JUMPI","gas":24566,"gasCost":10,"depth":1,"stack":["0x0","0x1","0x10"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":16,"op":"JUMPDEST","gas":24556,"gasCost":1,"depth":1,"stack":["0x0"],"memo
ry":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":17,"op":"POP","gas":24555,"gasCost":2,"depth":1,"stack":["0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":18,"op":"PUSH1","gas":24553,"gasCost":3,"depth":1,"stack":[],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":20,"op":"CALLDATASIZE","gas":24550,"gasCost":2,"depth":1,"stack":["0x4"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":21,"op":"LT","gas":24548,"gasCost":3,"depth":1,"stack":["0x4","0x44"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":22,"op":"PUSH2","gas":24545,"gasCost":3,"depth":1,"stack":["0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":25,"op":"JUMPI","gas":24542,"gasCost":10,"depth":1,"stack":["0x0","0x1fb"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":26,"op":"PUSH1","gas":24532,"gasCost":3,"depth":1,"stack":[],"m
emory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":28,"op":"CALLDATALOAD","gas":24529,"gasCost":3,"depth":1,"stack":["0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":29,"op":"PUSH1","gas":24526,"gasCost":3,"depth":1,"stack":["0xa22cb46500000000000000000000000000000000000111abe46ff893f3b2fdf1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":31,"op":"SHR","gas":24523,"gasCost":3,"depth":1,"stack":["0xa22cb46500000000000000000000000000000000000111abe46ff893f3b2fdf1","0xe0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":32,"op":"DUP1","gas":24520,"gasCost":3,"depth":1,"stack":["0xa22cb465"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":33,"op":"PUSH4","gas":24517,"gasCost":3,"depth":1,"stack":["0xa22cb465","0xa22cb465"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":38,"op":"GT","gas":24514,"gasCost":3,"depth":1,"stack":["0xa22cb465","0xa22cb465","0x6352211e"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","00000000000000000000
00000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":39,"op":"PUSH2","gas":24511,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":42,"op":"JUMPI","gas":24508,"gasCost":10,"depth":1,"stack":["0xa22cb465","0x0","0x11a"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":43,"op":"DUP1","gas":24498,"gasCost":3,"depth":1,"stack":["0xa22cb465"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":44,"op":"PUSH4","gas":24495,"gasCost":3,"depth":1,"stack":["0xa22cb465","0xa22cb465"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":49,"op":"GT","gas":24492,"gasCost":3,"depth":1,"stack":["0xa22cb465","0xa22cb465","0x95d89b41"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":50,"op":"PUSH2","gas":24489,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":53,"op":"JUMPI","gas":24486,"gasCost":10,"depth":1,"stack":["0xa2
2cb465","0x0","0xad"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":54,"op":"DUP1","gas":24476,"gasCost":3,"depth":1,"stack":["0xa22cb465"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":55,"op":"PUSH4","gas":24473,"gasCost":3,"depth":1,"stack":["0xa22cb465","0xa22cb465"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":60,"op":"GT","gas":24470,"gasCost":3,"depth":1,"stack":["0xa22cb465","0xa22cb465","0xdb006a75"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":61,"op":"PUSH2","gas":24467,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":64,"op":"JUMPI","gas":24464,"gasCost":10,"depth":1,"stack":["0xa22cb465","0x1","0x7c"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":124,"op":"JUMPDEST","gas":24454,"gasCost":1,"depth":1,"stack":["0xa22cb465"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000
000000000000000000000000000000000000000080"]},{"pc":125,"op":"DUP1","gas":24453,"gasCost":3,"depth":1,"stack":["0xa22cb465"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":126,"op":"PUSH4","gas":24450,"gasCost":3,"depth":1,"stack":["0xa22cb465","0xa22cb465"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":131,"op":"EQ","gas":24447,"gasCost":3,"depth":1,"stack":["0xa22cb465","0xa22cb465","0x95d89b41"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":132,"op":"PUSH2","gas":24444,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":135,"op":"JUMPI","gas":24441,"gasCost":10,"depth":1,"stack":["0xa22cb465","0x0","0x3c7"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":136,"op":"DUP1","gas":24431,"gasCost":3,"depth":1,"stack":["0xa22cb465"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":137,"op":"PUSH4","gas":24428,"gasCost":3,"depth":1,"stack":["0xa22cb465","0xa22cb465"],"memory":["0000000000000000000000000000000000000
000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":142,"op":"EQ","gas":24425,"gasCost":3,"depth":1,"stack":["0xa22cb465","0xa22cb465","0xa22cb465"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":143,"op":"PUSH2","gas":24422,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":146,"op":"JUMPI","gas":24419,"gasCost":10,"depth":1,"stack":["0xa22cb465","0x1","0x3cf"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":975,"op":"JUMPDEST","gas":24409,"gasCost":1,"depth":1,"stack":["0xa22cb465"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":976,"op":"PUSH2","gas":24408,"gasCost":3,"depth":1,"stack":["0xa22cb465"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":979,"op":"PUSH2","gas":24405,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":982,"op":"CALLDA
TASIZE","gas":24402,"gasCost":2,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":983,"op":"PUSH1","gas":24400,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":985,"op":"PUSH2","gas":24397,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":988,"op":"JUMP","gas":24394,"gasCost":8,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x2231"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8753,"op":"JUMPDEST","gas":24386,"gasCost":1,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8754,"op":"PUSH1","gas":24385,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8756,"op":"DUP1","gas":24382,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0
x3dd","0x44","0x4","0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8757,"op":"PUSH1","gas":24379,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8759,"op":"DUP4","gas":24376,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x40"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8760,"op":"DUP6","gas":24373,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x40","0x4"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8761,"op":"SUB","gas":24370,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x40","0x4","0x44"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8762,"op":"SLT","gas":24367,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x40","0x40"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8763,"op":"ISZERO","gas":24364,"
gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8764,"op":"PUSH2","gas":24361,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8767,"op":"JUMPI","gas":24358,"gasCost":10,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x1","0x2243"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8771,"op":"JUMPDEST","gas":24348,"gasCost":1,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8772,"op":"PUSH2","gas":24347,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8775,"op":"DUP4","gas":24344,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","00000000000000000000000000000000000000000000000000000000
00000080"]},{"pc":8776,"op":"PUSH2","gas":24341,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c","0x4"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8779,"op":"JUMP","gas":24338,"gasCost":8,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c","0x4","0x211a"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8474,"op":"JUMPDEST","gas":24330,"gasCost":1,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c","0x4"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8475,"op":"DUP1","gas":24329,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c","0x4"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8476,"op":"CALLDATALOAD","gas":24326,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c","0x4","0x4"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8477,"op":"PUSH1","gas":24323,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c","0x4","0x111abe46ff893f3b2fdf1f759a8a8"],"memory":["000000000000000000000000000000000
0000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8479,"op":"PUSH1","gas":24320,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8481,"op":"PUSH1","gas":24317,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8483,"op":"SHL","gas":24314,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x1","0xa0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8484,"op":"SUB","gas":24311,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x10000000000000000000000000000000000000000"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8485,"op":"DUP2","gas":24308,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0xffffffffffffffffffffffffffffffffffffff
ff"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8486,"op":"AND","gas":24305,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0xffffffffffffffffffffffffffffffffffffffff","0x111abe46ff893f3b2fdf1f759a8a8"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8487,"op":"DUP2","gas":24302,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x111abe46ff893f3b2fdf1f759a8a8"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8488,"op":"EQ","gas":24299,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x111abe46ff893f3b2fdf1f759a8a8","0x111abe46ff893f3b2fdf1f759a8a8"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8489,"op":"PUSH2","gas":24296,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8492,"op":"JUMPI","gas":24293,"gasCost":10,"depth":1,"stack
":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x18b8"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":6328,"op":"JUMPDEST","gas":24283,"gasCost":1,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c","0x4","0x111abe46ff893f3b2fdf1f759a8a8"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":6329,"op":"SWAP2","gas":24282,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x224c","0x4","0x111abe46ff893f3b2fdf1f759a8a8"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":6330,"op":"SWAP1","gas":24279,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x111abe46ff893f3b2fdf1f759a8a8","0x4","0x224c"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":6331,"op":"POP","gas":24276,"gasCost":2,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x111abe46ff893f3b2fdf1f759a8a8","0x224c","0x4"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":6332,"op":"JUMP","gas":24274,"gasCost":8,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x111ab
e46ff893f3b2fdf1f759a8a8","0x224c"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8780,"op":"JUMPDEST","gas":24266,"gasCost":1,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x111abe46ff893f3b2fdf1f759a8a8"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8781,"op":"SWAP2","gas":24265,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x0","0x0","0x111abe46ff893f3b2fdf1f759a8a8"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8782,"op":"POP","gas":24262,"gasCost":2,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x0","0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8783,"op":"PUSH1","gas":24260,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8785,"op":"DUP4","gas":24257,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x0","0x20"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000
000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8786,"op":"ADD","gas":24254,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x0","0x20","0x4"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8787,"op":"CALLDATALOAD","gas":24251,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x0","0x24"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8788,"op":"DUP1","gas":24248,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x0","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8789,"op":"ISZERO","gas":24245,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x0","0x1","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8790,"op":"ISZERO","gas":24242,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x0","0x1","0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8791,"op":"DUP2","gas":24239,"gasC
ost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x0","0x1","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8792,"op":"EQ","gas":24236,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x0","0x1","0x1","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8793,"op":"PUSH2","gas":24233,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x0","0x1","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8796,"op":"JUMPI","gas":24230,"gasCost":10,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x0","0x1","0x1","0x2260"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8800,"op":"JUMPDEST","gas":24220,"gasCost":1,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x0","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8801,"op":"DUP1","gas":24219,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x0","0x1"],"
memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8802,"op":"SWAP2","gas":24216,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x0","0x1","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8803,"op":"POP","gas":24213,"gasCost":2,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x1","0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8804,"op":"POP","gas":24211,"gasCost":2,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8805,"op":"SWAP3","gas":24209,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x44","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8806,"op":"POP","gas":24206,"gasCost":2,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x1","0x4","0x111abe46ff893f3b2fdf1f759a8a8","0x44"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","00000000000000000
00000000000000000000000000000000000000000000080"]},{"pc":8807,"op":"SWAP3","gas":24204,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x3dd","0x1","0x4","0x111abe46ff893f3b2fdf1f759a8a8"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8808,"op":"SWAP1","gas":24201,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x4","0x3dd"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8809,"op":"POP","gas":24198,"gasCost":2,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x3dd","0x4"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":8810,"op":"JUMP","gas":24196,"gasCost":8,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x3dd"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":989,"op":"JUMPDEST","gas":24188,"gasCost":1,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":990,"op":"PUSH2","gas":24187,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1"],"memory":["000000000000000000000000000000000000000000000000000
0000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":993,"op":"JUMP","gas":24184,"gasCost":8,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xc94"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3220,"op":"JUMPDEST","gas":24176,"gasCost":1,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3221,"op":"PUSH1","gas":24175,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3223,"op":"PUSH1","gas":24172,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3225,"op":"PUSH1","gas":24169,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x1","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3227,"op":"SHL","gas":24166,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f7
59a8a8","0x1","0x1","0x1","0xa0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3228,"op":"SUB","gas":24163,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x1","0x10000000000000000000000000000000000000000"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3229,"op":"DUP3","gas":24160,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xffffffffffffffffffffffffffffffffffffffff"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3230,"op":"AND","gas":24157,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xffffffffffffffffffffffffffffffffffffffff","0x111abe46ff893f3b2fdf1f759a8a8"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3231,"op":"CALLER","gas":24154,"gasCost":2,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x111abe46ff893f3b2fdf1f759a8a8"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3232,"op":"EQ","gas":24152,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x111abe46ff893f3b2fdf1f759a8a8","0xa7194f8a5f509ed2c
95ade0b4efb6940a45d7a11"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3233,"op":"ISZERO","gas":24149,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3234,"op":"PUSH2","gas":24146,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3237,"op":"JUMPI","gas":24143,"gasCost":10,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x1","0xced"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3309,"op":"JUMPDEST","gas":24133,"gasCost":1,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3310,"op":"CALLER","gas":24132,"gasCost":2,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3311,"op":"PU
SH1","gas":24130,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3313,"op":"DUP2","gas":24127,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3314,"op":"DUP2","gas":24124,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3315,"op":"MSTORE","gas":24121,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0"],"memory":["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3316,"op":"PUSH1","gas":24118,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000000","000000000000000000000000000000000
0000000000000000000000000000080"]},{"pc":3318,"op":"PUSH1","gas":24115,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x5"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3320,"op":"SWAP1","gas":24112,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x5","0x20"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3321,"op":"DUP2","gas":24109,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x5"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3322,"op":"MSTORE","gas":24106,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x5","0x20"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3323,"op":"PUSH1","gas":24103,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000005","000000000
0000000000000000000000000000000000000000000000000000080"]},{"pc":3325,"op":"DUP1","gas":24100,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x40"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000005","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3326,"op":"DUP4","gas":24097,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x40","0x40"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000005","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3327,"op":"KECCAK256","gas":24094,"gasCost":42,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x40","0x40","0x0"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000005","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3328,"op":"PUSH1","gas":24052,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x40","0x7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000005","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3330,"op":"PUSH1","gas":24049,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x40","0x7d3429278e27616819652c726
b56f6b8ffeea2d2c23cf663064312a58b0422d2","0x1"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000005","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3332,"op":"PUSH1","gas":24046,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x40","0x7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0x1","0x1"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000005","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3334,"op":"SHL","gas":24043,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x40","0x7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0x1","0x1","0xa0"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000005","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3335,"op":"SUB","gas":24040,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x40","0x7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0x1","0x10000000000000000000000000000000000000000"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000005","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3336,"op":"DUP8","gas":24037,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x40","0x7d3429278e27616819652c726b56f6b8ffeea
2d2c23cf663064312a58b0422d2","0xffffffffffffffffffffffffffffffffffffffff"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000005","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3337,"op":"AND","gas":24034,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x40","0x7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0xffffffffffffffffffffffffffffffffffffffff","0x111abe46ff893f3b2fdf1f759a8a8"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000005","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3338,"op":"DUP1","gas":24031,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x40","0x7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0x111abe46ff893f3b2fdf1f759a8a8"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000005","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3339,"op":"DUP6","gas":24028,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x40","0x7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0x111abe46ff893f3b2fdf1f759a8a8","0x111abe46ff893f3b2fdf1f759a8a8"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000005","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3340,"op":"MSTORE","gas":24025,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3
b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x40","0x7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0x111abe46ff893f3b2fdf1f759a8a8","0x111abe46ff893f3b2fdf1f759a8a8","0x0"],"memory":["000000000000000000000000a7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0000000000000000000000000000000000000000000000000000000000000005","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3341,"op":"SWAP1","gas":24022,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x40","0x7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0x111abe46ff893f3b2fdf1f759a8a8"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","0000000000000000000000000000000000000000000000000000000000000005","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3342,"op":"DUP4","gas":24019,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x40","0x111abe46ff893f3b2fdf1f759a8a8","0x7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","0000000000000000000000000000000000000000000000000000000000000005","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3343,"op":"MSTORE","gas":24016,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x40","0x111abe46ff893f3b2fdf1f759a8a8","0x7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0x20"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","0000000000000000000000000000000000000000000000000000000000000005","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3344,"op":"SW
AP3","gas":24013,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x0","0x20","0x40","0x111abe46ff893f3b2fdf1f759a8a8"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3345,"op":"DUP2","gas":24010,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x40","0x0"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3346,"op":"SWAP1","gas":24007,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x40","0x0","0x40"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3347,"op":"KECCAK256","gas":24004,"gasCost":42,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x40","0x40","0x0"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3348,"op":"DUP1","gas":23962,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x40","0x6693dabf5ec7ab
1a0d1c5bc58451f85d5e44d504c9ffeb75799bfdb61aa2997a"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3349,"op":"SLOAD","gas":23959,"gasCost":2100,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x40","0x6693dabf5ec7ab1a0d1c5bc58451f85d5e44d504c9ffeb75799bfdb61aa2997a","0x6693dabf5ec7ab1a0d1c5bc58451f85d5e44d504c9ffeb75799bfdb61aa2997a"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"],"storage":{"6693dabf5ec7ab1a0d1c5bc58451f85d5e44d504c9ffeb75799bfdb61aa2997a":"0000000000000000000000000000000000000000000000000000000000000000"}},{"pc":3350,"op":"PUSH1","gas":21859,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x40","0x6693dabf5ec7ab1a0d1c5bc58451f85d5e44d504c9ffeb75799bfdb61aa2997a","0x0"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3352,"op":"NOT","gas":21856,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x40","0x6693dabf5ec7ab1a0d1c5bc58451f85d5e44d504c9ffeb75799bfdb61aa2997a","0x0","0xff"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000
000000080"]},{"pc":3353,"op":"AND","gas":21853,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x40","0x6693dabf5ec7ab1a0d1c5bc58451f85d5e44d504c9ffeb75799bfdb61aa2997a","0x0","0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3354,"op":"DUP7","gas":21850,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x40","0x6693dabf5ec7ab1a0d1c5bc58451f85d5e44d504c9ffeb75799bfdb61aa2997a","0x0"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3355,"op":"ISZERO","gas":21847,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x40","0x6693dabf5ec7ab1a0d1c5bc58451f85d5e44d504c9ffeb75799bfdb61aa2997a","0x0","0x1"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3356,"op":"ISZERO","gas":21844,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x40","0x6693dabf5ec7ab1a0d1c5bc58451f85d5e44d504c9ffeb75799bfdb61aa2997a","0x0","0x0"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d
3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3357,"op":"SWAP1","gas":21841,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x40","0x6693dabf5ec7ab1a0d1c5bc58451f85d5e44d504c9ffeb75799bfdb61aa2997a","0x0","0x1"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3358,"op":"DUP2","gas":21838,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x40","0x6693dabf5ec7ab1a0d1c5bc58451f85d5e44d504c9ffeb75799bfdb61aa2997a","0x1","0x0"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3359,"op":"OR","gas":21835,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x40","0x6693dabf5ec7ab1a0d1c5bc58451f85d5e44d504c9ffeb75799bfdb61aa2997a","0x1","0x0","0x1"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3360,"op":"SWAP1","gas":21832,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x40","0x6693dabf5ec7ab1a0d1c5bc58451f85d5e44d504c9ffeb75799bfdb61aa2997a","0x1","0x1"],"memory":["000
00000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3361,"op":"SWAP2","gas":21829,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x40","0x6693dabf5ec7ab1a0d1c5bc58451f85d5e44d504c9ffeb75799bfdb61aa2997a","0x1","0x1"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3362,"op":"SSTORE","gas":21826,"gasCost":20000,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x40","0x1","0x1","0x6693dabf5ec7ab1a0d1c5bc58451f85d5e44d504c9ffeb75799bfdb61aa2997a"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"],"storage":{"6693dabf5ec7ab1a0d1c5bc58451f85d5e44d504c9ffeb75799bfdb61aa2997a":"0000000000000000000000000000000000000000000000000000000000000001"}},{"pc":3363,"op":"SWAP1","gas":1826,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x40","0x1"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3364,"op":"MLOAD","gas":1823,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111
abe46ff893f3b2fdf1f759a8a8","0x20","0x1","0x40"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3365,"op":"SWAP1","gas":1820,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x1","0x80"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3366,"op":"DUP2","gas":1817,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x80","0x1"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080"]},{"pc":3367,"op":"MSTORE","gas":1814,"gasCost":9,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x80","0x1","0x80"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000"]},{"pc":3368,"op":"SWAP2","gas":1805,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x111abe46ff893f3b2fdf1f759a8a8","0x20","0x80"],"memory":["00000000000000000000000000000000000111abe46ff
893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000001"]},{"pc":3369,"op":"SWAP3","gas":1802,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x80","0x20","0x111abe46ff893f3b2fdf1f759a8a8"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000001"]},{"pc":3370,"op":"SWAP2","gas":1799,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x111abe46ff893f3b2fdf1f759a8a8","0x80","0x20","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000001"]},{"pc":3371,"op":"PUSH32","gas":1796,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x111abe46ff893f3b2fdf1f759a8a8","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x20","0x80"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000001"]},{"pc
":3404,"op":"SWAP2","gas":1793,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x111abe46ff893f3b2fdf1f759a8a8","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x20","0x80","0x17307eab39ab6107e8899845ad3d59bd9653f200f220920489ca2b5937696c31"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000001"]},{"pc":3405,"op":"ADD","gas":1790,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x111abe46ff893f3b2fdf1f759a8a8","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x17307eab39ab6107e8899845ad3d59bd9653f200f220920489ca2b5937696c31","0x80","0x20"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000001"]},{"pc":3406,"op":"PUSH1","gas":1787,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x111abe46ff893f3b2fdf1f759a8a8","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x17307eab39ab6107e8899845ad3d59bd9653f200f220920489ca2b5937696c31","0xa0"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000001"]},{"pc":3408,"op":"MLOAD","gas":1784,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff89
3f3b2fdf1f759a8a8","0x1","0x111abe46ff893f3b2fdf1f759a8a8","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x17307eab39ab6107e8899845ad3d59bd9653f200f220920489ca2b5937696c31","0xa0","0x40"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000001"]},{"pc":3409,"op":"DUP1","gas":1781,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x111abe46ff893f3b2fdf1f759a8a8","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x17307eab39ab6107e8899845ad3d59bd9653f200f220920489ca2b5937696c31","0xa0","0x80"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000001"]},{"pc":3410,"op":"SWAP2","gas":1778,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x111abe46ff893f3b2fdf1f759a8a8","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x17307eab39ab6107e8899845ad3d59bd9653f200f220920489ca2b5937696c31","0xa0","0x80","0x80"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000001"]},{"pc":3411,"op":"SUB","gas":1775,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x111abe46ff893f3b2fdf1f759a8a8","0xa7194f8a5f509ed2c95ade0
b4efb6940a45d7a11","0x17307eab39ab6107e8899845ad3d59bd9653f200f220920489ca2b5937696c31","0x80","0x80","0xa0"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000001"]},{"pc":3412,"op":"SWAP1","gas":1772,"gasCost":3,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x111abe46ff893f3b2fdf1f759a8a8","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x17307eab39ab6107e8899845ad3d59bd9653f200f220920489ca2b5937696c31","0x80","0x20"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000001"]},{"pc":3413,"op":"LOG3","gas":1769,"gasCost":1756,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1","0x111abe46ff893f3b2fdf1f759a8a8","0xa7194f8a5f509ed2c95ade0b4efb6940a45d7a11","0x17307eab39ab6107e8899845ad3d59bd9653f200f220920489ca2b5937696c31","0x20","0x80"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000001"]},{"pc":3414,"op":"POP","gas":13,"gasCost":2,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8","0x1"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2"
,"0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000001"]},{"pc":3415,"op":"POP","gas":11,"gasCost":2,"depth":1,"stack":["0xa22cb465","0x27b","0x111abe46ff893f3b2fdf1f759a8a8"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000001"]},{"pc":3416,"op":"JUMP","gas":9,"gasCost":8,"depth":1,"stack":["0xa22cb465","0x27b"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000001"]},{"pc":635,"op":"JUMPDEST","gas":1,"gasCost":1,"depth":1,"stack":["0xa22cb465"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000001"]},{"pc":636,"op":"STOP","gas":0,"gasCost":0,"depth":1,"stack":["0xa22cb465"],"memory":["00000000000000000000000000000000000111abe46ff893f3b2fdf1f759a8a8","7d3429278e27616819652c726b56f6b8ffeea2d2c23cf663064312a58b0422d2","0000000000000000000000000000000000000000000000000000000000000080","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000001"]}],"gas":46107,"failed":false,"returnValue":""} \ No 
newline at end of file diff --git a/crates/rpc/rpc-types/test_data/pre_state_tracer/default.json b/crates/rpc/rpc-types/test_data/pre_state_tracer/default.json deleted file mode 100644 index 43e69b11bdb47..0000000000000 --- a/crates/rpc/rpc-types/test_data/pre_state_tracer/default.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "0x082d4cdf07f386ffa9258f52a5c49db4ac321ec6": { - "balance": "0xc820f93200f4000", - "nonce": 94 - }, - "0x332b656504f4eabb44c8617a42af37461a34e9dc": { - "balance": "0x11faea4f35e5af80000", - "storage": { - "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000000000000000000000000000000000000000000000" - } - }, - "0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5": { - "balance": "0xbf681825be002ac452", - "nonce": 28922 - }, - "0x82effbaaaf28614e55b2ba440fb198e0e5789b0f": { - "balance": "0xb3d0ac5cb94df6f6b0", - "nonce": 1 - } -} diff --git a/crates/rpc/rpc-types/test_data/pre_state_tracer/diff_mode.json b/crates/rpc/rpc-types/test_data/pre_state_tracer/diff_mode.json deleted file mode 100644 index 0654d26f546ec..0000000000000 --- a/crates/rpc/rpc-types/test_data/pre_state_tracer/diff_mode.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "pre": { - "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": { - "balance": "0x0", - "nonce": 22 - }, - "0x1585936b53834b021f68cc13eeefdec2efc8e724": { - "balance": "0x0" - }, - "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": { - "balance": "0x4d87094125a369d9bd5", - "nonce": 1, - "code": 
"0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b80156104055
7506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029", - "storage": { - "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834" - } - }, - "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": { - "balance": "0x1780d77678137ac1b775", - "nonce": 29072 - } - }, - "post": { - "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": { - "balance": "0x6f05b59d3b20000" - }, - "0x1585936b53834b021f68cc13eeefdec2efc8e724": { - "balance": "0x420eed1bd6c00" - }, - "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": { - "balance": "0x4d869a3b70062eb9bd5", - "storage": { - "0x0000000000000000000000000000000000000000000000000000000000000003": 
"0x000000000000000000000000000000000000000000000000000000005a37b95e" - } - }, - "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": { - "balance": "0x1780d7725724a9044b75", - "nonce": 29073 - } - } -} diff --git a/crates/rpc/rpc-types/test_data/pre_state_tracer/legacy.json b/crates/rpc/rpc-types/test_data/pre_state_tracer/legacy.json deleted file mode 100644 index dbefb198c4067..0000000000000 --- a/crates/rpc/rpc-types/test_data/pre_state_tracer/legacy.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": { - "balance": "0x0", - "code": "0x", - "nonce": 22, - "storage": {} - }, - "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": { - "balance": "0x4d87094125a369d9bd5", - "code": "0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006
000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b801561040557506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1
f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029", - "nonce": 1, - "storage": { - "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb", - "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000", - "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000003c", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834" - } - }, - "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": { - "balance": "0x1780d77678137ac1b775", - "code": "0x", - "nonce": 29072, - "storage": {} - } -} From e089c5c37d3ca06da5461fc3b2bc1d3f8b5fdcb7 Mon Sep 17 00:00:00 2001 From: Delweng Date: Wed, 22 May 2024 00:32:09 +0800 Subject: [PATCH 580/700] docs(book): trim any white space at the line right ending (#8242) Signed-off-by: jsvisa --- Makefile | 2 +- book/SUMMARY.md | 1 + book/cli/SUMMARY.md | 1 + book/cli/help.py | 34 +++- book/cli/reth.md | 59 +++--- book/cli/reth/config.md | 32 +-- book/cli/reth/db.md | 40 ++-- book/cli/reth/db/checksum.md | 38 ++-- book/cli/reth/db/clear.md | 38 ++-- book/cli/reth/db/clear/mdbx.md | 40 ++-- book/cli/reth/db/clear/static-file.md | 38 ++-- book/cli/reth/db/create-static-files.md | 46 ++--- book/cli/reth/db/diff.md | 40 ++-- book/cli/reth/db/drop.md | 38 ++-- book/cli/reth/db/get.md | 38 ++-- book/cli/reth/db/get/mdbx.md | 40 ++-- book/cli/reth/db/get/static-file.md | 38 ++-- book/cli/reth/db/list.md | 50 ++--- book/cli/reth/db/path.md | 38 ++-- book/cli/reth/db/stats.md | 42 ++-- book/cli/reth/db/version.md | 38 ++-- book/cli/reth/debug.md | 32 +-- book/cli/reth/dump-genesis.md | 32 +-- book/cli/reth/import-receipts.md | 148 ++++++++++++++ book/cli/reth/import.md | 42 ++-- book/cli/reth/init-state.md | 81 ++++---- 
book/cli/reth/init.md | 40 ++-- book/cli/reth/node.md | 208 +++++++++++--------- book/cli/reth/p2p.md | 166 +++++++++++----- book/cli/reth/p2p/body.md | 28 +-- book/cli/reth/p2p/header.md | 28 +-- book/cli/reth/recover.md | 32 +-- book/cli/reth/recover/storage-tries.md | 40 ++-- book/cli/reth/stage.md | 32 +-- book/cli/reth/stage/drop.md | 40 ++-- book/cli/reth/stage/dump.md | 40 ++-- book/cli/reth/stage/dump/account-hashing.md | 28 +-- book/cli/reth/stage/dump/execution.md | 28 +-- book/cli/reth/stage/dump/merkle.md | 28 +-- book/cli/reth/stage/dump/storage-hashing.md | 28 +-- book/cli/reth/stage/run.md | 110 ++++++----- book/cli/reth/stage/unwind.md | 104 +++++----- book/cli/reth/stage/unwind/num-blocks.md | 40 ++-- book/cli/reth/stage/unwind/to-block.md | 40 ++-- book/cli/reth/test-vectors.md | 32 +-- book/cli/reth/test-vectors/tables.md | 32 +-- 46 files changed, 1236 insertions(+), 954 deletions(-) create mode 100644 book/cli/reth/import-receipts.md diff --git a/Makefile b/Makefile index a6a385a133145..bfa56011c1a0c 100644 --- a/Makefile +++ b/Makefile @@ -472,5 +472,5 @@ cfg-check: pr: make cfg-check && \ make lint && \ - make docs && \ + make update-book-cli && \ make test diff --git a/book/SUMMARY.md b/book/SUMMARY.md index fc6deb28295aa..eaa7210cff118 100644 --- a/book/SUMMARY.md +++ b/book/SUMMARY.md @@ -32,6 +32,7 @@ - [`reth init`](./cli/reth/init.md) - [`reth init-state`](./cli/reth/init-state.md) - [`reth import`](./cli/reth/import.md) + - [`reth import-receipts`](./cli/reth/import-receipts.md) - [`reth dump-genesis`](./cli/reth/dump-genesis.md) - [`reth db`](./cli/reth/db.md) - [`reth db stats`](./cli/reth/db/stats.md) diff --git a/book/cli/SUMMARY.md b/book/cli/SUMMARY.md index ee3d714b2bb5f..8c8ea2f42ccb2 100644 --- a/book/cli/SUMMARY.md +++ b/book/cli/SUMMARY.md @@ -3,6 +3,7 @@ - [`reth init`](./reth/init.md) - [`reth init-state`](./reth/init-state.md) - [`reth import`](./reth/import.md) + - [`reth import-receipts`](./reth/import-receipts.md) - 
[`reth dump-genesis`](./reth/dump-genesis.md) - [`reth db`](./reth/db.md) - [`reth db stats`](./reth/db/stats.md) diff --git a/book/cli/help.py b/book/cli/help.py index bc584136ac94b..26ce5e69198e3 100755 --- a/book/cli/help.py +++ b/book/cli/help.py @@ -23,6 +23,12 @@ """ +def write_file(file_path, content): + content = "\n".join([line.rstrip() for line in content.split("\n")]) + with open(file_path, "w") as f: + f.write(content) + + def main(): args = parse_args(sys.argv[1:]) for cmd in args.commands: @@ -65,13 +71,11 @@ def main(): root_summary += cmd_summary(root_path, cmd, obj, args.root_indentation) root_summary += "\n" - with open(path.join(args.out_dir, "SUMMARY.md"), "w") as f: - f.write(summary) + write_file(path.join(args.out_dir, "SUMMARY.md"), summary) # Generate README.md. if args.readme: - with open(path.join(args.out_dir, "README.md"), "w") as f: - f.write(README) + write_file(path.join(args.out_dir, "README.md"), README) if args.root_summary: update_root_summary(args.root_dir, root_summary) @@ -162,8 +166,7 @@ def rec(cmd: list[str], obj: object): for arg in cmd: out_path = path.join(out_path, arg) makedirs(path.dirname(out_path), exist_ok=True) - with open(f"{out_path}.md", "w") as f: - f.write(out) + write_file(f"{out_path}.md", out) for k, v in obj.items(): if k == HELP_KEY: @@ -250,14 +253,27 @@ def command_name(cmd: str): """Returns the name of a command.""" return cmd.split("/")[-1] + def preprocess_help(s: str): """Preprocesses the help output of a command.""" # Remove the user-specific paths. 
- s = re.sub(r"default: /.*/reth", "default: ", s) + s = re.sub( + r"default: /.*/reth", + "default: ", + s, + ) # Remove the commit SHA and target architecture triple - s = re.sub(r"default: reth/.*-[0-9A-Fa-f]{6,10}/\w+-\w*-\w+", "default: reth/-/", s) + s = re.sub( + r"default: reth/.*-[0-9A-Fa-f]{6,10}/\w+-\w*-\w+", + "default: reth/-/", + s, + ) # Remove the OS - s = re.sub(r"default: reth/.*/\w+", "default: reth//", s) + s = re.sub( + r"default: reth/.*/\w+", + "default: reth//", + s, + ) return s diff --git a/book/cli/reth.md b/book/cli/reth.md index 8b6f757c936b2..2e3ebef31d75b 100644 --- a/book/cli/reth.md +++ b/book/cli/reth.md @@ -7,39 +7,40 @@ $ reth --help Usage: reth [OPTIONS] Commands: - node Start the node - init Initialize the database from a genesis file - init-state Initialize the database from a state dump file - import This syncs RLP encoded blocks from a file - dump-genesis Dumps genesis block JSON configuration to stdout - db Database debugging utilities - stage Manipulate individual stages - p2p P2P Debugging utilities - test-vectors Generate Test Vectors - config Write config to stdout - debug Various debug routines - recover Scripts for node recovery - help Print this message or the help of the given subcommand(s) + node Start the node + init Initialize the database from a genesis file + init-state Initialize the database from a state dump file + import This syncs RLP encoded blocks from a file + import-receipts This imports RLP encoded receipts from a file + dump-genesis Dumps genesis block JSON configuration to stdout + db Database debugging utilities + stage Manipulate individual stages + p2p P2P Debugging utilities + test-vectors Generate Test Vectors + config Write config to stdout + debug Various debug routines + recover Scripts for node recovery + help Print this message or the help of the given subcommand(s) Options: --chain The chain this node is running. 
Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -51,7 +52,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -61,12 +62,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -76,22 +77,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -99,12 +100,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -115,7 +116,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. 
- + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/config.md b/book/cli/reth/config.md index eba35a9fa413d..72c195e4ac89e 100644 --- a/book/cli/reth/config.md +++ b/book/cli/reth/config.md @@ -16,21 +16,21 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -39,7 +39,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -49,12 +49,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -64,22 +64,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -87,12 +87,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -103,7 +103,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/db.md b/book/cli/reth/db.md index bd5989d7f34c8..f7ce7389ed2c2 100644 --- a/book/cli/reth/db.md +++ b/book/cli/reth/db.md @@ -22,33 +22,33 @@ Commands: Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -70,13 +70,13 @@ Database: --db.exclusive Open environment in exclusive/monopolistic mode. 
Makes it possible to open a database on an NFS volume - + [possible values: true, false] Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -86,12 +86,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -101,22 +101,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -124,12 +124,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -140,7 +140,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/db/checksum.md b/book/cli/reth/db/checksum.md index 6f080c74ba890..a8147b04a4d12 100644 --- a/book/cli/reth/db/checksum.md +++ b/book/cli/reth/db/checksum.md @@ -13,33 +13,33 @@ Arguments: Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. 
- + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -48,7 +48,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -58,12 +58,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -73,22 +73,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -96,12 +96,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -112,7 +112,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. 
- + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/db/clear.md b/book/cli/reth/db/clear.md index f69e29b606226..aefceb94db66b 100644 --- a/book/cli/reth/db/clear.md +++ b/book/cli/reth/db/clear.md @@ -14,33 +14,33 @@ Commands: Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -49,7 +49,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -59,12 +59,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -74,22 +74,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -97,12 +97,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -113,7 +113,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/db/clear/mdbx.md b/book/cli/reth/db/clear/mdbx.md index e16697d395a46..5befebf642d0b 100644 --- a/book/cli/reth/db/clear/mdbx.md +++ b/book/cli/reth/db/clear/mdbx.md @@ -8,38 +8,38 @@ Usage: reth db clear mdbx [OPTIONS]
Arguments:
- + Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -48,7 +48,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -58,12 +58,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -73,22 +73,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -96,12 +96,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -112,7 +112,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/db/clear/static-file.md b/book/cli/reth/db/clear/static-file.md index c41158b7af5b4..18ec5b5ca548d 100644 --- a/book/cli/reth/db/clear/static-file.md +++ b/book/cli/reth/db/clear/static-file.md @@ -16,33 +16,33 @@ Arguments: Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -51,7 +51,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -61,12 +61,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -76,22 +76,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -99,12 +99,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -115,7 +115,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/db/create-static-files.md b/book/cli/reth/db/create-static-files.md index 01094f925d81f..1e69229491c08 100644 --- a/book/cli/reth/db/create-static-files.md +++ b/book/cli/reth/db/create-static-files.md @@ -18,37 +18,37 @@ Arguments: Options: --datadir The path to the data dir for all reth files and subdirectories. 
- + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] -f, --from Starting block for the static file - + [default: 0] -b, --block-interval Number of blocks in the static file - + [default: 500000] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] -p, --parallel Sets the number of static files built in parallel. Note: Each parallel build is memory-intensive - + [default: 1] --only-stats @@ -62,7 +62,7 @@ Options: -c, --compression Compression algorithms to use - + [default: uncompressed] Possible values: @@ -83,13 +83,13 @@ Options: --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -98,7 +98,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -108,12 +108,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -123,22 +123,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -146,12 +146,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -162,7 +162,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/db/diff.md b/book/cli/reth/db/diff.md index 3c0bd56413615..898e05db33578 100644 --- a/book/cli/reth/db/diff.md +++ b/book/cli/reth/db/diff.md @@ -9,13 +9,13 @@ Usage: reth db diff [OPTIONS] --secondary-datadir --output < Options: --datadir The path to the data dir for all reth files and subdirectories. 
- + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --secondary-datadir @@ -24,21 +24,21 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -60,7 +60,7 @@ Database: --db.exclusive Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - + [possible values: true, false] --table
@@ -72,7 +72,7 @@ Database: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -82,12 +82,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -97,22 +97,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -120,12 +120,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -136,7 +136,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/db/drop.md b/book/cli/reth/db/drop.md index 080ea25696f71..25facee0306a6 100644 --- a/book/cli/reth/db/drop.md +++ b/book/cli/reth/db/drop.md @@ -9,13 +9,13 @@ Usage: reth db drop [OPTIONS] Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] -f, --force @@ -24,21 +24,21 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. 
- + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -47,7 +47,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -57,12 +57,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -72,22 +72,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -95,12 +95,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -111,7 +111,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. 
- + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/db/get.md b/book/cli/reth/db/get.md index de2f83b56c37f..366ab792dcba2 100644 --- a/book/cli/reth/db/get.md +++ b/book/cli/reth/db/get.md @@ -14,33 +14,33 @@ Commands: Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -49,7 +49,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -59,12 +59,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -74,22 +74,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -97,12 +97,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -113,7 +113,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/db/get/mdbx.md b/book/cli/reth/db/get/mdbx.md index bf6f0749463d9..5b2ce0b0f7e50 100644 --- a/book/cli/reth/db/get/mdbx.md +++ b/book/cli/reth/db/get/mdbx.md @@ -8,7 +8,7 @@ Usage: reth db get mdbx [OPTIONS]
[SUBKEY] Arguments:
- + The key to get content for @@ -19,13 +19,13 @@ Arguments: Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --raw @@ -34,21 +34,21 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -57,7 +57,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -67,12 +67,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -82,22 +82,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -105,12 +105,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -121,7 +121,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/db/get/static-file.md b/book/cli/reth/db/get/static-file.md index a6addeffb8f3f..8381d46972f39 100644 --- a/book/cli/reth/db/get/static-file.md +++ b/book/cli/reth/db/get/static-file.md @@ -19,13 +19,13 @@ Arguments: Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --raw @@ -34,21 +34,21 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -57,7 +57,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -67,12 +67,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -82,22 +82,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -105,12 +105,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -121,7 +121,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/db/list.md b/book/cli/reth/db/list.md index a7d88eb51aab5..130552420340b 100644 --- a/book/cli/reth/db/list.md +++ b/book/cli/reth/db/list.md @@ -13,27 +13,27 @@ Arguments: Options: --datadir The path to the data dir for all reth files and subdirectories. 
- + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] -s, --skip Skip first N entries - + [default: 0] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] -r, --reverse @@ -41,27 +41,27 @@ Options: -l, --len How many items to take from the walker - + [default: 5] --search Search parameter for both keys and values. Prefix it with `0x` to search for binary data, and text otherwise. - + ATTENTION! For compressed tables (`Transactions` and `Receipts`), there might be missing results since the search uses the raw uncompressed value from the database. --min-row-size Minimum size of row in bytes - + [default: 0] --min-key-size Minimum size of key in bytes - + [default: 0] --min-value-size Minimum size of value in bytes - + [default: 0] -c, --count @@ -75,13 +75,13 @@ Options: --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -90,7 +90,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -100,12 +100,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -115,22 +115,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -138,12 +138,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -154,7 +154,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/db/path.md b/book/cli/reth/db/path.md index dc8733ef1013f..0c65fe03abc33 100644 --- a/book/cli/reth/db/path.md +++ b/book/cli/reth/db/path.md @@ -9,33 +9,33 @@ Usage: reth db path [OPTIONS] Options: --datadir The path to the data dir for all reth files and subdirectories. 
- + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -44,7 +44,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -54,12 +54,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -69,22 +69,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -92,12 +92,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -108,7 +108,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/db/stats.md b/book/cli/reth/db/stats.md index 437c10bd0a1c3..ab130217a6536 100644 --- a/book/cli/reth/db/stats.md +++ b/book/cli/reth/db/stats.md @@ -9,13 +9,13 @@ Usage: reth db stats [OPTIONS] Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --detailed-sizes @@ -24,10 +24,10 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --detailed-segments @@ -35,20 +35,20 @@ Options: --checksum Show a checksum of each table in the database. - + WARNING: this option will take a long time to run, as it needs to traverse and hash the entire database. - + For individual table checksums, use the `reth db checksum` command. --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -57,7 +57,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -67,12 +67,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -82,22 +82,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -105,12 +105,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -121,7 +121,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/db/version.md b/book/cli/reth/db/version.md index 546b0a93ba12a..57d9df550883f 100644 --- a/book/cli/reth/db/version.md +++ b/book/cli/reth/db/version.md @@ -9,33 +9,33 @@ Usage: reth db version [OPTIONS] Options: --datadir The path to the data dir for all reth files and subdirectories. 
- + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -44,7 +44,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -54,12 +54,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -69,22 +69,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -92,12 +92,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -108,7 +108,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/debug.md b/book/cli/reth/debug.md index 54e38dbecc6c3..5ce34dba30f10 100644 --- a/book/cli/reth/debug.md +++ b/book/cli/reth/debug.md @@ -18,21 +18,21 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -41,7 +41,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -51,12 +51,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -66,22 +66,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -89,12 +89,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -105,7 +105,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/dump-genesis.md b/book/cli/reth/dump-genesis.md index a1bf2817053ce..74966a5e5e3f5 100644 --- a/book/cli/reth/dump-genesis.md +++ b/book/cli/reth/dump-genesis.md @@ -10,21 +10,21 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. 
This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -33,7 +33,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -43,12 +43,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -58,22 +58,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -81,12 +81,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -97,7 +97,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. 
- + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/import-receipts.md b/book/cli/reth/import-receipts.md new file mode 100644 index 0000000000000..7cea21d791956 --- /dev/null +++ b/book/cli/reth/import-receipts.md @@ -0,0 +1,148 @@ +# reth import-receipts + +This imports RLP encoded receipts from a file + +```bash +$ reth import-receipts --help +Usage: reth import-receipts [OPTIONS] + +Options: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, goerli, holesky, dev + + [default: mainnet] + + --chunk-len + Chunk byte length. + + --instance + Add a new instance of a node. + + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. + + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. + + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + + [default: 1] + + -h, --help + Print help (see a summary with '-h') + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. 
assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + + The path to a receipts file for import. File must use `HackReceiptCodec` (used for + exporting OP chain segment below Bedrock block via testinprod/op-geth). + + + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + [default: always] + + Possible values: + - always: Colors on + - auto: Colors on + - never: Colors off + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` \ No newline at end of file diff --git a/book/cli/reth/import.md b/book/cli/reth/import.md index 411527f9e84ba..8493a88f23829 100644 --- a/book/cli/reth/import.md +++ b/book/cli/reth/import.md @@ -12,22 +12,22 @@ Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --no-state @@ -38,13 +38,13 @@ Options: --instance Add a new instance of a node. 
- + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -66,19 +66,19 @@ Database: --db.exclusive Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - + [possible values: true, false] The path to a block file for import. - + The online stages (headers and bodies) are replaced by a file import, after which the remaining stages are executed. Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -88,12 +88,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -103,22 +103,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -126,12 +126,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -142,7 +142,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. 
- + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/init-state.md b/book/cli/reth/init-state.md index 0254a43f58c47..e5f3f1a8d6860 100644 --- a/book/cli/reth/init-state.md +++ b/book/cli/reth/init-state.md @@ -4,57 +4,58 @@ Initialize the database from a state dump file ```bash $ reth init-state --help -Usage: reth init-state [OPTIONS] +Usage: reth init-state [OPTIONS] + +Arguments: + + JSONL file with state dump. + + Must contain accounts in following format, additional account fields are ignored. Must + also contain { "root": \ } as first line. + { + "balance": "\", + "nonce": \, + "code": "\", + "storage": { + "\": "\", + .. + }, + "address": "\", + } + + Allows init at a non-genesis block. Caution! Blocks must be manually imported up until + and including the non-genesis block to init chain at. See 'import' command. Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - - [default: mainnet] - --state - JSONL file with state dump. - - Must contain accounts in following format, additional account fields are ignored. Can - also contain { "root": \ } as first line. - { - "balance": "\", - "nonce": \, - "code": "\", - "storage": { - "\": "\", - .. - }, - "address": "\", - } - - Allows init at a non-genesis block. Caution! Blocks must be manually imported up until - and including the non-genesis block to init chain at. See 'import' command. + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. 
This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -76,13 +77,13 @@ Database: --db.exclusive Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - + [possible values: true, false] Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -92,12 +93,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -107,22 +108,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -130,12 +131,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -146,7 +147,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. 
- + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/init.md b/book/cli/reth/init.md index fc20da02be7e3..f9e825d4ee6af 100644 --- a/book/cli/reth/init.md +++ b/book/cli/reth/init.md @@ -9,33 +9,33 @@ Usage: reth init [OPTIONS] Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -57,13 +57,13 @@ Database: --db.exclusive Open environment in exclusive/monopolistic mode. 
Makes it possible to open a database on an NFS volume - + [possible values: true, false] Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -73,12 +73,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -88,22 +88,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -111,12 +111,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -127,7 +127,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index edf0993d7a3bb..c73b7dd32e6e4 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -9,13 +9,13 @@ Usage: reth node [OPTIONS] Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --config @@ -24,26 +24,26 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. 
- + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] --with-unused-ports Sets all ports to unused, allowing the OS to choose random unused ports when sockets are bound. - + Mutually exclusive with `--instance`. -h, --help @@ -52,7 +52,7 @@ Options: Metrics: --metrics Enable Prometheus metrics. - + The metrics will be served at the given interface and port. Networking: @@ -70,50 +70,56 @@ Networking: --discovery.addr The UDP address to use for devp2p peer discovery version 4 - + [default: 0.0.0.0] --discovery.port The UDP port to use for devp2p peer discovery version 4 - + [default: 30303] --discovery.v5.addr - The UDP address to use for devp2p peer discovery version 5 - - [default: 0.0.0.0] + The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by RLPx address, if it's also IPv4 + + --discovery.v5.addr.ipv6 + The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by RLPx address, if it's also IPv6 --discovery.v5.port - The UDP port to use for devp2p peer discovery version 5 - + The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discv5.addr` is set + + [default: 9000] + + --discovery.v5.port.ipv6 + The UDP IPv6 port to use for devp2p peer discovery version 5. 
Not used unless `--addr` is IPv6, or `--discv5.addr.ipv6` is set + [default: 9000] --discovery.v5.lookup-interval The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program - + [default: 60] --discovery.v5.bootstrap.lookup-interval The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap - + [default: 5] --discovery.v5.bootstrap.lookup-countdown The number of times to carry out boost lookup queries at bootstrap - + [default: 100] --trusted-peers Comma separated enode URLs of trusted peers for P2P connections. - + --trusted-peers enode://abcd@192.168.0.1:30303 --trusted-only - Connect only to trusted peers + Connect to or accept from trusted peers only --bootnodes Comma separated enode URLs for P2P discovery bootstrap. - + Will fall back to a network-specific default if not specified. --peers-file @@ -122,12 +128,12 @@ Networking: --identity Custom node identity - + [default: reth/-/] --p2p-secret-key Secret key to use for this node. - + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. --no-persist-peers @@ -135,17 +141,17 @@ Networking: --nat NAT resolution method (any|none|upnp|publicip|extip:\) - + [default: any] --addr Network listening address - + [default: 0.0.0.0] --port Network listening port - + [default: 30303] --max-outbound-peers @@ -155,15 +161,25 @@ Networking: Maximum number of inbound requests. default: 30 --pooled-tx-response-soft-limit - Soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. Spec'd at 2 MiB. - - . - + Experimental, for usage in research. Sets the max accumulated byte size of transactions + to pack in one response. + Spec'd at 2MiB. + [default: 2097152] --pooled-tx-pack-soft-limit - Default soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. 
This defaults to less than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when assembling a `PooledTransactions` response. Default is 128 KiB - + Experimental, for usage in research. Sets the max accumulated byte size of transactions to + request in one request. + + Since RLPx protocol version 68, the byte size of a transaction is shared as metadata in a + transaction announcement (see RLPx specs). This allows a node to request a specific size + response. + + By default, nodes request only 128 KiB worth of transactions, but should a peer request + more, up to 2 MiB, a node will answer with more than 128 KiB. + + Default is 128 KiB. + [default: 131072] RPC: @@ -172,17 +188,17 @@ RPC: --http.addr Http server address to listen on - + [default: 127.0.0.1] --http.port Http server port to listen on - + [default: 8545] --http.api Rpc Modules to be configured for the HTTP server - + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, eth-call-bundle] --http.corsdomain @@ -193,12 +209,12 @@ RPC: --ws.addr Ws server address to listen on - + [default: 127.0.0.1] --ws.port Ws server port to listen on - + [default: 8546] --ws.origins @@ -206,7 +222,7 @@ RPC: --ws.api Rpc Modules to be configured for the WS server - + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, eth-call-bundle] --ipcdisable @@ -214,24 +230,24 @@ RPC: --ipcpath Filename for IPC socket/pipe within the datadir - + [default: .ipc] --authrpc.addr Auth server address to listen on - + [default: 127.0.0.1] --authrpc.port Auth server port to listen on - + [default: 8551] --authrpc.jwtsecret Path to a JWT secret to use for the authenticated engine-API RPC server. - + This will enforce JWT authentication for all requests coming from the consensus layer. - + If no path is provided, a secret will be generated and stored in the datadir under `//jwt.hex`. For mainnet this would be `~/.reth/mainnet/jwt.hex` by default. 
--auth-ipc @@ -239,151 +255,151 @@ RPC: --auth-ipc.path Filename for auth IPC socket/pipe within the datadir - + [default: _engine_api.ipc] --rpc.jwtsecret Hex encoded JWT secret to authenticate the regular RPC server(s), see `--http.api` and `--ws.api`. - + This is __not__ used for the authenticated engine-API RPC server, see `--authrpc.jwtsecret`. --rpc.max-request-size Set the maximum RPC request payload size for both HTTP and WS in megabytes - + [default: 15] --rpc.max-response-size Set the maximum RPC response payload size for both HTTP and WS in megabytes - + [default: 160] [aliases: rpc.returndata.limit] --rpc.max-subscriptions-per-connection Set the maximum concurrent subscriptions per connection - + [default: 1024] --rpc.max-connections Maximum number of RPC server connections - + [default: 500] --rpc.max-tracing-requests Maximum number of concurrent tracing requests - - [default: 8] + + [default: 6] --rpc.max-blocks-per-filter Maximum number of blocks that could be scanned per filter request. (0 = entire chain) - + [default: 100000] --rpc.max-logs-per-response Maximum number of logs that can be returned in a single response. 
(0 = no limit) - + [default: 20000] --rpc.gascap Maximum gas limit for `eth_call` and call tracing RPC methods - + [default: 50000000] RPC State Cache: --rpc-cache.max-blocks Max number of blocks in cache - + [default: 5000] --rpc-cache.max-receipts Max number receipts in cache - + [default: 2000] --rpc-cache.max-envs Max number of bytes for cached env data - + [default: 1000] --rpc-cache.max-concurrent-db-requests Max number of concurrent database requests - + [default: 512] Gas Price Oracle: --gpo.blocks Number of recent blocks to check for gas price - + [default: 20] --gpo.ignoreprice Gas Price below which gpo will ignore transactions - + [default: 2] --gpo.maxprice Maximum transaction priority fee(or gasprice before London Fork) to be recommended by gpo - + [default: 500000000000] --gpo.percentile The percentile of gas prices to use for the estimate - + [default: 60] TxPool: --txpool.pending-max-count Max number of transaction in the pending sub-pool - + [default: 10000] --txpool.pending-max-size Max size of the pending sub-pool in megabytes - + [default: 20] --txpool.basefee-max-count Max number of transaction in the basefee sub-pool - + [default: 10000] --txpool.basefee-max-size Max size of the basefee sub-pool in megabytes - + [default: 20] --txpool.queued-max-count Max number of transaction in the queued sub-pool - + [default: 10000] --txpool.queued-max-size Max size of the queued sub-pool in megabytes - + [default: 20] --txpool.max-account-slots Max number of executable transaction slots guaranteed per account - + [default: 16] --txpool.pricebump Price bump (in %) for the transaction pool underpriced check - + [default: 10] --blobpool.pricebump Price bump percentage to replace an already existing blob transaction - + [default: 100] --txpool.max-tx-input-bytes Max size in bytes of a single transaction allowed to enter the pool - + [default: 131072] --txpool.max-cached-entries The maximum number of blobs to keep in the in memory blob cache - + [default: 100] 
--txpool.nolocals @@ -398,33 +414,33 @@ TxPool: Builder: --builder.extradata Block extra data set by the payload builder - + [default: reth//] --builder.gaslimit Target gas ceiling for built blocks - + [default: 30000000] --builder.interval The interval at which the job should build a new payload after the last (in seconds) - + [default: 1] --builder.deadline The deadline for when the payload builder job should resolve - + [default: 12] --builder.max-tasks Maximum number of tasks to spawn for building a payload - + [default: 3] Debug: --debug.continuous Prompt the downloader to download blocks one at a time. - + NOTE: This is for testing purposes only. --debug.terminate @@ -432,7 +448,7 @@ Debug: --debug.tip Set the chain tip manually for testing purposes. - + NOTE: This is a temporary flag --debug.max-block @@ -463,13 +479,13 @@ Database: --db.exclusive Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - + [possible values: true, false] Dev testnet: --dev Start the node in dev mode - + This mode uses a local proof-of-authority consensus engine with either fixed block times or automatically mined blocks. Disables network discovery and enables local http server. @@ -481,7 +497,7 @@ Dev testnet: --dev.block-time Interval between blocks. 
- + Parses strings using [humantime::parse_duration] --dev.block-time 12s @@ -492,7 +508,7 @@ Pruning: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -502,12 +518,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -517,22 +533,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -540,12 +556,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -556,7 +572,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/p2p.md b/book/cli/reth/p2p.md index 6f1c1d3e60b40..e60471d1a7035 100644 --- a/book/cli/reth/p2p.md +++ b/book/cli/reth/p2p.md @@ -18,28 +18,38 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --datadir The path to the data dir for all reth files and subdirectories. 
- + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] - --p2p-secret-key - Secret key to use for this node. - - This also will deterministically set the peer ID. + --instance + Add a new instance of a node. + + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. + + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + + [default: 1] + + -h, --help + Print help (see a summary with '-h') + +Networking: -d, --disable-discovery Disable the discovery service @@ -54,66 +64,122 @@ Options: --discovery.addr The UDP address to use for devp2p peer discovery version 4 - + [default: 0.0.0.0] --discovery.port The UDP port to use for devp2p peer discovery version 4 - + [default: 30303] --discovery.v5.addr - The UDP address to use for devp2p peer discovery version 5 - - [default: 0.0.0.0] + The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by RLPx address, if it's also IPv4 + + --discovery.v5.addr.ipv6 + The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by RLPx address, if it's also IPv6 --discovery.v5.port - The UDP port to use for devp2p peer discovery version 5 - + The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discv5.addr` is set + + [default: 9000] + + --discovery.v5.port.ipv6 + The UDP IPv6 port to use for devp2p peer discovery version 5. 
Not used unless `--addr` is IPv6, or `--discv5.addr.ipv6` is set + [default: 9000] --discovery.v5.lookup-interval The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program - + [default: 60] --discovery.v5.bootstrap.lookup-interval The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap - - [default: 5] - --instance - Add a new instance of a node. - - Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - - Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - - [default: 1] + [default: 5] --discovery.v5.bootstrap.lookup-countdown The number of times to carry out boost lookup queries at bootstrap - + [default: 100] - --trusted-peer - Target trusted peer + --trusted-peers + Comma separated enode URLs of trusted peers for P2P connections. + + --trusted-peers enode://abcd@192.168.0.1:30303 --trusted-only - Connect only to trusted peers + Connect to or accept from trusted peers only - --retries - The number of retries per request - - [default: 5] + --bootnodes + Comma separated enode URLs for P2P discovery bootstrap. + + Will fall back to a network-specific default if not specified. + + --peers-file + The path to the known peers file. Connected peers are dumped to this file on nodes + shutdown, and read on startup. Cannot be used with `--no-persist-peers`. + + --identity + Custom node identity + + [default: reth/-/] + + --p2p-secret-key + Secret key to use for this node. + + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. 
+ + --no-persist-peers + Do not persist peers. --nat + NAT resolution method (any|none|upnp|publicip|extip:\) + [default: any] - -h, --help - Print help (see a summary with '-h') + --addr + Network listening address + + [default: 0.0.0.0] + + --port + Network listening port + + [default: 30303] + + --max-outbound-peers + Maximum number of outbound requests. default: 100 + + --max-inbound-peers + Maximum number of inbound requests. default: 30 + + --pooled-tx-response-soft-limit + Experimental, for usage in research. Sets the max accumulated byte size of transactions + to pack in one response. + Spec'd at 2MiB. + + [default: 2097152] + + --pooled-tx-pack-soft-limit + Experimental, for usage in research. Sets the max accumulated byte size of transactions to + request in one request. + + Since RLPx protocol version 68, the byte size of a transaction is shared as metadata in a + transaction announcement (see RLPx specs). This allows a node to request a specific size + response. + + By default, nodes request only 128 KiB worth of transactions, but should a peer request + more, up to 2 MiB, a node will answer with more than 128 KiB. + + Default is 128 KiB. + + [default: 131072] + + --retries + The number of retries per request + + [default: 5] Database: --db.log-level @@ -131,13 +197,13 @@ Database: --db.exclusive Open environment in exclusive/monopolistic mode. 
Makes it possible to open a database on an NFS volume - + [possible values: true, false] Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -147,12 +213,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -162,22 +228,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -185,12 +251,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -201,7 +267,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/p2p/body.md b/book/cli/reth/p2p/body.md index 24d2958591916..6e3aa2cd6fcf1 100644 --- a/book/cli/reth/p2p/body.md +++ b/book/cli/reth/p2p/body.md @@ -13,13 +13,13 @@ Arguments: Options: --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -28,7 +28,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -38,12 +38,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -53,22 +53,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -76,12 +76,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -92,7 +92,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/p2p/header.md b/book/cli/reth/p2p/header.md index 6cf84d903d8ff..dce6e545a4033 100644 --- a/book/cli/reth/p2p/header.md +++ b/book/cli/reth/p2p/header.md @@ -13,13 +13,13 @@ Arguments: Options: --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -28,7 +28,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -38,12 +38,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -53,22 +53,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -76,12 +76,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -92,7 +92,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/recover.md b/book/cli/reth/recover.md index 3593d9b16ad36..6d6531e2df5bf 100644 --- a/book/cli/reth/recover.md +++ b/book/cli/reth/recover.md @@ -14,21 +14,21 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. 
This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -37,7 +37,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -47,12 +47,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -62,22 +62,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -85,12 +85,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -101,7 +101,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/recover/storage-tries.md b/book/cli/reth/recover/storage-tries.md index 32f135916a034..baffe7ec62f10 100644 --- a/book/cli/reth/recover/storage-tries.md +++ b/book/cli/reth/recover/storage-tries.md @@ -9,33 +9,33 @@ Usage: reth recover storage-tries [OPTIONS] Options: --datadir The path to the data dir for all reth files and subdirectories. 
- + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -57,13 +57,13 @@ Database: --db.exclusive Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - + [possible values: true, false] Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -73,12 +73,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -88,22 +88,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -111,12 +111,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -127,7 +127,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/stage.md b/book/cli/reth/stage.md index a140a3518c477..3a7ba05a82c4f 100644 --- a/book/cli/reth/stage.md +++ b/book/cli/reth/stage.md @@ -17,21 +17,21 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -40,7 +40,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -50,12 +50,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -65,22 +65,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -88,12 +88,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -104,7 +104,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/stage/drop.md b/book/cli/reth/stage/drop.md index 2b647574cde12..7b4ae73b8e5a1 100644 --- a/book/cli/reth/stage/drop.md +++ b/book/cli/reth/stage/drop.md @@ -9,33 +9,33 @@ Usage: reth stage drop [OPTIONS] Options: --datadir The path to the data dir for all reth files and subdirectories. 
- + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -57,7 +57,7 @@ Database: --db.exclusive Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - + [possible values: true, false] @@ -77,7 +77,7 @@ Database: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -87,12 +87,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -102,22 +102,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -125,12 +125,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -141,7 +141,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/stage/dump.md b/book/cli/reth/stage/dump.md index 2788cc40a06fd..d1ff9dd820d3d 100644 --- a/book/cli/reth/stage/dump.md +++ b/book/cli/reth/stage/dump.md @@ -16,33 +16,33 @@ Commands: Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -64,13 +64,13 @@ Database: --db.exclusive Open environment in exclusive/monopolistic mode. 
Makes it possible to open a database on an NFS volume - + [possible values: true, false] Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -80,12 +80,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -95,22 +95,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -118,12 +118,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -134,7 +134,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/stage/dump/account-hashing.md b/book/cli/reth/stage/dump/account-hashing.md index c8b6069fad5b6..5de3b55f5f30b 100644 --- a/book/cli/reth/stage/dump/account-hashing.md +++ b/book/cli/reth/stage/dump/account-hashing.md @@ -21,13 +21,13 @@ Options: --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -36,7 +36,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -46,12 +46,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -61,22 +61,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -84,12 +84,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -100,7 +100,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/stage/dump/execution.md b/book/cli/reth/stage/dump/execution.md index 8ff064a70cc27..0abd2158222cc 100644 --- a/book/cli/reth/stage/dump/execution.md +++ b/book/cli/reth/stage/dump/execution.md @@ -21,13 +21,13 @@ Options: --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -36,7 +36,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -46,12 +46,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -61,22 +61,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -84,12 +84,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -100,7 +100,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/stage/dump/merkle.md b/book/cli/reth/stage/dump/merkle.md index ec5d142c47281..c3c1a08d3e246 100644 --- a/book/cli/reth/stage/dump/merkle.md +++ b/book/cli/reth/stage/dump/merkle.md @@ -21,13 +21,13 @@ Options: --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -36,7 +36,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -46,12 +46,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -61,22 +61,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -84,12 +84,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -100,7 +100,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/stage/dump/storage-hashing.md b/book/cli/reth/stage/dump/storage-hashing.md index 6a45c5d1ab941..e110b43d0b25d 100644 --- a/book/cli/reth/stage/dump/storage-hashing.md +++ b/book/cli/reth/stage/dump/storage-hashing.md @@ -21,13 +21,13 @@ Options: --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. 
It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -36,7 +36,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -46,12 +46,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -61,22 +61,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -84,12 +84,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -100,7 +100,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/stage/run.md b/book/cli/reth/stage/run.md index 348f082c4fad4..07c5be00be05b 100644 --- a/book/cli/reth/stage/run.md +++ b/book/cli/reth/stage/run.md @@ -29,27 +29,27 @@ Options: --datadir The path to the data dir for all reth files and subdirectories. 
- + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --metrics Enable Prometheus metrics. - + The metrics will be served at the given interface and port. --from @@ -69,18 +69,18 @@ Options: -s, --skip-unwind Normally, running the stage requires unwinding for stages that already have been run, in order to not rewrite to the same database slots. - + You can optionally skip the unwinding phase if you're syncing a block range that has not been synced before. --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -101,50 +101,56 @@ Networking: --discovery.addr The UDP address to use for devp2p peer discovery version 4 - + [default: 0.0.0.0] --discovery.port The UDP port to use for devp2p peer discovery version 4 - + [default: 30303] --discovery.v5.addr - The UDP address to use for devp2p peer discovery version 5 - - [default: 0.0.0.0] + The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by RLPx address, if it's also IPv4 + + --discovery.v5.addr.ipv6 + The UDP IPv6 address to use for devp2p peer discovery version 5. 
Overwritten by RLPx address, if it's also IPv6 --discovery.v5.port - The UDP port to use for devp2p peer discovery version 5 - + The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discv5.addr` is set + + [default: 9000] + + --discovery.v5.port.ipv6 + The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv6, or `--discv5.addr.ipv6` is set + [default: 9000] --discovery.v5.lookup-interval The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program - + [default: 60] --discovery.v5.bootstrap.lookup-interval The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap - + [default: 5] --discovery.v5.bootstrap.lookup-countdown The number of times to carry out boost lookup queries at bootstrap - + [default: 100] --trusted-peers Comma separated enode URLs of trusted peers for P2P connections. - + --trusted-peers enode://abcd@192.168.0.1:30303 --trusted-only - Connect only to trusted peers + Connect to or accept from trusted peers only --bootnodes Comma separated enode URLs for P2P discovery bootstrap. - + Will fall back to a network-specific default if not specified. --peers-file @@ -153,12 +159,12 @@ Networking: --identity Custom node identity - + [default: reth/-/] --p2p-secret-key Secret key to use for this node. - + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. --no-persist-peers @@ -166,17 +172,17 @@ Networking: --nat NAT resolution method (any|none|upnp|publicip|extip:\) - + [default: any] --addr Network listening address - + [default: 0.0.0.0] --port Network listening port - + [default: 30303] --max-outbound-peers @@ -186,15 +192,25 @@ Networking: Maximum number of inbound requests. 
default: 30 --pooled-tx-response-soft-limit - Soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. Spec'd at 2 MiB. - - . - + Experimental, for usage in research. Sets the max accumulated byte size of transactions + to pack in one response. + Spec'd at 2MiB. + [default: 2097152] --pooled-tx-pack-soft-limit - Default soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. This defaults to less than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when assembling a `PooledTransactions` response. Default is 128 KiB - + Experimental, for usage in research. Sets the max accumulated byte size of transactions to + request in one request. + + Since RLPx protocol version 68, the byte size of a transaction is shared as metadata in a + transaction announcement (see RLPx specs). This allows a node to request a specific size + response. + + By default, nodes request only 128 KiB worth of transactions, but should a peer request + more, up to 2 MiB, a node will answer with more than 128 KiB. + + Default is 128 KiB. + [default: 131072] Database: @@ -213,12 +229,12 @@ Database: --db.exclusive Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - + [possible values: true, false] -c, --commit Commits the changes in the database. WARNING: potentially destructive. - + Useful when you want to run diagnostics on the database. 
--checkpoints @@ -227,7 +243,7 @@ Database: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -237,12 +253,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -252,22 +268,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -275,12 +291,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -291,7 +307,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/stage/unwind.md b/book/cli/reth/stage/unwind.md index 44968aeded6b1..d998a577cb5d8 100644 --- a/book/cli/reth/stage/unwind.md +++ b/book/cli/reth/stage/unwind.md @@ -14,33 +14,33 @@ Commands: Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. 
- + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -62,7 +62,7 @@ Database: --db.exclusive Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - + [possible values: true, false] Networking: @@ -80,50 +80,56 @@ Networking: --discovery.addr The UDP address to use for devp2p peer discovery version 4 - + [default: 0.0.0.0] --discovery.port The UDP port to use for devp2p peer discovery version 4 - + [default: 30303] --discovery.v5.addr - The UDP address to use for devp2p peer discovery version 5 - - [default: 0.0.0.0] + The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by RLPx address, if it's also IPv4 + + --discovery.v5.addr.ipv6 + The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by RLPx address, if it's also IPv6 --discovery.v5.port - The UDP port to use for devp2p peer discovery version 5 - + The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discv5.addr` is set + + [default: 9000] + + --discovery.v5.port.ipv6 + The UDP IPv6 port to use for devp2p peer discovery version 5. 
Not used unless `--addr` is IPv6, or `--discv5.addr.ipv6` is set + [default: 9000] --discovery.v5.lookup-interval The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program - + [default: 60] --discovery.v5.bootstrap.lookup-interval The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap - + [default: 5] --discovery.v5.bootstrap.lookup-countdown The number of times to carry out boost lookup queries at bootstrap - + [default: 100] --trusted-peers Comma separated enode URLs of trusted peers for P2P connections. - + --trusted-peers enode://abcd@192.168.0.1:30303 --trusted-only - Connect only to trusted peers + Connect to or accept from trusted peers only --bootnodes Comma separated enode URLs for P2P discovery bootstrap. - + Will fall back to a network-specific default if not specified. --peers-file @@ -132,12 +138,12 @@ Networking: --identity Custom node identity - + [default: reth/-/] --p2p-secret-key Secret key to use for this node. - + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. --no-persist-peers @@ -145,17 +151,17 @@ Networking: --nat NAT resolution method (any|none|upnp|publicip|extip:\) - + [default: any] --addr Network listening address - + [default: 0.0.0.0] --port Network listening port - + [default: 30303] --max-outbound-peers @@ -165,21 +171,31 @@ Networking: Maximum number of inbound requests. default: 30 --pooled-tx-response-soft-limit - Soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. Spec'd at 2 MiB. - - . - + Experimental, for usage in research. Sets the max accumulated byte size of transactions + to pack in one response. + Spec'd at 2MiB. + [default: 2097152] --pooled-tx-pack-soft-limit - Default soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. 
This defaults to less than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when assembling a `PooledTransactions` response. Default is 128 KiB - + Experimental, for usage in research. Sets the max accumulated byte size of transactions to + request in one request. + + Since RLPx protocol version 68, the byte size of a transaction is shared as metadata in a + transaction announcement (see RLPx specs). This allows a node to request a specific size + response. + + By default, nodes request only 128 KiB worth of transactions, but should a peer request + more, up to 2 MiB, a node will answer with more than 128 KiB. + + Default is 128 KiB. + [default: 131072] Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -189,12 +205,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -204,22 +220,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -227,12 +243,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -243,7 +259,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. 
- + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/stage/unwind/num-blocks.md b/book/cli/reth/stage/unwind/num-blocks.md index 24d2bc5169b74..e3b393abee004 100644 --- a/book/cli/reth/stage/unwind/num-blocks.md +++ b/book/cli/reth/stage/unwind/num-blocks.md @@ -8,38 +8,38 @@ Usage: reth stage unwind num-blocks [OPTIONS] Arguments: - + Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -48,7 +48,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -58,12 +58,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -73,22 +73,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -96,12 +96,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -112,7 +112,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/stage/unwind/to-block.md b/book/cli/reth/stage/unwind/to-block.md index f8aa3bd6ef5e6..e836463b4f556 100644 --- a/book/cli/reth/stage/unwind/to-block.md +++ b/book/cli/reth/stage/unwind/to-block.md @@ -8,38 +8,38 @@ Usage: reth stage unwind to-block [OPTIONS] Arguments: - + Options: --datadir The path to the data dir for all reth files and subdirectories. 
- + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -48,7 +48,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -58,12 +58,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -73,22 +73,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -96,12 +96,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -112,7 +112,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/test-vectors.md b/book/cli/reth/test-vectors.md index dac4f63cf8dc7..5014645347c47 100644 --- a/book/cli/reth/test-vectors.md +++ b/book/cli/reth/test-vectors.md @@ -14,21 +14,21 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -37,7 +37,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -47,12 +47,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -62,22 +62,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -85,12 +85,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -101,7 +101,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/test-vectors/tables.md b/book/cli/reth/test-vectors/tables.md index b011881e49ffb..a0fd602c31ebd 100644 --- a/book/cli/reth/test-vectors/tables.md +++ b/book/cli/reth/test-vectors/tables.md @@ -14,21 +14,21 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. 
This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -37,7 +37,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -47,12 +47,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -62,22 +62,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -85,12 +85,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -101,7 +101,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. 
- + -v Errors -vv Warnings -vvv Info From affafa8cfc6521efe2ac1f8c1c32e005fcbfea3b Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 May 2024 20:07:23 +0200 Subject: [PATCH 581/700] feat(op): docs sync op mainnet (#8309) --- book/run/sync-op-mainnet.md | 50 +++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 book/run/sync-op-mainnet.md diff --git a/book/run/sync-op-mainnet.md b/book/run/sync-op-mainnet.md new file mode 100644 index 0000000000000..b50a32fb4b7a1 --- /dev/null +++ b/book/run/sync-op-mainnet.md @@ -0,0 +1,50 @@ +# Sync OP Mainnet + +To sync OP mainnet, the Bedrock datadir needs to be imported to use as starting point. +Blocks lower than the OP mainnet Bedrock fork, are built on the OVM and cannot be executed on the EVM. +For this reason, the chain segment from genesis until Bedrock, must be manually imported to circumvent +execution in reth's sync pipeline. + +Importing OP mainnet Bedrock datadir requires exported data: + +- Blocks [and receipts] below Bedrock +- State snapshot at first Bedrock block + +## Manual Export Steps + +See . + +Output from running the command to export state, can also be downloaded from . + +## Manual Import Steps + +### 1. Import Blocks + +Imports a `.rlp` file of blocks. + +Note! Requires running in debug mode (TODO: ). + +```bash +./op-reth import-op +``` + +### 2. Import Receipts + +This step is optional. To run a full node, skip this step. If however receipts are to be imported, the +corresponding transactions must already be imported (see [step 1](#1-import-blocks)). + +Imports a `.rlp` file of receipts, that has been exported with command specified in + (command for exporting receipts uses custom RLP-encoding). + +```bash +./op-reth import-receipts --chain optimism +``` + +### 3. Import State + +Imports a `.jsonl` state dump. The block at which the state dump is made, must be the latest block in +reth's database. 
+ +```bash +./op-reth init-state --chain optimism +``` \ No newline at end of file From 50f1f1c03336d0f58d90750df723a993d57468d6 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 May 2024 20:07:34 +0200 Subject: [PATCH 582/700] fix(op): disable execution stage (#8317) --- bin/reth/src/commands/import.rs | 4 ++-- bin/reth/src/commands/import_op.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 7d6b12fd8f089..235ada848549c 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -219,7 +219,7 @@ pub async fn build_import_pipeline( consensus: &Arc, file_client: Arc, static_file_producer: StaticFileProducer, - should_exec: bool, + disable_exec: bool, ) -> eyre::Result<(Pipeline, impl Stream)> where DB: Database + Clone + Unpin + 'static, @@ -273,7 +273,7 @@ where PruneModes::default(), ) .builder() - .disable_all_if(&StageId::STATE_REQUIRED, || should_exec), + .disable_all_if(&StageId::STATE_REQUIRED, || disable_exec), ) .build(provider_factory, static_file_producer); diff --git a/bin/reth/src/commands/import_op.rs b/bin/reth/src/commands/import_op.rs index 5576a1077bb03..b1ae8e8cb366c 100644 --- a/bin/reth/src/commands/import_op.rs +++ b/bin/reth/src/commands/import_op.rs @@ -143,7 +143,7 @@ impl ImportOpCommand { provider_factory.static_file_provider(), PruneModes::default(), ), - false, + true, ) .await?; From 5eb41d4088b1e8dc0b348b8a0fa979a514f8680e Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 21 May 2024 23:11:22 +0200 Subject: [PATCH 583/700] fix: disable db shrinking (#8324) --- crates/storage/db/src/implementation/mdbx/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 53594f671a47d..1db86bc54f423 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ 
b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -272,7 +272,7 @@ impl DatabaseEnv { // We grow the database in increments of 4 gigabytes growth_step: Some(4 * GIGABYTE as isize), // The database never shrinks - shrink_threshold: None, + shrink_threshold: Some(0), page_size: Some(PageSize::Set(default_page_size())), }); #[cfg(not(windows))] From cd039d362ba307c95dd3bfed4fa507f8cca1c6b5 Mon Sep 17 00:00:00 2001 From: Querty <98064975+Quertyy@users.noreply.github.com> Date: Wed, 22 May 2024 18:37:11 +0700 Subject: [PATCH 584/700] feat: bsc p2p network (#8061) Co-authored-by: Matthias Seitz --- Cargo.lock | 15 +++++ Cargo.toml | 1 + crates/ethereum-forks/src/forkid.rs | 9 +++ examples/README.md | 3 +- examples/bsc-p2p/Cargo.toml | 22 +++++++ examples/bsc-p2p/src/chainspec.rs | 38 ++++++++++++ examples/bsc-p2p/src/genesis.json | 82 ++++++++++++++++++++++++++ examples/bsc-p2p/src/main.rs | 91 +++++++++++++++++++++++++++++ 8 files changed, 260 insertions(+), 1 deletion(-) create mode 100644 examples/bsc-p2p/Cargo.toml create mode 100644 examples/bsc-p2p/src/chainspec.rs create mode 100644 examples/bsc-p2p/src/genesis.json create mode 100644 examples/bsc-p2p/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 511b7da62fe1e..6b944ab54d216 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1442,6 +1442,21 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "bsc-p2p" +version = "0.0.0" +dependencies = [ + "reth-discv4", + "reth-network", + "reth-network-api", + "reth-primitives", + "reth-tracing", + "secp256k1 0.28.2", + "serde_json", + "tokio", + "tokio-stream", +] + [[package]] name = "bstr" version = "0.2.17" diff --git a/Cargo.toml b/Cargo.toml index 2759aff2a76a2..ebf86a15fcbca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -90,6 +90,7 @@ members = [ "examples/custom-inspector/", "examples/exex/*", "examples/db-access", + "examples/bsc-p2p", "testing/ef-tests/", "testing/testing-utils", ] diff --git a/crates/ethereum-forks/src/forkid.rs 
b/crates/ethereum-forks/src/forkid.rs index b0aba0d5abdc7..b5d031c5e00e2 100644 --- a/crates/ethereum-forks/src/forkid.rs +++ b/crates/ethereum-forks/src/forkid.rs @@ -298,6 +298,15 @@ impl ForkFilter { self.cache.fork_id } + /// Manually set the current fork id. + /// + /// Caution: this disregards all configured fork filters and is reset on the next head update. + /// This is useful for testing or to connect to networks over p2p where only the latest forkid + /// is known. + pub fn set_current_fork_id(&mut self, fork_id: ForkId) { + self.cache.fork_id = fork_id; + } + /// Check whether the provided `ForkId` is compatible based on the validation rules in /// `EIP-2124`. /// diff --git a/examples/README.md b/examples/README.md index 4c135f880feb8..6605fd2972b3e 100644 --- a/examples/README.md +++ b/examples/README.md @@ -60,7 +60,8 @@ to make a PR! | Example | Description | | --------------------------- | ----------------------------------------------------------------- | | [Manual P2P](./manual-p2p) | Illustrates how to connect and communicate with a peer | -| [Polygon P2P](./manual-p2p) | Illustrates how to connect and communicate with a peer on Polygon | +| [Polygon P2P](./polygon-p2p) | Illustrates how to connect and communicate with a peer on Polygon | +| [BSC P2P](./bsc-p2p) | Illustrates how to connect and communicate with a peer on Binance Smart Chain | ## Misc diff --git a/examples/bsc-p2p/Cargo.toml b/examples/bsc-p2p/Cargo.toml new file mode 100644 index 0000000000000..984130590cfd8 --- /dev/null +++ b/examples/bsc-p2p/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "bsc-p2p" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +reth-discv4 = { workspace = true, features = ["test-utils"] } +reth-network = { workspace = true, features = ["test-utils"] } +reth-network-api.workspace = true 
+reth-primitives.workspace = true +reth-tracing.workspace = true + +secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } + +tokio.workspace = true +tokio-stream.workspace = true + +serde_json.workspace = true diff --git a/examples/bsc-p2p/src/chainspec.rs b/examples/bsc-p2p/src/chainspec.rs new file mode 100644 index 0000000000000..65169c734155c --- /dev/null +++ b/examples/bsc-p2p/src/chainspec.rs @@ -0,0 +1,38 @@ +use reth_primitives::{ + b256, BaseFeeParams, Chain, ChainSpec, ForkCondition, Hardfork, NodeRecord, B256, +}; + +use std::{collections::BTreeMap, sync::Arc}; + +pub const SHANGHAI_TIME: u64 = 1705996800; + +pub(crate) fn bsc_chain_spec() -> Arc { + const GENESIS: B256 = b256!("0d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5b"); + + ChainSpec { + chain: Chain::from_id(56), + genesis: serde_json::from_str(include_str!("./genesis.json")).expect("deserialize genesis"), + genesis_hash: Some(GENESIS), + paris_block_and_final_difficulty: None, + hardforks: BTreeMap::from([(Hardfork::Shanghai, ForkCondition::Timestamp(SHANGHAI_TIME))]), + deposit_contract: None, + base_fee_params: reth_primitives::BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), + prune_delete_limit: 0, + } + .into() +} + +/// BSC mainnet bootnodes +static BOOTNODES: [&str; 7] = [ + "enode://ba88d1a8a5e849bec0eb7df9eabf059f8edeae9a9eb1dcf51b7768276d78b10d4ceecf0cde2ef191ced02f66346d96a36ca9da7d73542757d9677af8da3bad3f@54.198.97.197:30311", + "enode://433c8bfdf53a3e2268ccb1b829e47f629793291cbddf0c76ae626da802f90532251fc558e2e0d10d6725e759088439bf1cd4714716b03a259a35d4b2e4acfa7f@52.69.102.73:30311", + "enode://571bee8fb902a625942f10a770ccf727ae2ba1bab2a2b64e121594a99c9437317f6166a395670a00b7d93647eacafe598b6bbcef15b40b6d1a10243865a3e80f@35.73.84.120:30311", + "enode://fac42fb0ba082b7d1eebded216db42161163d42e4f52c9e47716946d64468a62da4ba0b1cac0df5e8bf1e5284861d757339751c33d51dfef318be5168803d0b5@18.203.152.54:30311", + 
"enode://3063d1c9e1b824cfbb7c7b6abafa34faec6bb4e7e06941d218d760acdd7963b274278c5c3e63914bd6d1b58504c59ec5522c56f883baceb8538674b92da48a96@34.250.32.100:30311", + "enode://ad78c64a4ade83692488aa42e4c94084516e555d3f340d9802c2bf106a3df8868bc46eae083d2de4018f40e8d9a9952c32a0943cd68855a9bc9fd07aac982a6d@34.204.214.24:30311", + "enode://5db798deb67df75d073f8e2953dad283148133acb520625ea804c9c4ad09a35f13592a762d8f89056248f3889f6dcc33490c145774ea4ff2966982294909b37a@107.20.191.97:30311", +]; + +pub(crate) fn boot_nodes() -> Vec { + BOOTNODES[..].iter().map(|s| s.parse().unwrap()).collect() +} diff --git a/examples/bsc-p2p/src/genesis.json b/examples/bsc-p2p/src/genesis.json new file mode 100644 index 0000000000000..32e7aec8b8e4a --- /dev/null +++ b/examples/bsc-p2p/src/genesis.json @@ -0,0 +1,82 @@ +{ + "config": { + "chainId": 56, + "homesteadBlock": 0, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "ramanujanBlock": 0, + "nielsBlock": 0, + "parlia": { + "period": 3, + "epoch": 200 + } + }, + "nonce": "0x0", + "timestamp": "0x5e9da7ce", + "extraData": 
"0x00000000000000000000000000000000000000000000000000000000000000002a7cdd959bfe8d9487b2a43b33565295a698f7e26488aa4d1955ee33403f8ccb1d4de5fb97c7ade29ef9f4360c606c7ab4db26b016007d3ad0ab86a0ee01c3b1283aa067c58eab4709f85e99d46de5fe685b1ded8013785d6623cc18d214320b6bb6475978f3adfc719c99674c072166708589033e2d9afec2be4ec20253b8642161bc3f444f53679c1f3d472f7be8361c80a4c1e7e9aaf001d0877f1cfde218ce2fd7544e0b2cc94692d4a704debef7bcb61328b8f7166496996a7da21cf1f1b04d9b3e26a3d0772d4c407bbe49438ed859fe965b140dcf1aab71a96bbad7cf34b5fa511d8e963dbba288b1960e75d64430b3230294d12c6ab2aac5c2cd68e80b16b581ea0a6e3c511bbd10f4519ece37dc24887e11b55d7ae2f5b9e386cd1b50a4550696d957cb4900f03a82012708dafc9e1b880fd083b32182b869be8e0922b81f8e175ffde54d797fe11eb03f9e3bf75f1d68bf0b8b6fb4e317a0f9d6f03eaf8ce6675bc60d8c4d90829ce8f72d0163c1d5cf348a862d55063035e7a025f4da968de7e4d7e4004197917f4070f1d6caa02bbebaebb5d7e581e4b66559e635f805ff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "gasLimit": "0x2625a00", + "difficulty": "0x1", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0xffffFFFfFFffffffffffffffFfFFFfffFFFfFFfE", + "alloc": { + "0xffffFFFfFFffffffffffffffFfFFFfffFFFfFFfE": { + "balance": "0x0" + }, + "0x0000000000000000000000000000000000001000": { + "balance": "0x0", + "code": 
"0x60806040526004361061027d5760003560e01c80639dc092621161014f578063c8509d81116100c1578063eb57e2021161007a578063eb57e20214610940578063eda5868c14610973578063f340fa0114610988578063f9a2bbc7146109ae578063fc3e5908146109c3578063fd6a6879146109d85761027d565b8063c8509d8114610609578063d86222d5146108d7578063daacdb66146108ec578063dc927faf14610901578063e086c7b114610916578063e1c7392a1461092b5761027d565b8063ab51bb9611610113578063ab51bb961461074a578063ac4317511461075f578063ad3c9da61461082a578063b7ab4db51461085d578063bf9f49951461041b578063c81b1662146108c25761027d565b80639dc09262146106cd578063a1a11bf5146106e2578063a5422d5c146106f7578063a78abc161461070c578063aaf5eb68146107355761027d565b80635667515a116101f35780637942fd05116101ac5780637942fd05146105df57806381650b62146105f4578063831d65d114610609578063853230aa1461068e57806386249882146106a357806396713da9146106b85761027d565b80635667515a146105005780635d77156c146105155780636969a25c1461052a5780636e47b482146105a057806370fd5bad146105b557806375d47a0a146105ca5761027d565b80633dffc387116102455780633dffc3871461041b57806343756e5c14610446578063493279b1146104775780634bf6c882146104a357806351e80672146104b8578063565c56b3146104cd5761027d565b80630bee7a67146102825780631182b875146102b05780631ff18069146103aa578063219f22d5146103d157806335409f7f146103e6575b600080fd5b34801561028e57600080fd5b506102976109ed565b6040805163ffffffff9092168252519081900360200190f35b3480156102bc57600080fd5b50610335600480360360408110156102d357600080fd5b60ff8235169190810190604081016020820135600160201b8111156102f757600080fd5b82018360208201111561030957600080fd5b803590602001918460018302840111600160201b8311171561032a57600080fd5b5090925090506109f2565b6040805160208082528351818301528351919283929083019185019080838360005b8381101561036f578181015183820152602001610357565b50505050905090810190601f16801561039c5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b3480156103b657600080fd5b506103bf610bdf565b60408051918252519081900360200190f35b3480156103dd57600080fd5b50610297610be5565
b3480156103f257600080fd5b506104196004803603602081101561040957600080fd5b50356001600160a01b0316610bea565b005b34801561042757600080fd5b50610430610efe565b6040805160ff9092168252519081900360200190f35b34801561045257600080fd5b5061045b610f03565b604080516001600160a01b039092168252519081900360200190f35b34801561048357600080fd5b5061048c610f09565b6040805161ffff9092168252519081900360200190f35b3480156104af57600080fd5b50610430610f0e565b3480156104c457600080fd5b5061045b610f13565b3480156104d957600080fd5b506103bf600480360360208110156104f057600080fd5b50356001600160a01b0316610f19565b34801561050c57600080fd5b50610430610f6b565b34801561052157600080fd5b50610297610f70565b34801561053657600080fd5b506105546004803603602081101561054d57600080fd5b5035610f75565b604080516001600160a01b039788168152958716602087015293909516848401526001600160401b0390911660608401521515608083015260a082019290925290519081900360c00190f35b3480156105ac57600080fd5b5061045b610fd9565b3480156105c157600080fd5b50610430610fdf565b3480156105d657600080fd5b5061045b610fe4565b3480156105eb57600080fd5b50610430610fea565b34801561060057600080fd5b50610297610fef565b34801561061557600080fd5b506104196004803603604081101561062c57600080fd5b60ff8235169190810190604081016020820135600160201b81111561065057600080fd5b82018360208201111561066257600080fd5b803590602001918460018302840111600160201b8311171561068357600080fd5b509092509050610ff4565b34801561069a57600080fd5b506103bf6110a7565b3480156106af57600080fd5b506103bf6110ad565b3480156106c457600080fd5b506104306110b3565b3480156106d957600080fd5b5061045b6110b8565b3480156106ee57600080fd5b5061045b6110be565b34801561070357600080fd5b506103356110c4565b34801561071857600080fd5b506107216110e3565b604080519115158252519081900360200190f35b34801561074157600080fd5b506103bf6110ec565b34801561075657600080fd5b50610297610f6b565b34801561076b57600080fd5b506104196004803603604081101561078257600080fd5b810190602081018135600160201b81111561079c57600080fd5b8201836020820111156107ae57600080fd5b803590602001918460018302840111600160201b831117156107cf57600080f
d5b919390929091602081019035600160201b8111156107ec57600080fd5b8201836020820111156107fe57600080fd5b803590602001918460018302840111600160201b8311171561081f57600080fd5b5090925090506110f5565b34801561083657600080fd5b506103bf6004803603602081101561084d57600080fd5b50356001600160a01b031661139c565b34801561086957600080fd5b506108726113ae565b60408051602080825283518183015283519192839290830191858101910280838360005b838110156108ae578181015183820152602001610896565b505050509050019250505060405180910390f35b3480156108ce57600080fd5b5061045b6114d4565b3480156108e357600080fd5b506103bf6114da565b3480156108f857600080fd5b506103bf6114e6565b34801561090d57600080fd5b5061045b6114ec565b34801561092257600080fd5b506103bf6114f2565b34801561093757600080fd5b506104196114f7565b34801561094c57600080fd5b506104196004803603602081101561096357600080fd5b50356001600160a01b03166116fa565b34801561097f57600080fd5b506102976118c9565b6104196004803603602081101561099e57600080fd5b50356001600160a01b03166118ce565b3480156109ba57600080fd5b5061045b611b04565b3480156109cf57600080fd5b50610430611b0a565b3480156109e457600080fd5b5061045b611b0f565b606481565b60005460609060ff16610a48576040805162461bcd60e51b81526020600482015260196024820152781d1a194818dbdb9d1c9858dd081b9bdd081a5b9a5d081e595d603a1b604482015290519081900360640190fd5b3361200014610a885760405162461bcd60e51b815260040180806020018281038252602f815260200180614516602f913960400191505060405180910390fd5b610a90613d69565b6000610ad185858080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250611b1592505050565b9150915080610aed57610ae46064611c6e565b92505050610bd8565b815160009060ff16610b0d57610b068360200151611ccf565b9050610ba4565b825160ff1660011415610ba057826020015151600114610b7a577f70e72399380dcfb0338abc03dc8d47f9f470ada8e769c9a78d644ea97385ecb2604051808060200182810382526025815260200180613e6c6025913960400191505060405180910390a1506067610b9b565b610b068360200151600081518110610b8e57fe5b6020026020010151612ad5565b610ba4565b5060655b63ffffffff8116610bc9575050604080516
0008152602081019091529150610bd89050565b610bd281611c6e565b93505050505b9392505050565b60035481565b606881565b3361100114610c2a5760405162461bcd60e51b81526004018080602001828103825260298152602001806145726029913960400191505060405180910390fd5b6001600160a01b03811660009081526004602052604090205480610c4e5750610efb565b600181039050600060018281548110610c6357fe5b60009182526020909120600360049092020101546001549091506000190180610cb257600060018481548110610c9557fe5b906000526020600020906004020160030181905550505050610efb565b6040805183815290516001600160a01b038616917f3b6f9ef90462b512a1293ecec018670bf7b7f1876fb727590a8a6d7643130a70919081900360200190a26001600160a01b038416600090815260046020526040812055600154600019018314610e3457600180546000198101908110610d2957fe5b906000526020600020906004020160018481548110610d4457fe5b6000918252602082208354600492830290910180546001600160a01b03199081166001600160a01b0393841617825560018087015481840180548416918616919091179055600280880180549185018054909416919095161780835584546001600160401b03600160a01b91829004160267ffffffffffffffff60a01b1990911617808355935460ff600160e01b918290041615150260ff60e01b199094169390931790556003948501549401939093558254868401939192919087908110610e0957fe5b600091825260208083206004909202909101546001600160a01b031683528201929092526040019020555b6001805480610e3f57fe5b60008281526020812060046000199093019283020180546001600160a01b0319908116825560018201805490911690556002810180546001600160e81b03191690556003018190559155818381610e9257fe5b0490508015610ef65760015460005b81811015610ef3578260018281548110610eb757fe5b9060005260206000209060040201600301540160018281548110610ed757fe5b6000918252602090912060036004909202010155600101610ea1565b50505b505050505b50565b600181565b61100181565b603881565b600881565b61200081565b6001600160a01b03811660009081526004602052604081205480610f41576000915050610f66565b600180820381548110610f5057fe5b9060005260206000209060040201600301549150505b919050565b600081565b606781565b60018181548110610f8257fe5b6000918252602090912060049091020180546001820154600283015
46003909301546001600160a01b0392831694509082169291821691600160a01b81046001600160401b031691600160e01b90910460ff169086565b61100581565b600281565b61100881565b600b81565b606681565b33612000146110345760405162461bcd60e51b815260040180806020018281038252602f815260200180614516602f913960400191505060405180910390fd5b7f41ce201247b6ceb957dcdb217d0b8acb50b9ea0e12af9af4f5e7f38902101605838383604051808460ff1660ff168152602001806020018281038252848482818152602001925080828437600083820152604051601f909101601f1916909201829003965090945050505050a1505050565b6103e881565b60025481565b600981565b61100781565b61100681565b6040518061062001604052806105ef8152602001613f276105ef913981565b60005460ff1681565b6402540be40081565b60005460ff16611148576040805162461bcd60e51b81526020600482015260196024820152781d1a194818dbdb9d1c9858dd081b9bdd081a5b9a5d081e595d603a1b604482015290519081900360640190fd5b33611007146111885760405162461bcd60e51b815260040180806020018281038252602e815260200180613e91602e913960400191505060405180910390fd5b6111f284848080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050604080518082019091526013815272065787069726554696d655365636f6e6447617606c1b60208201529150612c4c9050565b156112cd57602081146112365760405162461bcd60e51b8152600401808060200182810382526026815260200180613ee06026913960400191505060405180910390fd5b604080516020601f840181900481028201810190925282815260009161127491858580838501838280828437600092019190915250612d3492505050565b90506064811015801561128a5750620186a08111155b6112c55760405162461bcd60e51b8152600401808060200182810382526027815260200180613e456027913960400191505060405180910390fd5b60025561130a565b6040805162461bcd60e51b815260206004820152600d60248201526c756e6b6e6f776e20706172616d60981b604482015290519081900360640190fd5b7f6cdb0ac70ab7f2e2d035cca5be60d89906f2dede7648ddbd7402189c1eeed17a848484846040518080602001806020018381038352878782818152602001925080828437600083820152601f01601f191690910184810383528581526020019050858580828437600083820152604051601f909101601f1916909
2018290039850909650505050505050a150505050565b60046020526000908152604090205481565b6001546060906000805b828110156113ff57600181815481106113cd57fe5b9060005260206000209060040201600201601c9054906101000a900460ff166113f7576001909101905b6001016113b8565b5060608160405190808252806020026020018201604052801561142c578160200160208202803683370190505b50600092509050815b838110156114cc576001818154811061144a57fe5b9060005260206000209060040201600201601c9054906101000a900460ff166114c4576001818154811061147a57fe5b600091825260209091206004909102015482516001600160a01b03909116908390859081106114a557fe5b6001600160a01b03909216602092830291909101909101526001909201915b600101611435565b509250505090565b61100281565b67016345785d8a000081565b60055481565b61100381565b602981565b60005460ff161561154f576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e747261637420616c726561647920696e697400000000000000604482015290519081900360640190fd5b611557613d69565b600061157d6040518061062001604052806105ef8152602001613f276105ef9139611b15565b91509150806115bd5760405162461bcd60e51b8152600401808060200182810382526021815260200180613f066021913960400191505060405180910390fd5b60005b8260200151518110156116e2576001836020015182815181106115df57fe5b60209081029190910181015182546001818101855560009485528385208351600493840290910180546001600160a01b039283166001600160a01b03199182161782558587015182850180549185169183169190911790556040860151600283018054606089015160808a01511515600160e01b0260ff60e01b196001600160401b03909216600160a01b0267ffffffffffffffff60a01b199590981692909516919091179290921694909417161790915560a0909301516003909301929092559186015180519185019391859081106116b557fe5b602090810291909101810151516001600160a01b03168252810191909152604001600020556001016115c0565b50506103e8600255506000805460ff19166001179055565b336110011461173a5760405162461bcd60e51b81526004018080602001828103825260298152602001806145726029913960400191505060405180910390fd5b6001600160a01b0381166000908152600460205260409020548061175e5750610efb565b6001810390506000600182815481106
1177357fe5b906000526020600020906004020160030154905060006001838154811061179657fe5b906000526020600020906004020160030181905550600060018080549050039050836001600160a01b03167f8cd4e147d8af98a9e3b6724021b8bf6aed2e5dac71c38f2dce8161b82585b25d836040518082815260200191505060405180910390a28061180557505050610efb565b600081838161181057fe5b0490508015610ef65760005b8481101561186e57816001828154811061183257fe5b906000526020600020906004020160030154016001828154811061185257fe5b600091825260209091206003600490920201015560010161181c565b50600180549085015b81811015610ef357826001828154811061188d57fe5b90600052602060002090600402016003015401600182815481106118ad57fe5b6000918252602090912060036004909202010155600101611877565b606581565b33411461190c5760405162461bcd60e51b815260040180806020018281038252602d815260200180614545602d913960400191505060405180910390fd5b60005460ff1661195f576040805162461bcd60e51b81526020600482015260196024820152781d1a194818dbdb9d1c9858dd081b9bdd081a5b9a5d081e595d603a1b604482015290519081900360640190fd5b600034116119ac576040805162461bcd60e51b81526020600482015260156024820152746465706f7369742076616c7565206973207a65726f60581b604482015290519081900360640190fd5b6001600160a01b03811660009081526004602052604090205434908015611abf5760006001808303815481106119de57fe5b9060005260206000209060040201905080600201601c9054906101000a900460ff1615611a49576040805184815290516001600160a01b038616917ff177e5d6c5764d79c32883ed824111d9b13f5668cf6ab1cc12dd36791dd955b4919081900360200190a2611ab9565b600354611a5c908463ffffffff612d3916565b6003908155810154611a74908463ffffffff612d3916565b60038201556040805184815290516001600160a01b038616917f93a090ecc682c002995fad3c85b30c5651d7fd29b0be5da9d784a3302aedc055919081900360200190a25b50611aff565b6040805183815290516001600160a01b038516917ff177e5d6c5764d79c32883ed824111d9b13f5668cf6ab1cc12dd36791dd955b4919081900360200190a25b505050565b61100081565b600381565b61100481565b611b1d613d69565b6000611b27613d69565b611b2f613d81565b611b40611b3b86612d93565b612db8565b90506000805b611b4f83612e02565b15611c6057806
11b7457611b6a611b6584612e23565b612e71565b60ff168452611c58565b8060011415611c53576060611b90611b8b85612e23565b612f28565b90508051604051908082528060200260200182016040528015611bcd57816020015b611bba613da1565b815260200190600190039081611bb25790505b50602086015260005b8151811015611c4857611be7613da1565b6000611c05848481518110611bf857fe5b6020026020010151612ff9565b9150915080611c2257876000995099505050505050505050611c69565b8188602001518481518110611c3357fe5b60209081029190910101525050600101611bd6565b506001925050611c58565b611c60565b600101611b46565b50919350909150505b915091565b604080516001808252818301909252606091829190816020015b6060815260200190600190039081611c88579050509050611cae8363ffffffff166130d6565b81600081518110611cbb57fe5b6020026020010181905250610bd8816130e9565b6000806060611cdd84613173565b9150915081611d8a577f70e72399380dcfb0338abc03dc8d47f9f470ada8e769c9a78d644ea97385ecb2816040518080602001828103825283818151815260200191508051906020019080838360005b83811015611d45578181015183820152602001611d2d565b50505050905090810190601f168015611d725780820380516001836020036101000a031916815260200191505b509250505060405180910390a1606692505050610f66565b600080805b600154811015611e075767016345785d8a000060018281548110611daf57fe5b90600052602060002090600402016003015410611dd157600190920191611dff565b600060018281548110611de057fe5b9060005260206000209060040201600301541115611dff576001909101905b600101611d8f565b50606082604051908082528060200260200182016040528015611e34578160200160208202803683370190505b509050606083604051908082528060200260200182016040528015611e63578160200160208202803683370190505b509050606084604051908082528060200260200182016040528015611e92578160200160208202803683370190505b509050606085604051908082528060200260200182016040528015611ec1578160200160208202803683370190505b5090506000606086604051908082528060200260200182016040528015611ef2578160200160208202803683370190505b509050606087604051908082528060200260200182016040528015611f21578160200160208202803683370190505b509050600098506000975060608d905060006110046001600160a01b0
31663149d14d96040518163ffffffff1660e01b815260040160206040518083038186803b158015611f6e57600080fd5b505afa158015611f82573d6000803e3d6000fd5b505050506040513d6020811015611f9857600080fd5b5051905067016345785d8a000081111561200d577f70e72399380dcfb0338abc03dc8d47f9f470ada8e769c9a78d644ea97385ecb2604051808060200182810382526021815260200180613ebf6021913960400191505060405180910390a160689d5050505050505050505050505050610f66565b60005b6001548110156122805767016345785d8a00006001828154811061203057fe5b906000526020600020906004020160030154106121b6576001818154811061205457fe5b906000526020600020906004020160020160009054906101000a90046001600160a01b03168a8d8151811061208557fe5b60200260200101906001600160a01b031690816001600160a01b03168152505060006402540be400600183815481106120ba57fe5b906000526020600020906004020160030154816120d357fe5b06600183815481106120e157fe5b906000526020600020906004020160030154039050612109838261325590919063ffffffff16565b8a8e8151811061211557fe5b6020026020010181815250506001828154811061212e57fe5b906000526020600020906004020160020160009054906101000a90046001600160a01b0316888e8151811061215f57fe5b60200260200101906001600160a01b031690816001600160a01b03168152505081898e8151811061218c57fe5b60209081029190910101526121a7878263ffffffff612d3916565b6001909d019c96506122789050565b6000600182815481106121c557fe5b906000526020600020906004020160030154111561227857600181815481106121ea57fe5b906000526020600020906004020160010160009054906101000a90046001600160a01b0316858c8151811061221b57fe5b60200260200101906001600160a01b031690816001600160a01b0316815250506001818154811061224857fe5b906000526020600020906004020160030154848c8151811061226657fe5b60209081029190910101526001909a01995b600101612010565b50600085156126be576110046001600160a01b0316636e056520878c8c8b60025442016040518663ffffffff1660e01b815260040180806020018060200180602001856001600160401b03166001600160401b03168152602001848103845288818151815260200191508051906020019060200280838360005b838110156123125781810151838201526020016122fa565b505050509050018481038352878181518152602
00191508051906020019060200280838360005b83811015612351578181015183820152602001612339565b50505050905001848103825286818151815260200191508051906020019060200280838360005b83811015612390578181015183820152602001612378565b505050509050019750505050505050506020604051808303818588803b1580156123b957600080fd5b505af1935050505080156123df57506040513d60208110156123da57600080fd5b505160015b61261a576040516000815260443d10156123fb57506000612496565b60046000803e60005160e01c6308c379a0811461241c576000915050612496565b60043d036004833e81513d60248201116001600160401b038211171561244757600092505050612496565b80830180516001600160401b03811115612468576000945050505050612496565b8060208301013d860181111561248657600095505050505050612496565b601f01601f191660405250925050505b806124a15750612545565b60019150867fa7cdeed7d0db45e3219a6e5d60838824c16f1d39991fcfe3f963029c844bf280826040518080602001828103825283818151815260200191508051906020019080838360005b838110156125055781810151838201526020016124ed565b50505050905090810190601f1680156125325780820380516001836020036101000a031916815260200191505b509250505060405180910390a250612615565b3d80801561256f576040519150601f19603f3d011682016040523d82523d6000602084013e612574565b606091505b5060019150867fbfa884552dd8921b6ce90bfe906952ae5b3b29be0cc1a951d4f62697635a3a45826040518080602001828103825283818151815260200191508051906020019080838360005b838110156125d95781810151838201526020016125c1565b50505050905090810190601f1680156126065780820380516001836020036101000a031916815260200191505b509250505060405180910390a2505b6126be565b8015612658576040805188815290517fa217d08e65f80c73121cd9db834d81652d544bfbf452f6d04922b16c90a37b709181900360200190a16126bc565b604080516020808252601b908201527f6261746368207472616e736665722072657475726e2066616c7365000000000081830152905188917fa7cdeed7d0db45e3219a6e5d60838824c16f1d39991fcfe3f963029c844bf280919081900360600190a25b505b80156128745760005b88518110156128725760008982815181106126de57fe5b602002602001015190506000600182815481106126f757fe5b600091825260209091206001600490920201810154815
46001600160a01b03909116916108fc918590811061272857fe5b9060005260206000209060040201600301549081150290604051600060405180830381858888f19350505050905080156127e4576001828154811061276957fe5b60009182526020909120600160049092020181015481546001600160a01b03909116917f6c61d60f69a7beb3e1c80db7f39f37b208537cbb19da3174511b477812b2fc7d91859081106127b857fe5b9060005260206000209060040201600301546040518082815260200191505060405180910390a2612868565b600182815481106127f157fe5b60009182526020909120600160049092020181015481546001600160a01b03909116917f25d0ce7d2f0cec669a8c17efe49d195c13455bb8872b65fa610ac7f53fe4ca7d918590811061284057fe5b9060005260206000209060040201600301546040518082815260200191505060405180910390a25b50506001016126c7565b505b8451156129be5760005b85518110156129bc57600086828151811061289557fe5b60200260200101516001600160a01b03166108fc8784815181106128b557fe5b60200260200101519081150290604051600060405180830381858888f193505050509050801561294b578682815181106128eb57fe5b60200260200101516001600160a01b03167f6c61d60f69a7beb3e1c80db7f39f37b208537cbb19da3174511b477812b2fc7d87848151811061292957fe5b60200260200101516040518082815260200191505060405180910390a26129b3565b86828151811061295757fe5b60200260200101516001600160a01b03167f25d0ce7d2f0cec669a8c17efe49d195c13455bb8872b65fa610ac7f53fe4ca7d87848151811061299557fe5b60200260200101516040518082815260200191505060405180910390a25b5060010161287e565b505b4715612a27576040805147815290517f6ecc855f9440a9282c90913bbc91619fd44f5ec0b462af28d127b116f130aa4d9181900360200190a1604051611002904780156108fc02916000818181858888f19350505050158015612a25573d6000803e3d6000fd5b505b60006003819055600555825115612a4157612a4183613297565b6110016001600160a01b031663fc4333cd6040518163ffffffff1660e01b8152600401600060405180830381600087803b158015612a7e57600080fd5b505af1158015612a92573d6000803e3d6000fd5b50506040517fedd8d7296956dd970ab4de3f2fc03be2b0ffc615d20cd4c72c6e44f928630ebf925060009150a15060009f9e505050505050505050505050505050565b80516001600160a01b0316600090815260046020526040812054801580612b265
750600180820381548110612b0657fe5b9060005260206000209060040201600201601c9054906101000a900460ff165b15612b6c5782516040516001600160a01b03909116907fe209c46bebf57cf265d5d9009a00870e256d9150f3ed5281ab9d9eb3cec6e4be90600090a26000915050610f66565b600154600554600019820111801590612bc25784516040516001600160a01b03909116907fe209c46bebf57cf265d5d9009a00870e256d9150f3ed5281ab9d9eb3cec6e4be90600090a260009350505050610f66565b600580546001908101909155805481906000198601908110612be057fe5b6000918252602082206002600490920201018054921515600160e01b0260ff60e01b199093169290921790915585516040516001600160a01b03909116917ff226e7d8f547ff903d9d419cf5f54e0d7d07efa9584135a53a057c5f1f27f49a91a2506000949350505050565b6000816040516020018082805190602001908083835b60208310612c815780518252601f199092019160209182019101612c62565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120836040516020018082805190602001908083835b60208310612cef5780518252601f199092019160209182019101612cd0565b6001836020036101000a038019825116818451168082178552505050505050905001915050604051602081830303815290604052805190602001201490505b92915050565b015190565b600082820183811015610bd8576040805162461bcd60e51b815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b612d9b613dd6565b506040805180820190915281518152602082810190820152919050565b612dc0613d81565b612dc98261375e565b612dd257600080fd5b6000612de18360200151613798565b60208085015160408051808201909152868152920190820152915050919050565b6000612e0c613dd6565b505080518051602091820151919092015191011190565b612e2b613dd6565b612e3482612e02565b612e3d57600080fd5b60208201516000612e4d826137fb565b80830160209586015260408051808201909152908152938401919091525090919050565b805160009015801590612e8657508151602110155b612e8f57600080fd5b6000612e9e8360200151613798565b90508083600001511015612ef9576040805162461bcd60e51b815260206004820152601a60248201527f6c656e677468206973206c657373207468616e206f6
666736574000000000000604482015290519081900360640190fd5b825160208085015183018051928490039291831015612f1f57826020036101000a820491505b50949350505050565b6060612f338261375e565b612f3c57600080fd5b6000612f478361392e565b9050606081604051908082528060200260200182016040528015612f8557816020015b612f72613dd6565b815260200190600190039081612f6a5790505b5090506000612f978560200151613798565b60208601510190506000805b84811015612fee57612fb4836137fb565b9150604051806040016040528083815260200184815250848281518110612fd757fe5b602090810291909101015291810191600101612fa3565b509195945050505050565b613001613da1565b600061300b613da1565b613013613d81565b61301c85612db8565b90506000805b61302b83612e02565b15611c6057806130565761304661304184612e23565b61398a565b6001600160a01b031684526130ce565b806001141561307e5761306b61304184612e23565b6001600160a01b031660208501526130ce565b80600214156130a65761309361304184612e23565b6001600160a01b031660408501526130ce565b8060031415611c53576130bb611b6584612e23565b6001600160401b03166060850152600191505b600101613022565b6060612d2e6130e4836139a4565b613a8a565b606081516000141561310a5750604080516000815260208101909152610f66565b60608260008151811061311957fe5b602002602001015190506000600190505b835181101561315a576131508285838151811061314357fe5b6020026020010151613adc565b915060010161312a565b50610bd861316d825160c060ff16613b59565b82613adc565b600060606029835111156131a5576000604051806060016040528060298152602001613df16029913991509150611c69565b60005b835181101561323b5760005b81811015613232578481815181106131c857fe5b6020026020010151600001516001600160a01b03168583815181106131e957fe5b6020026020010151600001516001600160a01b0316141561322a5760006040518060600160405280602b8152602001613e1a602b9139935093505050611c69565b6001016131b4565b506001016131a8565b505060408051602081019091526000815260019150915091565b6000610bd883836040518060400160405280601e81526020017f536166654d6174683a207375627472616374696f6e206f766572666c6f770000815250613c51565b600154815160005b828110156133b45760016132b1613da1565b600183815481106132be57fe5b600091825260208
083206040805160c08101825260049490940290910180546001600160a01b0390811685526001820154811693850193909352600281015492831691840191909152600160a01b82046001600160401b03166060840152600160e01b90910460ff16151560808301526003015460a082015291505b848110156133885786818151811061334e57fe5b6020026020010151600001516001600160a01b031682600001516001600160a01b031614156133805760009250613388565b60010161333a565b5081156133aa5780516001600160a01b03166000908152600460205260408120555b505060010161329f565b508082111561342957805b828110156134275760018054806133d257fe5b60008281526020812060046000199093019283020180546001600160a01b03199081168255600182810180549092169091556002820180546001600160e81b03191690556003909101919091559155016133bf565b505b6000818310613438578161343a565b825b905060005b81811015613634576134ec85828151811061345657fe5b60200260200101516001838154811061346b57fe5b60009182526020918290206040805160c08101825260049390930290910180546001600160a01b0390811684526001820154811694840194909452600281015493841691830191909152600160a01b83046001600160401b03166060830152600160e01b90920460ff161515608082015260039091015460a0820152613ce8565b61360757806001016004600087848151811061350457fe5b6020026020010151600001516001600160a01b03166001600160a01b031681526020019081526020016000208190555084818151811061354057fe5b60200260200101516001828154811061355557fe5b6000918252602091829020835160049092020180546001600160a01b039283166001600160a01b0319918216178255928401516001820180549184169185169190911790556040840151600282018054606087015160808801511515600160e01b0260ff60e01b196001600160401b03909216600160a01b0267ffffffffffffffff60a01b1995909716929097169190911792909216939093171692909217905560a09091015160039091015561362c565b60006001828154811061361657fe5b9060005260206000209060040201600301819055505b60010161343f565b508282111561375857825b82811015610ef657600185828151811061365557fe5b60209081029190910181015182546001818101855560009485528385208351600493840290910180546001600160a01b039283166001600160a01b03199182161782559585015181840180549184169188169190911790556
040850151600282018054606088015160808901511515600160e01b0260ff60e01b196001600160401b03909216600160a01b0267ffffffffffffffff60a01b199590971692909a169190911792909216939093171695909517905560a0909201516003909301929092558751908401929088908590811061372b57fe5b602090810291909101810151516001600160a01b031682528101919091526040016000205560010161363f565b50505050565b805160009061376f57506000610f66565b6020820151805160001a9060c082101561378e57600092505050610f66565b5060019392505050565b8051600090811a60808110156137b2576000915050610f66565b60b88110806137cd575060c081108015906137cd575060f881105b156137dc576001915050610f66565b60c08110156137f05760b519019050610f66565b60f519019050610f66565b80516000908190811a60808110156138165760019150613927565b60b881101561382b57607e1981019150613927565b60c08110156138a557600060b78203600186019550806020036101000a86510491506001810182019350508083101561389f576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b50613927565b60f88110156138ba5760be1981019150613927565b600060f78203600186019550806020036101000a865104915060018101820193505080831015613925576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b505b5092915050565b805160009061393f57506000610f66565b600080905060006139538460200151613798565b602085015185519181019250015b8082101561398157613972826137fb565b60019093019290910190613961565b50909392505050565b805160009060151461399b57600080fd5b612d2e82612e71565b604080516020808252818301909252606091829190602082018180368337505050602081018490529050600067ffffffffffffffff1984166139e857506018613a0c565b6fffffffffffffffffffffffffffffffff198416613a0857506010613a0c565b5060005b6020811015613a4257818181518110613a2157fe5b01602001516001600160f81b03191615613a3a57613a42565b600101613a0c565b60008160200390506060816040519080825280601f01601f191660200182016040528015613a77576020820181803683370190505b5080830196909652508452509192915050565b606081516001148015613ab
c5750607f60f81b82600081518110613aaa57fe5b01602001516001600160f81b03191611155b15613ac8575080610f66565b612d2e613ada8351608060ff16613b59565b835b6060806040519050835180825260208201818101602087015b81831015613b0d578051835260209283019201613af5565b50855184518101855292509050808201602086015b81831015613b3a578051835260209283019201613b22565b508651929092011591909101601f01601f191660405250905092915050565b6060680100000000000000008310613ba9576040805162461bcd60e51b815260206004820152600e60248201526d696e70757420746f6f206c6f6e6760901b604482015290519081900360640190fd5b60408051600180825281830190925260609160208201818036833701905050905060378411613c035782840160f81b81600081518110613be557fe5b60200101906001600160f81b031916908160001a9053509050612d2e565b6060613c0e856139a4565b90508381510160370160f81b82600081518110613c2757fe5b60200101906001600160f81b031916908160001a905350613c488282613adc565b95945050505050565b60008184841115613ce05760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b83811015613ca5578181015183820152602001613c8d565b50505050905090810190601f168015613cd25780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b505050900390565b805182516000916001600160a01b039182169116148015613d22575081602001516001600160a01b031683602001516001600160a01b0316145b8015613d47575081604001516001600160a01b031683604001516001600160a01b0316145b8015610bd85750506060908101519101516001600160401b0390811691161490565b60408051808201909152600081526060602082015290565b6040518060400160405280613d94613dd6565b8152602001600081525090565b6040805160c081018252600080825260208201819052918101829052606081018290526080810182905260a081019190915290565b60405180604001604052806000815260200160008152509056fe746865206e756d626572206f662076616c696461746f72732065786365656420746865206c696d69746475706c696361746520636f6e73656e7375732061646472657373206f662076616c696461746f725365747468652065787069726554696d655365636f6e64476170206973206f7574206f662072616e67656c656e677468206f66206a61696c2076616c6
96461746f7273206d757374206265206f6e65746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e7472616374666565206973206c6172676572207468616e2044555354595f494e434f4d494e476c656e677468206f662065787069726554696d655365636f6e64476170206d69736d617463686661696c656420746f20706172736520696e69742076616c696461746f72536574f905ec80f905e8f846942a7cdd959bfe8d9487b2a43b33565295a698f7e294b6a7edd747c0554875d3fc531d19ba1497992c5e941ff80f3f7f110ffd8920a3ac38fdef318fe94a3f86048c27395000f846946488aa4d1955ee33403f8ccb1d4de5fb97c7ade294220f003d8bdfaadf52aa1e55ae4cc485e6794875941a87e90e440a39c99aa9cb5cea0ad6a3f0b2407b86048c27395000f846949ef9f4360c606c7ab4db26b016007d3ad0ab86a0946103af86a874b705854033438383c82575f25bc29418e2db06cbff3e3c5f856410a1838649e760175786048c27395000f84694ee01c3b1283aa067c58eab4709f85e99d46de5fe94ee4b9bfb1871c64e2bcabb1dc382dc8b7c4218a29415904ab26ab0e99d70b51c220ccdcccabee6e29786048c27395000f84694685b1ded8013785d6623cc18d214320b6bb6475994a20ef4e5e4e7e36258dbf51f4d905114cb1b34bc9413e39085dc88704f4394d35209a02b1a9520320c86048c27395000f8469478f3adfc719c99674c072166708589033e2d9afe9448a30d5eaa7b64492a160f139e2da2800ec3834e94055838358c29edf4dcc1ba1985ad58aedbb6be2b86048c27395000f84694c2be4ec20253b8642161bc3f444f53679c1f3d479466f50c616d737e60d7ca6311ff0d9c434197898a94d1d678a2506eeaa365056fe565df8bc8659f28b086048c27395000f846942f7be8361c80a4c1e7e9aaf001d0877f1cfde218945f93992ac37f3e61db2ef8a587a436a161fd210b94ecbc4fb1a97861344dad0867ca3cba2b860411f086048c27395000f84694ce2fd7544e0b2cc94692d4a704debef7bcb613289444abc67b4b2fba283c582387f54c9cba7c34bafa948acc2ab395ded08bb75ce85bf0f95ad2abc51ad586048c27395000f84694b8f7166496996a7da21cf1f1b04d9b3e26a3d077946770572763289aac606e4f327c2f6cc1aa3b3e3b94882d745ed97d4422ca8da1c22ec49d880c4c097286048c27395000f846942d4c407bbe49438ed859fe965b140dcf1aab71a9943ad0939e120f33518fbba04631afe7a3ed6327b194b2bbb170ca4e499a2b0f3cc85ebfa6e8c4dfcbea86048c27395000f846946bbad7cf34b5fa511d8e963dbba288b1960e75d694853b0f6c324d1f4e7
6c8266942337ac1b0af1a229442498946a51ca5924552ead6fc2af08b94fcba648601d1a94a2000f846944430b3230294d12c6ab2aac5c2cd68e80b16b581947b107f4976a252a6939b771202c28e64e03f52d694795811a7f214084116949fc4f53cedbf189eeab28601d1a94a2000f84694ea0a6e3c511bbd10f4519ece37dc24887e11b55d946811ca77acfb221a49393c193f3a22db829fcc8e9464feb7c04830dd9ace164fc5c52b3f5a29e5018a8601d1a94a2000f846947ae2f5b9e386cd1b50a4550696d957cb4900f03a94e83bcc5077e6b873995c24bac871b5ad856047e19464e48d4057a90b233e026c1041e6012ada897fe88601d1a94a2000f8469482012708dafc9e1b880fd083b32182b869be8e09948e5adc73a2d233a1b496ed3115464dd6c7b887509428b383d324bc9a37f4e276190796ba5a8947f5ed8601d1a94a2000f8469422b81f8e175ffde54d797fe11eb03f9e3bf75f1d94a1c3ef7ca38d8ba80cce3bfc53ebd2903ed21658942767f7447f7b9b70313d4147b795414aecea54718601d1a94a2000f8469468bf0b8b6fb4e317a0f9d6f03eaf8ce6675bc60d94675cfe570b7902623f47e7f59c9664b5f5065dcf94d84f0d2e50bcf00f2fc476e1c57f5ca2d57f625b8601d1a94a2000f846948c4d90829ce8f72d0163c1d5cf348a862d5506309485c42a7b34309bee2ed6a235f86d16f059deec5894cc2cedc53f0fa6d376336efb67e43d167169f3b78601d1a94a2000f8469435e7a025f4da968de7e4d7e4004197917f4070f194b1182abaeeb3b4d8eba7e6a4162eac7ace23d57394c4fd0d870da52e73de2dd8ded19fe3d26f43a1138601d1a94a2000f84694d6caa02bbebaebb5d7e581e4b66559e635f805ff94c07335cf083c1c46a487f0325769d88e163b653694efaff03b42e41f953a925fc43720e45fb61a19938601d1a94a2000746865206d6573736167652073656e646572206d7573742062652063726f737320636861696e20636f6e7472616374746865206d6573736167652073656e646572206d7573742062652074686520626c6f636b2070726f6475636572746865206d6573736167652073656e646572206d75737420626520736c61736820636f6e7472616374a2646970667358221220f4016eb3755efa2abde797b21f8695280d971b0fea37198122d2e5867516da0464736f6c63430006040033" + }, + "0x0000000000000000000000000000000000001001": { + "balance": "0x0", + "code": 
"0x608060405234801561001057600080fd5b50600436106102275760003560e01c8063831d65d111610130578063c80d4b8f116100b8578063e1c7392a1161007c578063e1c7392a146106d9578063f9a2bbc7146106e1578063fc3e5908146106e9578063fc4333cd146106f1578063fd6a6879146106f957610227565b8063c80d4b8f14610623578063c81b16621461062b578063c8509d8114610633578063c96be4cb146106ab578063dc927faf146106d157610227565b8063a1a11bf5116100ff578063a1a11bf514610531578063a78abc1614610539578063ab51bb9614610555578063ac0af6291461055d578063ac4317511461056557610227565b8063831d65d11461049f57806396713da9146105195780639bc8e4f2146105215780639dc092621461052957610227565b80634bf6c882116101b35780636e47b482116101825780636e47b4821461047757806370fd5bad1461047f57806375d47a0a146104875780637912a65d1461048f5780637942fd051461049757610227565b80634bf6c8821461045757806351e806721461045f578063567a372d1461046757806362b72cf51461046f57610227565b806337c8dab9116101fa57806337c8dab9146103b9578063389f4f71146103f85780633dffc3871461041257806343756e5c14610430578063493279b11461043857610227565b80630bee7a671461022c5780631182b8751461024d57806323bac5a21461033a57806335aa2e4414610380575b600080fd5b610234610701565b6040805163ffffffff9092168252519081900360200190f35b6102c56004803603604081101561026357600080fd5b60ff8235169190810190604081016020820135600160201b81111561028757600080fd5b82018360208201111561029957600080fd5b803590602001918460018302840111600160201b831117156102ba57600080fd5b509092509050610706565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102ff5781810151838201526020016102e7565b50505050905090810190601f16801561032c5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b6103606004803603602081101561035057600080fd5b50356001600160a01b03166107da565b604080519384526020840192909252151582820152519081900360600190f35b61039d6004803603602081101561039657600080fd5b50356107fd565b604080516001600160a01b039092168252519081900360200190f35b6103df600480360360208110156103cf57600080fd5b50356001600160a01b0316610824565b6040805192835
260208301919091528051918290030190f35b61040061087b565b60408051918252519081900360200190f35b61041a610881565b6040805160ff9092168252519081900360200190f35b61039d610886565b61044061088c565b6040805161ffff9092168252519081900360200190f35b61041a610891565b61039d610896565b61040061089c565b6104006108a2565b61039d6108a8565b61041a6108ae565b61039d6108b3565b6104006108b9565b61041a6108be565b610517600480360360408110156104b557600080fd5b60ff8235169190810190604081016020820135600160201b8111156104d957600080fd5b8201836020820111156104eb57600080fd5b803590602001918460018302840111600160201b8311171561050c57600080fd5b5090925090506108c3565b005b61041a610a1e565b610400610a23565b61039d610a2e565b61039d610a34565b610541610a3a565b604080519115158252519081900360200190f35b610234610a43565b610400610a48565b6105176004803603604081101561057b57600080fd5b810190602081018135600160201b81111561059557600080fd5b8201836020820111156105a757600080fd5b803590602001918460018302840111600160201b831117156105c857600080fd5b919390929091602081019035600160201b8111156105e557600080fd5b8201836020820111156105f757600080fd5b803590602001918460018302840111600160201b8311171561061857600080fd5b509092509050610a4d565b610400610e3b565b61039d610e40565b6105176004803603604081101561064957600080fd5b60ff8235169190810190604081016020820135600160201b81111561066d57600080fd5b82018360208201111561067f57600080fd5b803590602001918460018302840111600160201b831117156106a057600080fd5b509092509050610e46565b610517600480360360208110156106c157600080fd5b50356001600160a01b0316610ef9565b61039d61131e565b610517611324565b61039d611395565b61041a61139b565b6105176113a0565b61039d61182b565b606481565b606033612000146107485760405162461bcd60e51b815260040180806020018281038252602f815260200180612282602f913960400191505060405180910390fd5b60005460ff1661078d576040805162461bcd60e51b815260206004820152601960248201526000805160206122de833981519152604482015290519081900360640190fd5b6040805162461bcd60e51b815260206004820152601e60248201527f7265636569766520756e65787065637465642073796e207061636b6167650000604482015
290519081900360640190fd5b600260208190526000918252604090912080546001820154919092015460ff1683565b6001818154811061080a57fe5b6000918252602090912001546001600160a01b0316905081565b60008061082f612146565b5050506001600160a01b0316600090815260026020818152604092839020835160608101855281548082526001830154938201849052919093015460ff16151592909301919091529091565b60055481565b600181565b61100181565b603881565b600881565b61200081565b60045481565b60035481565b61100581565b600281565b61100881565b603281565b600b81565b33612000146109035760405162461bcd60e51b815260040180806020018281038252602f815260200180612282602f913960400191505060405180910390fd5b60005460ff16610948576040805162461bcd60e51b815260206004820152601960248201526000805160206122de833981519152604482015290519081900360640190fd5b610950612169565b600061099184848080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061183192505050565b9150915080156109db5781516040805163ffffffff9092168252517f7f0956d47419b9525356e7111652b653b530ec6f5096dccc04589bc38e6299679181900360200190a1610a17565b81516040805163ffffffff9092168252517f7d45f62d17443dd4547bca8a8112c60e2385669318dc300ec61a5d2492f262e79181900360200190a15b5050505050565b600981565b662386f26fc1000081565b61100781565b61100681565b60005460ff1681565b600081565b600481565b60005460ff16610a92576040805162461bcd60e51b815260206004820152601960248201526000805160206122de833981519152604482015290519081900360640190fd5b3361100714610ad25760405162461bcd60e51b815260040180806020018281038252602e81526020018061220d602e913960400191505060405180910390fd5b610b3d84848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805180820190915260148152731b5a5cd9195b59585b9bdc951a1c995cda1bdb1960621b602082015291506118b19050565b15610c165760208114610b815760405162461bcd60e51b81526004018080602001828103825260278152602001806121b66027913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091610bbf9185858083850183828082843760009201919091525
061199992505050565b905060018110158015610bd3575060055481105b610c0e5760405162461bcd60e51b815260040180806020018281038252602581526020018061225d6025913960400191505060405180910390fd5b600455610da9565b610c7c84848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152600f81526e19995b1bdb9e551a1c995cda1bdb19608a1b602082015291506118b19050565b15610d6c5760208114610cc05760405162461bcd60e51b815260040180806020018281038252602281526020018061223b6022913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091610cfe9185858083850183828082843760009201919091525061199992505050565b90506103e88111158015610d13575060045481115b610d64576040805162461bcd60e51b815260206004820181905260248201527f7468652066656c6f6e795468726573686f6c64206f7574206f662072616e6765604482015290519081900360640190fd5b600555610da9565b6040805162461bcd60e51b815260206004820152600d60248201526c756e6b6e6f776e20706172616d60981b604482015290519081900360640190fd5b7f6cdb0ac70ab7f2e2d035cca5be60d89906f2dede7648ddbd7402189c1eeed17a848484846040518080602001806020018381038352878782818152602001925080828437600083820152601f01601f191690910184810383528581526020019050858580828437600083820152604051601f909101601f19169092018290039850909650505050505050a150505050565b609681565b61100281565b3361200014610e865760405162461bcd60e51b815260040180806020018281038252602f815260200180612282602f913960400191505060405180910390fd5b60005460ff16610ecb576040805162461bcd60e51b815260206004820152601960248201526000805160206122de833981519152604482015290519081900360640190fd5b6040517f07db600eebe2ac176be8dcebad61858c245a4961bb32ca2aa3d159b09aa0810e90600090a1505050565b334114610f375760405162461bcd60e51b815260040180806020018281038252602d8152602001806122b1602d913960400191505060405180910390fd5b60005460ff16610f7c576040805162461bcd60e51b815260206004820152601960248201526000805160206122de833981519152604482015290519081900360640190fd5b6003544311610fd2576040805162461bcd60e51b815260206004820181905260248
201527f63616e206e6f7420736c61736820747769636520696e206f6e6520626c6f636b604482015290519081900360640190fd5b3a1561101c576040805162461bcd60e51b81526020600482015260146024820152736761737072696365206973206e6f74207a65726f60601b604482015290519081900360640190fd5b611024612146565b506001600160a01b0381166000908152600260208181526040928390208351606081018552815481526001820154928101929092529091015460ff16158015928201929092529061107f5760208101805160010190526110d8565b60016040820181905260208201819052805480820182556000919091527fb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf60180546001600160a01b0319166001600160a01b0384161790555b4381526005546020820151816110ea57fe5b0661123c57600060208201819052604080516335409f7f60e01b81526001600160a01b03851660048201529051611000926335409f7f926024808201939182900301818387803b15801561113d57600080fd5b505af1158015611151573d6000803e3d6000fd5b505050506120006001600160a01b031663f7a251d7600b6111718561199e565b60006040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b838110156111d15781810151838201526020016111b9565b50505050905090810190601f1680156111fe5780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b15801561121f57600080fd5b505af1158015611233573d6000803e3d6000fd5b505050506112b2565b60045481602001518161124b57fe5b066112b257604080516375abf10160e11b81526001600160a01b038416600482015290516110009163eb57e20291602480830192600092919082900301818387803b15801561129957600080fd5b505af11580156112ad573d6000803e3d6000fd5b505050505b6001600160a01b0382166000818152600260208181526040808420865181559186015160018301558581015191909201805460ff1916911515919091179055517fddb6012116e51abf5436d956a4f0ebd927e92c576ff96d7918290c8782291e3e9190a2505043600355565b61100381565b60005460ff161561137c576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e747261637420616c726561647920696e697400000000000000604482015290519081900360640190fd5b6032600455609660055
56000805460ff19166001179055565b61100081565b600381565b33611000146113e05760405162461bcd60e51b81526004018080602001828103825260308152602001806121dd6030913960400191505060405180910390fd5b60005460ff16611425576040805162461bcd60e51b815260206004820152601960248201526000805160206122de833981519152604482015290519081900360640190fd5b60015461143157611829565b600154600090600019015b8082116117fd576000805b8284101561156057611457612146565b600260006001878154811061146857fe5b60009182526020808320909101546001600160a01b0316835282810193909352604091820190208151606081018352815481526001820154938101939093526002015460ff16151590820152600554909150600490048160200151111561154a576004600554816114d557fe5b048160200151038160200181815250508060026000600188815481106114f757fe5b6000918252602080832091909101546001600160a01b0316835282810193909352604091820190208351815591830151600183015591909101516002909101805460ff1916911515919091179055611554565b6001925050611560565b50600190930192611447565b8284116116f75761156f612146565b600260006001868154811061158057fe5b60009182526020808320909101546001600160a01b0316835282810193909352604091820190208151606081018352815481526001820154938101939093526002015460ff161515908201526005549091506004900481602001511115611668576004600554816115ed57fe5b0481602001510381602001818152505080600260006001878154811061160f57fe5b6000918252602080832091909101546001600160a01b03168352828101939093526040918201902083518155918301516001808401919091559201516002909101805460ff191691151591909117905591506116f79050565b600260006001868154811061167957fe5b60009182526020808320909101546001600160a01b031683528201929092526040018120818155600181810192909255600201805460ff191690558054806116bd57fe5b600082815260209020810160001990810180546001600160a01b0319169055019055836116ea57506116f7565b5060001990920191611560565b8180156117015750805b156117e057600260006001868154811061171757fe5b60009182526020808320909101546001600160a01b031683528201929092526040018120818155600181810192909255600201805460ff1916905580548490811061175e57fe5b6000918252602090912001546001805
46001600160a01b03909216918690811061178457fe5b9060005260206000200160006101000a8154816001600160a01b0302191690836001600160a01b0316021790555060018054806117bd57fe5b600082815260209020810160001990810180546001600160a01b03191690550190555b826117ec5750506117fd565b50506001909101906000190161143c565b6040517fcfdb3b6ccaeccbdc68be3c59c840e3b3c90f0a7c491f5fff1cf56cfda200dd9c90600090a150505b565b61100481565b611839612169565b6000611843612169565b61184b61217b565b61185c61185786611a70565b611a95565b90506000805b61186b83611adf565b156118a457806118975761188661188184611b00565b611b4e565b63ffffffff1684526001915061189c565b6118a4565b600101611862565b5091935090915050915091565b6000816040516020018082805190602001908083835b602083106118e65780518252601f1990920191602091820191016118c7565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120836040516020018082805190602001908083835b602083106119545780518252601f199092019160209182019101611935565b6001836020036101000a038019825116818451168082178552505050505050905001915050604051602081830303815290604052805190602001201490505b92915050565b015190565b60408051600480825260a08201909252606091829190816020015b60608152602001906001900390816119b95790505090506119e2836001600160a01b0316611c05565b816000815181106119ef57fe5b6020026020010181905250611a0343611c28565b81600181518110611a1057fe5b6020908102919091010152611a256038611c28565b81600281518110611a3257fe5b6020026020010181905250611a4642611c28565b81600381518110611a5357fe5b6020026020010181905250611a6781611c3b565b9150505b919050565b611a7861219b565b506040805180820190915281518152602082810190820152919050565b611a9d61217b565b611aa682611cc5565b611aaf57600080fd5b6000611abe8360200151611cff565b60208085015160408051808201909152868152920190820152915050919050565b6000611ae961219b565b505080518051602091820151919092015191011190565b611b0861219b565b611b1182611adf565b611b1a57600080fd5b60208201516000611b2a82611d62565b80830160209586015260408051808201909152908152938401919091525090919050565b805160009015801
590611b6357508151602110155b611b6c57600080fd5b6000611b7b8360200151611cff565b90508083600001511015611bd6576040805162461bcd60e51b815260206004820152601a60248201527f6c656e677468206973206c657373207468616e206f6666736574000000000000604482015290519081900360640190fd5b825160208085015183018051928490039291831015611bfc57826020036101000a820491505b50949350505050565b60408051600560a21b8318601482015260348101909152606090611a6781611e95565b6060611993611c3683611eeb565b611e95565b6060815160001415611c5c5750604080516000815260208101909152611a6b565b606082600081518110611c6b57fe5b602002602001015190506000600190505b8351811015611cac57611ca282858381518110611c9557fe5b6020026020010151611fd1565b9150600101611c7c565b50611a67611cbf825160c060ff1661204e565b82611fd1565b8051600090611cd657506000611a6b565b6020820151805160001a9060c0821015611cf557600092505050611a6b565b5060019392505050565b8051600090811a6080811015611d19576000915050611a6b565b60b8811080611d34575060c08110801590611d34575060f881105b15611d43576001915050611a6b565b60c0811015611d575760b519019050611a6b565b60f519019050611a6b565b80516000908190811a6080811015611d7d5760019150611e8e565b60b8811015611d9257607e1981019150611e8e565b60c0811015611e0c57600060b78203600186019550806020036101000a865104915060018101820193505080831015611e06576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b50611e8e565b60f8811015611e215760be1981019150611e8e565b600060f78203600186019550806020036101000a865104915060018101820193505080831015611e8c576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b505b5092915050565b606081516001148015611ec75750607f60f81b82600081518110611eb557fe5b01602001516001600160f81b03191611155b15611ed3575080611a6b565b611993611ee58351608060ff1661204e565b83611fd1565b604080516020808252818301909252606091829190602082018180368337505050602081018490529050600067ffffffffffffffff198416611f2f57506018611f53565b6ffffffffffffffffffffffffffff
fffff198416611f4f57506010611f53565b5060005b6020811015611f8957818181518110611f6857fe5b01602001516001600160f81b03191615611f8157611f89565b600101611f53565b60008160200390506060816040519080825280601f01601f191660200182016040528015611fbe576020820181803683370190505b5080830196909652508452509192915050565b6060806040519050835180825260208201818101602087015b81831015612002578051835260209283019201611fea565b50855184518101855292509050808201602086015b8183101561202f578051835260209283019201612017565b508651929092011591909101601f01601f191660405250905092915050565b606068010000000000000000831061209e576040805162461bcd60e51b815260206004820152600e60248201526d696e70757420746f6f206c6f6e6760901b604482015290519081900360640190fd5b604080516001808252818301909252606091602082018180368337019050509050603784116120f85782840160f81b816000815181106120da57fe5b60200101906001600160f81b031916908160001a9053509050611993565b606061210385611eeb565b90508381510160370160f81b8260008151811061211c57fe5b60200101906001600160f81b031916908160001a90535061213d8282611fd1565b95945050505050565b604051806060016040528060008152602001600081526020016000151581525090565b60408051602081019091526000815290565b604051806040016040528061218e61219b565b8152602001600081525090565b60405180604001604052806000815260200160008152509056fe6c656e677468206f66206d697364656d65616e6f725468726573686f6c64206d69736d61746368746865206d6573736167652073656e646572206d7573742062652076616c696461746f7253657420636f6e7472616374746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e74726163746c656e677468206f662066656c6f6e795468726573686f6c64206d69736d61746368746865206d697364656d65616e6f725468726573686f6c64206f7574206f662072616e6765746865206d6573736167652073656e646572206d7573742062652063726f737320636861696e20636f6e7472616374746865206d6573736167652073656e646572206d7573742062652074686520626c6f636b2070726f647563657274686520636f6e7472616374206e6f7420696e69742079657400000000000000a2646970667358221220978db7b9b4e7fb0a1f9436afd2b57418fb3ba1969a491794d2da96ee68de8
7d064736f6c63430006040033" + }, + "0x0000000000000000000000000000000000001002": { + "balance": "0x0", + "code": "0x60806040526004361061014f5760003560e01c806396713da9116100b6578063c81b16621161006f578063c81b1662146103dc578063dc927faf146103f1578063f9a2bbc714610406578063fb5478b31461041b578063fc3e590814610430578063fd6a68791461044557610193565b806396713da91461033a5780639a99b4f01461034f5780639dc0926214610388578063a1a11bf51461039d578063a78abc16146103b2578063ab51bb96146103c757610193565b806351e806721161010857806351e806721461028a5780636d70f7ae1461029f5780636e47b482146102e657806370fd5bad146102fb57806375d47a0a146103105780637942fd051461032557610193565b80630bee7a67146101985780633a0b0eff146101c65780633dffc387146101ed57806343756e5c14610218578063493279b1146102495780634bf6c8821461027557610193565b366101935734156101915760408051348152905133917f6c98249d85d88c3753a04a22230f595e4dc8d3dc86c34af35deeeedc861b89db919081900360200190a25b005b600080fd5b3480156101a457600080fd5b506101ad61045a565b6040805163ffffffff9092168252519081900360200190f35b3480156101d257600080fd5b506101db61045f565b60408051918252519081900360200190f35b3480156101f957600080fd5b50610202610465565b6040805160ff9092168252519081900360200190f35b34801561022457600080fd5b5061022d61046a565b604080516001600160a01b039092168252519081900360200190f35b34801561025557600080fd5b5061025e610470565b6040805161ffff9092168252519081900360200190f35b34801561028157600080fd5b50610202610475565b34801561029657600080fd5b5061022d61047a565b3480156102ab57600080fd5b506102d2600480360360208110156102c257600080fd5b50356001600160a01b0316610480565b604080519115158252519081900360200190f35b3480156102f257600080fd5b5061022d61049e565b34801561030757600080fd5b506102026104a4565b34801561031c57600080fd5b5061022d6104a9565b34801561033157600080fd5b506102026104af565b34801561034657600080fd5b506102026104b4565b34801561035b57600080fd5b506101db6004803603604081101561037257600080fd5b506001600160a01b0381351690602001356104b9565b34801561039457600080fd5b5061022d610664565b3480156103a957600080fd5b5061022d6
1066a565b3480156103be57600080fd5b506102d2610670565b3480156103d357600080fd5b506101ad610679565b3480156103e857600080fd5b5061022d61067e565b3480156103fd57600080fd5b5061022d610684565b34801561041257600080fd5b5061022d61068a565b34801561042757600080fd5b506101db610690565b34801561043c57600080fd5b5061020261069c565b34801561045157600080fd5b5061022d6106a1565b606481565b60015481565b600181565b61100181565b603881565b600881565b61200081565b6001600160a01b031660009081526002602052604090205460ff1690565b61100581565b600281565b61100881565b600b81565b600981565b6000805460ff1661053657600260208190527fe57bda0a954a7c7381b17b2c763e646ba2c60f67292d287ba583603e2c1c41668054600160ff19918216811790925561100560009081527fe25235fc0de9d7165652bef0846fefda506174abb9a190f03d0f7bcc6146dbce80548316841790559282558254161790555b3360009081526002602052604090205460ff166105845760405162461bcd60e51b815260040180806020018281038252602b8152602001806106a8602b913960400191505060405180910390fd5b60004783106105935747610595565b825b9050670de0b6b3a76400008111156105b25750670de0b6b3a76400005b8015610633576040516001600160a01b0385169082156108fc029083906000818181858888f193505050501580156105ee573d6000803e3d6000fd5b506040805182815290516001600160a01b038616917ff8b71c64315fc33b2ead2adfa487955065152a8ac33d9d5193aafd7f45dc15a0919081900360200190a261065d565b6040517fe589651933c2457488cc0d8e0941518abf748e799435e4e396d9c4d0b2db2d4d90600090a15b9392505050565b61100781565b61100681565b60005460ff1681565b600081565b61100281565b61100381565b61100081565b670de0b6b3a764000081565b600381565b6110048156fe6f6e6c79206f70657261746f7220697320616c6c6f77656420746f2063616c6c20746865206d6574686f64a2646970667358221220c09e2c37549a0aef291c4f977743d3cea839c669624a8cfae895b3979a32800764736f6c63430006040033" + }, + "0x0000000000000000000000000000000000001003": { + "balance": "0x0", + "code": 
"0x608060405234801561001057600080fd5b50600436106102115760003560e01c8063a78abc1611610125578063dda83148116100ad578063e405bbc31161007c578063e405bbc314610681578063ea54b2aa14610689578063f9a2bbc714610691578063fc3e590814610699578063fd6a6879146106a157610211565b8063dda8314814610609578063df5fe7041461062f578063e1c7392a14610655578063e2761af01461065d57610211565b8063c81b1662116100f4578063c81b166214610534578063cba510a91461053c578063d816987914610562578063da8d08f0146105db578063dc927faf1461060157610211565b8063a78abc1614610444578063ab51bb9614610460578063ac43175114610468578063adc879e91461052c57610211565b8063564b81ef116101a857806375d47a0a1161017757806375d47a0a1461041c5780637942fd051461042457806396713da91461042c5780639dc0926214610434578063a1a11bf51461043c57610211565b8063564b81ef146102ca5780635c5ae8db146103475780636e47b4821461040c57806370fd5bad1461041457610211565b806343756e5c116101e457806343756e5c14610277578063493279b11461029b5780634bf6c882146102ba57806351e80672146102c257610211565b80630bee7a67146102165780632657e9b61461023757806333f7798d146102515780633dffc38714610259575b600080fd5b61021e6106a9565b6040805163ffffffff9092168252519081900360200190f35b61023f6106ae565b60408051918252519081900360200190f35b61023f6106b9565b6102616106bf565b6040805160ff9092168252519081900360200190f35b61027f6106c4565b604080516001600160a01b039092168252519081900360200190f35b6102a36106ca565b6040805161ffff9092168252519081900360200190f35b6102616106cf565b61027f6106d4565b6102d26106da565b6040805160208082528351818301528351919283929083019185019080838360005b8381101561030c5781810151838201526020016102f4565b50505050905090810190601f1680156103395780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b61036d6004803603602081101561035d57600080fd5b50356001600160401b03166107e6565b60405180856001600160401b03166001600160401b0316815260200184815260200183815260200180602001828103825283818151815260200191508051906020019080838360005b838110156103ce5781810151838201526020016103b6565b50505050905090810190601f1680156103fb57808203805
16001836020036101000a031916815260200191505b509550505050505060405180910390f35b61027f6108a1565b6102616108a7565b61027f6108ac565b6102616108b2565b6102616108b7565b61027f6108bc565b61027f6108c2565b61044c6108c8565b604080519115158252519081900360200190f35b61021e6108d1565b61052a6004803603604081101561047e57600080fd5b81019060208101813564010000000081111561049957600080fd5b8201836020820111156104ab57600080fd5b803590602001918460018302840111640100000000831117156104cd57600080fd5b9193909290916020810190356401000000008111156104eb57600080fd5b8201836020820111156104fd57600080fd5b8035906020019184600183028401116401000000008311171561051f57600080fd5b5090925090506108d6565b005b61023f610b8f565b61027f610b95565b61023f6004803603602081101561055257600080fd5b50356001600160401b0316610b9b565b61044c6004803603604081101561057857600080fd5b81019060208101813564010000000081111561059357600080fd5b8201836020820111156105a557600080fd5b803590602001918460018302840111640100000000831117156105c757600080fd5b9193509150356001600160401b0316610bba565b61027f600480360360208110156105f157600080fd5b50356001600160401b031661139b565b61027f6113b6565b61027f6004803603602081101561061f57600080fd5b50356001600160401b03166113bc565b61044c6004803603602081101561064557600080fd5b50356001600160401b03166113e0565b61052a611422565b6106656115c9565b604080516001600160401b039092168252519081900360200190f35b6106656115d8565b6102d26115ee565b61027f61160d565b610261611613565b61027f611618565b606481565b662386f26fc1000081565b60055481565b600181565b61100181565b603881565b600881565b61200081565b604080516020808252818301909252606091829190602082018180368337505060045460208301525090506000805b60208160ff16101561075057828160ff168151811061072457fe5b01602001516001600160f81b0319161561074357600190910190610748565b610750565b600101610709565b5060608160ff166040519080825280601f01601f191660200182016040528015610781576020820181803683370190505b50905060005b8260ff168160ff1610156107dd57838160ff16815181106107a457fe5b602001015160f81c60f81b828260ff16815181106107be57fe5b60200101906001600160f81b0319169
08160001a905350600101610787565b50925050505b90565b60016020818152600092835260409283902080548184015460028084015460038501805489516101009982161599909902600019011692909204601f81018790048702880187019098528787526001600160401b0390931696919592949091908301828280156108975780601f1061086c57610100808354040283529160200191610897565b820191906000526020600020905b81548152906001019060200180831161087a57829003601f168201915b5050505050905084565b61100581565b600281565b61100881565b600b81565b600981565b61100781565b61100681565b60005460ff1681565b600081565b60005460ff1661092d576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e7472616374206e6f7420696e69742079657400000000000000604482015290519081900360640190fd5b336110071461096d5760405162461bcd60e51b815260040180806020018281038252602e8152602001806119ea602e913960400191505060405180910390fd5b6109e184848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152601b81527f726577617264466f7256616c696461746f725365744368616e676500000000006020820152915061161e9050565b15610ac05760208114610a255760405162461bcd60e51b815260040180806020018281038252602e815260200180611989602e913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091610a639185858083850183828082843760009201919091525061170592505050565b9050600081118015610a7d5750670de0b6b3a76400008111155b610ab85760405162461bcd60e51b815260040180806020018281038252602f815260200180611a18602f913960400191505060405180910390fd5b600555610afd565b6040805162461bcd60e51b815260206004820152600d60248201526c756e6b6e6f776e20706172616d60981b604482015290519081900360640190fd5b7f6cdb0ac70ab7f2e2d035cca5be60d89906f2dede7648ddbd7402189c1eeed17a848484846040518080602001806020018381038352878782818152602001925080828437600083820152601f01601f191690910184810383528581526020019050858580828437600083820152604051601f909101601f19169092018290039850909650505050505050a150505050565b60045481565b61100281565b6001600160401b031660009081526001602081905260409091200
15490565b60408051630a83aaa960e31b815233600482015290516000916110069163541d554891602480820192602092909190829003018186803b158015610bfd57600080fd5b505afa158015610c11573d6000803e3d6000fd5b505050506040513d6020811015610c2757600080fd5b5051610c7a576040805162461bcd60e51b815260206004820152601f60248201527f746865206d73672073656e646572206973206e6f7420612072656c6179657200604482015290519081900360640190fd5b6001600160401b0382166000908152600260205260409020546001600160a01b031615610cee576040805162461bcd60e51b815260206004820152601c60248201527f63616e27742073796e63206475706c6963617465642068656164657200000000604482015290519081900360640190fd5b6003546001600160401b0390811690831611610d3b5760405162461bcd60e51b8152600401808060200182810382526026815260200180611a476026913960400191505060405180910390fd5b600354600160401b90046001600160401b0316610d56611867565b6001600160401b0382811660009081526001602081815260409283902083516080810185528154909516855280830154858301526002808201548686015260038201805486516101009682161596909602600019011691909104601f81018490048402850184019095528484529093606086019392830182828015610e1c5780601f10610df157610100808354040283529160200191610e1c565b820191906000526020600020905b815481529060010190602001808311610dff57829003601f168201915b50505050508152505090505b836001600160401b0316826001600160401b031610158015610e5957506003546001600160401b0390811690831610155b15610f3a5780516001600160401b0380821660009081526001602081815260409283902083516080810185528154909516855280830154858301526002808201548686015260038201805486516101009682161596909602600019011691909104601f8101849004840285018401909552848452959750939460608601939091830182828015610f2a5780601f10610eff57610100808354040283529160200191610f2a565b820191906000526020600020905b815481529060010190602001808311610f0d57829003601f168201915b5050505050815250509050610e28565b6060810151516110315780516001600160401b03811660009081526001602081815260409283902060030180548451600294821615610100026000190190911693909304601f81018390048302840183019094528383529395509092919083018282801
5610fe95780601f10610fbe57610100808354040283529160200191610fe9565b820191906000526020600020905b815481529060010190602001808311610fcc57829003601f168201915b505050506060830182905250516110315760405162461bcd60e51b81526004018080602001828103825260218152602001806119686021913960400191505060405180910390fd5b6000816060015151608801905060608787905082016040519080825280601f01601f191660200182016040528015611070576020820181803683370190505b509050600061107e8261170a565b905061108c84868386611710565b6110c75760405162461bcd60e51b81526004018080602001828103825260238152602001806119456023913960400191505060405180910390fd5b6000838201915061110d8a8a8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061177c92505050565b9450905061111c818386611786565b8251602001935061112b61188d565b6110008186866064600019fa61114057600080fd5b805194506000600160f81b8616156111cf5750600554604080516309a99b4f60e41b815233600482015260248101929092525160019161100291639a99b4f0916044808201926020929091908290030181600087803b1580156111a257600080fd5b505af11580156111b6573d6000803e3d6000fd5b505050506040513d60208110156111cc57600080fd5b50505b856001600160401b0316955060208201935060006111ef858884156117c7565b90985090506001600160401b03808216908c161461123e5760405162461bcd60e51b81526004018080602001828103825260338152602001806119b76033913960400191505060405180910390fd5b6001600160401b03808c16600081815260026020818152604080842080546001600160a01b031916331790558e86168e529383526001808252928490208d518154961667ffffffffffffffff199096169590951785558c81015192850192909255918b01519183019190915560608a015180518b93926112c59260038501929101906118ac565b50506003546001600160401b03600160401b9091048116908d161115905061130d576003805467ffffffffffffffff60401b1916600160401b6001600160401b038e16021790555b7f4042c1020a8f410fb1c8859d276ab436aeb2c3074960e48467299cf1c966d3b48b8a8a602001518560405180856001600160401b03166001600160401b03168152602001846001600160401b03166001600160401b031681526020018381526020018215151515815260200194505050505060405180910
390a15060019c9b505050505050505050505050565b6002602052600090815260409020546001600160a01b031681565b61100381565b6001600160401b03166000908152600260205260409020546001600160a01b031690565b6001600160401b0381166000908152600260205260408120546001600160a01b031615158061141c57506003546001600160401b038381169116145b92915050565b60005460ff161561147a576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e747261637420616c726561647920696e697400000000000000604482015290519081900360640190fd5b6000806114a16040518061024001604052806102208152602001611a6d610220913961177c565b815160045590925090506114b3611867565b60006114c1848460006117c7565b60008083526001600160401b038281168252600160208181526040938490208651815467ffffffffffffffff191694169390931783558086015191830191909155918401516002820155606084015180519496509294508593909261152d9260038501929101906118ac565b50506003805467ffffffffffffffff19166001600160401b0384811691821767ffffffffffffffff60401b1916600160401b9290920291909117918290556000805460ff19166001179055662386f26fc10000600555602085810151604080519490931684529083015280517f5ac9b37d571677b80957ca05693f371526c602fd08042b416a29fdab7efefa499350918290030190a150505050565b6003546001600160401b031681565b600354600160401b90046001600160401b031681565b6040518061024001604052806102208152602001611a6d610220913981565b61100081565b600381565b61100481565b6000816040516020018082805190602001908083835b602083106116535780518252601f199092019160209182019101611634565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120836040516020018082805190602001908083835b602083106116c15780518252601f1990920191602091820191016116a2565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012014905092915050565b015190565b60200190565b600084606001515182840103925060008061172e876060015161177c565b909250905061173e828683611786565b5050506040840151601f1983810191909152602090940151603f19830152605f1982019290925260045460671982015
2910160871990910152600190565b8051602090910191565b5b602081106117a6578251825260209283019290910190601f1901611787565b915181516020939093036101000a6000190180199091169216919091179052565b6117cf611867565b60088401516028850151604890950180519095600092916117ee611867565b6020810183905260408101829052866118595760008060688a036040519080825280601f01601f191660200182016040528015611832576020820181803683370190505b50606084018190526118439061177c565b909250905061185660208c018383611786565b50505b989297509195505050505050565b604080516080810182526000808252602082018190529181019190915260608082015290565b6040518061100001604052806080906020820280368337509192915050565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106118ed57805160ff191683800117855561191a565b8280016001018555821561191a579182015b8281111561191a5782518255916020019190600101906118ff565b5061192692915061192a565b5090565b6107e391905b80821115611926576000815560010161193056fe6661696c656420746f2073657269616c697a6520636f6e73656e7375732073746174656661696c656420746f206c6f61642076616c696461746f722073657420646174616c656e677468206f6620726577617264466f7256616c696461746f725365744368616e6765206d69736d617463686865616465722068656967687420646f65736e277420657175616c20746f207468652073706563696669656420686569676874746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e7472616374746865206e6577526577617264466f7256616c696461746f725365744368616e6765206f7574206f662072616e676563616e27742073796e6320686561646572206265666f726520696e697469616c48656967687442696e616e63652d436861696e2d5469677269730000000000000000000000000000000006915167cedaf7bbf7df47d932fdda630527ee648562cf3e52c5e5f46156a3a971a4ceb443c53a50d8653ef8cf1e5716da68120fb51b636dc6d111ec3277b098ecd42d49d3769d8a1f78b4c17a965f7a30d4181fabbd1f969f46d3c8e83b5ad4845421d8000000e8d4a510002ba4e81542f437b7ae1f8a35ddb233c789a8dc22734377d9b6d63af1ca403b61000000e8d4a51000df8da8c5abfdb38595391308bb71e5a1e0aabdc1d0cf38315d50d6be939b2606000000e8d4a51000b6619edca4143484800
281d698b70c935e9152ad57b31d85c05f2f79f64b39f3000000e8d4a510009446d14ad86c8d2d74780b0847110001a1c2e252eedfea4753ebbbfce3a22f52000000e8d4a510000353c639f80cc8015944436dab1032245d44f912edc31ef668ff9f4a45cd0599000000e8d4a51000e81d3797e0544c3a718e1f05f0fb782212e248e784c1a851be87e77ae0db230e000000e8d4a510005e3fcda30bd19d45c4b73688da35e7da1fce7c6859b2c1f20ed5202d24144e3e000000e8d4a51000b06a59a2d75bf5d014fce7c999b5e71e7a960870f725847d4ba3235baeaa08ef000000e8d4a510000c910e2fe650e4e01406b3310b489fb60a84bc3ff5c5bee3a56d5898b6a8af32000000e8d4a5100071f2d7b8ec1c8b99a653429b0118cd201f794f409d0fea4d65b1b662f2b00063000000e8d4a51000a264697066735822122032fc162aed7c2a4fe0f40397d327efc38d8aaf5dbdd1720f7f5d8101877da61d64736f6c63430006040033" + }, + "0x0000000000000000000000000000000000001004": { + "balance": "176405560900000000000000000", + "code": "0x60806040526004361061031e5760003560e01c80639a99b4f0116101ab578063bd466461116100f7578063f014847211610095578063fc1a598f1161006f578063fc1a598f14610cb3578063fc3e590814610686578063fd6a687914610ce6578063ff9c0027146107dc57610366565b8063f014847214610c74578063f9a2bbc714610c89578063fa9e915914610c9e57610366565b8063d9e6dae9116100d1578063d9e6dae914610608578063dc927faf14610c35578063e1c7392a14610c4a578063ebf71d5314610c5f57610366565b8063bd46646114610b68578063c81b166214610b9b578063c8509d8114610bb057610366565b8063aa7415f511610164578063b99328c51161013e578063b99328c514610ad2578063b9fd21e314610b0b578063ba35ead614610b20578063bbface1f14610b3557610366565b8063aa7415f5146109ab578063ab51bb96146109f2578063ac43175114610a0757610366565b80639a99b4f01461091e5780639dc0926214610957578063a1a11bf51461096c578063a496fba214610981578063a78abc1614610996578063a7c9f02d1461068657610366565b8063613684751161026a57806375d47a0a116102235780638b87b21f116101fd5780638b87b21f146105875780638eff336c146108b557806396713da9146108f45780639a854bbd1461090957610366565b806375d47a0a146108065780637942fd051461081b578063831d65d11461083057610366565b8063613684751461060857806366dea52a146106865780636e0565201461069
b5780636e47b482146107c757806370fd5bad146107dc57806371d30863146107f157610366565b806343a368b9116102d757806350432d32116102b157806350432d321461061d57806351e806721461063257806359b92789146106475780635d499b1b1461067157610366565b806343a368b9146105c7578063493279b1146105dc5780634bf6c8821461060857610366565b80630bee7a671461036b5780631182b87514610399578063149d14d9146104935780633d713223146104ba5780633dffc3871461058757806343756e5c146105b257610366565b36610366573415610364576040805133815234602082015281517f6c98249d85d88c3753a04a22230f595e4dc8d3dc86c34af35deeeedc861b89db929181900390910190a15b005b600080fd5b34801561037757600080fd5b50610380610cfb565b6040805163ffffffff9092168252519081900360200190f35b3480156103a557600080fd5b5061041e600480360360408110156103bc57600080fd5b60ff8235169190810190604081016020820135600160201b8111156103e057600080fd5b8201836020820111156103f257600080fd5b803590602001918460018302840111600160201b8311171561041357600080fd5b509092509050610d00565b6040805160208082528351818301528351919283929083019185019080838360005b83811015610458578181015183820152602001610440565b50505050905090810190601f1680156104855780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561049f57600080fd5b506104a8610e2e565b60408051918252519081900360200190f35b3480156104c657600080fd5b5061056b600480360360208110156104dd57600080fd5b810190602081018135600160201b8111156104f757600080fd5b82018360208201111561050957600080fd5b803590602001918460018302840111600160201b8311171561052a57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610e34945050505050565b604080516001600160a01b039092168252519081900360200190f35b34801561059357600080fd5b5061059c610e58565b6040805160ff9092168252519081900360200190f35b3480156105be57600080fd5b5061056b610e5d565b3480156105d357600080fd5b506104a8610e63565b3480156105e857600080fd5b506105f1610e6f565b6040805161ffff9092168252519081900360200190f35b34801561061457600080fd5b5061059c610e74565b34801561062957600080fd5b506
104a8610e79565b34801561063e57600080fd5b5061056b610e84565b34801561065357600080fd5b5061056b6004803603602081101561066a57600080fd5b5035610e8a565b34801561067d57600080fd5b506104a8610ea5565b34801561069257600080fd5b5061059c610eae565b6107b3600480360360808110156106b157600080fd5b810190602081018135600160201b8111156106cb57600080fd5b8201836020820111156106dd57600080fd5b803590602001918460208302840111600160201b831117156106fe57600080fd5b919390929091602081019035600160201b81111561071b57600080fd5b82018360208201111561072d57600080fd5b803590602001918460208302840111600160201b8311171561074e57600080fd5b919390929091602081019035600160201b81111561076b57600080fd5b82018360208201111561077d57600080fd5b803590602001918460208302840111600160201b8311171561079e57600080fd5b91935091503567ffffffffffffffff16610eb3565b604080519115158252519081900360200190f35b3480156107d357600080fd5b5061056b611388565b3480156107e857600080fd5b5061059c61138e565b3480156107fd57600080fd5b506104a8611393565b34801561081257600080fd5b5061056b611399565b34801561082757600080fd5b5061059c61139f565b34801561083c57600080fd5b506103646004803603604081101561085357600080fd5b60ff8235169190810190604081016020820135600160201b81111561087757600080fd5b82018360208201111561088957600080fd5b803590602001918460018302840111600160201b831117156108aa57600080fd5b5090925090506113a4565b3480156108c157600080fd5b50610364600480360360608110156108d857600080fd5b508035906001600160a01b0360208201351690604001356114ed565b34801561090057600080fd5b5061059c611573565b34801561091557600080fd5b506104a8611578565b34801561092a57600080fd5b506104a86004803603604081101561094157600080fd5b506001600160a01b038135169060200135611584565b34801561096357600080fd5b5061056b6116c2565b34801561097857600080fd5b5061056b6116c8565b34801561098d57600080fd5b5061059c6116ce565b3480156109a257600080fd5b506107b36116d3565b6107b3600480360360808110156109c157600080fd5b5080356001600160a01b03908116916020810135909116906040810135906060013567ffffffffffffffff166116dc565b3480156109fe57600080fd5b506103806116ce565b348015610a1357600080fd5
b5061036460048036036040811015610a2a57600080fd5b810190602081018135600160201b811115610a4457600080fd5b820183602082011115610a5657600080fd5b803590602001918460018302840111600160201b83111715610a7757600080fd5b919390929091602081019035600160201b811115610a9457600080fd5b820183602082011115610aa657600080fd5b803590602001918460018302840111600160201b83111715610ac757600080fd5b509092509050611d9d565b348015610ade57600080fd5b5061036460048036036040811015610af557600080fd5b50803590602001356001600160a01b031661200c565b348015610b1757600080fd5b506104a8612082565b348015610b2c57600080fd5b506104a861208c565b348015610b4157600080fd5b506104a860048036036020811015610b5857600080fd5b50356001600160a01b0316612092565b348015610b7457600080fd5b506104a860048036036020811015610b8b57600080fd5b50356001600160a01b03166120a4565b348015610ba757600080fd5b5061056b6120bf565b348015610bbc57600080fd5b5061036460048036036040811015610bd357600080fd5b60ff8235169190810190604081016020820135600160201b811115610bf757600080fd5b820183602082011115610c0957600080fd5b803590602001918460018302840111600160201b83111715610c2a57600080fd5b5090925090506120c5565b348015610c4157600080fd5b5061056b612195565b348015610c5657600080fd5b5061036461219b565b348015610c6b57600080fd5b5061059c61223b565b348015610c8057600080fd5b5061059c612240565b348015610c9557600080fd5b5061056b612245565b348015610caa57600080fd5b506104a861224b565b348015610cbf57600080fd5b5061041e60048036036020811015610cd657600080fd5b50356001600160a01b0316612251565b348015610cf257600080fd5b5061056b612378565b606481565b60005460609060ff16610d48576040805162461bcd60e51b815260206004820152601960248201526000805160206147bf833981519152604482015290519081900360640190fd5b3361200014610d885760405162461bcd60e51b815260040180806020018281038252602f81526020018061476d602f913960400191505060405180910390fd5b60ff841660021415610dda57610dd383838080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061237e92505050565b9050610e27565b6040805162461bcd60e51b815260206004820152601860248201527f756e72656
36f676e697a65642073796e207061636b6167650000000000000000604482015290519081900360640190fd5b9392505050565b60015490565b6020818101516000908152600490915260409020546001600160a01b03165b919050565b600181565b61100181565b670de0b6b3a764000081565b603881565b600881565b66071afd498d000081565b61200081565b6000908152600460205260409020546001600160a01b031690565b6402540be40081565b600381565b6000805460ff16610ef9576040805162461bcd60e51b815260206004820152601960248201526000805160206147bf833981519152604482015290519081900360640190fd5b868514610f375760405162461bcd60e51b815260040180806020018281038252603b815260200180614732603b913960400191505060405180910390fd5b868314610f755760405162461bcd60e51b815260040180806020018281038252603f815260200180614605603f913960400191505060405180910390fd5b426078018267ffffffffffffffff161015610fc15760405162461bcd60e51b81526004018080602001828103825260248152602001806144f56024913960400191505060405180910390fd5b6402540be4003406156110055760405162461bcd60e51b81526004018080602001828103825260408152602001806148356040913960400191505060405180910390fd5b60408051868152602080880282010190915285906000908190606090848015611038578160200160208202803683370190505b50905060005b84811015611113576402540be4008b8b8381811061105857fe5b905060200201358161106657fe5b06156110a35760405162461bcd60e51b815260040180806020018281038252603c815260200180614644603c913960400191505060405180910390fd5b6110c88b8b838181106110b257fe5b90506020020135856124a290919063ffffffff16565b93506110f46402540be4008c8c848181106110df57fe5b905060200201356124fc90919063ffffffff16565b82828151811061110057fe5b602090810291909101015260010161103e565b506001546111389061112b908663ffffffff61253e16565b849063ffffffff6124a216565b3410156111765760405162461bcd60e51b81526004018080602001828103825260568152602001806147df6056913960600191505060405180910390fd5b611186348463ffffffff61259716565b915061119061434e565b6040518060c001604052806221272160e91b60001b815260200160006001600160a01b031681526020018381526020018e8e80806020026020016040519081016040528093929190818152602001838360200
2808284376000920191909152505050908252506040805160208c810282810182019093528c82529283019290918d918d91829185019084908082843760009201919091525050509082525067ffffffffffffffff8916602090910152905061200063f7a251d76003611254846125d9565b611269876402540be40063ffffffff6124fc16565b6040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b838110156112c75781810151838201526020016112af565b50505050905090810190601f1680156112f45780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b15801561131557600080fd5b505af1158015611329573d6000803e3d6000fd5b505060408051600081523360208201528082018890526060810187905290517f74eab09b0e53aefc23f2e1b16da593f95c2dd49c6f5a23720463d10d9c330b2a9350908190036080019150a15060019c9b505050505050505050505050565b61100581565b600281565b60015481565b61100881565b600b81565b60005460ff166113e9576040805162461bcd60e51b815260206004820152601960248201526000805160206147bf833981519152604482015290519081900360640190fd5b33612000146114295760405162461bcd60e51b815260040180806020018281038252602f81526020018061476d602f913960400191505060405180910390fd5b60ff8316600314156114795761147482828080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061289492505050565b6114e8565b7f41ce201247b6ceb957dcdb217d0b8acb50b9ea0e12af9af4f5e7f38902101605838383604051808460ff1660ff168152602001806020018281038252848482818152602001925080828437600083820152604051601f909101601f1916909201829003965090945050505050a15b505050565b336110081461152d5760405162461bcd60e51b815260040180806020018281038252602381526020018061479c6023913960400191505060405180910390fd5b600083815260046020908152604080832080546001600160a01b039096166001600160a01b03199096168617905593825260038152838220949094556002909352912055565b600981565b677ce66c50e284000081565b6000805460ff166115ca576040805162461bcd60e51b815260206004820152601960248201526000805160206147bf833981519152604482015290519081900360640190fd5b3
36110051461160a5760405162461bcd60e51b815260040180806020018281038252602f815260200180614468602f913960400191505060405180910390fd5b6000478310611619574761161b565b825b9050670de0b6b3a76400008111156116375760009150506116bc565b80156116b9576040516001600160a01b0385169082156108fc029083906000818181858888f19350505050158015611673573d6000803e3d6000fd5b50604080516001600160a01b03861681526020810183905281517ff8b71c64315fc33b2ead2adfa487955065152a8ac33d9d5193aafd7f45dc15a0929181900390910190a15b90505b92915050565b61100781565b61100681565b600081565b60005460ff1681565b6000805460ff16611722576040805162461bcd60e51b815260206004820152601960248201526000805160206147bf833981519152604482015290519081900360640190fd5b426078018267ffffffffffffffff16101561176e5760405162461bcd60e51b81526004018080602001828103825260248152602001806144f56024913960400191505060405180910390fd5b6402540be4003406156117b25760405162461bcd60e51b81526004018080602001828103825260408152602001806148356040913960400191505060405180910390fd5b600080806001600160a01b038816611891576001546117d890879063ffffffff6124a216565b3410156118165760405162461bcd60e51b815260040180806020018281038252606181526020018061457f6061913960800191505060405180910390fd5b6402540be40086061561185a5760405162461bcd60e51b815260040180806020018281038252603c815260200180614644603c913960400191505060405180910390fd5b61186a348763ffffffff61259716565b9050611881866402540be40063ffffffff6124fc16565b6221272160e91b93509150611b34565b6001600160a01b0388166000908152600360205260409020549250826118e85760405162461bcd60e51b815260040180806020018281038252603181526020018061454e6031913960400191505060405180910390fd5b6001543410156119295760405162461bcd60e51b815260040180806020018281038252603f8152602001806146a1603f913960400191505060405180910390fd5b506001600160a01b038716600090815260026020526040902054349060088111158061197457506008811180156119745750611972876007198301600a0a63ffffffff6128f016565b155b6119af5760405162461bcd60e51b815260040180806020018281038252603c815260200180614644603c913960400191505060405180910390fd5b6119b98
782612932565b92506119c484612972565b15611a0c576305f5e100831015611a0c5760405162461bcd60e51b815260040180806020018281038252603a815260200180614497603a913960400191505060405180910390fd5b600881101580611a265750600881108015611a2657508683115b611a615760405162461bcd60e51b81526004018080602001828103825260258152602001806145e06025913960400191505060405180910390fd5b677ce66c50e2840000831115611aa85760405162461bcd60e51b81526004018080602001828103825260358152602001806145196035913960400191505060405180910390fd5b604080516323b872dd60e01b81523360048201523060248201526044810189905290516001600160a01b038b16916323b872dd9160648083019260209291908290030181600087803b158015611afd57600080fd5b505af1158015611b11573d6000803e3d6000fd5b505050506040513d6020811015611b2757600080fd5b5051611b3257600080fd5b505b611b3c61434e565b6040805160c0810182528581526001600160a01b038b166020820152815160018082528184018452919283019181602001602082028036833750505081526040805160018082528183019092526020928301929091908083019080368337505050815260408051600180825281830190925260209283019290919080830190803683370190505081526020018767ffffffffffffffff168152509050828160400151600081518110611bea57fe5b602002602001018181525050878160600151600081518110611c0857fe5b60200260200101906001600160a01b031690816001600160a01b031681525050338160800151600081518110611c3a57fe5b6001600160a01b039092166020928302919091019091015261200063f7a251d76003611c65846125d9565b611c7a866402540be40063ffffffff6124fc16565b6040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b83811015611cd8578181015183820152602001611cc0565b50505050905090810190601f168015611d055780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b158015611d2657600080fd5b505af1158015611d3a573d6000803e3d6000fd5b5050604080516001600160a01b038d1681523360208201528082018b90526060810186905290517f74eab09b0e53aefc23f2e1b16da593f95c2dd49c6f5a23720463d10d9c330b2a9350908190036080019150a15060019897505050505050505
0565b3361100714611ddd5760405162461bcd60e51b815260040180806020018281038252602e8152602001806146e0602e913960400191505060405180910390fd5b60208114611e32576040805162461bcd60e51b815260206004820152601b60248201527f65787065637465642076616c7565206c656e6774682069732033320000000000604482015290519081900360640190fd5b606084848080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050604080516020601f8801819004810282018101909252868152939450606093925086915085908190840183828082843760009201919091525050505060208301519091506772656c617946656560c01b811415611f3a576020820151670de0b6b3a76400008111801590611ee157506402540be4008106155b611f32576040805162461bcd60e51b815260206004820152601960248201527f7468652072656c6179466565206f7574206f662072616e676500000000000000604482015290519081900360640190fd5b600155611f77565b6040805162461bcd60e51b815260206004820152600d60248201526c756e6b6e6f776e20706172616d60981b604482015290519081900360640190fd5b7f6cdb0ac70ab7f2e2d035cca5be60d89906f2dede7648ddbd7402189c1eeed17a878787876040518080602001806020018381038352878782818152602001925080828437600083820152601f01601f191690910184810383528581526020019050858580828437600083820152604051601f909101601f19169092018290039850909650505050505050a150505050505050565b336110081461204c5760405162461bcd60e51b815260040180806020018281038252602381526020018061479c6023913960400191505060405180910390fd5b600091825260046020908152604080842080546001600160a01b03191690556001600160a01b0392909216835260039052812055565b6221272160e91b81565b61c35081565b60026020526000908152604090205481565b6001600160a01b031660009081526003602052604090205490565b61100281565b60005460ff1661210a576040805162461bcd60e51b815260206004820152601960248201526000805160206147bf833981519152604482015290519081900360640190fd5b336120001461214a5760405162461bcd60e51b815260040180806020018281038252602f81526020018061476d602f913960400191505060405180910390fd5b60ff8316600314156114795761147482828080601f01602080910402602001604051908101604052809392919081815260200183838082843
7600092019190915250612a7892505050565b61100381565b60005460ff16156121f3576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e747261637420616c726561647920696e697400000000000000604482015290519081900360640190fd5b66071afd498d000060019081556000808052600260205260127fac33ff75c19e70fe83507db0d683fd3465c996598dc972688b7ace676c89077b55805460ff19169091179055565b600481565b600581565b61100081565b61271081565b6001600160a01b03811660009081526003602090815260409182902054825182815280840190935260609290918391906020820181803683375050506020810183905290506000805b60208160ff1610156122e157828160ff16815181106122b557fe5b01602001516001600160f81b031916156122d4576001909101906122d9565b6122e1565b60010161229a565b5060608160ff166040519080825280601f01601f191660200182016040528015612312576020820181803683370190505b50905060005b8260ff168160ff16101561236e57838160ff168151811061233557fe5b602001015160f81c60f81b828260ff168151811061234f57fe5b60200101906001600160f81b031916908160001a905350600101612318565b5095945050505050565b61100481565b606061238861439a565b600061239384612b76565b91509150806123e9576040805162461bcd60e51b815260206004820152601f60248201527f756e7265636f676e697a6564207472616e73666572496e207061636b61676500604482015290519081900360640190fd5b60006123f483612cb5565b905063ffffffff811615612488576040808401516020808601516001600160a01b0316600090815260029091529182205461242f9190612932565b90506124396143cf565b60405180608001604052808660000151815260200183815260200186608001516001600160a01b031681526020018463ffffffff16815250905061247c81613002565b95505050505050610e53565b50506040805160008152602081019091529150610e539050565b6000828201838110156116b9576040805162461bcd60e51b815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b60006116b983836040518060400160405280601a81526020017f536166654d6174683a206469766973696f6e206279207a65726f0000000000008152506130de565b60008261254d575060006116bc565b8282028284828161255a57fe5b04146116b95760405162461bcd60e51b8
1526004018080602001828103825260218152602001806146806021913960400191505060405180910390fd5b60006116b983836040518060400160405280601e81526020017f536166654d6174683a207375627472616374696f6e206f766572666c6f770000815250613180565b60408051600680825260e08201909252606091829190816020015b60608152602001906001900390816125f45750508351909150612616906131da565b8160008151811061262357fe5b602002602001018190525061264483602001516001600160a01b03166131ed565b8160018151811061265157fe5b60200260200101819052506000836040015151905060608160405190808252806020026020018201604052801561269c57816020015b60608152602001906001900390816126875790505b50905060005b828110156126e9576126ca866040015182815181106126bd57fe5b60200260200101516131da565b8282815181106126d657fe5b60209081029190910101526001016126a2565b506126f381613210565b8360028151811061270057fe5b602002602001018190525060608260405190808252806020026020018201604052801561274157816020015b606081526020019060019003908161272c5790505b50905060005b83811015612797576127788760600151828151811061276257fe5b60200260200101516001600160a01b03166131ed565b82828151811061278457fe5b6020908102919091010152600101612747565b506127a181613210565b846003815181106127ae57fe5b60200260200101819052506060836040519080825280602002602001820160405280156127ef57816020015b60608152602001906001900390816127da5790505b50905060005b8481101561282f576128108860800151828151811061276257fe5b82828151811061281c57fe5b60209081029190910101526001016127f5565b5061283981613210565b8560048151811061284657fe5b60200260200101819052506128688760a0015167ffffffffffffffff166131da565b8560058151811061287557fe5b602002602001018190525061288985613210565b979650505050505050565b61289c6143f6565b60006128a78361329a565b91509150806128e75760405162461bcd60e51b815260040180806020018281038252602481526020018061470e6024913960400191505060405180910390fd5b6114e882613465565b60006116b983836040518060400160405280601881526020017f536166654d6174683a206d6f64756c6f206279207a65726f00000000000000008152506138e9565b6000600882111561295b57612954836007198401600a0a63ffffffff6124fc165
65b90506116bc565b6116b9836008849003600a0a63ffffffff61253e16565b604080516020808252818301909252600091606091906020820181803683375050506020810184905290506000805b60208160ff1610156129e857828160ff16815181106129bc57fe5b01602001516001600160f81b031916156129db576001909101906129e0565b6129e8565b6001016129a1565b50600860ff82161015612a0057600092505050610e53565b816005820360ff1681518110612a1257fe5b6020910101516001600160f81b031916602d60f81b14612a3757600092505050610e53565b816001820360ff1681518110612a4957fe5b6020910101516001600160f81b031916604d60f81b14612a6e57600092505050610e53565b5060019392505050565b612a8061434e565b6000612a8b8361394b565b9150915080612acb5760405162461bcd60e51b81526004018080602001828103825260248152602001806144d16024913960400191505060405180910390fd5b612ad36143f6565b602080840180516001600160a01b0390811684526040808701518585015291511660009081526002909252812054905b846040015151811015612b5457612b3185604001518281518110612b2357fe5b602002602001015183613bc6565b85604001518281518110612b4157fe5b6020908102919091010152600101612b03565b506080840151604083015260056060830152612b6f82613465565b5050505050565b612b7e61439a565b6000612b8861439a565b612b9061442d565b612ba1612b9c86613bff565b613c24565b90506000805b612bb083613c6e565b15612ca85780612bd257612bcb612bc684613c8f565b613cdd565b8452612ca0565b8060011415612bff57612bec612be784613c8f565b613d94565b6001600160a01b03166020850152612ca0565b8060021415612c1e57612c14612bc684613c8f565b6040850152612ca0565b8060031415612c4657612c33612be784613c8f565b6001600160a01b03166060850152612ca0565b8060041415612c6e57612c5b612be784613c8f565b6001600160a01b03166080850152612ca0565b8060051415612c9b57612c83612bc684613c8f565b67ffffffffffffffff1660a085015260019150612ca0565b612ca8565b600101612ba7565b5091935090915050915091565b60208101516000906001600160a01b0316612dec578160a0015167ffffffffffffffff16421115612ce857506001610e53565b8160400151471015612cfc57506003610e53565b606082015160408084015190516000926001600160a01b0316916127109184818181858888f193505050503d8060008114612d53576040519150601f19603
f3d011682016040523d82523d6000602084013e612d58565b606091505b5050905080612d6b575060049050610e53565b7f471eb9cc1ffe55ffadf15b32595415eb9d80f22e761d24bd6dffc607e1284d5983602001518460600151856040015160405180846001600160a01b03166001600160a01b03168152602001836001600160a01b03166001600160a01b03168152602001828152602001935050505060405180910390a15060009050610e53565b8160a0015167ffffffffffffffff16421115612e0a57506001610e53565b81516020808401516001600160a01b031660009081526003909152604090205414612e3757506002610e53565b602080830151604080516370a0823160e01b815230600482015290516000936001600160a01b03909316926370a082319261c3509260248083019392829003018187803b158015612e8757600080fd5b5086fa158015612e9b573d6000803e3d6000fd5b50505050506040513d6020811015612eb257600080fd5b50516040840151909150811015612ecd575060039050610e53565b600083602001516001600160a01b031663a9059cbb61c350866060015187604001516040518463ffffffff1660e01b815260040180836001600160a01b03166001600160a01b0316815260200182815260200192505050602060405180830381600088803b158015612f3e57600080fd5b5087f1158015612f52573d6000803e3d6000fd5b50505050506040513d6020811015612f6957600080fd5b505190508015612ff6577f471eb9cc1ffe55ffadf15b32595415eb9d80f22e761d24bd6dffc607e1284d5984602001518560600151866040015160405180846001600160a01b03166001600160a01b03168152602001836001600160a01b03166001600160a01b03168152602001828152602001935050505060405180910390a15060009150610e539050565b5060059150610e539050565b60408051600480825260a08201909252606091829190816020015b606081526020019060019003908161301d575050835190915061303f906131da565b8160008151811061304c57fe5b602002602001018190525061306483602001516131da565b8160018151811061307157fe5b602002602001018190525061309283604001516001600160a01b03166131ed565b8160028151811061309f57fe5b60200260200101819052506130bd836060015163ffffffff166131da565b816003815181106130ca57fe5b6020026020010181905250610e2781613210565b6000818361316a5760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b8381101561312f578181015
183820152602001613117565b50505050905090810190601f16801561315c5780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b50600083858161317657fe5b0495945050505050565b600081848411156131d25760405162461bcd60e51b815260206004820181815283516024840152835190928392604490910191908501908083836000831561312f578181015183820152602001613117565b505050900390565b60606116bc6131e883613dae565b613e94565b60408051600560a21b8318601482015260348101909152606090610e2781613e94565b60608151600014156132315750604080516000815260208101909152610e53565b60608260008151811061324057fe5b602002602001015190506000600190505b8351811015613281576132778285838151811061326a57fe5b6020026020010151613ee6565b9150600101613251565b50610e27613294825160c060ff16613f63565b82613ee6565b6132a26143f6565b60006132ac6143f6565b6132b461442d565b6132c0612b9c86613bff565b90506000805b6132cf83613c6e565b15612ca857806132f5576132e5612be784613c8f565b6001600160a01b0316845261345d565b806001141561339657606061331161330c85613c8f565b61405b565b9050805160405190808252806020026020018201604052801561333e578160200160208202803683370190505b50602086015260005b815181101561338f5761336c82828151811061335f57fe5b6020026020010151613cdd565b8660200151828151811061337c57fe5b6020908102919091010152600101613347565b505061345d565b80600214156134385760606133ad61330c85613c8f565b905080516040519080825280602002602001820160405280156133da578160200160208202803683370190505b50604086015260005b815181101561338f576134088282815181106133fb57fe5b6020026020010151613d94565b8660400151828151811061341857fe5b6001600160a01b03909216602092830291909101909101526001016133e3565b8060031415612c9b5761344d612bc684613c8f565b63ffffffff166060850152600191505b6001016132c6565b80516001600160a01b031661368f5760005b8160200151518110156136895760008260400151828151811061349657fe5b60200260200101516001600160a01b0316612710846020015184815181106134ba57fe5b60209081029190910101516040516000818181858888f193505050503d8060008114613502576040519150601f19603f3d011682016040523d82523d6000602084013e613507565b606091505b5050905
0806135ca577f203f9f67a785f4f81be4d48b109aa0c498d1bc8097ecc2627063f480cc5fe73e83600001518460400151848151811061354657fe5b60200260200101518560200151858151811061355e57fe5b6020026020010151866060015160405180856001600160a01b03166001600160a01b03168152602001846001600160a01b03166001600160a01b031681526020018381526020018263ffffffff1663ffffffff16815260200194505050505060405180910390a1613680565b7fd468d4fa5e8fb4adc119b29a983fd0785e04af5cb8b7a3a69a47270c54b6901a83600001518460400151848151811061360057fe5b60200260200101518560200151858151811061361857fe5b6020026020010151866060015160405180856001600160a01b03166001600160a01b03168152602001846001600160a01b03166001600160a01b031681526020018381526020018263ffffffff1663ffffffff16815260200194505050505060405180910390a15b50600101613477565b506138e6565b60005b8160200151518110156138e457600082600001516001600160a01b031663a9059cbb61c350856040015185815181106136c757fe5b6020026020010151866020015186815181106136df57fe5b60200260200101516040518463ffffffff1660e01b815260040180836001600160a01b03166001600160a01b0316815260200182815260200192505050602060405180830381600088803b15801561373657600080fd5b5087f115801561374a573d6000803e3d6000fd5b50505050506040513d602081101561376157600080fd5b505190508015613825577fd468d4fa5e8fb4adc119b29a983fd0785e04af5cb8b7a3a69a47270c54b6901a8360000151846040015184815181106137a157fe5b6020026020010151856020015185815181106137b957fe5b6020026020010151866060015160405180856001600160a01b03166001600160a01b03168152602001846001600160a01b03166001600160a01b031681526020018381526020018263ffffffff1663ffffffff16815260200194505050505060405180910390a16138db565b7f203f9f67a785f4f81be4d48b109aa0c498d1bc8097ecc2627063f480cc5fe73e83600001518460400151848151811061385b57fe5b60200260200101518560200151858151811061387357fe5b6020026020010151866060015160405180856001600160a01b03166001600160a01b03168152602001846001600160a01b03166001600160a01b031681526020018381526020018263ffffffff1663ffffffff16815260200194505050505060405180910390a15b50600101613692565b505b50565b6000818361393857604
05162461bcd60e51b815260206004820181815283516024840152835190928392604490910191908501908083836000831561312f578181015183820152602001613117565b5082848161394257fe5b06949350505050565b61395361434e565b600061395d61434e565b61396561442d565b613971612b9c86613bff565b90506000805b61398083613c6e565b15613bb8578061399d57613996612bc684613c8f565b8452613bb0565b80600114156139c5576139b2612be784613c8f565b6001600160a01b03166020850152613bb0565b8060021415613a545760606139dc61330c85613c8f565b90508051604051908082528060200260200182016040528015613a09578160200160208202803683370190505b50604086015260005b8151811015613a4d57613a2a82828151811061335f57fe5b86604001518281518110613a3a57fe5b6020908102919091010152600101613a12565b5050613bb0565b8060031415613ae9576060613a6b61330c85613c8f565b90508051604051908082528060200260200182016040528015613a98578160200160208202803683370190505b50606086015260005b8151811015613a4d57613ab98282815181106133fb57fe5b86606001518281518110613ac957fe5b6001600160a01b0390921660209283029190910190910152600101613aa1565b8060041415613b7e576060613b0061330c85613c8f565b90508051604051908082528060200260200182016040528015613b2d578160200160208202803683370190505b50608086015260005b8151811015613a4d57613b4e8282815181106133fb57fe5b86608001518281518110613b5e57fe5b6001600160a01b0390921660209283029190910190910152600101613b36565b8060051415613bab57613b93612bc684613c8f565b67ffffffffffffffff1660a085015260019150613bb0565b613bb8565b600101613977565b509195600195509350505050565b60006008821115613be857612954836007198401600a0a63ffffffff61253e16565b6116b9836008849003600a0a63ffffffff6124fc16565b613c0761444d565b506040805180820190915281518152602082810190820152919050565b613c2c61442d565b613c358261412c565b613c3e57600080fd5b6000613c4d836020015161415c565b60208085015160408051808201909152868152920190820152915050919050565b6000613c7861444d565b505080518051602091820151919092015191011190565b613c9761444d565b613ca082613c6e565b613ca957600080fd5b60208201516000613cb9826141bf565b8083016020958601526040805180820190915290815293840191909152509091905
0565b805160009015801590613cf257508151602110155b613cfb57600080fd5b6000613d0a836020015161415c565b90508083600001511015613d65576040805162461bcd60e51b815260206004820152601a60248201527f6c656e677468206973206c657373207468616e206f6666736574000000000000604482015290519081900360640190fd5b825160208085015183018051928490039291831015613d8b57826020036101000a820491505b50949350505050565b8051600090601514613da557600080fd5b6116bc82613cdd565b604080516020808252818301909252606091829190602082018180368337505050602081018490529050600067ffffffffffffffff198416613df257506018613e16565b6fffffffffffffffffffffffffffffffff198416613e1257506010613e16565b5060005b6020811015613e4c57818181518110613e2b57fe5b01602001516001600160f81b03191615613e4457613e4c565b600101613e16565b60008160200390506060816040519080825280601f01601f191660200182016040528015613e81576020820181803683370190505b5080830196909652508452509192915050565b606081516001148015613ec65750607f60f81b82600081518110613eb457fe5b01602001516001600160f81b03191611155b15613ed2575080610e53565b6116bc613ee48351608060ff16613f63565b835b6060806040519050835180825260208201818101602087015b81831015613f17578051835260209283019201613eff565b50855184518101855292509050808201602086015b81831015613f44578051835260209283019201613f2c565b508651929092011591909101601f01601f191660405250905092915050565b6060680100000000000000008310613fb3576040805162461bcd60e51b815260206004820152600e60248201526d696e70757420746f6f206c6f6e6760901b604482015290519081900360640190fd5b6040805160018082528183019092526060916020820181803683370190505090506037841161400d5782840160f81b81600081518110613fef57fe5b60200101906001600160f81b031916908160001a90535090506116bc565b606061401885613dae565b90508381510160370160f81b8260008151811061403157fe5b60200101906001600160f81b031916908160001a9053506140528282613ee6565b95945050505050565b60606140668261412c565b61406f57600080fd5b600061407a836142f2565b90506060816040519080825280602002602001820160405280156140b857816020015b6140a561444d565b81526020019060019003908161409d5790505b50905060006140ca85602
0015161415c565b60208601510190506000805b84811015614121576140e7836141bf565b915060405180604001604052808381526020018481525084828151811061410a57fe5b6020908102919091010152918101916001016140d6565b509195945050505050565b805160009061413d57506000610e53565b6020820151805160001a9060c0821015612a6e57600092505050610e53565b8051600090811a6080811015614176576000915050610e53565b60b8811080614191575060c08110801590614191575060f881105b156141a0576001915050610e53565b60c08110156141b45760b519019050610e53565b60f519019050610e53565b80516000908190811a60808110156141da57600191506142eb565b60b88110156141ef57607e19810191506142eb565b60c081101561426957600060b78203600186019550806020036101000a865104915060018101820193505080831015614263576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b506142eb565b60f881101561427e5760be19810191506142eb565b600060f78203600186019550806020036101000a8651049150600181018201935050808310156142e9576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b505b5092915050565b805160009061430357506000610e53565b60008090506000614317846020015161415c565b602085015185519181019250015b8082101561434557614336826141bf565b60019093019290910190614325565b50909392505050565b6040518060c001604052806000801916815260200160006001600160a01b03168152602001606081526020016060815260200160608152602001600067ffffffffffffffff1681525090565b6040805160c081018252600080825260208201819052918101829052606081018290526080810182905260a081019190915290565b60408051608081018252600080825260208201819052918101829052606081019190915290565b604051806080016040528060006001600160a01b031681526020016060815260200160608152602001600063ffffffff1681525090565b604051806040016040528061444061444d565b8152602001600081525090565b60405180604001604052806000815260200160008152509056fe746865206d6573736167652073656e646572206d75737420626520696e63656e746976697a6520636f6e7472616374466f72206d696e69546f6b656e2c2074686520747
2616e7366657220616d6f756e74206d757374206e6f74206265206c657373207468616e2031756e7265636f676e697a6564207472616e736665724f75742073796e207061636b61676565787069726554696d65206d7573742062652074776f206d696e75746573206c61746572616d6f756e7420697320746f6f206c617267652c20657863656564206d6178696d756d206265703220746f6b656e20616d6f756e7474686520636f6e747261637420686173206e6f74206265656e20626f756e6420746f20616e79206265703220746f6b656e726563656976656420424e4220616d6f756e742073686f756c64206265206e6f206c657373207468616e207468652073756d206f66207472616e736665724f757420424e4220616d6f756e7420616e64206d696e696d756d2072656c6179466565616d6f756e7420697320746f6f206c617267652c2075696e74323536206f766572666c6f774c656e677468206f6620726563697069656e74416464727320646f65736e277420657175616c20746f206c656e677468206f6620726566756e644164647273696e76616c6964207472616e7366657220616d6f756e743a20707265636973696f6e206c6f737320696e20616d6f756e7420636f6e76657273696f6e536166654d6174683a206d756c7469706c69636174696f6e206f766572666c6f77726563656976656420424e4220616d6f756e742073686f756c64206265206e6f206c657373207468616e20746865206d696e696d756d2072656c6179466565746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e7472616374756e7265636f676e697a6564207472616e736665724f75742061636b207061636b6167654c656e677468206f6620726563697069656e74416464727320646f65736e277420657175616c20746f206c656e677468206f6620616d6f756e7473746865206d6573736167652073656e646572206d7573742062652063726f737320636861696e20636f6e7472616374746865206d73672073656e646572206d75737420626520746f6b656e4d616e6167657274686520636f6e7472616374206e6f7420696e69742079657400000000000000726563656976656420424e4220616d6f756e742073686f756c64206265206e6f206c657373207468616e207468652073756d206f66207472616e7366657220424e4220616d6f756e7420616e642072656c6179466565696e76616c696420726563656976656420424e4220616d6f756e743a20707265636973696f6e206c6f737320696e20616d6f756e7420636f6e76657273696f6ea26469706673582212200ed3b493edce71fd9235e1f674e51718f846ead6a
f3ed00debad31d4e3d4dc5f64736f6c63430006040033" + }, + "0x0000000000000000000000000000000000001005": { + "balance": "0x0", + "code": "0x6080604052600436106102765760003560e01c80637e146cc51161014f578063af400681116100c1578063e75d72c71161007a578063e75d72c714610795578063e89a3020146107c8578063f9a2bbc7146107f2578063fc3e590814610807578063fd6a68791461081c578063fdd31fcd146108315761027d565b8063af400681146106ed578063bd4cc83014610717578063c81b166214610741578063dc927faf14610756578063dcae76ab1461076b578063e1c7392a146107805761027d565b8063a3c3c0ad11610113578063a3c3c0ad146105b3578063a78abc16146105c8578063a7c6a59d146105dd578063ab51bb96146105f2578063ac43175114610607578063ace9fcc2146106d85761027d565b80637e146cc51461052c578063930e1b091461054157806396713da9146105745780639dc0926214610589578063a1a11bf51461059e5761027d565b806343756e5c116101e85780636e47b482116101ac5780636e47b482146104645780636f93d2e61461047957806370fd5bad146104d857806374f2272d146104ed57806375d47a0a146105025780637942fd05146105175761027d565b806343756e5c146103e4578063493279b1146103f95780634bf6c8821461042557806351e806721461043a578063541333071461044f5761027d565b806312950c461161023a57806312950c46146103165780631b20087c1461032b5780631c643312146103405780633a975612146102825780633dffc3871461038657806340bb43c0146103b15761027d565b8063081e9d131461028257806308f2ec06146102a9578063093f2fc4146102be5780630bee7a67146102d357806310e06a76146103015761027d565b3661027d57005b600080fd5b34801561028e57600080fd5b50610297610864565b60408051918252519081900360200190f35b3480156102b557600080fd5b50610297610869565b3480156102ca57600080fd5b5061029761086e565b3480156102df57600080fd5b506102e8610873565b6040805163ffffffff9092168252519081900360200190f35b34801561030d57600080fd5b50610297610878565b34801561032257600080fd5b5061029761087e565b34801561033757600080fd5b50610297610884565b34801561034c57600080fd5b5061036a6004803603602081101561036357600080fd5b503561088a565b604080516001600160a01b039092168252519081900360200190f35b34801561039257600080fd5b5061039b610864565b6040805160ff90921
68252519081900360200190f35b3480156103bd57600080fd5b50610297600480360360208110156103d457600080fd5b50356001600160a01b03166108b1565b3480156103f057600080fd5b5061036a6108c3565b34801561040557600080fd5b5061040e6108c9565b6040805161ffff9092168252519081900360200190f35b34801561043157600080fd5b5061039b6108ce565b34801561044657600080fd5b5061036a6108d3565b34801561045b57600080fd5b50610297610873565b34801561047057600080fd5b5061036a6108d9565b34801561048557600080fd5b506104c46004803603608081101561049c57600080fd5b506001600160a01b0381358116916020810135909116906040810135906060013515156108df565b604080519115158252519081900360200190f35b3480156104e457600080fd5b5061039b610cad565b3480156104f957600080fd5b50610297610cb2565b34801561050e57600080fd5b5061036a610cb8565b34801561052357600080fd5b5061039b610cbe565b34801561053857600080fd5b50610297610cc3565b34801561054d57600080fd5b506102976004803603602081101561056457600080fd5b50356001600160a01b0316610cc8565b34801561058057600080fd5b5061039b610cda565b34801561059557600080fd5b5061036a610cdf565b3480156105aa57600080fd5b5061036a610ce5565b3480156105bf57600080fd5b50610297610ceb565b3480156105d457600080fd5b506104c4610cf1565b3480156105e957600080fd5b50610297610cfa565b3480156105fe57600080fd5b506102e8610d00565b34801561061357600080fd5b506106d66004803603604081101561062a57600080fd5b81019060208101813564010000000081111561064557600080fd5b82018360208201111561065757600080fd5b8035906020019184600183028401116401000000008311171561067957600080fd5b91939092909160208101903564010000000081111561069757600080fd5b8201836020820111156106a957600080fd5b803590602001918460018302840111640100000000831117156106cb57600080fd5b509092509050610d05565b005b3480156106e457600080fd5b50610297611354565b3480156106f957600080fd5b506102976004803603602081101561071057600080fd5b503561135a565b34801561072357600080fd5b506102976004803603602081101561073a57600080fd5b50356113b5565b34801561074d57600080fd5b5061036a6113ce565b34801561076257600080fd5b5061036a6113d4565b34801561077757600080fd5b506102976113da565b34801561078c57600080fd5
b506106d66113e0565b3480156107a157600080fd5b506106d6600480360360208110156107b857600080fd5b50356001600160a01b03166114a9565b3480156107d457600080fd5b5061036a600480360360208110156107eb57600080fd5b5035611602565b3480156107fe57600080fd5b5061036a61160f565b34801561081357600080fd5b5061039b611615565b34801561082857600080fd5b5061036a61161a565b34801561083d57600080fd5b506102976004803603602081101561085457600080fd5b50356001600160a01b0316611620565b600181565b602881565b605081565b606481565b600b5481565b60015481565b600c5481565b6006818154811061089757fe5b6000918252602090912001546001600160a01b0316905081565b60076020526000908152604090205481565b61100181565b603881565b600881565b61200081565b61100581565b6000805460ff16610937576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e7472616374206e6f7420696e69742079657400000000000000604482015290519081900360640190fd5b33612000146109775760405162461bcd60e51b815260040180806020018281038252602f815260200180612164602f913960400191505060405180910390fd5b600082156109fc57604080516309a99b4f60e41b8152611005600482015260248101869052905161100291639a99b4f09160448083019260209291908290030181600087803b1580156109c957600080fd5b505af11580156109dd573d6000803e3d6000fd5b505050506040513d60208110156109f357600080fd5b50519050610a75565b604080516309a99b4f60e41b8152611005600482015260248101869052905161100491639a99b4f09160448083019260209291908290030181600087803b158015610a4657600080fd5b505af1158015610a5a573d6000803e3d6000fd5b505050506040513d6020811015610a7057600080fd5b505190505b600c805460010190556000610a8982611632565b600954909150610a9f908263ffffffff61166116565b600955600a54610ac7908290610abb908563ffffffff61166116565b9063ffffffff6116c216565b600a556001600160a01b038716600090815260056020526040902054610b3357600680546001810182556000919091527ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f0180546001600160a01b0319166001600160a01b0389161790555b6001600160a01b038088166000908152600560209081526040808320805460010190559289168252600790522054610bb15760088054600181018255600091909
1527ff3f7a9fe364faab93b216da50a3214154f22a0a2b415b23a84c8169e8b636ee30180546001600160a01b0319166001600160a01b0388161790555b6001600160a01b038616600090815260076020526040902080546001019055600c54606411610ca057600b54600954600a5460408051938452602084019290925282820152517f2649b1b772a1a74bd332a67695e285317dd722941166595741c60a00fa65bb759181900360600190a16000610c2b611704565b90506000610c376119e8565b6001600160a01b0389166000908152600d6020526040902054909150610c75908290610c69908563ffffffff61166116565b9063ffffffff61166116565b6001600160a01b0389166000908152600d6020526040812091909155600b80546001019055600c5550505b5060019695505050505050565b600281565b60035481565b61100881565b600b81565b600581565b60056020526000908152604090205481565b600981565b61100781565b61100681565b600a5481565b60005460ff1681565b60045481565b600081565b3361100714610d455760405162461bcd60e51b815260040180806020018281038252602e8152602001806120a9602e913960400191505060405180910390fd5b60005460ff16610d865760405162461bcd60e51b81526004018080602001828103825260218152602001806120d76021913960400191505060405180910390fd5b610dfa84848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152601f81527f68656164657252656c61796572526577617264526174654d6f6c6563756c650060208201529150611c669050565b15610ec75760208114610e3e5760405162461bcd60e51b81526004018080602001828103825260328152602001806121936032913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091610e7c91858580838501838280828437600092019190915250611d4d92505050565b9050600254811115610ebf5760405162461bcd60e51b8152600401808060200182810382526060815260200180611f826060913960600191505060405180910390fd5b6001556112c2565b610f2084848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805160608101909152602280825290925090506120876020830139611c66565b15610ff85760208114610f645760405162461bcd60e51b815260040180806020018281038252602e815260200180612038602e91396040019150506
0405180910390fd5b604080516020601f8401819004810282018101909252828152600091610fa291858580838501838280828437600092019190915250611d4d92505050565b90508015801590610fb557506001548110155b610ff05760405162461bcd60e51b815260040180806020018281038252606c8152602001806120f8606c913960800191505060405180910390fd5b6002556112c2565b61106c84848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152601a81527f63616c6c6572436f6d70656e736174696f6e4d6f6c6563756c6500000000000060208201529150611c669050565b1561113957602081146110b05760405162461bcd60e51b815260040180806020018281038252602e815260200180612038602e913960400191505060405180910390fd5b604080516020601f84018190048102820181019092528281526000916110ee91858580838501838280828437600092019190915250611d4d92505050565b90506004548111156111315760405162461bcd60e51b8152600401808060200182810382526056815260200180611fe26056913960600191505060405180910390fd5b6003556112c2565b6111ad84848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152601d81527f63616c6c6572436f6d70656e736174696f6e44656e6f6d696e61746f7200000060208201529150611c669050565b1561128557602081146111f15760405162461bcd60e51b815260040180806020018281038252602e815260200180612038602e913960400191505060405180910390fd5b604080516020601f840181900481028201810190925282815260009161122f91858580838501838280828437600092019190915250611d4d92505050565b9050801580159061124257506003548110155b61127d5760405162461bcd60e51b8152600401808060200182810382526061815260200180611f216061913960800191505060405180910390fd5b6004556112c2565b6040805162461bcd60e51b815260206004820152600d60248201526c756e6b6e6f776e20706172616d60981b604482015290519081900360640190fd5b7f6cdb0ac70ab7f2e2d035cca5be60d89906f2dede7648ddbd7402189c1eeed17a848484846040518080602001806020018381038352878782818152602001925080828437600083820152601f01601f191690910184810383528581526020019050858580828437600083820152604051601f909101601f1916909201829003985
0909650505050505050a150505050565b60025481565b60006028821161136b5750806113b0565b81602810801561137c575060508211155b15611389575060286113b0565b60508211801561139a5750606e8211155b156113aa575060788190036113b0565b50600481045b919050565b6000602882116113c65750806113b0565b5060286113b0565b61100281565b61100381565b60095481565b60005460ff1615611438576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e747261637420616c726561647920696e697400000000000000604482015290519081900360640190fd5b60005460ff1615611486576040805162461bcd60e51b8152602060048201526013602482015272185b1c9958591e481a5b9a5d1a585b1a5e9959606a1b604482015290519081900360640190fd5b60018080556005600255600381905560506004556000805460ff19169091179055565b6001600160a01b0381166000908152600d602052604090205480611508576040805162461bcd60e51b81526020600482015260116024820152701b9bc81c995b185e595c881c995dd85c99607a1b604482015290519081900360640190fd5b6001600160a01b0382166000818152600d60205260408082208290555184929184156108fc02918591818181858888f193505050506115b85760405161100290819084156108fc029085906000818181858888f19350505050158015611572573d6000803e3d6000fd5b506040805161100281526020810185905281517f24502838a334c8f2bb2ee1f8262a4fa7183e4489a717e96cc824e325f8b39e11929181900390910190a15050506115ff565b604080516001600160a01b03851681526020810184905281517f24502838a334c8f2bb2ee1f8262a4fa7183e4489a717e96cc824e325f8b39e11929181900390910190a150505b50565b6008818154811061089757fe5b61100081565b600381565b61100481565b600d6020526000908152604090205481565b600061165b60025461164f60015485611d5290919063ffffffff16565b9063ffffffff611dab16565b92915050565b6000828201838110156116bb576040805162461bcd60e51b815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b9392505050565b60006116bb83836040518060400160405280601e81526020017f536166654d6174683a207375627472616374696f6e206f766572666c6f770000815250611ded565b6009546006805460408051602080840282018101909252828152600094938593606093830182828
01561176057602002820191906000526020600020905b81546001600160a01b03168152600190910190602001808311611742575b5050505050905060608151604051908082528060200260200182016040528015611794578160200160208202803683370190505b50905060005b82518110156118215760008382815181106117b157fe5b6020026020010151905060006117eb60056000846001600160a01b03166001600160a01b03168152602001908152602001600020546113b5565b9050808484815181106117fa57fe5b6020908102919091010152611815868263ffffffff61166116565b9550505060010161179a565b50600061183f60045461164f60035488611d5290919063ffffffff16565b9050611851858263ffffffff6116c216565b94508460015b845181101561192857600061188c8761164f8a88868151811061187657fe5b6020026020010151611d5290919063ffffffff16565b90506118d981600d60008986815181106118a257fe5b60200260200101516001600160a01b03166001600160a01b031681526020019081526020016000205461166190919063ffffffff16565b600d60008885815181106118e957fe5b6020908102919091018101516001600160a01b031682528101919091526040016000205561191d838263ffffffff6116c216565b925050600101611857565b5061193e81600d6000876000815181106118a257fe5b600d60008660008151811061194f57fe5b60200260200101516001600160a01b03166001600160a01b0316815260200190815260200160002081905550600060098190555060008090505b84518110156119d057600560008683815181106119a257fe5b6020908102919091018101516001600160a01b03168252810191909152604001600090812055600101611989565b506119dd60066000611ee9565b509450505050505b90565b600a546008805460408051602080840282018101909252828152600094938593606093830182828015611a4457602002820191906000526020600020905b81546001600160a01b03168152600190910190602001808311611a26575b5050505050905060608151604051908082528060200260200182016040528015611a78578160200160208202803683370190505b50905060005b8251811015611af7576000838281518110611a9557fe5b602002602001015190506000611acf60076000846001600160a01b03166001600160a01b031681526020019081526020016000205461135a565b905080848481518110611ade57fe5b6020908102919091010152949094019350600101611a7e565b506000611b1560045461164f60035488611d5290919063ffffffff165
65b9050611b27858263ffffffff6116c216565b94508460015b8451811015611bb1576000611b4c8761164f8a88868151811061187657fe5b9050611b6281600d60008986815181106118a257fe5b600d6000888581518110611b7257fe5b6020908102919091018101516001600160a01b0316825281019190915260400160002055611ba6838263ffffffff6116c216565b925050600101611b2d565b50611bc781600d6000876000815181106118a257fe5b600d600086600081518110611bd857fe5b60200260200101516001600160a01b03166001600160a01b03168152602001908152602001600020819055506000600a8190555060008090505b8451811015611c595760076000868381518110611c2b57fe5b6020908102919091018101516001600160a01b03168252810191909152604001600090812055600101611c12565b506119dd60086000611ee9565b6000816040516020018082805190602001908083835b60208310611c9b5780518252601f199092019160209182019101611c7c565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120836040516020018082805190602001908083835b60208310611d095780518252601f199092019160209182019101611cea565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012014905092915050565b015190565b600082611d615750600061165b565b82820282848281611d6e57fe5b04146116bb5760405162461bcd60e51b81526004018080602001828103825260218152602001806120666021913960400191505060405180910390fd5b60006116bb83836040518060400160405280601a81526020017f536166654d6174683a206469766973696f6e206279207a65726f000000000000815250611e84565b60008184841115611e7c5760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b83811015611e41578181015183820152602001611e29565b50505050905090810190601f168015611e6e5780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b505050900390565b60008183611ed35760405162461bcd60e51b8152602060048201818152835160248401528351909283926044909101919085019080838360008315611e41578181015183820152602001611e29565b506000838581611edf57fe5b0495945050505050565b5080546000825590600052602060002090810
1906115ff91906119e591905b80821115611f1c5760008155600101611f08565b509056fe746865206e657743616c6c6572436f6d70656e736174696f6e44656e6f6d696e61746f72206d757374206e6f74206265207a65726f20616e64206e6f206c657373207468616e2063616c6c6572436f6d70656e736174696f6e4d6f6c6563756c656e65772068656164657252656c61796572526577617264526174654d6f6c6563756c652073686f756c646e27742062652067726561746572207468616e2068656164657252656c617965725265776172645261746544656e6f6d696e61746f726e65772063616c6c6572436f6d70656e736174696f6e4d6f6c6563756c652073686f756c646e27742062652067726561746572207468616e2063616c6c6572436f6d70656e736174696f6e44656e6f6d696e61746f726c656e677468206f6620726577617264466f7256616c696461746f725365744368616e6765206d69736d61746368536166654d6174683a206d756c7469706c69636174696f6e206f766572666c6f7768656164657252656c617965725265776172645261746544656e6f6d696e61746f72746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e7472616374636f6e747261637420686173206e6f74206265656e20696e697469616c697a6564746865206e65772068656164657252656c617965725265776172645261746544656e6f6d696e61746f72206d757374206e6f74206265207a65726f20616e64206e6f206c657373207468616e2068656164657252656c61796572526577617264526174654d6f6c6563756c65746865206d6573736167652073656e646572206d7573742062652063726f737320636861696e20636f6e74726163746c656e677468206f662068656164657252656c61796572526577617264526174654d6f6c6563756c65206d69736d61746368a2646970667358221220cd7c1ec8296551de0c54d97959872d6570445d6b1250fde9150f66f3ae10dc7164736f6c63430006040033" + }, + "0x0000000000000000000000000000000000001006": { + "balance": "0x0", + "code": 
"0x6080604052600436106101c25760003560e01c806395468d26116100f7578063c81b166211610095578063f9a2bbc711610064578063f9a2bbc714610529578063fb7cfdd71461053e578063fc3e590814610553578063fd6a687914610568576101c2565b8063c81b1662146104d5578063dc927faf146104ea578063e1c7392a146104ff578063e79a198f14610514576101c2565b8063a1a11bf5116100d1578063a1a11bf5146103c7578063a78abc16146103dc578063ab51bb96146103f1578063ac43175114610406576101c2565b806395468d261461038857806396713da91461039d5780639dc09262146103b2576101c2565b8063541d55481161016457806370fd5bad1161013e57806370fd5bad1461033457806375d47a0a146103495780637942fd051461035e5780637ae2308814610373576101c2565b8063541d5548146102b15780636a87d780146102f85780636e47b4821461031f576101c2565b806343756e5c116101a057806343756e5c1461022a578063493279b11461025b5780634bf6c8821461028757806351e806721461029c576101c2565b80630bee7a67146101c75780631aa3a008146101f55780633dffc387146101ff575b600080fd5b3480156101d357600080fd5b506101dc61057d565b6040805163ffffffff9092168252519081900360200190f35b6101fd610582565b005b34801561020b57600080fd5b5061021461077d565b6040805160ff9092168252519081900360200190f35b34801561023657600080fd5b5061023f610782565b604080516001600160a01b039092168252519081900360200190f35b34801561026757600080fd5b50610270610788565b6040805161ffff9092168252519081900360200190f35b34801561029357600080fd5b5061021461078d565b3480156102a857600080fd5b5061023f610792565b3480156102bd57600080fd5b506102e4600480360360208110156102d457600080fd5b50356001600160a01b0316610798565b604080519115158252519081900360200190f35b34801561030457600080fd5b5061030d6107b6565b60408051918252519081900360200190f35b34801561032b57600080fd5b5061023f6107bc565b34801561034057600080fd5b506102146107c2565b34801561035557600080fd5b5061023f6107c7565b34801561036a57600080fd5b506102146107cd565b34801561037f57600080fd5b5061030d6107d2565b34801561039457600080fd5b5061030d6107df565b3480156103a957600080fd5b506102146107eb565b3480156103be57600080fd5b5061023f6107f0565b3480156103d357600080fd5b5061023f6107f6565b3480156103e85760008
0fd5b506102e46107fc565b3480156103fd57600080fd5b506101dc610805565b34801561041257600080fd5b506101fd6004803603604081101561042957600080fd5b81019060208101813564010000000081111561044457600080fd5b82018360208201111561045657600080fd5b8035906020019184600183028401116401000000008311171561047857600080fd5b91939092909160208101903564010000000081111561049657600080fd5b8201836020820111156104a857600080fd5b803590602001918460018302840111640100000000831117156104ca57600080fd5b50909250905061080a565b3480156104e157600080fd5b5061023f610c2c565b3480156104f657600080fd5b5061023f610c32565b34801561050b57600080fd5b506101fd610c38565b34801561052057600080fd5b506101fd610cba565b34801561053557600080fd5b5061023f610e73565b34801561054a57600080fd5b5061030d610e79565b34801561055f57600080fd5b50610214610e7f565b34801561057457600080fd5b5061023f610e84565b606481565b3360009081526004602052604090205460ff16156105df576040805162461bcd60e51b81526020600482015260156024820152741c995b185e595c88185b1c9958591e48195e1a5cdd605a1b604482015290519081900360640190fd5b60005460ff16610632576040805162461bcd60e51b81526020600482015260196024820152781d1a194818dbdb9d1c9858dd081b9bdd081a5b9a5d081e595d603a1b604482015290519081900360640190fd5b61063b33610e8a565b156106775760405162461bcd60e51b815260040180806020018281038252602781526020018061109c6027913960400191505060405180910390fd5b3332146106c1576040805162461bcd60e51b81526020600482015260136024820152721b9bc81c1c9bde1e481a5cc8185b1b1bddd959606a1b604482015290519081900360640190fd5b60015434146107015760405162461bcd60e51b81526004018080602001828103825260258152602001806110776025913960400191505060405180910390fd5b604080518082018252600180548252600254602080840191825233600081815260038352868120955186559251948401949094556004815290849020805460ff1916909217909155825191825291517fdb33a09d38b59a8fa8b7d92a1d82c8015e99f05f67ae9c9ae623157767959496929181900390910190a1565b600181565b61100181565b603881565b600881565b61200081565b6001600160a01b031660009081526004602052604090205460ff1690565b60025481565b61100581565b600281565b61100881565b6
00b81565b68056bc75e2d6310000081565b67016345785d8a000081565b600981565b61100781565b61100681565b60005460ff1681565b600081565b60005460ff1661085d576040805162461bcd60e51b81526020600482015260196024820152781d1a194818dbdb9d1c9858dd081b9bdd081a5b9a5d081e595d603a1b604482015290519081900360640190fd5b336110071461089d5760405162461bcd60e51b815260040180806020018281038252602e8152602001806110c3602e913960400191505060405180910390fd5b61090384848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152600f81526e1c995c5d5a5c995911195c1bdcda5d608a1b60208201529150610e909050565b15610a0657602081146109475760405162461bcd60e51b81526004018080602001828103825260228152602001806110f16022913960400191505060405180910390fd5b604080516020601f840181900481028201810190925282815260009161098591858580838501838280828437600092019190915250610f7792505050565b90506001811180156109a05750683635c9adc5dea000008111155b80156109ad575060025481115b6109fe576040805162461bcd60e51b815260206004820181905260248201527f7468652072657175697265644465706f736974206f7574206f662072616e6765604482015290519081900360640190fd5b600155610b9a565b610a6184848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805180820190915260048152636475657360e01b60208201529150610e909050565b15610b5d5760208114610abb576040805162461bcd60e51b815260206004820152601760248201527f6c656e677468206f662064756573206d69736d61746368000000000000000000604482015290519081900360640190fd5b604080516020601f8401819004810282018101909252828152600091610af991858580838501838280828437600092019190915250610f7792505050565b9050600081118015610b0c575060015481105b610b55576040805162461bcd60e51b81526020600482015260156024820152747468652064756573206f7574206f662072616e676560581b604482015290519081900360640190fd5b600255610b9a565b6040805162461bcd60e51b815260206004820152600d60248201526c756e6b6e6f776e20706172616d60981b604482015290519081900360640190fd5b7f6cdb0ac70ab7f2e2d035cca5be60d89906f2dede7648ddbd74021
89c1eeed17a848484846040518080602001806020018381038352878782818152602001925080828437600083820152601f01601f191690910184810383528581526020019050858580828437600083820152604051601f909101601f19169092018290039850909650505050505050a150505050565b61100281565b61100381565b60005460ff1615610c90576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e747261637420616c726561647920696e697400000000000000604482015290519081900360640190fd5b68056bc75e2d63100000600190815567016345785d8a00006002556000805460ff19169091179055565b3360009081526004602052604090205460ff16610d15576040805162461bcd60e51b81526020600482015260146024820152731c995b185e595c88191bc81b9bdd08195e1a5cdd60621b604482015290519081900360640190fd5b60005460ff16610d68576040805162461bcd60e51b81526020600482015260196024820152781d1a194818dbdb9d1c9858dd081b9bdd081a5b9a5d081e595d603a1b604482015290519081900360640190fd5b610d7061105c565b5033600081815260036020908152604091829020825180840190935280548084526001909101549183018290529192916108fc91610db4919063ffffffff610f7c16565b6040518115909202916000818181858888f19350505050158015610ddc573d6000803e3d6000fd5b50602081015160405161100291829181156108fc0291906000818181858888f19350505050158015610e12573d6000803e3d6000fd5b50336000818152600460209081526040808320805460ff191690556003825280832083815560010192909255815192835290517fd17202129b83db7880d6b9f25df81c58ad46f7e0e2c92236b1aa10663a4876679281900390910190a15050565b61100081565b60015481565b600381565b61100481565b3b151590565b6000816040516020018082805190602001908083835b60208310610ec55780518252601f199092019160209182019101610ea6565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120836040516020018082805190602001908083835b60208310610f335780518252601f199092019160209182019101610f14565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012014905092915050565b015190565b6000610fbe83836040518060400160405280601e81526020017f536166654d61746
83a207375627472616374696f6e206f766572666c6f770000815250610fc5565b9392505050565b600081848411156110545760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b83811015611019578181015183820152602001611001565b50505050905090810190601f1680156110465780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b505050900390565b60405180604001604052806000815260200160008152509056fe6465706f7369742076616c7565206973206e6f742065786163746c79207468652073616d65636f6e7472616374206973206e6f7420616c6c6f77656420746f20626520612072656c61796572746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e74726163746c656e677468206f662072657175697265644465706f736974206d69736d61746368a2646970667358221220449507f2557413401b33bd1aa888e40f31523a5d010a891cf7383c0090d7f49a64736f6c63430006040033" + }, + "0x0000000000000000000000000000000000001007": { + "balance": "0x0", + "code": "0x608060405234801561001057600080fd5b506004361061018e5760003560e01c8063831d65d1116100de578063ab51bb9611610097578063dc927faf11610071578063dc927faf14610486578063f9a2bbc71461048e578063fc3e590814610496578063fd6a68791461049e5761018e565b8063ab51bb96146103fc578063c81b166214610404578063c8509d811461040c5761018e565b8063831d65d11461034457806396713da9146103c05780639ab1a373146103c85780639dc09262146103d0578063a1a11bf5146103d8578063a78abc16146103e05761018e565b8063493279b11161014b5780636e47b482116101255780636e47b4821461032457806370fd5bad1461032c57806375d47a0a146103345780637942fd051461033c5761018e565b8063493279b1146102f55780634bf6c8821461031457806351e806721461031c5761018e565b80630bee7a67146101935780631182b875146101b45780633a21baae146102a35780633dffc387146102ab57806343756e5c146102c95780634900c4ea146102ed575b600080fd5b61019b6104a6565b6040805163ffffffff9092168252519081900360200190f35b61022e600480360360408110156101ca57600080fd5b60ff82351691908101906040810160208201356401000000008111156101ef57600080fd5b82018360208201111561020157600080fd5b8035906020019184600183028401116
401000000008311171561022357600080fd5b5090925090506104ab565b6040805160208082528351818301528351919283929083019185019080838360005b83811015610268578181015183820152602001610250565b50505050905090810190601f1680156102955780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b61019b61059a565b6102b361059f565b6040805160ff9092168252519081900360200190f35b6102d16105a4565b604080516001600160a01b039092168252519081900360200190f35b6102b36105aa565b6102fd6105af565b6040805161ffff9092168252519081900360200190f35b6102b36105b4565b6102d16105b9565b6102d16105bf565b6102b36105c5565b6102d16105ca565b6102b36105d0565b6103be6004803603604081101561035a57600080fd5b60ff823516919081019060408101602082013564010000000081111561037f57600080fd5b82018360208201111561039157600080fd5b803590602001918460018302840111640100000000831117156103b357600080fd5b5090925090506105d5565b005b6102b3610667565b61019b61066c565b6102d1610671565b6102d1610677565b6103e861067d565b604080519115158252519081900360200190f35b61019b6105aa565b6102d1610686565b6103be6004803603604081101561042257600080fd5b60ff823516919081019060408101602082013564010000000081111561044757600080fd5b82018360208201111561045957600080fd5b8035906020019184600183028401116401000000008311171561047b57600080fd5b50909250905061068c565b6102d1610703565b6102d1610709565b6102b361070f565b6102d1610714565b606481565b606033612000146104ed5760405162461bcd60e51b815260040180806020018281038252602f8152602001806113e7602f913960400191505060405180910390fd5b6104f5611382565b600061053685858080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061071a92505050565b91509150806105525761054960646107e0565b92505050610593565b600061055d8361084a565b905063ffffffff811661058457505060408051600081526020810190915291506105939050565b61058d816107e0565b93505050505b9392505050565b606681565b600181565b61100181565b600081565b603881565b600881565b61200081565b61100581565b600281565b61100881565b600b81565b33612000146106155760405162461bcd60e51b815260040180806020018281038252602
f8152602001806113e7602f913960400191505060405180910390fd5b6040805162461bcd60e51b815260206004820152601e60248201527f7265636569766520756e65787065637465642061636b207061636b6167650000604482015290519081900360640190fd5b505050565b600981565b606581565b61100781565b61100681565b60005460ff1681565b61100281565b33612000146106cc5760405162461bcd60e51b815260040180806020018281038252602f8152602001806113e7602f913960400191505060405180910390fd5b60405162461bcd60e51b81526004018080602001828103825260238152602001806114166023913960400191505060405180910390fd5b61100381565b61100081565b600381565b61100481565b610722611382565b600061072c611382565b6107346113ac565b61074561074086610beb565b610c10565b90506000805b61075483610c5a565b156107d357806107765761076f61076a84610c7b565b610cc9565b84526107cb565b80600114156107955761078b61076a84610c7b565b60208501526107cb565b80600214156107c6576107af6107aa84610c7b565b610d42565b6001600160a01b03166040850152600191506107cb565b6107d3565b60010161074b565b5091935090915050915091565b604080516001808252818301909252606091829190816020015b60608152602001906001900390816107fa5790505090506108208363ffffffff16610d62565b8160008151811061082d57fe5b602002602001018190525061084181610d75565b9150505b919050565b60006108598260400151610dff565b6108c557604080516020808252601c908201527f74686520746172676574206973206e6f74206120636f6e7472616374000000008183015290517f70e72399380dcfb0338abc03dc8d47f9f470ada8e769c9a78d644ea97385ecb29181900360600190a1506065610845565b81604001516001600160a01b031663ac431751836000015184602001516040518363ffffffff1660e01b8152600401808060200180602001838103835285818151815260200191508051906020019080838360005b8381101561093257818101518382015260200161091a565b50505050905090810190601f16801561095f5780820380516001836020036101000a031916815260200191505b50838103825284518152845160209182019186019080838360005b8381101561099257818101518382015260200161097a565b50505050905090810190601f1680156109bf5780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b1580156109e057600080fd5b5
05af19250505080156109f1575060015b610be3576040516000815260443d1015610a0d57506000610aaa565b60046000803e60005160e01c6308c379a08114610a2e576000915050610aaa565b60043d036004833e81513d602482011167ffffffffffffffff82111715610a5a57600092505050610aaa565b808301805167ffffffffffffffff811115610a7c576000945050505050610aaa565b8060208301013d8601811115610a9a57600095505050505050610aaa565b601f01601f191660405250925050505b80610ab55750610b58565b7f70e72399380dcfb0338abc03dc8d47f9f470ada8e769c9a78d644ea97385ecb2816040518080602001828103825283818151815260200191508051906020019080838360005b83811015610b14578181015183820152602001610afc565b50505050905090810190601f168015610b415780820380516001836020036101000a031916815260200191505b509250505060405180910390a16066915050610845565b3d808015610b82576040519150601f19603f3d011682016040523d82523d6000602084013e610b87565b606091505b5060408051602080825283518183015283517f1279f84165b4fd69c35e1f338ff107231b036c655cd1688851e011ce617c4e8d938593928392918301919085019080838360008315610b14578181015183820152602001610afc565b506000919050565b610bf36113cc565b506040805180820190915281518152602082810190820152919050565b610c186113ac565b610c2182610e05565b610c2a57600080fd5b6000610c398360200151610e3f565b60208085015160408051808201909152868152920190820152915050919050565b6000610c646113cc565b505080518051602091820151919092015191011190565b610c836113cc565b610c8c82610c5a565b610c9557600080fd5b60208201516000610ca582610ea2565b80830160209586015260408051808201909152908152938401919091525090919050565b8051606090610cd757600080fd5b6000610ce68360200151610e3f565b83516040805191839003808352601f19601f8201168301602001909152919250606090828015610d1d576020820181803683370190505b5090506000816020019050610d39848760200151018285610fd5565b50949350505050565b8051600090601514610d5357600080fd5b610d5c82611020565b92915050565b6060610d5c610d70836110d5565b6111bb565b6060815160001415610d965750604080516000815260208101909152610845565b606082600081518110610da557fe5b602002602001015190506000600190505b8351811015610de657610ddc8285838151811
0610dcf57fe5b602002602001015161120d565b9150600101610db6565b50610841610df9825160c060ff1661128a565b8261120d565b3b151590565b8051600090610e1657506000610845565b6020820151805160001a9060c0821015610e3557600092505050610845565b5060019392505050565b8051600090811a6080811015610e59576000915050610845565b60b8811080610e74575060c08110801590610e74575060f881105b15610e83576001915050610845565b60c0811015610e975760b519019050610845565b60f519019050610845565b80516000908190811a6080811015610ebd5760019150610fce565b60b8811015610ed257607e1981019150610fce565b60c0811015610f4c57600060b78203600186019550806020036101000a865104915060018101820193505080831015610f46576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b50610fce565b60f8811015610f615760be1981019150610fce565b600060f78203600186019550806020036101000a865104915060018101820193505080831015610fcc576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b505b5092915050565b80610fdf57610662565b5b60208110610fff578251825260209283019290910190601f1901610fe0565b915181516020939093036101000a6000190180199091169216919091179052565b80516000901580159061103557508151602110155b61103e57600080fd5b600061104d8360200151610e3f565b905080836000015110156110a8576040805162461bcd60e51b815260206004820152601a60248201527f6c656e677468206973206c657373207468616e206f6666736574000000000000604482015290519081900360640190fd5b825160208085015183018051928490039291831015610d3957506020919091036101000a90049392505050565b604080516020808252818301909252606091829190602082018180368337505050602081018490529050600067ffffffffffffffff1984166111195750601861113d565b6fffffffffffffffffffffffffffffffff1984166111395750601061113d565b5060005b60208110156111735781818151811061115257fe5b01602001516001600160f81b0319161561116b57611173565b60010161113d565b60008160200390506060816040519080825280601f01601f1916602001820160405280156111a8576020820181803683370190505b50808301969096525
08452509192915050565b6060815160011480156111ed5750607f60f81b826000815181106111db57fe5b01602001516001600160f81b03191611155b156111f9575080610845565b610d5c61120b8351608060ff1661128a565b835b6060806040519050835180825260208201818101602087015b8183101561123e578051835260209283019201611226565b50855184518101855292509050808201602086015b8183101561126b578051835260209283019201611253565b508651929092011591909101601f01601f191660405250905092915050565b60606801000000000000000083106112da576040805162461bcd60e51b815260206004820152600e60248201526d696e70757420746f6f206c6f6e6760901b604482015290519081900360640190fd5b604080516001808252818301909252606091602082018180368337019050509050603784116113345782840160f81b8160008151811061131657fe5b60200101906001600160f81b031916908160001a9053509050610d5c565b606061133f856110d5565b90508381510160370160f81b8260008151811061135857fe5b60200101906001600160f81b031916908160001a905350611379828261120d565b95945050505050565b6040518060600160405280606081526020016060815260200160006001600160a01b031681525090565b60405180604001604052806113bf6113cc565b8152602001600081525090565b60405180604001604052806000815260200160008152509056fe746865206d6573736167652073656e646572206d7573742062652063726f737320636861696e20636f6e74726163747265636569766520756e6578706563746564206661696c2061636b207061636b616765a2646970667358221220a5eefec118f5b467b996b871e3b96c20aa4fe4192a6c4b34c6afc795161d4a9a64736f6c63430006040033" + }, + "0x0000000000000000000000000000000000001008": { + "balance": "0x0", + "code": 
"0x6080604052600436106102465760003560e01c806375d47a0a11610139578063ab51bb96116100b6578063d9e6dae91161007a578063d9e6dae914610512578063dc927faf14610975578063f9a2bbc71461098a578063fc3e590814610566578063fd6a68791461099f578063fe3a2af5146104e857610246565b8063ab51bb96146108ca578063c81b1662146108df578063c8509d81146107da578063c8e704a414610566578063d117a110146108f457610246565b806395b9ad26116100fd57806395b9ad261461086157806396713da9146108765780639dc092621461088b578063a1a11bf5146108a0578063a78abc16146108b557610246565b806375d47a0a146106fc57806377d9dae8146107115780637942fd05146107c55780637d078e13146103b3578063831d65d1146107da57610246565b80634bc81c00116101c757806366dea52a1161018b57806366dea52a146105665780636b3f13071461057b5780636e47b4821461064357806370fd5bad1461055157806372c4e0861461065857610246565b80634bc81c00146104fd5780634bf6c8821461051257806351e80672146105275780635d499b1b1461053c5780635f558f861461055157610246565b80633dffc3871161020e5780633dffc387146103b357806343756e5c146103c8578063445fcefe146103f9578063493279b1146104bc5780634a688818146104e857610246565b80630bee7a671461024b5780630f212b1b146102795780631182b875146102a45780631f91600b1461039e57806323996b53146103b3575b600080fd5b34801561025757600080fd5b506102606109b4565b6040805163ffffffff9092168252519081900360200190f35b34801561028557600080fd5b5061028e6109b9565b6040805160ff9092168252519081900360200190f35b3480156102b057600080fd5b50610329600480360360408110156102c757600080fd5b60ff8235169190810190604081016020820135600160201b8111156102eb57600080fd5b8201836020820111156102fd57600080fd5b803590602001918460018302840111600160201b8311171561031e57600080fd5b5090925090506109be565b6040805160208082528351818301528351919283929083019185019080838360005b8381101561036357818101518382015260200161034b565b50505050905090810190601f1680156103905780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b3480156103aa57600080fd5b5061028e610a47565b3480156103bf57600080fd5b5061028e610a4c565b3480156103d457600080fd5b506103dd610a51565b60408051600160016
0a01b039092168252519081900360200190f35b34801561040557600080fd5b506104aa6004803603602081101561041c57600080fd5b810190602081018135600160201b81111561043657600080fd5b82018360208201111561044857600080fd5b803590602001918460018302840111600160201b8311171561046957600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610a57945050505050565b60408051918252519081900360200190f35b3480156104c857600080fd5b506104d1610bb6565b6040805161ffff9092168252519081900360200190f35b3480156104f457600080fd5b5061028e610bbb565b34801561050957600080fd5b5061028e610bc0565b34801561051e57600080fd5b5061028e610bc5565b34801561053357600080fd5b506103dd610bca565b34801561054857600080fd5b506104aa610bd0565b34801561055d57600080fd5b5061028e610bd9565b34801561057257600080fd5b5061028e610bde565b61062f6004803603604081101561059157600080fd5b6001600160a01b038235169190810190604081016020820135600160201b8111156105bb57600080fd5b8201836020820111156105cd57600080fd5b803590602001918460018302840111600160201b831117156105ee57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610be3945050505050565b604080519115158252519081900360200190f35b34801561064f57600080fd5b506103dd611465565b61062f6004803603602081101561066e57600080fd5b810190602081018135600160201b81111561068857600080fd5b82018360208201111561069a57600080fd5b803590602001918460018302840111600160201b831117156106bb57600080fd5b91908080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525092955061146b945050505050565b34801561070857600080fd5b506103dd6118b9565b61062f6004803603604081101561072757600080fd5b6001600160a01b038235169190810190604081016020820135600160201b81111561075157600080fd5b82018360208201111561076357600080fd5b803590602001918460018302840111600160201b8311171561078457600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295506118bf945050505050565b3480156107d157600
080fd5b5061028e611dc6565b3480156107e657600080fd5b5061085f600480360360408110156107fd57600080fd5b60ff8235169190810190604081016020820135600160201b81111561082157600080fd5b82018360208201111561083357600080fd5b803590602001918460018302840111600160201b8311171561085457600080fd5b509092509050611dcb565b005b34801561086d57600080fd5b5061028e611e7e565b34801561088257600080fd5b5061028e611e83565b34801561089757600080fd5b506103dd611e88565b3480156108ac57600080fd5b506103dd611e8e565b3480156108c157600080fd5b5061062f611e94565b3480156108d657600080fd5b50610260610bbb565b3480156108eb57600080fd5b506103dd611e9d565b34801561090057600080fd5b5061091e6004803603602081101561091757600080fd5b5035611ea3565b6040805160ff988916815260208101979097526001600160a01b03909516868601526060860193909352608085019190915290931660a083015267ffffffffffffffff90921660c082015290519081900360e00190f35b34801561098157600080fd5b506103dd611efb565b34801561099657600080fd5b506103dd611f01565b3480156109ab57600080fd5b506103dd611f07565b606481565b600681565b60603361200014610a005760405162461bcd60e51b815260040180806020018281038252602f8152602001806132a0602f913960400191505060405180910390fd5b610a3f83838080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250611f0d92505050565b949350505050565b600481565b600181565b61100181565b6020810151600090610a67613168565b50600081815260016020818152604092839020835160e081018552815460ff9081168252938201549281019290925260028101546001600160a01b031693820184905260038101546060830152600481015460808301526005015491821660a082015261010090910467ffffffffffffffff1660c082015290610aef57600092505050610bb1565b600081604001516001600160a01b03166370a082316110046040518263ffffffff1660e01b815260040180826001600160a01b03166001600160a01b0316815260200191505060206040518083038186803b158015610b4d57600080fd5b505afa158015610b61573d6000803e3d6000fd5b505050506040513d6020811015610b7757600080fd5b505160808301516060840151919250600091610b989163ffffffff61219e16565b9050610baa818363ffffffff61219e16565b9450505050505b919050565
b603881565b600081565b600581565b600881565b61200081565b6402540be40081565b600281565b600381565b600080610bef836121e7565b9050610bf9613168565b50600081815260016020818152604092839020835160e081018552815460ff90811682529382015492810183905260028201546001600160a01b03169481019490945260038101546060850152600481015460808501526005015491821660a084015261010090910467ffffffffffffffff1660c0830152610cbf576040805162461bcd60e51b815260206004820152601a602482015279189a5b99081c995c5d595cdd08191bd95cdb89dd08195e1a5cdd60321b604482015290519081900360640190fd5b6000610cdc8260800151836060015161219e90919063ffffffff16565b905081604001516001600160a01b0316866001600160a01b031614610d325760405162461bcd60e51b815260040180806020018281038252604581526020018061325b6045913960600191505060405180910390fd5b336001600160a01b0316866001600160a01b031663893d20e86040518163ffffffff1660e01b815260040160206040518083038186803b158015610d7557600080fd5b505afa158015610d89573d6000803e3d6000fd5b505050506040513d6020811015610d9f57600080fd5b50516001600160a01b031614610de65760405162461bcd60e51b815260040180806020018281038252602e8152602001806131f6602e913960400191505060405180910390fd5b604080516370a0823160e01b8152611004600482015290516000916001600160a01b038916916370a0823191602480820192602092909190829003018186803b158015610e3257600080fd5b505afa158015610e46573d6000803e3d6000fd5b505050506040513d6020811015610e5c57600080fd5b505160408051636eb1769f60e11b815233600482015230602482015290519192508391610eed9184916001600160a01b038c169163dd62ed3e916044808301926020929190829003018186803b158015610eb557600080fd5b505afa158015610ec9573d6000803e3d6000fd5b505050506040513d6020811015610edf57600080fd5b50519063ffffffff6121ee16565b1015610f40576040805162461bcd60e51b815260206004820152601760248201527f616c6c6f77616e6365206973206e6f7420656e6f756768000000000000000000604482015290519081900360640190fd5b600034905060006110046001600160a01b031663149d14d96040518163ffffffff1660e01b815260040160206040518083038186803b158015610f8257600080fd5b505afa158015610f96573d6000803e3d6000fd5b505050506040513
d6020811015610fac57600080fd5b50519050808210801590610fc557506402540be4008206155b6110005760405162461bcd60e51b81526004018080602001828103825260378152602001806132246037913960400191505060405180910390fd5b600061100c868b612248565b905063ffffffff811661120b576001600160a01b038a166323b872dd3361100461103c898963ffffffff61219e16565b6040518463ffffffff1660e01b815260040180846001600160a01b03166001600160a01b03168152602001836001600160a01b03166001600160a01b031681526020018281526020019350505050602060405180830381600087803b1580156110a457600080fd5b505af11580156110b8573d6000803e3d6000fd5b505050506040513d60208110156110ce57600080fd5b5050602086015160408088015160a089015182516323bfccdb60e21b815260048101949094526001600160a01b03909116602484015260ff1660448301525161100491638eff336c91606480830192600092919082900301818387803b15801561113757600080fd5b505af115801561114b573d6000803e3d6000fd5b50505050896001600160a01b03167f78e7dd9aefcdbf795c4936a66f7dc6d41bb56637b54f561a6bf7829dca3348a88a8860600151886040518080602001848152602001838152602001828103825285818151815260200191508051906020019080838360005b838110156111ca5781810151838201526020016111b2565b50505050905090810190601f1680156111f75780820380516001836020036101000a031916815260200191505b5094505050505060405180910390a26112c3565b896001600160a01b03167f831c0ef4d93bda3bce08b69ae3f29ef1a6e052b833200988554158494405a1078a8360405180806020018363ffffffff1663ffffffff168152602001828103825284818151815260200191508051906020019080838360005b8381101561128757818101518382015260200161126f565b50505050905090810190601f1680156112b45780820380516001836020036101000a031916815260200191505b50935050505060405180910390a25b60008781526001602081905260408220805460ff191681559081018290556002810180546001600160a01b0319169055600381018290556004810191909155600501805468ffffffffffffffffff1916905561131d6131a4565b5060408051808201825263ffffffff831681526020810189905290516110049085156108fc029086906000818181858888f19350505050158015611365573d6000803e3d6000fd5b5061200063f7a251d760016113798461269f565b61138e886402540be40063fff
fffff61272916565b6040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b838110156113ec5781810151838201526020016113d4565b50505050905090810190601f1680156114195780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b15801561143a57600080fd5b505af115801561144e573d6000803e3d6000fd5b505050506001985050505050505050505b92915050565b61100581565b600080611477836121e7565b9050611481613168565b50600081815260016020818152604092839020835160e081018552815460ff90811682529382015492810183905260028201546001600160a01b03169481019490945260038101546060850152600481015460808501526005015491821660a084015261010090910467ffffffffffffffff1660c0830152611547576040805162461bcd60e51b815260206004820152601a602482015279189a5b99081c995c5d595cdd08191bd95cdb89dd08195e1a5cdd60321b604482015290519081900360640190fd5b428160c0015167ffffffffffffffff16106115a9576040805162461bcd60e51b815260206004820152601b60248201527f62696e642072657175657374206973206e6f7420657870697265640000000000604482015290519081900360640190fd5b600034905060006110046001600160a01b031663149d14d96040518163ffffffff1660e01b815260040160206040518083038186803b1580156115eb57600080fd5b505afa1580156115ff573d6000803e3d6000fd5b505050506040513d602081101561161557600080fd5b5051905080821080159061162e57506402540be4008206155b6116695760405162461bcd60e51b81526004018080602001828103825260378152602001806132246037913960400191505060405180910390fd5b60008481526001602081905260408220805460ff191681559081018290556002810180546001600160a01b0319169055600381018290556004810191909155600501805468ffffffffffffffffff191690556116c36131a4565b50604080518082018252600181526020810186905290516110049084156108fc029085906000818181858888f19350505050158015611706573d6000803e3d6000fd5b5061200063f7a251d7600161171a8461269f565b61172f876402540be40063ffffffff61272916565b6040518463ffffffff1660e01b8152600401808460ff1660ff168152602001806020018381526020018281038252848181518152602001915080519060200
19080838360005b8381101561178d578181015183820152602001611775565b50505050905090810190601f1680156117ba5780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b1580156117db57600080fd5b505af11580156117ef573d6000803e3d6000fd5b5050505083604001516001600160a01b03167f831c0ef4d93bda3bce08b69ae3f29ef1a6e052b833200988554158494405a10788600160405180806020018360ff1663ffffffff168152602001828103825284818151815260200191508051906020019080838360005b83811015611871578181015183820152602001611859565b50505050905090810190601f16801561189e5780820380516001836020036101000a031916815260200191505b50935050505060405180910390a25060019695505050505050565b61100881565b6000806118cb836121e7565b90506118d5613168565b50600081815260016020818152604092839020835160e081018552815460ff90811682529382015492810183905260028201546001600160a01b03169481019490945260038101546060850152600481015460808501526005015491821660a084015261010090910467ffffffffffffffff1660c083015261199b576040805162461bcd60e51b815260206004820152601a602482015279189a5b99081c995c5d595cdd08191bd95cdb89dd08195e1a5cdd60321b604482015290519081900360640190fd5b80604001516001600160a01b0316856001600160a01b0316146119ef5760405162461bcd60e51b815260040180806020018281038252604581526020018061325b6045913960600191505060405180910390fd5b336001600160a01b0316856001600160a01b031663893d20e86040518163ffffffff1660e01b815260040160206040518083038186803b158015611a3257600080fd5b505afa158015611a46573d6000803e3d6000fd5b505050506040513d6020811015611a5c57600080fd5b50516001600160a01b031614611ab9576040805162461bcd60e51b815260206004820152601b60248201527f6f6e6c79206265703230206f776e65722063616e2072656a6563740000000000604482015290519081900360640190fd5b600034905060006110046001600160a01b031663149d14d96040518163ffffffff1660e01b815260040160206040518083038186803b158015611afb57600080fd5b505afa158015611b0f573d6000803e3d6000fd5b505050506040513d6020811015611b2557600080fd5b50519050808210801590611b3e57506402540be4008206155b611b795760405162461bcd60e51b8152600401808060200
1828103825260378152602001806132246037913960400191505060405180910390fd5b60008481526001602081905260408220805460ff191681559081018290556002810180546001600160a01b0319169055600381018290556004810191909155600501805468ffffffffffffffffff19169055611bd36131a4565b50604080518082018252600781526020810186905290516110049084156108fc029085906000818181858888f19350505050158015611c16573d6000803e3d6000fd5b5061200063f7a251d76001611c2a8461269f565b611c3f876402540be40063ffffffff61272916565b6040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b83811015611c9d578181015183820152602001611c85565b50505050905090810190601f168015611cca5780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b158015611ceb57600080fd5b505af1158015611cff573d6000803e3d6000fd5b50505050876001600160a01b03167f831c0ef4d93bda3bce08b69ae3f29ef1a6e052b833200988554158494405a10788600760405180806020018360ff1663ffffffff168152602001828103825284818151815260200191508051906020019080838360005b83811015611d7d578181015183820152602001611d65565b50505050905090810190601f168015611daa5780820380516001836020036101000a031916815260200191505b50935050505060405180910390a2506001979650505050505050565b600b81565b3361200014611e0b5760405162461bcd60e51b815260040180806020018281038252602f8152602001806132a0602f913960400191505060405180910390fd5b7f41ce201247b6ceb957dcdb217d0b8acb50b9ea0e12af9af4f5e7f38902101605838383604051808460ff1660ff168152602001806020018281038252848482818152602001925080828437600083820152604051601f909101601f1916909201829003965090945050505050a1505050565b600781565b600981565b61100781565b61100681565b60005460ff1681565b61100281565b600160208190526000918252604090912080549181015460028201546003830154600484015460059094015460ff9586169593946001600160a01b0390931693919291811690610100900467ffffffffffffffff1687565b61100381565b61100081565b61100481565b6060611f17613168565b6000611f228461276b565b9150915080611f78576040805162461bcd60e51b815260206004820
152601f60248201527f756e7265636f676e697a6564207472616e73666572496e207061636b61676500604482015290519081900360640190fd5b815160ff1661202c576020828101805160009081526001928390526040908190208551815460ff1990811660ff928316178355935194820194909455908501516002820180546001600160a01b0319166001600160a01b03909216919091179055606085015160038201556080850151600482015560a08501516005909101805460c08701519316919093161768ffffffffffffffff00191661010067ffffffffffffffff90921691909102179055612183565b815160ff16600114156121365760006110046001600160a01b03166359b9278984602001516040518263ffffffff1660e01b81526004018082815260200191505060206040518083038186803b15801561208557600080fd5b505afa158015612099573d6000803e3d6000fd5b505050506040513d60208110156120af57600080fd5b505190506001600160a01b038116156121305760208301516040805163b99328c560e01b815260048101929092526001600160a01b0383166024830152516110049163b99328c591604480830192600092919082900301818387803b15801561211757600080fd5b505af115801561212b573d6000803e3d6000fd5b505050505b50612183565b6040805162461bcd60e51b815260206004820152601960248201527f756e7265636f676e697a65642062696e64207061636b61676500000000000000604482015290519081900360640190fd5b60408051600080825260208201909252905b50949350505050565b60006121e083836040518060400160405280601e81526020017f536166654d6174683a207375627472616374696f6e206f766572666c6f7700008152506128bd565b9392505050565b6020015190565b6000828201838110156121e0576040805162461bcd60e51b815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b600080826001600160a01b031663313ce5676040518163ffffffff1660e01b815260040160206040518083038186803b15801561228457600080fd5b505afa158015612298573d6000803e3d6000fd5b505050506040513d60208110156122ae57600080fd5b5051604080516395d89b4160e01b815290519192506060916001600160a01b038616916395d89b41916004808301926000929190829003018186803b1580156122f657600080fd5b505afa15801561230a573d6000803e3d6000fd5b505050506040513d6000823e601f3d908101601f191682016040526
02081101561233357600080fd5b8101908080516040519392919084600160201b82111561235257600080fd5b90830190602082018581111561236757600080fd5b8251600160201b81118282018810171561238057600080fd5b82525081516020918201929091019080838360005b838110156123ad578181015183820152602001612395565b50505050905090810190601f1680156123da5780820380516001836020036101000a031916815260200191505b5060408181526370a0823160e01b82526110046004830152519495506000946001600160a01b038a1694506370a08231935060248083019350602092829003018186803b15801561242a57600080fd5b505afa15801561243e573d6000803e3d6000fd5b505050506040513d602081101561245457600080fd5b5051608087015160608801519192506000916124759163ffffffff61219e16565b9050428760c0015167ffffffffffffffff16101561249b57506001935061145f92505050565b6124a9838860200151612954565b6124bb57506002935061145f92505050565b808211156124d157506003935061145f92505050565b866060015187604001516001600160a01b03166318160ddd6040518163ffffffff1660e01b815260040160206040518083038186803b15801561251357600080fd5b505afa158015612527573d6000803e3d6000fd5b505050506040513d602081101561253d57600080fd5b50511461255257506004935061145f92505050565b8660a0015160ff16841461256e57506005935061145f92505050565b602080880151604080516359b9278960e01b8152600481019290925251600092611004926359b927899260248083019392829003018186803b1580156125b357600080fd5b505afa1580156125c7573d6000803e3d6000fd5b505050506040513d60208110156125dd57600080fd5b50516001600160a01b031614158061267f57506000801b6110046001600160a01b031663bd46646189604001516040518263ffffffff1660e01b815260040180826001600160a01b03166001600160a01b0316815260200191505060206040518083038186803b15801561265057600080fd5b505afa158015612664573d6000803e3d6000fd5b505050506040513d602081101561267a57600080fd5b505114155b1561269257506006935061145f92505050565b5060009695505050505050565b6040805160028082526060828101909352829190816020015b60608152602001906001900390816126b857505083519091506126e09063ffffffff16612a3c565b816000815181106126ed57fe5b6020026020010181905250612708836020015160001c612a3c565b81600181518
11061271557fe5b60200260200101819052506121e081612a4f565b60006121e083836040518060400160405280601a81526020017f536166654d6174683a206469766973696f6e206279207a65726f000000000000815250612ad9565b612773613168565b600061277d613168565b6127856131bb565b61279661279186612b3e565b612b63565b90506000805b6127a583612bad565b156128b057806127ca576127c06127bb84612bce565b612c1c565b60ff1684526128a8565b80600114156127e9576127df6127bb84612bce565b60208501526128a8565b8060021415612816576128036127fe84612bce565b612cd1565b6001600160a01b031660408501526128a8565b80600314156128355761282b6127bb84612bce565b60608501526128a8565b80600414156128545761284a6127bb84612bce565b60808501526128a8565b8060051415612876576128696127bb84612bce565b60ff1660a08501526128a8565b80600614156128a35761288b6127bb84612bce565b67ffffffffffffffff1660c0850152600191506128a8565b6128b0565b60010161279c565b5091935090915050915091565b6000818484111561294c5760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b838110156129115781810151838201526020016128f9565b50505050905090810190601f16801561293e5780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b505050900390565b815160009083906008108061296a575080516003115b1561297957600091505061145f565b604080516020808252818301909252606091602082018180368337019050509050836020820152808251815181106129ad57fe5b6020910101516001600160f81b031916602d60f81b146129d25760009250505061145f565b600160005b8351811015612a32578281815181106129ec57fe5b602001015160f81c60f81b6001600160f81b031916848281518110612a0d57fe5b01602001516001600160f81b03191614612a2a5760009150612a32565b6001016129d7565b5095945050505050565b606061145f612a4a83612ceb565b612dd1565b6060815160001415612a705750604080516000815260208101909152610bb1565b606082600081518110612a7f57fe5b602002602001015190506000600190505b8351811015612ac057612ab682858381518110612aa957fe5b6020026020010151612e23565b9150600101612a90565b506121e0612ad3825160c060ff16612ea0565b82612e23565b60008183612b285760405162461bcd60e51b815260206004820181815
28351602484015283519092839260449091019190850190808383600083156129115781810151838201526020016128f9565b506000838581612b3457fe5b0495945050505050565b612b466131db565b506040805180820190915281518152602082810190820152919050565b612b6b6131bb565b612b7482612f98565b612b7d57600080fd5b6000612b8c8360200151612fd2565b60208085015160408051808201909152868152920190820152915050919050565b6000612bb76131db565b505080518051602091820151919092015191011190565b612bd66131db565b612bdf82612bad565b612be857600080fd5b60208201516000612bf882613035565b80830160209586015260408051808201909152908152938401919091525090919050565b805160009015801590612c3157508151602110155b612c3a57600080fd5b6000612c498360200151612fd2565b90508083600001511015612ca4576040805162461bcd60e51b815260206004820152601a60248201527f6c656e677468206973206c657373207468616e206f6666736574000000000000604482015290519081900360640190fd5b82516020808501518301805192849003929183101561219557506020919091036101000a90049392505050565b8051600090601514612ce257600080fd5b61145f82612c1c565b604080516020808252818301909252606091829190602082018180368337505050602081018490529050600067ffffffffffffffff198416612d2f57506018612d53565b6fffffffffffffffffffffffffffffffff198416612d4f57506010612d53565b5060005b6020811015612d8957818181518110612d6857fe5b01602001516001600160f81b03191615612d8157612d89565b600101612d53565b60008160200390506060816040519080825280601f01601f191660200182016040528015612dbe576020820181803683370190505b5080830196909652508452509192915050565b606081516001148015612e035750607f60f81b82600081518110612df157fe5b01602001516001600160f81b03191611155b15612e0f575080610bb1565b61145f612e218351608060ff16612ea0565b835b6060806040519050835180825260208201818101602087015b81831015612e54578051835260209283019201612e3c565b50855184518101855292509050808201602086015b81831015612e81578051835260209283019201612e69565b508651929092011591909101601f01601f191660405250905092915050565b6060680100000000000000008310612ef0576040805162461bcd60e51b815260206004820152600e60248201526d696e70757420746f6f206c6f6e67609
01b604482015290519081900360640190fd5b60408051600180825281830190925260609160208201818036833701905050905060378411612f4a5782840160f81b81600081518110612f2c57fe5b60200101906001600160f81b031916908160001a905350905061145f565b6060612f5585612ceb565b90508381510160370160f81b82600081518110612f6e57fe5b60200101906001600160f81b031916908160001a905350612f8f8282612e23565b95945050505050565b8051600090612fa957506000610bb1565b6020820151805160001a9060c0821015612fc857600092505050610bb1565b5060019392505050565b8051600090811a6080811015612fec576000915050610bb1565b60b8811080613007575060c08110801590613007575060f881105b15613016576001915050610bb1565b60c081101561302a5760b519019050610bb1565b60f519019050610bb1565b80516000908190811a60808110156130505760019150613161565b60b881101561306557607e1981019150613161565b60c08110156130df57600060b78203600186019550806020036101000a8651049150600181018201935050808310156130d9576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b50613161565b60f88110156130f45760be1981019150613161565b600060f78203600186019550806020036101000a86510491506001810182019350508083101561315f576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b505b5092915050565b6040805160e081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c081019190915290565b604080518082019091526000808252602082015290565b60405180604001604052806131ce6131db565b8152602001600081525090565b60405180604001604052806000815260200160008152509056fe6f6e6c79206265703230206f776e65722063616e20617070726f766520746869732062696e64207265717565737472656c6179466565206d757374206265204e202a203165313020616e642067726561746572207468616e206d696e6952656c6179466565636f6e74616374206164647265737320646f65736e277420657175616c20746f2074686520636f6e7472616374206164647265737320696e2062696e642072657175657374746865206d6573736167652073656e646572206d7573742062652063726f737320636861696e2
0636f6e7472616374a264697066735822122030cc6c250f37ad9452c0933399bf3f460a19215bce5c10c377f100761098776a64736f6c63430006040033" + }, + "0x0000000000000000000000000000000000002000": { + "balance": "0x0", + "code": "0x608060405234801561001057600080fd5b50600436106102485760003560e01c8063863fe4ab1161013b578063c81b1662116100b8578063e3b048051161007c578063e3b048051461072c578063f7a251d71461074c578063f9a2bbc7146107c4578063fc3e5908146107cc578063fd6a6879146107d457610248565b8063c81b1662146106dd578063d31f968d146106e5578063d76a867514610714578063dc927faf1461071c578063e1c7392a1461072457610248565b8063a78abc16116100ff578063a78abc16146105d3578063ab51bb96146105db578063ac431751146105e3578063b0355f5b146103ff578063c27cdcfb146106a157610248565b8063863fe4ab146105b35780638cc8f561146104b657806396713da9146105bb5780639dc09262146105c3578063a1a11bf5146105cb57610248565b8063493279b1116101c957806370fd5bad1161018d57806370fd5bad146104b657806374f079b8146104be57806375d47a0a146104c65780637942fd05146104ce57806384013b6a146104d657610248565b8063493279b11461045f5780634bf6c8821461047e57806351e80672146104865780636e47a51a1461048e5780636e47b482146104ae57610248565b8063308325f411610210578063308325f4146102cf5780633bdc47a6146102d75780633dffc387146103ff578063422f90501461040757806343756e5c1461043b57610248565b806305e682581461024d5780630bee7a671461026b57806314b3023b1461028c57806322556cdc146102a65780632ff32aea146102ae575b600080fd5b6102556107dc565b6040805160ff9092168252519081900360200190f35b6102736107e1565b6040805163ffffffff9092168252519081900360200190f35b6102946107e6565b60408051918252519081900360200190f35b6102946107ec565b6102b66107f1565b60408051600792830b90920b8252519081900360200190f35b6102946107fa565b61038a600480360360608110156102ed57600080fd5b60ff82351691602081013591810190606081016040820135600160201b81111561031657600080fd5b82018360208201111561032857600080fd5b803590602001918460018302840111600160201b8311171561034957600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509
29550610800945050505050565b6040805160208082528351818301528351919283929083019185019080838360005b838110156103c45781810151838201526020016103ac565b50505050905090810190601f1680156103f15780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b610255610876565b6104276004803603602081101561041d57600080fd5b503560ff1661087b565b604080519115158252519081900360200190f35b610443610890565b604080516001600160a01b039092168252519081900360200190f35b610467610896565b6040805161ffff9092168252519081900360200190f35b61025561089b565b6104436108a0565b610443600480360360208110156104a457600080fd5b503560ff166108a6565b6104436108c1565b6102556108c7565b6102946108cc565b6104436108d2565b6102556108d8565b6105b1600480360360a08110156104ec57600080fd5b810190602081018135600160201b81111561050657600080fd5b82018360208201111561051857600080fd5b803590602001918460018302840111600160201b8311171561053957600080fd5b919390929091602081019035600160201b81111561055657600080fd5b82018360208201111561056857600080fd5b803590602001918460018302840111600160201b8311171561058957600080fd5b919350915080356001600160401b03908116916020810135909116906040013560ff166108dd565b005b610294611a8d565b610255611a95565b610443611a9a565b610443611aa0565b610427611aa6565b6102736107dc565b6105b1600480360360408110156105f957600080fd5b810190602081018135600160201b81111561061357600080fd5b82018360208201111561062557600080fd5b803590602001918460018302840111600160201b8311171561064657600080fd5b919390929091602081019035600160201b81111561066357600080fd5b82018360208201111561067557600080fd5b803590602001918460018302840111600160201b8311171561069657600080fd5b509092509050611aaf565b6106c1600480360360208110156106b757600080fd5b503560ff16612051565b604080516001600160401b039092168252519081900360200190f35b61044361206c565b610427600480360360408110156106fb57600080fd5b5080356001600160a01b0316906020013560ff16612072565b61038a612092565b6104436120b1565b6105b16120b7565b6106c16004803603602081101561074257600080fd5b503560ff1661246e565b6105b16004803603606081101561076257600080fd5b60ff8
235169190810190604081016020820135600160201b81111561078657600080fd5b82018360208201111561079857600080fd5b803590602001918460018302840111600160201b831117156107b957600080fd5b919350915035612489565b6104436125da565b6102556125e0565b6104436125e5565b600081565b606481565b60015481565b603281565b60045460070b81565b60025481565b60606000825160210190506060816040519080825280601f01601f191660200182016040528015610838576020820181803683370190505b506021810186905260018101879052828152905060418101600061085b866125eb565b50905061086a818388516125f5565b50909695505050505050565b600181565b60096020526000908152604090205460ff1681565b61100181565b603881565b600881565b61200081565b6005602052600090815260409020546001600160a01b031681565b61100581565b600281565b60035481565b61100881565b600b81565b60005460ff16610930576040805162461bcd60e51b81526020600482015260196024820152781d1a194818dbdb9d1c9858dd081b9bdd081a5b9a5d081e595d603a1b604482015290519081900360640190fd5b60408051630a83aaa960e31b815233600482015290516110069163541d5548916024808301926020929190829003018186803b15801561096f57600080fd5b505afa158015610983573d6000803e3d6000fd5b505050506040513d602081101561099957600080fd5b50516109ec576040805162461bcd60e51b815260206004820152601f60248201527f746865206d73672073656e646572206973206e6f7420612072656c6179657200604482015290519081900360640190fd5b60ff8116600090815260086020526040902054829082906001600160401b039081169083168114610a5c576040805162461bcd60e51b815260206004820152601560248201527439b2b8bab2b731b2903737ba1034b71037b93232b960591b604482015290519081900360640190fd5b60ff8216600090815260086020908152604091829020805467ffffffffffffffff1916600185016001600160401b039081169190911790915582516337d7f9c160e21b81529089166004820152915188926110039263df5fe70492602480840193829003018186803b158015610ad157600080fd5b505afa158015610ae5573d6000803e3d6000fd5b505050506040513d6020811015610afb57600080fd5b5051610b385760405162461bcd60e51b8152600401808060200182810382526023815260200180612bcc6023913960400191505060405180910390fd5b60ff8516600090815260056020526040902054859
06001600160a01b0316610ba7576040805162461bcd60e51b815260206004820152601860248201527f6368616e6e656c206973206e6f7420737570706f727465640000000000000000604482015290519081900360640190fd5b60608c8c8080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690508083019250505050505050905060608b8b8080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805163cba510a960e01b81526001600160401b038f1660048201529051939450610cce93611003935063cba510a992506024808301926020929190829003018186803b158015610c7757600080fd5b505afa158015610c8b573d6000803e3d6000fd5b505050506040513d6020811015610ca157600080fd5b505160408051808201909152600381526269626360e81b6020820152610cc78c8c612636565b858561267d565b610d16576040805162461bcd60e51b815260206004820152601460248201527334b73b30b634b21036b2b935b63290383937b7b360611b604482015290519081900360640190fd5b60408051631bb5062960e31b81526001600160401b038c16600482015290516000916110039163dda8314891602480820192602092909190829003018186803b158015610d6257600080fd5b505afa158015610d76573d6000803e3d6000fd5b505050506040513d6020811015610d8c57600080fd5b5051905088600080806060610da08861277a565b935093509350935083610e61578460ff168f6001600160401b03167ff7b2e42d694eb1100184aae86d4245d9e46966100b1dc7e723275b98326854ac8a6040518080602001828103825283818151815260200191508051906020019080838360005b83811015610e1a578181015183820152602001610e02565b50505050905090810190601f168015610e475780820380516001836020036101000a031916815260200191505b509250505060405180910390a35050505050505050611a7f565b8460ff168f6001600160401b03167f36afdaf439a8f43fe72135135d804ae620b37a474f0943b5b85f6788312cad4085604051808260ff1660ff16815260200191505060405180910390a360ff83166113ea5760ff85166000818152600560209081526040808320548151631182b87560e01b815260048101958652602481019283528651604482015286516001600160a01b03909216958695631182b875958d958a9593949093606490910192918601918190849084905b83811015610f32578181015183820152602001610f1a565b50505
050905090810190601f168015610f5f5780820380516001836020036101000a031916815260200191505b509350505050600060405180830381600087803b158015610f7f57600080fd5b505af192505050801561106357506040513d6000823e601f3d908101601f191682016040526020811015610fb257600080fd5b8101908080516040519392919084600160201b821115610fd157600080fd5b908301906020820185811115610fe657600080fd5b8251600160201b811182820188101715610fff57600080fd5b82525081516020918201929091019080838360005b8381101561102c578181015183820152602001611014565b50505050905090810190601f1680156110595780820380516001836020036101000a031916815260200191505b5060405250505060015b611375576040516000815260443d101561107f5750600061111a565b60046000803e60005160e01c6308c379a081146110a057600091505061111a565b60043d036004833e81513d60248201116001600160401b03821117156110cb5760009250505061111a565b80830180516001600160401b038111156110ec57600094505050505061111a565b8060208301013d860181111561110a5760009550505050505061111a565b601f01601f191660405250925050505b806111255750611237565b60ff871660009081526007602052604081205461115c916001600160401b039091169089906111579060029088610800565b61282a565b60ff8716600090815260076020908152604080832080546001600160401b038082166001011667ffffffffffffffff19909116179055805182815284518184015284516001600160a01b038716947ff91a8f63e5b3e0e89e5f93e1915a7805f3c52d9a73b3c09769785c2c7bf87acf948794849390840192918601918190849084905b838110156111f75781810151838201526020016111df565b50505050905090810190601f1680156112245780820380516001836020036101000a031916815260200191505b509250505060405180910390a250611370565b3d808015611261576040519150601f19603f3d011682016040523d82523d6000602084013e611266565b606091505b5060ff8716600090815260076020526040812054611299916001600160401b039091169089906111579060029088610800565b60ff8716600090815260076020908152604080832080546001600160401b038082166001011667ffffffffffffffff19909116179055805182815284518184015284516001600160a01b038716947f63ac299d6332d1cc4e61b81e59bc00c0ac7c798addadf33840f1307cd2977351948794849390840192918601918190849084905b8
381101561133457818101518382015260200161131c565b50505050905090810190601f1680156113615780820380516001836020036101000a031916815260200191505b509250505060405180910390a2505b6113e4565b8051156113e25760ff87166000908152600760205260408120546113ae916001600160401b039091169089906111579060019086610800565b60ff8716600090815260076020526040902080546001600160401b038082166001011667ffffffffffffffff199091161790555b505b506119b8565b60ff83166001141561168e5760ff8516600081815260056020908152604080832054815163831d65d160e01b815260048101958652602481019283528651604482015286516001600160a01b0390921695869563831d65d1958d958a9593949093606490910192918601918190849084905b8381101561147457818101518382015260200161145c565b50505050905090810190601f1680156114a15780820380516001836020036101000a031916815260200191505b509350505050600060405180830381600087803b1580156114c157600080fd5b505af19250505080156114d2575060015b6113e4576040516000815260443d10156114ee57506000611589565b60046000803e60005160e01c6308c379a0811461150f576000915050611589565b60043d036004833e81513d60248201116001600160401b038211171561153a57600092505050611589565b80830180516001600160401b0381111561155b576000945050505050611589565b8060208301013d860181111561157957600095505050505050611589565b601f01601f191660405250925050505b8061159457506115f9565b60408051602080825283518183015283516001600160a01b038616937ff91a8f63e5b3e0e89e5f93e1915a7805f3c52d9a73b3c09769785c2c7bf87acf93869390928392830191850190808383600083156111f75781810151838201526020016111df565b3d808015611623576040519150601f19603f3d011682016040523d82523d6000602084013e611628565b606091505b5060408051602080825283518183015283516001600160a01b038616937f63ac299d6332d1cc4e61b81e59bc00c0ac7c798addadf33840f1307cd2977351938693909283928301918501908083836000831561133457818101518382015260200161131c565b60ff8316600214156119b85760ff8516600081815260056020908152604080832054815163c8509d8160e01b815260048101958652602481019283528651604482015286516001600160a01b0390921695869563c8509d81958d958a9593949093606490910192918601918190849084905b838110156
11718578181015183820152602001611700565b50505050905090810190601f1680156117455780820380516001836020036101000a031916815260200191505b509350505050600060405180830381600087803b15801561176557600080fd5b505af1925050508015611776575060015b6119b6576040516000815260443d10156117925750600061182d565b60046000803e60005160e01c6308c379a081146117b357600091505061182d565b60043d036004833e81513d60248201116001600160401b03821117156117de5760009250505061182d565b80830180516001600160401b038111156117ff57600094505050505061182d565b8060208301013d860181111561181d5760009550505050505061182d565b601f01601f191660405250925050505b8061183857506118e1565b816001600160a01b03167ff91a8f63e5b3e0e89e5f93e1915a7805f3c52d9a73b3c09769785c2c7bf87acf826040518080602001828103825283818151815260200191508051906020019080838360005b838110156118a1578181015183820152602001611889565b50505050905090810190601f1680156118ce5780820380516001836020036101000a031916815260200191505b509250505060405180910390a2506119b6565b3d80801561190b576040519150601f19603f3d011682016040523d82523d6000602084013e611910565b606091505b50816001600160a01b03167f63ac299d6332d1cc4e61b81e59bc00c0ac7c798addadf33840f1307cd2977351826040518080602001828103825283818151815260200191508051906020019080838360005b8381101561197a578181015183820152602001611962565b50505050905090810190601f1680156119a75780820380516001836020036101000a031916815260200191505b509250505060405180910390a2505b505b60ff80861660009081526009602052604090205461100591636f93d2e69189913391879116806119ea575060ff881615155b604080516001600160e01b031960e088901b1681526001600160a01b039586166004820152939094166024840152604483019190915215156064820152905160848083019260209291908290030181600087803b158015611a4a57600080fd5b505af1158015611a5e573d6000803e3d6000fd5b505050506040513d6020811015611a7457600080fd5b505050505050505050505b505050505050505050505050565b630100380081565b600981565b61100781565b61100681565b60005460ff1681565b3361100714611aef5760405162461bcd60e51b815260040180806020018281038252602e815260200180612b22602e91396040019150506040518091039
0fd5b611b5884848080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050604080518082019091526012815271626174636853697a65466f724f7261636c6560701b602082015291506129809050565b15611bf357604080516020601f8401819004810282018101909252828152600091611b9b91858580838501838280828437600092019190915250612a6792505050565b90506127108111158015611bb05750600a8110155b611beb5760405162461bcd60e51b8152600401808060200182810382526032815260200180612b9a6032913960400191505060405180910390fd5b600155611fbf565b611c5c84848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152601281527118591913dc955c19185d1950da185b9b995b60721b602082015291506129809050565b15611de457606082828080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505082519293505060169091149050611cdf5760405162461bcd60e51b815260040180806020018281038252605a815260200180612ac8605a913960600191505060405180910390fd5b60018101516002820151601683015160ff82161590611cfd81612a6c565b611d4e576040805162461bcd60e51b815260206004820152601960248201527f61646472657373206973206e6f74206120636f6e747261637400000000000000604482015290519081900360640190fd5b60ff8416600081815260056020908152604080832080546001600160a01b0319166001600160a01b038716908117909155808452600683528184208585528352818420805460ff199081166001179091556009909352818420805490931687151517909255519092917f7e3b6af43092577ee20e60eaa1d9b114a7031305c895ee7dd3ffe17196d2e1e091a35050505050611fbf565b611e5184848080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050604080518082019091526016815275195b98589b1953dc911a5cd8589b1950da185b9b995b60521b602082015291506129809050565b15611f8257606082828080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505082519293505060029091149050611ed45760405162461bcd60e51b815260040180806020018281038252604a815260200180612b50604a913960600191505060405180910390fd5b6
00181810151600283015160ff80831660009081526005602052604090205492939192908316909114906001600160a01b03168015611f78576001600160a01b038116600090815260066020908152604080832060ff881680855290835292819020805460ff1916861515908117909155815190815290517fa3132e3f9819fbddc7f0ed6d38d7feef59aa95112090b7c592f5cb5bc4aa4adc929181900390910190a25b5050505050611fbf565b6040805162461bcd60e51b815260206004820152600d60248201526c756e6b6e6f776e20706172616d60981b604482015290519081900360640190fd5b7f6cdb0ac70ab7f2e2d035cca5be60d89906f2dede7648ddbd7402189c1eeed17a848484846040518080602001806020018381038352878782818152602001925080828437600083820152601f01601f191690910184810383528581526020019050858580828437600083820152604051601f909101601f19169092018290039850909650505050505050a150505050565b6008602052600090815260409020546001600160401b031681565b61100281565b600660209081526000928352604080842090915290825290205460ff1681565b6040518060400160405280600381526020016269626360e81b81525081565b61100381565b60005460ff161561210f576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e747261637420616c726561647920696e697400000000000000604482015290519081900360640190fd5b7f1471eb6eb2c5e789fc3de43f8ce62938c7d1836ec861730447e2ada8fd81017b80546001600160a01b0319908116611008179091557f92e85d02570a8092d09a6e3a57665bc3815a2699a4074001bf1ccabf660f5a36805460ff199081169091557fd8af288fc1c8680b4f4706064cf021e264efb6828fcaf7eb5ca36818eb365bcc8054821660019081179091557f89832631fb3c3307a103ba2c84ab569c64d6182a18893dcd163f0f1c2090733a805484166110049081179091557f6cde3cea4b3a3fb2488b2808bae7556f4a405e50f65e1794383bc026131b13c38054841690557f72e4efa1513b071517c6c74dba31b5934a81aa83cddd400e7081df5529c9943680548416831790557fa9bc9a3a348c357ba16b37005d7e6b3236198c0e939f4af8c5f19b8deeb8ebc08054851690911790557fc575c31fea594a6eb97c8e9d3f9caee4c16218c6ef37e923234c0fe9014a61e78054831690557f4e523af77f034e9810f1c94057f5e931fb3d16a51511a4c3add793617d18610580548316821790557ffb33122aa9f93cc639ebe80a7bc4784c11e6053dde89c6f4f7e268c6a623da1e80548416611
0001790557fc7694af312c4f286114180fd0ba6a52461fcee8a381636770b19a343af92538a80548316821790557f01112dd68e482ba8d68a7e828cff8b3abcea08eab88941953c180a7e650e9cd480548316821790557fc0a4a8be475dfebc377ebef2d7c4ff47656f572a08dd92b81017efcdba0febe1805484166110071790557f87e8a52529e8ece4ef759037313542a6429ff494a9fab9027fb79db90124eba680548316821790557f4c7666bbcb22d46469f7cc282f70764a7012dca2cce630ff8d83db9a9cdd48f080548316821790557f40f28f99a40bc9f6beea1013afdbc3cdcc689eb76b82c4de06c0acf1e1932ed58054909316611001179092557f0d9cf2cd531699eed8dd34e40ff2884a14a698c4898184fba85194e6f6772d248054821683179055600b60009081527f23f68c9bd22b8a93d06adabe17481c87c016bcbd20adc8bfd707a4d813a572176020527fdf0d5d05428057f5455c2dc8e810dd86d1e9350faa72f16bda8a45443c5b39328054831684179055603283556004805467ffffffffffffffff19166001600160401b031790556002819055600381905580549091169091179055565b6007602052600090815260409020546001600160401b031681565b60005460ff166124dc576040805162461bcd60e51b81526020600482015260196024820152781d1a194818dbdb9d1c9858dd081b9bdd081a5b9a5d081e595d603a1b604482015290519081900360640190fd5b33600090815260066020908152604080832060ff80891685529252909120548591166125395760405162461bcd60e51b8152600401808060200182810382526031815260200180612a976031913960400191505060405180910390fd5b60ff85166000908152600760209081526040808320548151601f88018490048402810184019092528682526001600160401b03169261259e9284928a9261115792909189918c908c908190840183828082843760009201919091525061080092505050565b60ff959095166000908152600760205260409020805467ffffffffffffffff191660019096016001600160401b03169590951790945550505050565b61100081565b600381565b61100481565b8051602090910191565b5b60208110612615578251825260209283019290910190601f19016125f6565b915181516020939093036101000a6000190180199091169216919091179052565b60408051600e808252818301909252606091630100380060ff851617918391602082018180368337505050600e818101969096526006810192909252509283525090919050565b60008561268c57506000612771565b606082518451865160800101016040519080825280601f016
01f1916602001820160405280156126c3576020820181803683370190505b50905060006126d182612a72565b6020808901518252019050866000806126e9896125eb565b80865260209095019490925090506127028285836125f5565b9283019261270f886125eb565b80865260209095019490925090506127288285836125f5565b9283018a81526020019261273b876125eb565b909250905061274b8285836125f5565b508351602001612759612a78565b60208183886065600019fa5051600114955050505050505b95945050505050565b600080600060606021855110156127aa575050604080516000808252602082019092529092508291508190612823565b600185015160218601518651604080516020198301808252601f19600119909401939093168101602001909152604189019392916060919080156127f5576020820181803683370190505b5090506000612803826125eb565b509050612815858260218d51036125f5565b506001975091955093509150505b9193509193565b600254431115612869576004805467ffffffffffffffff1981166001600160401b036001600793840b810190930b1617909155600355436002556128aa565b600380546001908101918290555410156128aa576004805467ffffffffffffffff1981166001600160401b036001600793840b810190930b16179091556003555b8160ff16836001600160401b0316600460009054906101000a900460070b6001600160401b03167f3a6e0fc61675aa2a100bcba0568368bb92bcec91c97673391074f11138f0cffe603885604051808361ffff1661ffff16815260200180602001828103825283818151815260200191508051906020019080838360005b83811015612940578181015183820152602001612928565b50505050905090810190601f16801561296d5780820380516001836020036101000a031916815260200191505b50935050505060405180910390a4505050565b6000816040516020018082805190602001908083835b602083106129b55780518252601f199092019160209182019101612996565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120836040516020018082805190602001908083835b60208310612a235780518252601f199092019160209182019101612a04565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012014905092915050565b015190565b3b151590565b60200190565b60405180602001604052806001906020820280368
33750919291505056fe74686520636f6e747261637420616e64206368616e6e656c2068617665206e6f74206265656e20726567697374657265646c656e677468206f662076616c756520666f72206164644f725570646174654368616e6e656c2073686f756c642062652032322c206368616e6e656c49643a697346726f6d53797374656d3a68616e646c657241646472657373746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e74726163746c656e677468206f662076616c756520666f7220656e61626c654f7244697361626c654368616e6e656c2073686f756c6420626520322c206368616e6e656c49643a6973456e61626c65746865206e6577426174636853697a65466f724f7261636c652073686f756c6420626520696e205b31302c2031303030305d6c6967687420636c69656e74206e6f742073796e632074686520626c6f636b20796574a264697066735822122083f6194f9a326fa5963aa39ffe11dcd28d1421a9f74fbe97a97f4853026e29ff64736f6c63430006040033" + }, + "b005741528b86F5952469d80A8614591E3c5B632": { + "balance": "0x1b1ae4d6e2ef500000" + }, + "446AA6E0DC65690403dF3F127750da1322941F3e": { + "balance": "0x1b1ae4d6e2ef500000" + } + }, + "number": "0x0", + "gasUsed": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } \ No newline at end of file diff --git a/examples/bsc-p2p/src/main.rs b/examples/bsc-p2p/src/main.rs new file mode 100644 index 0000000000000..558eed171c203 --- /dev/null +++ b/examples/bsc-p2p/src/main.rs @@ -0,0 +1,91 @@ +//! Example for how hook into the bsc p2p network +//! +//! Run with +//! +//! ```not_rust +//! cargo run -p bsc-p2p +//! ``` +//! +//! This launch the regular reth node overriding the engine api payload builder with our custom. +//! +//! 
Credits to: + +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +use chainspec::{boot_nodes, bsc_chain_spec}; +use reth_discv4::Discv4ConfigBuilder; +use reth_network::{NetworkConfig, NetworkEvent, NetworkEvents, NetworkManager}; +use reth_network_api::PeersInfo; +use reth_primitives::{ForkHash, ForkId}; +use reth_tracing::{ + tracing::info, tracing_subscriber::filter::LevelFilter, LayerInfo, LogFormat, RethTracer, + Tracer, +}; +use secp256k1::{rand, SecretKey}; +use std::{ + net::{Ipv4Addr, SocketAddr}, + time::Duration, +}; +use tokio_stream::StreamExt; + +pub mod chainspec; + +#[tokio::main] +async fn main() { + // The ECDSA private key used to create our enode identifier. + let secret_key = SecretKey::new(&mut rand::thread_rng()); + + let _ = RethTracer::new() + .with_stdout(LayerInfo::new( + LogFormat::Terminal, + LevelFilter::INFO.to_string(), + "".to_string(), + Some("always".to_string()), + )) + .init(); + + // The local address we want to bind to + let local_addr = SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 30303); + + // The network configuration + let mut net_cfg = NetworkConfig::builder(secret_key) + .chain_spec(bsc_chain_spec()) + .listener_addr(local_addr) + .build_with_noop_provider() + .set_discovery_v4( + Discv4ConfigBuilder::default() + .add_boot_nodes(boot_nodes()) + // Set Discv4 lookup interval to 1 second + .lookup_interval(Duration::from_secs(1)) + .build(), + ); + + // latest BSC forkId, we need to override this to allow connections from BSC nodes + let fork_id = ForkId { hash: ForkHash([0x07, 0xb5, 0x43, 0x28]), next: 0 }; + net_cfg.fork_filter.set_current_fork_id(fork_id); + let net_manager = NetworkManager::new(net_cfg).await.unwrap(); + + // The network handle is our entrypoint into the network. 
+ let net_handle = net_manager.handle().clone(); + let mut events = net_handle.event_listener(); + + // NetworkManager is a long running task, let's spawn it + tokio::spawn(net_manager); + info!("Looking for BSC peers..."); + + while let Some(evt) = events.next().await { + // For the sake of the example we only print the session established event + // with the chain specific details + match evt { + NetworkEvent::SessionEstablished { status, client_version, peer_id, .. } => { + info!(peers=%net_handle.num_connected_peers() , %peer_id, chain = %status.chain, ?client_version, "Session established with a new peer."); + } + NetworkEvent::SessionClosed { peer_id, reason } => { + info!(peers=%net_handle.num_connected_peers() , %peer_id, ?reason, "Session closed."); + } + + _ => {} + } + } + // We will be disconnected from peers since we are not able to answer to network requests +} From c34b31ef5345695eecf1ef0fbb7441a759dfa9a4 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 22 May 2024 15:56:50 +0200 Subject: [PATCH 585/700] chore(trie): `PrefixSet::iter` (#8343) --- crates/trie/src/prefix_set/mod.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/trie/src/prefix_set/mod.rs b/crates/trie/src/prefix_set/mod.rs index b556dd3790752..32fdc68c812d7 100644 --- a/crates/trie/src/prefix_set/mod.rs +++ b/crates/trie/src/prefix_set/mod.rs @@ -161,6 +161,11 @@ impl PrefixSet { false } + /// Returns an iterator over reference to _all_ nibbles regardless of cursor position. + pub fn iter(&self) -> core::slice::Iter<'_, Nibbles> { + self.keys.iter() + } + /// Returns the number of elements in the set. 
pub fn len(&self) -> usize { self.keys.len() From ea2c31375b166ce3306da973dc6a2d56e28f685c Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 22 May 2024 16:34:32 +0200 Subject: [PATCH 586/700] dep: bump `alloy-trie` to 0.4.1 (#8344) --- Cargo.lock | 7 ++++--- Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6b944ab54d216..f8f92caf1d54e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -631,9 +631,9 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d55bd16fdb7ff4bd74cc4c878eeac7e8a27c0d7ba9df4ab58d9310aaafb62d43" +checksum = "03704f265cbbb943b117ecb5055fd46e8f41e7dc8a58b1aed20bcd40ace38c15" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -645,6 +645,7 @@ dependencies = [ "proptest", "proptest-derive", "serde", + "smallvec", "tracing", ] @@ -4566,7 +4567,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-targets 0.52.5", + "windows-targets 0.48.5", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index ebf86a15fcbca..bf7ea50bde171 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -297,7 +297,7 @@ alloy-primitives = "0.7.2" alloy-dyn-abi = "0.7.2" alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" -alloy-trie = "0.4.0" +alloy-trie = "0.4" alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } From e2a5857c2048060d1b5929b5888ec3f4d31bfdb6 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 22 May 2024 17:33:05 +0200 Subject: [PATCH 587/700] docs: add panic comments in from_compact() (#8346) --- crates/primitives/src/account.rs | 4 ++++ 
crates/primitives/src/transaction/mod.rs | 5 +++++ crates/primitives/src/trie/hash_builder/value.rs | 4 ++++ 3 files changed, 13 insertions(+) diff --git a/crates/primitives/src/account.rs b/crates/primitives/src/account.rs index bbaf420126634..78e796147f1f1 100644 --- a/crates/primitives/src/account.rs +++ b/crates/primitives/src/account.rs @@ -106,6 +106,10 @@ impl Compact for Bytecode { len + bytecode.len() + 4 } + // # Panics + // + // A panic will be triggered if a bytecode variant of 1 or greater than 2 is passed from the + // database. fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { let len = buf.read_u32::().expect("could not read bytecode length"); let bytes = Bytes::from(buf.copy_to_bytes(len as usize)); diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 2201b5f0d42a3..fd9af4631170f 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -644,6 +644,11 @@ impl Compact for Transaction { // For backwards compatibility purposes, only 2 bits of the type are encoded in the identifier // parameter. In the case of a 3, the full transaction type is read from the buffer as a // single byte. + // + // # Panics + // + // A panic will be triggered if an identifier larger than 3 is passed from the database. For + // optimism a identifier with value 126 is allowed. fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { match identifier { 0 => { diff --git a/crates/primitives/src/trie/hash_builder/value.rs b/crates/primitives/src/trie/hash_builder/value.rs index a829f85175e8c..1397f5756aa49 100644 --- a/crates/primitives/src/trie/hash_builder/value.rs +++ b/crates/primitives/src/trie/hash_builder/value.rs @@ -23,6 +23,10 @@ impl Compact for StoredHashBuilderValue { } } + // # Panics + // + // A panic will be triggered if a HashBuilderValue variant greater than 1 is passed from the + // database. 
fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { match buf.get_u8() { 0 => { From 90713300bf61daba6b58c1560da2df69c54ca048 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 22 May 2024 18:11:30 +0200 Subject: [PATCH 588/700] docs: add warning notes about using NippyJar and Compact encoding formats with untrusted data (#8345) --- README.md | 6 +++++- crates/storage/codecs/src/lib.rs | 4 ++++ crates/storage/nippy-jar/src/lib.rs | 4 ++++ 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 47d8337126d56..cc720c9efa053 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,7 @@ Reth is performant, feature-complete, [Cancun-ready](https://paradigmxyz.github. We actively recommend professional node operators to switch to Reth in production for performance and cost reasons in use cases where high performance with great margins is required such as RPC, MEV, Indexing, Simulations, and P2P activities. -While we are aware of parties running Reth staking nodes in production, we do *not* encourage usage in production staking environments by non-professionals until our audits are done, and the 1.0 version of Reth is released, but we are available to support without warranty or liability. +While we are aware of parties running Reth staking nodes in production, we do *not* encourage usage in production staking environments by non-professionals until our audits are done, and the 1.0 version of Reth is released, but we are available to support without warranty or liability. More historical context below: * We are releasing 1.0 "production-ready" stable Reth once our Reth & Revm audits are done. ETA ~May 2024. 
@@ -155,5 +155,9 @@ None of this would have been possible without them, so big shoutout to the teams - [Erigon](https://github.com/ledgerwatch/erigon) (fka Turbo-Geth): Erigon pioneered the ["Staged Sync" architecture](https://erigon.substack.com/p/erigon-stage-sync-and-control-flows) that Reth is using, as well as [introduced MDBX](https://github.com/ledgerwatch/erigon/wiki/Choice-of-storage-engine) as the database of choice. We thank Erigon for pushing the state of the art research on the performance limits of Ethereum nodes. - [Akula](https://github.com/akula-bft/akula/): Reth uses forks of the Apache versions of Akula's [MDBX Bindings](https://github.com/paradigmxyz/reth/pull/132), [FastRLP](https://github.com/paradigmxyz/reth/pull/63) and [ECIES](https://github.com/paradigmxyz/reth/pull/80) . Given that these packages were already released under the Apache License, and they implement standardized solutions, we decided not to reimplement them to iterate faster. We thank the Akula team for their contributions to the Rust Ethereum ecosystem and for publishing these packages. +## Warning + +The `NippyJar` and `Compact` encoding formats and their implementations are designed for storing and retrieving data internally. They are not hardened to safely read potentially malicious data. + [book]: https://paradigmxyz.github.io/reth/ [tg-url]: https://t.me/paradigm_reth diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 9dcef12730da2..79f57991906f3 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -1,5 +1,9 @@ //! Compact codec. //! +//! *Warning*: The `Compact` encoding format and its implementations are +//! designed for storing and retrieving data internally. They are not hardened +//! to safely read potentially malicious data. +//! //! ## Feature Flags //! //! - `alloy`: [Compact] implementation for various alloy types. 
diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index 435e91e877d4d..ad2eb19a8db7c 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -1,4 +1,8 @@ //! Immutable data store format. +//! +//! *Warning*: The `NippyJar` encoding format and its implementations are +//! designed for storing and retrieving data internally. They are not hardened +//! to safely read potentially malicious data. #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", From f45ca74772338a8ea9c3b18ce9e6cb3caaad7515 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 22 May 2024 18:20:14 +0100 Subject: [PATCH 589/700] refactor(consensus, evm): move post-execution validation to consensus (#8321) --- Cargo.lock | 4 +- .../src/commands/debug_cmd/build_block.rs | 2 +- bin/reth/src/commands/debug_cmd/merkle.rs | 2 +- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/blockchain-tree/src/chain.rs | 3 + crates/config/src/config.rs | 2 +- crates/consensus/auto-seal/src/lib.rs | 39 +++----- crates/consensus/common/src/validation.rs | 10 +-- crates/consensus/consensus/Cargo.toml | 2 +- crates/consensus/consensus/src/lib.rs | 35 +++++++- crates/consensus/consensus/src/test_utils.rs | 16 +++- crates/e2e-test-utils/src/payload.rs | 4 +- crates/ethereum/consensus/Cargo.toml | 2 +- crates/ethereum/consensus/src/lib.rs | 28 ++++-- crates/ethereum/consensus/src/validation.rs | 82 +++++++++++++++++ crates/ethereum/evm/Cargo.toml | 4 +- crates/ethereum/evm/src/execute.rs | 55 ++++-------- crates/ethereum/evm/src/lib.rs | 1 - crates/ethereum/evm/src/verify.rs | 53 ----------- crates/evm/src/either.rs | 6 +- crates/evm/src/execute.rs | 40 ++++++--- crates/evm/src/test_utils.rs | 2 +- .../interfaces/src/blockchain_tree/error.rs | 2 +- crates/interfaces/src/executor.rs | 23 ++--- crates/net/discv4/src/lib.rs | 2 +- crates/net/downloaders/src/bodies/bodies.rs | 22 ++--- 
crates/net/downloaders/src/bodies/request.rs | 2 +- crates/net/eth-wire/src/multiplex.rs | 4 +- crates/node-core/src/args/pruning.rs | 2 +- crates/node-core/src/init.rs | 2 +- crates/node-core/src/node_config.rs | 4 +- crates/node-core/src/utils.rs | 4 +- crates/optimism/consensus/Cargo.toml | 2 +- crates/optimism/consensus/src/lib.rs | 26 ++++-- crates/optimism/consensus/src/validation.rs | 90 +++++++++++++++++++ crates/optimism/evm/Cargo.toml | 5 ++ crates/optimism/evm/src/execute.rs | 60 ++++--------- crates/optimism/evm/src/lib.rs | 1 - crates/optimism/evm/src/verify.rs | 58 ------------ crates/payload/validator/src/lib.rs | 2 +- crates/primitives/src/alloy_compat.rs | 2 +- crates/primitives/src/compression/mod.rs | 2 +- crates/primitives/src/lib.rs | 4 +- crates/primitives/src/receipt.rs | 35 ++++---- crates/primitives/src/transaction/mod.rs | 2 +- crates/rpc/ipc/src/server/ipc.rs | 2 +- crates/rpc/ipc/src/server/mod.rs | 2 +- crates/rpc/rpc-types/src/mev.rs | 2 +- crates/rpc/rpc/src/otterscan.rs | 2 +- crates/stages/src/stages/execution.rs | 8 +- crates/storage/nippy-jar/src/lib.rs | 2 +- .../src/providers/static_file/writer.rs | 2 +- 52 files changed, 424 insertions(+), 346 deletions(-) create mode 100644 crates/ethereum/consensus/src/validation.rs delete mode 100644 crates/ethereum/evm/src/verify.rs create mode 100644 crates/optimism/consensus/src/validation.rs delete mode 100644 crates/optimism/evm/src/verify.rs diff --git a/Cargo.lock b/Cargo.lock index f8f92caf1d54e..6edf8be156442 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6973,20 +6973,22 @@ name = "reth-evm-ethereum" version = "0.2.0-beta.7" dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "reth-ethereum-consensus", "reth-evm", "reth-interfaces", "reth-primitives", "reth-revm", "revm-primitives", - "tracing", ] [[package]] name = "reth-evm-optimism" version = "0.2.0-beta.7" dependencies = [ + "reth-consensus-common", "reth-evm", "reth-interfaces", + 
"reth-optimism-consensus", "reth-primitives", "reth-provider", "reth-revm", diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 31585c2f6ea3b..7914ec7829dee 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -298,7 +298,7 @@ impl Command { consensus.validate_header_with_total_difficulty(block, U256::MAX)?; consensus.validate_header(block)?; - consensus.validate_block(block)?; + consensus.validate_block_pre_execution(block)?; let senders = block.senders().expect("sender recovery failed"); let block_with_senders = diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 40d79a85d5779..291788bad7579 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -197,7 +197,7 @@ impl Command { )), PruneModes::none(), ); - executor.execute_one((&sealed_block.clone().unseal(), td).into())?; + executor.execute_and_verify_one((&sealed_block.clone().unseal(), td).into())?; let BatchBlockExecutionOutput { bundle, receipts, first_block } = executor.finalize(); BundleStateWithReceipts::new(bundle, receipts, first_block).write_to_storage( provider_rw.tx_ref(), diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 2a0bfb8bae652..ef8fe65e72a97 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -730,7 +730,7 @@ where return Err(e) } - if let Err(e) = self.externals.consensus.validate_block(block) { + if let Err(e) = self.externals.consensus.validate_block_pre_execution(block) { error!(?block, "Failed to validate block {}: {e}", block.header.hash()); return Err(e) } diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index ce6487a060b93..9b3c52cf82cb8 100644 --- a/crates/blockchain-tree/src/chain.rs +++ 
b/crates/blockchain-tree/src/chain.rs @@ -210,8 +210,11 @@ impl AppendableChain { let executor = externals.executor_factory.executor(db); let block_hash = block.hash(); let block = block.unseal(); + let state = executor.execute((&block, U256::MAX).into())?; let BlockExecutionOutput { state, receipts, .. } = state; + externals.consensus.validate_block_post_execution(&block, &receipts)?; + let bundle_state = BundleStateWithReceipts::new( state, Receipts::from_block_receipt(receipts), diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index aa8b7ee09ab14..4215f89a438c7 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -57,7 +57,7 @@ impl Config { return Err(std::io::Error::new( std::io::ErrorKind::InvalidInput, format!("reth config file extension must be '{EXTENSION}'"), - )); + )) } confy::store_path(path, self).map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) } diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index e954108c8c408..f318b7adea46e 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -22,8 +22,9 @@ use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; use reth_primitives::{ constants::{EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT}, eip4844::calculate_excess_blob_gas, - proofs, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Header, - Receipts, SealedBlock, SealedHeader, TransactionSigned, Withdrawals, B256, U256, + proofs, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, + ChainSpec, Header, Receipt, Receipts, SealedBlock, SealedHeader, TransactionSigned, + Withdrawals, B256, U256, }; use reth_provider::{ BlockReaderIdExt, BundleStateWithReceipts, CanonStateNotificationSender, StateProviderFactory, @@ -84,7 +85,15 @@ impl Consensus for AutoSealConsensus { Ok(()) } - fn validate_block(&self, _block: &SealedBlock) -> Result<(), 
ConsensusError> { + fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { + Ok(()) + } + + fn validate_block_post_execution( + &self, + _block: &BlockWithSenders, + _receipts: &[Receipt], + ) -> Result<(), ConsensusError> { Ok(()) } } @@ -361,7 +370,7 @@ impl StorageInner { let header = self.build_header_template(&transactions, &ommers, withdrawals.as_ref(), chain_spec); - let mut block = Block { + let block = Block { header, body: transactions, ommers: ommers.clone(), @@ -376,27 +385,7 @@ impl StorageInner { provider.latest().map_err(BlockExecutionError::LatestBlock)?, ); - // TODO(mattsse): At this point we don't know certain fields of the header, so we first - // execute it and then update the header this can be improved by changing the executor - // input, for now we intercept the errors and retry - loop { - match executor.executor(&mut db).execute((&block, U256::ZERO).into()) { - Err(BlockExecutionError::Validation(BlockValidationError::BlockGasUsed { - gas, - .. - })) => { - block.block.header.gas_used = gas.got; - } - Err(BlockExecutionError::Validation(BlockValidationError::ReceiptRootDiff( - err, - ))) => { - block.block.header.receipts_root = err.got; - } - _ => break, - }; - } - - // now execute the block + // execute the block let BlockExecutionOutput { state, receipts, .. 
} = executor.executor(&mut db).execute((&block, U256::ZERO).into())?; let bundle_state = BundleStateWithReceipts::new( diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 8a3d9588e9246..ffa48e7715236 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -61,7 +61,7 @@ pub fn validate_header_standalone( /// - Compares the transactions root in the block header to the block body /// - Pre-execution transaction validation /// - (Optionally) Compares the receipts root in the block header to the block body -pub fn validate_block_standalone( +pub fn validate_block_pre_execution( block: &SealedBlock, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> { @@ -366,13 +366,13 @@ mod tests { // Single withdrawal let block = create_block_with_withdrawals(&[1]); - assert_eq!(validate_block_standalone(&block, &chain_spec), Ok(())); + assert_eq!(validate_block_pre_execution(&block, &chain_spec), Ok(())); // Multiple increasing withdrawals let block = create_block_with_withdrawals(&[1, 2, 3]); - assert_eq!(validate_block_standalone(&block, &chain_spec), Ok(())); + assert_eq!(validate_block_pre_execution(&block, &chain_spec), Ok(())); let block = create_block_with_withdrawals(&[5, 6, 7, 8, 9]); - assert_eq!(validate_block_standalone(&block, &chain_spec), Ok(())); + assert_eq!(validate_block_pre_execution(&block, &chain_spec), Ok(())); let (_, parent) = mock_block(); // Withdrawal index should be the last withdrawal index + 1 @@ -428,7 +428,7 @@ mod tests { // validate blob, it should fail blob gas used validation assert_eq!( - validate_block_standalone(&block, &chain_spec), + validate_block_pre_execution(&block, &chain_spec), Err(ConsensusError::BlobGasUsedDiff(GotExpected { got: 1, expected: expected_blob_gas_used diff --git a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml index 308a16f2026ec..43264872e1f38 100644 --- 
a/crates/consensus/consensus/Cargo.toml +++ b/crates/consensus/consensus/Cargo.toml @@ -18,4 +18,4 @@ auto_impl.workspace = true thiserror.workspace = true [features] -test-utils = [] \ No newline at end of file +test-utils = [] diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index 2dee6b1245e20..46fce6d02ee00 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -9,8 +9,8 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use reth_primitives::{ - BlockHash, BlockNumber, GotExpected, GotExpectedBoxed, Header, HeaderValidationError, - InvalidTransactionError, SealedBlock, SealedHeader, B256, U256, + BlockHash, BlockNumber, BlockWithSenders, Bloom, GotExpected, GotExpectedBoxed, Header, + HeaderValidationError, InvalidTransactionError, Receipt, SealedBlock, SealedHeader, B256, U256, }; use std::fmt::Debug; @@ -83,7 +83,19 @@ pub trait Consensus: Debug + Send + Sync { /// **This should not be called for the genesis block**. /// /// Note: validating blocks does not include other validations of the Consensus - fn validate_block(&self, block: &SealedBlock) -> Result<(), ConsensusError>; + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError>; + + /// Validate a block considering world state, i.e. things that can not be checked before + /// execution. + /// + /// See the Yellow Paper sections 4.3.2 "Holistic Validity". + /// + /// Note: validating blocks does not include other validations of the Consensus + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + receipts: &[Receipt], + ) -> Result<(), ConsensusError>; } /// Consensus Errors @@ -98,6 +110,15 @@ pub enum ConsensusError { gas_limit: u64, }, + /// Error when block gas used doesn't match expected value + #[error("block gas used mismatch: {gas}; gas spent by each transaction: {gas_spent_by_tx:?}")] + BlockGasUsed { + /// The gas diff. 
+ gas: GotExpected, + /// Gas spent by each transaction + gas_spent_by_tx: Vec<(u64, u64)>, + }, + /// Error when the hash of block ommer is different from the expected hash. #[error("mismatched block ommer hash: {0}")] BodyOmmersHashDiff(GotExpectedBoxed), @@ -111,6 +132,14 @@ pub enum ConsensusError { #[error("mismatched block transaction root: {0}")] BodyTransactionRootDiff(GotExpectedBoxed), + /// Error when the receipt root in the block is different from the expected receipt root. + #[error("receipt root mismatch: {0}")] + BodyReceiptRootDiff(GotExpectedBoxed), + + /// Error when header bloom filter is different from the expected bloom filter. + #[error("header bloom filter mismatch: {0}")] + BodyBloomLogDiff(GotExpectedBoxed), + /// Error when the withdrawals root in the block is different from the expected withdrawals /// root. #[error("mismatched block withdrawals root: {0}")] diff --git a/crates/consensus/consensus/src/test_utils.rs b/crates/consensus/consensus/src/test_utils.rs index a8655661b8c8b..a616d4f43b897 100644 --- a/crates/consensus/consensus/src/test_utils.rs +++ b/crates/consensus/consensus/src/test_utils.rs @@ -1,5 +1,5 @@ use crate::{Consensus, ConsensusError}; -use reth_primitives::{Header, SealedBlock, SealedHeader, U256}; +use reth_primitives::{BlockWithSenders, Header, Receipt, SealedBlock, SealedHeader, U256}; use std::sync::atomic::{AtomicBool, Ordering}; /// Consensus engine implementation for testing @@ -60,7 +60,19 @@ impl Consensus for TestConsensus { } } - fn validate_block(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { + fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { + if self.fail_validation() { + Err(ConsensusError::BaseFeeMissing) + } else { + Ok(()) + } + } + + fn validate_block_post_execution( + &self, + _block: &BlockWithSenders, + _receipts: &[Receipt], + ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) } else { diff 
--git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index 47f4134d7fe83..828bc5f32c4f1 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -50,9 +50,9 @@ impl PayloadTestContext { let payload = self.payload_builder.best_payload(payload_id).await.unwrap().unwrap(); if payload.block().body.is_empty() { tokio::time::sleep(std::time::Duration::from_millis(20)).await; - continue; + continue } - break; + break } } diff --git a/crates/ethereum/consensus/Cargo.toml b/crates/ethereum/consensus/Cargo.toml index f3ff5d4d36e64..984fb1ec6fc6b 100644 --- a/crates/ethereum/consensus/Cargo.toml +++ b/crates/ethereum/consensus/Cargo.toml @@ -17,4 +17,4 @@ reth-primitives.workspace = true reth-consensus.workspace = true [features] -optimism = ["reth-primitives/optimism"] \ No newline at end of file +optimism = ["reth-primitives/optimism"] diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index ed283f0262a86..0264089475ab2 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -9,12 +9,18 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use reth_consensus::{Consensus, ConsensusError}; -use reth_consensus_common::validation; +use reth_consensus_common::validation::{ + validate_block_pre_execution, validate_header_extradata, validate_header_standalone, +}; use reth_primitives::{ - Chain, ChainSpec, Hardfork, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256, + BlockWithSenders, Chain, ChainSpec, Hardfork, Header, Receipt, SealedBlock, SealedHeader, + EMPTY_OMMER_ROOT_HASH, U256, }; use std::{sync::Arc, time::SystemTime}; +mod validation; +pub use validation::validate_block_post_execution; + /// Ethereum beacon consensus /// /// This consensus engine does basic checks as outlined in the execution specs. 
@@ -33,7 +39,7 @@ impl EthBeaconConsensus { impl Consensus for EthBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { - validation::validate_header_standalone(header, &self.chain_spec)?; + validate_header_standalone(header, &self.chain_spec)?; Ok(()) } @@ -87,7 +93,7 @@ impl Consensus for EthBeaconConsensus { // is greater than its parent timestamp. // validate header extradata for all networks post merge - validation::validate_header_extradata(header)?; + validate_header_extradata(header)?; // mixHash is used instead of difficulty inside EVM // https://eips.ethereum.org/EIPS/eip-4399#using-mixhash-field-instead-of-difficulty @@ -111,14 +117,22 @@ impl Consensus for EthBeaconConsensus { // * If the network is goerli pre-merge, ignore the extradata check, since we do not // support clique. Same goes for OP blocks below Bedrock. if self.chain_spec.chain != Chain::goerli() && !self.chain_spec.is_optimism() { - validation::validate_header_extradata(header)?; + validate_header_extradata(header)?; } } Ok(()) } - fn validate_block(&self, block: &SealedBlock) -> Result<(), ConsensusError> { - validation::validate_block_standalone(block, &self.chain_spec) + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { + validate_block_pre_execution(block, &self.chain_spec) + } + + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + receipts: &[Receipt], + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec, receipts) } } diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs new file mode 100644 index 0000000000000..30ff6fee26455 --- /dev/null +++ b/crates/ethereum/consensus/src/validation.rs @@ -0,0 +1,82 @@ +use reth_consensus::ConsensusError; +use reth_primitives::{ + gas_spent_by_transactions, BlockWithSenders, Bloom, ChainSpec, GotExpected, Receipt, + ReceiptWithBloom, B256, +}; + +/// 
Validate a block with regard to execution results: +/// +/// - Compares the receipts root in the block header to the block body +/// - Compares the gas used in the block header to the actual gas usage after execution +pub fn validate_block_post_execution( + block: &BlockWithSenders, + chain_spec: &ChainSpec, + receipts: &[Receipt], +) -> Result<(), ConsensusError> { + // Before Byzantium, receipts contained state root that would mean that expensive + // operation as hashing that is required for state root got calculated in every + // transaction This was replaced with is_success flag. + // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 + if chain_spec.is_byzantium_active_at_block(block.header.number) { + verify_receipts(block.header.receipts_root, block.header.logs_bloom, receipts.iter())?; + } + + // Check if gas used matches the value set in header. + let cumulative_gas_used = + receipts.last().map(|receipt| receipt.cumulative_gas_used).unwrap_or(0); + if block.gas_used != cumulative_gas_used { + return Err(ConsensusError::BlockGasUsed { + gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, + gas_spent_by_tx: gas_spent_by_transactions(receipts), + }) + } + + Ok(()) +} + +/// Calculate the receipts root, and compare it against against the expected receipts root and logs +/// bloom. +fn verify_receipts<'a>( + expected_receipts_root: B256, + expected_logs_bloom: Bloom, + receipts: impl Iterator + Clone, +) -> Result<(), ConsensusError> { + // Calculate receipts root. + let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); + let receipts_root = reth_primitives::proofs::calculate_receipt_root(&receipts_with_bloom); + + // Create header log bloom. 
+ let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + + compare_receipts_root_and_logs_bloom( + receipts_root, + logs_bloom, + expected_receipts_root, + expected_logs_bloom, + )?; + + Ok(()) +} + +/// Compare the calculated receipts root with the expected receipts root, also compare +/// the calculated logs bloom with the expected logs bloom. +fn compare_receipts_root_and_logs_bloom( + calculated_receipts_root: B256, + calculated_logs_bloom: Bloom, + expected_receipts_root: B256, + expected_logs_bloom: Bloom, +) -> Result<(), ConsensusError> { + if calculated_receipts_root != expected_receipts_root { + return Err(ConsensusError::BodyReceiptRootDiff( + GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(), + )) + } + + if calculated_logs_bloom != expected_logs_bloom { + return Err(ConsensusError::BodyBloomLogDiff( + GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(), + )) + } + + Ok(()) +} diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index e9f8bc5ad317b..c4811b59f481b 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -16,13 +16,11 @@ reth-evm.workspace = true reth-primitives.workspace = true reth-revm.workspace = true reth-interfaces.workspace = true +reth-ethereum-consensus.workspace = true # Ethereum revm-primitives.workspace = true -# misc -tracing.workspace = true - [dev-dependencies] reth-revm = { workspace = true, features = ["test-utils"] } alloy-eips.workspace = true diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 15702ba7508d9..5addc45ac7d1b 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -2,9 +2,9 @@ use crate::{ dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, - verify::verify_receipts, EthEvmConfig, }; +use reth_ethereum_consensus::validate_block_post_execution; use reth_evm::{ execute::{ 
BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, @@ -17,8 +17,8 @@ use reth_interfaces::{ provider::ProviderError, }; use reth_primitives::{ - BlockNumber, BlockWithSenders, ChainSpec, GotExpected, Hardfork, Header, PruneModes, Receipt, - Receipts, Withdrawals, MAINNET, U256, + BlockNumber, BlockWithSenders, ChainSpec, Hardfork, Header, PruneModes, Receipt, Withdrawals, + MAINNET, U256, }; use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, @@ -31,7 +31,6 @@ use revm_primitives::{ BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, }; use std::sync::Arc; -use tracing::debug; /// Provides executors to execute regular ethereum blocks #[derive(Debug, Clone)] @@ -187,16 +186,6 @@ where } drop(evm); - // Check if gas used matches the value set in header. - if block.gas_used != cumulative_gas_used { - let receipts = Receipts::from_block_receipt(receipts); - return Err(BlockValidationError::BlockGasUsed { - gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, - gas_spent_by_tx: receipts.gas_spent_by_tx()?, - } - .into()) - } - Ok((receipts, cumulative_gas_used)) } } @@ -260,8 +249,8 @@ where /// /// Returns the receipts of the transactions in the block and the total gas used. /// - /// Returns an error if execution fails or receipt verification fails. - fn execute_and_verify( + /// Returns an error if execution fails. + fn execute_without_verification( &mut self, block: &BlockWithSenders, total_difficulty: U256, @@ -280,21 +269,6 @@ where // 3. apply post execution changes self.post_execution(block, total_difficulty)?; - // Before Byzantium, receipts contained state root that would mean that expensive - // operation as hashing that is required for state root got calculated in every - // transaction This was replaced with is_success flag. 
- // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 - if self.chain_spec().is_byzantium_active_at_block(block.header.number) { - if let Err(error) = verify_receipts( - block.header.receipts_root, - block.header.logs_bloom, - receipts.iter(), - ) { - debug!(target: "evm", %error, ?receipts, "receipts verification failed"); - return Err(error) - }; - } - Ok((receipts, gas_used)) } @@ -363,7 +337,7 @@ where /// State changes are committed to the database. fn execute(mut self, input: Self::Input<'_>) -> Result { let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, gas_used) = self.execute_and_verify(block, total_difficulty)?; + let (receipts, gas_used) = self.execute_without_verification(block, total_difficulty)?; // NOTE: we need to merge keep the reverts for the bundle retention self.state.merge_transitions(BundleRetention::Reverts); @@ -403,9 +377,12 @@ where type Output = BatchBlockExecutionOutput; type Error = BlockExecutionError; - fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { + fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, _gas_used) = self.executor.execute_and_verify(block, total_difficulty)?; + let (receipts, _gas_used) = + self.executor.execute_without_verification(block, total_difficulty)?; + + validate_block_post_execution(block, self.executor.chain_spec(), &receipts)?; // prepare the state according to the prune mode let retention = self.batch_record.bundle_retention(block.number); @@ -523,7 +500,7 @@ mod tests { // Now execute a block with the fixed header, ensure that it does not fail executor - .execute_and_verify( + .execute_without_verification( &BlockWithSenders { block: Block { header: header.clone(), @@ -634,7 +611,7 @@ mod tests { // attempt to execute an empty block with parent beacon block root, this should not fail executor - .execute_and_verify( + 
.execute_without_verification( &BlockWithSenders { block: Block { header, body: vec![], ommers: vec![], withdrawals: None }, senders: vec![], @@ -672,7 +649,7 @@ mod tests { // attempt to execute the genesis block with non-zero parent beacon block root, expect err header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); let _err = executor - .execute_one( + .execute_and_verify_one( ( &BlockWithSenders { block: Block { @@ -698,7 +675,7 @@ mod tests { // now try to process the genesis block again, this time ensuring that a system contract // call does not occur executor - .execute_one( + .execute_and_verify_one( ( &BlockWithSenders { block: Block { header, body: vec![], ommers: vec![], withdrawals: None }, @@ -752,7 +729,7 @@ mod tests { // Now execute a block with the fixed header, ensure that it does not fail executor - .execute_one( + .execute_and_verify_one( ( &BlockWithSenders { block: Block { diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 7799cf4107eed..9e5db6bc25de9 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -16,7 +16,6 @@ use reth_primitives::{ }; use reth_revm::{Database, EvmBuilder}; pub mod execute; -pub mod verify; /// Ethereum DAO hardfork state change data. pub mod dao_fork; diff --git a/crates/ethereum/evm/src/verify.rs b/crates/ethereum/evm/src/verify.rs deleted file mode 100644 index 6f552fe424224..0000000000000 --- a/crates/ethereum/evm/src/verify.rs +++ /dev/null @@ -1,53 +0,0 @@ -//! Helpers for verifying the receipts. - -use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; -use reth_primitives::{Bloom, GotExpected, Receipt, ReceiptWithBloom, B256}; - -/// Calculate the receipts root, and compare it against against the expected receipts root and logs -/// bloom. 
-pub fn verify_receipts<'a>( - expected_receipts_root: B256, - expected_logs_bloom: Bloom, - receipts: impl Iterator + Clone, -) -> Result<(), BlockExecutionError> { - // Calculate receipts root. - let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); - let receipts_root = reth_primitives::proofs::calculate_receipt_root(&receipts_with_bloom); - - // Create header log bloom. - let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); - - compare_receipts_root_and_logs_bloom( - receipts_root, - logs_bloom, - expected_receipts_root, - expected_logs_bloom, - )?; - - Ok(()) -} - -/// Compare the calculated receipts root with the expected receipts root, also compare -/// the calculated logs bloom with the expected logs bloom. -pub fn compare_receipts_root_and_logs_bloom( - calculated_receipts_root: B256, - calculated_logs_bloom: Bloom, - expected_receipts_root: B256, - expected_logs_bloom: Bloom, -) -> Result<(), BlockExecutionError> { - if calculated_receipts_root != expected_receipts_root { - return Err(BlockValidationError::ReceiptRootDiff( - GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(), - ) - .into()) - } - - if calculated_logs_bloom != expected_logs_bloom { - return Err(BlockValidationError::BloomLogDiff( - GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(), - ) - .into()) - } - - Ok(()) -} diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs index d1ae4ed78ff47..ae1c95461be8c 100644 --- a/crates/evm/src/either.rs +++ b/crates/evm/src/either.rs @@ -89,10 +89,10 @@ where type Output = BatchBlockExecutionOutput; type Error = BlockExecutionError; - fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { + fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { match self { - Either::Left(a) => a.execute_one(input), - Either::Right(b) => b.execute_one(input), + Either::Left(a) 
=> a.execute_and_verify_one(input), + Either::Right(b) => b.execute_and_verify_one(input), } } diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index e7ce09e798056..69351226868ec 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -5,8 +5,10 @@ use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt, Receip use revm::db::BundleState; use revm_primitives::db::Database; -/// A general purpose executor trait that executes on an input (e.g. blocks) and produces an output +/// A general purpose executor trait that executes an input (e.g. block) and produces an output /// (e.g. state changes and receipts). +/// +/// This executor does not validate the output, see [BatchExecutor] for that. pub trait Executor { /// The input type for the executor. type Input<'a>; @@ -17,12 +19,17 @@ pub trait Executor { /// Consumes the type and executes the block. /// - /// Returns the output of the block execution. + /// # Note + /// Execution happens without any validation of the output. To validate the output, use the + /// [BatchExecutor]. + /// + /// # Returns + /// The output of the block execution. fn execute(self, input: Self::Input<'_>) -> Result; } -/// A general purpose executor that can execute multiple inputs in sequence and keep track of the -/// state over the entire batch. +/// A general purpose executor that can execute multiple inputs in sequence, validate the outputs, +/// and keep track of the state over the entire batch. pub trait BatchExecutor { /// The input type for the executor. type Input<'a>; @@ -31,27 +38,34 @@ pub trait BatchExecutor { /// The error type returned by the executor. type Error; - /// Executes the next block in the batch and update the state internally. - fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error>; + /// Executes the next block in the batch, verifies the output and updates the state internally. 
+ fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error>; - /// Executes multiple inputs in the batch and update the state internally. - fn execute_many<'a, I>(&mut self, inputs: I) -> Result<(), Self::Error> + /// Executes multiple inputs in the batch, verifies the output, and updates the state + /// internally. + /// + /// This method is a convenience function for calling [`BatchExecutor::execute_and_verify_one`] + /// for each input. + fn execute_and_verify_many<'a, I>(&mut self, inputs: I) -> Result<(), Self::Error> where I: IntoIterator>, { for input in inputs { - self.execute_one(input)?; + self.execute_and_verify_one(input)?; } Ok(()) } - /// Executes the entire batch and return the final state. - fn execute_batch<'a, I>(mut self, batch: I) -> Result + /// Executes the entire batch, verifies the output, and returns the final state. + /// + /// This method is a convenience function for calling [`BatchExecutor::execute_and_verify_many`] + /// and [`BatchExecutor::finalize`]. 
+ fn execute_and_verify_batch<'a, I>(mut self, batch: I) -> Result where I: IntoIterator>, Self: Sized, { - self.execute_many(batch)?; + self.execute_and_verify_many(batch)?; Ok(self.finalize()) } @@ -222,7 +236,7 @@ mod tests { type Output = BatchBlockExecutionOutput; type Error = BlockExecutionError; - fn execute_one(&mut self, _input: Self::Input<'_>) -> Result<(), Self::Error> { + fn execute_and_verify_one(&mut self, _input: Self::Input<'_>) -> Result<(), Self::Error> { Ok(()) } diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index e0ee4691704b2..8d5b526827407 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -64,7 +64,7 @@ impl BatchExecutor for MockExecutorProvider { type Output = BatchBlockExecutionOutput; type Error = BlockExecutionError; - fn execute_one(&mut self, _: Self::Input<'_>) -> Result<(), Self::Error> { + fn execute_and_verify_one(&mut self, _: Self::Input<'_>) -> Result<(), Self::Error> { Ok(()) } diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index a98d765014bae..379b9f141a708 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -297,7 +297,7 @@ impl InsertBlockErrorKind { // other execution errors that are considered internal errors InsertBlockErrorKind::Execution(err) => { match err { - BlockExecutionError::Validation(_) => { + BlockExecutionError::Validation(_) | BlockExecutionError::Consensus(_) => { // this is caused by an invalid block true } diff --git a/crates/interfaces/src/executor.rs b/crates/interfaces/src/executor.rs index 04b9832f092df..0620d032a9ba0 100644 --- a/crates/interfaces/src/executor.rs +++ b/crates/interfaces/src/executor.rs @@ -1,8 +1,6 @@ use crate::{provider::ProviderError, trie::StateRootError}; -use reth_primitives::{ - revm_primitives::EVMError, BlockNumHash, Bloom, GotExpected, GotExpectedBoxed, - PruneSegmentError, B256, -}; +use 
reth_consensus::ConsensusError; +use reth_primitives::{revm_primitives::EVMError, BlockNumHash, PruneSegmentError, B256}; use thiserror::Error; /// Transaction validation errors @@ -23,12 +21,6 @@ pub enum BlockValidationError { /// Error when incrementing balance in post execution #[error("incrementing balance in post execution failed")] IncrementBalanceFailed, - /// Error when receipt root doesn't match expected value - #[error("receipt root mismatch: {0}")] - ReceiptRootDiff(GotExpectedBoxed), - /// Error when header bloom filter doesn't match expected value - #[error("header bloom filter mismatch: {0}")] - BloomLogDiff(GotExpectedBoxed), /// Error when the state root does not match the expected value. #[error(transparent)] StateRoot(#[from] StateRootError), @@ -40,14 +32,6 @@ pub enum BlockValidationError { /// The available block gas block_available_gas: u64, }, - /// Error when block gas used doesn't match expected value - #[error("block gas used mismatch: {gas}; gas spent by each transaction: {gas_spent_by_tx:?}")] - BlockGasUsed { - /// The gas diff. 
- gas: GotExpected, - /// Gas spent by each transaction - gas_spent_by_tx: Vec<(u64, u64)>, - }, /// Error for pre-merge block #[error("block {hash} is pre merge")] BlockPreMerge { @@ -88,6 +72,9 @@ pub enum BlockExecutionError { /// Pruning error, transparently wrapping `PruneSegmentError` #[error(transparent)] Pruning(#[from] PruneSegmentError), + /// Consensus error, transparently wrapping `ConsensusError` + #[error(transparent)] + Consensus(#[from] ConsensusError), /// Transaction error on revert with inner details #[error("transaction error on revert: {inner}")] CanonicalRevert { diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 77cc309ebf93d..2019f58ee1609 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -2266,7 +2266,7 @@ mod tests { assert!(service.pending_pings.contains_key(&node.id)); assert_eq!(service.pending_pings.len(), num_inserted); if num_inserted == MAX_NODES_PING { - break; + break } } } diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 8f97e09c7dd43..a806f2fa62e40 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -95,7 +95,7 @@ where max_non_empty: u64, ) -> DownloadResult>> { if range.is_empty() || max_non_empty == 0 { - return Ok(None); + return Ok(None) } // Collect headers while @@ -144,7 +144,7 @@ where // if we're only connected to a few peers, we keep it low if num_peers < *self.concurrent_requests_range.start() { - return max_requests; + return max_requests } max_requests.min(*self.concurrent_requests_range.end()) @@ -238,7 +238,7 @@ where .skip_while(|b| b.block_number() < expected) .take_while(|b| self.download_range.contains(&b.block_number())) .collect() - }); + }) } // Drop buffered response since we passed that range @@ -257,7 +257,7 @@ where self.queued_bodies.shrink_to_fit(); self.metrics.total_flushed.increment(next_batch.len() as u64); 
self.metrics.queued_blocks.set(self.queued_bodies.len() as f64); - return Some(next_batch); + return Some(next_batch) } None } @@ -354,13 +354,13 @@ where fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); if this.is_terminated() { - return Poll::Ready(None); + return Poll::Ready(None) } // Submit new requests and poll any in progress loop { // Yield next batch if ready if let Some(next_batch) = this.try_split_next_batch() { - return Poll::Ready(Some(Ok(next_batch))); + return Poll::Ready(Some(Ok(next_batch))) } // Poll requests @@ -373,7 +373,7 @@ where Err(error) => { tracing::debug!(target: "downloaders::bodies", %error, "Request failed"); this.clear(); - return Poll::Ready(Some(Err(error))); + return Poll::Ready(Some(Err(error))) } }; } @@ -396,7 +396,7 @@ where Err(error) => { tracing::error!(target: "downloaders::bodies", %error, "Failed to download from next request"); this.clear(); - return Poll::Ready(Some(Err(error))); + return Poll::Ready(Some(Err(error))) } }; } @@ -409,21 +409,21 @@ where this.buffered_responses.shrink_to_fit(); if !new_request_submitted { - break; + break } } // All requests are handled, stream is finished if this.in_progress_queue.is_empty() { if this.queued_bodies.is_empty() { - return Poll::Ready(None); + return Poll::Ready(None) } let batch_size = this.stream_batch_size.min(this.queued_bodies.len()); let next_batch = this.queued_bodies.drain(..batch_size).collect::>(); this.queued_bodies.shrink_to_fit(); this.metrics.total_flushed.increment(next_batch.len() as u64); this.metrics.queued_blocks.set(this.queued_bodies.len() as f64); - return Poll::Ready(Some(Ok(next_batch))); + return Poll::Ready(Some(Ok(next_batch))) } Poll::Pending diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index dfe877a0b917d..593c738e0bba1 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -180,7 
+180,7 @@ where let block = SealedBlock::new(next_header, next_body); - if let Err(error) = self.consensus.validate_block(&block) { + if let Err(error) = self.consensus.validate_block_pre_execution(&block) { // Body is invalid, put the header back and return an error let hash = block.hash(); let number = block.number; diff --git a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index 04b7cda37e50a..82172f8d5c744 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -479,9 +479,9 @@ where if let Err(disconnect_err) = this.inner.conn.start_disconnect(DisconnectReason::DisconnectRequested) { - return Poll::Ready(Some(Err(disconnect_err.into()))); + return Poll::Ready(Some(Err(disconnect_err.into()))) } - return Poll::Ready(Some(Err(err.into()))); + return Poll::Ready(Some(Err(err.into()))) } Poll::Pending => { conn_ready = false; diff --git a/crates/node-core/src/args/pruning.rs b/crates/node-core/src/args/pruning.rs index 4adc721586ba5..e585a216dc721 100644 --- a/crates/node-core/src/args/pruning.rs +++ b/crates/node-core/src/args/pruning.rs @@ -20,7 +20,7 @@ impl PruningArgs { /// Returns pruning configuration. 
pub fn prune_config(&self, chain_spec: &ChainSpec) -> Option { if !self.full { - return None; + return None } Some(PruneConfig { block_interval: 5, diff --git a/crates/node-core/src/init.rs b/crates/node-core/src/init.rs index 6d924b6b1a47e..05435ce37e989 100644 --- a/crates/node-core/src/init.rs +++ b/crates/node-core/src/init.rs @@ -373,7 +373,7 @@ fn parse_accounts( while let Ok(n) = reader.read_line(&mut line) { if n == 0 { - break; + break } let GenesisAccountWithAddress { genesis_account, address } = serde_json::from_str(&line)?; diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 52333c1471420..5cb251f8afe0a 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -411,7 +411,7 @@ impl NodeConfig { // try to look up the header in the database if let Some(header) = header { info!(target: "reth::cli", ?tip, "Successfully looked up tip block in the database"); - return Ok(header.number); + return Ok(header.number) } Ok(self.fetch_tip_from_network(client, tip.into()).await?.number) @@ -434,7 +434,7 @@ impl NodeConfig { match get_single_header(&client, tip).await { Ok(tip_header) => { info!(target: "reth::cli", ?tip, "Successfully fetched tip"); - return Ok(tip_header); + return Ok(tip_header) } Err(error) => { fetch_failures += 1; diff --git a/crates/node-core/src/utils.rs b/crates/node-core/src/utils.rs index f9b4ff599ca6c..963f863c5aae6 100644 --- a/crates/node-core/src/utils.rs +++ b/crates/node-core/src/utils.rs @@ -2,7 +2,7 @@ //! blocks from the network. 
use eyre::Result; -use reth_consensus_common::validation::validate_block_standalone; +use reth_consensus_common::validation::validate_block_pre_execution; use reth_fs_util as fs; use reth_interfaces::p2p::{ bodies::client::BodiesClient, @@ -121,7 +121,7 @@ where withdrawals: block.withdrawals, }; - validate_block_standalone(&block, &chain_spec)?; + validate_block_pre_execution(&block, &chain_spec)?; Ok(block) } diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index 4ebbaa8d8af02..a2a5edb5d5c96 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -20,4 +20,4 @@ reth-consensus.workspace = true [features] optimism = [ "reth-primitives/optimism", -] \ No newline at end of file +] diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 4deea2879624f..09f9c1f38d163 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -10,10 +10,18 @@ #![cfg(feature = "optimism")] use reth_consensus::{Consensus, ConsensusError}; -use reth_consensus_common::{validation, validation::validate_header_extradata}; -use reth_primitives::{ChainSpec, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256}; +use reth_consensus_common::validation::{ + validate_block_pre_execution, validate_header_extradata, validate_header_standalone, +}; +use reth_primitives::{ + BlockWithSenders, ChainSpec, Header, Receipt, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, + U256, +}; use std::{sync::Arc, time::SystemTime}; +mod validation; +pub use validation::validate_block_post_execution; + /// Optimism consensus implementation. /// /// Provides basic checks as outlined in the execution specs. 
@@ -37,7 +45,7 @@ impl OptimismBeaconConsensus { impl Consensus for OptimismBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { - validation::validate_header_standalone(header, &self.chain_spec)?; + validate_header_standalone(header, &self.chain_spec)?; Ok(()) } @@ -96,7 +104,15 @@ impl Consensus for OptimismBeaconConsensus { Ok(()) } - fn validate_block(&self, block: &SealedBlock) -> Result<(), ConsensusError> { - validation::validate_block_standalone(block, &self.chain_spec) + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { + validate_block_pre_execution(block, &self.chain_spec) + } + + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + receipts: &[Receipt], + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec, receipts) } } diff --git a/crates/optimism/consensus/src/validation.rs b/crates/optimism/consensus/src/validation.rs new file mode 100644 index 0000000000000..0998cf1b8a0f2 --- /dev/null +++ b/crates/optimism/consensus/src/validation.rs @@ -0,0 +1,90 @@ +use reth_consensus::ConsensusError; +use reth_primitives::{ + gas_spent_by_transactions, proofs::calculate_receipt_root_optimism, BlockWithSenders, Bloom, + ChainSpec, GotExpected, Receipt, ReceiptWithBloom, B256, +}; + +/// Validate a block with regard to execution results: +/// +/// - Compares the receipts root in the block header to the block body +/// - Compares the gas used in the block header to the actual gas usage after execution +pub fn validate_block_post_execution( + block: &BlockWithSenders, + chain_spec: &ChainSpec, + receipts: &[Receipt], +) -> Result<(), ConsensusError> { + // Before Byzantium, receipts contained state root that would mean that expensive + // operation as hashing that is required for state root got calculated in every + // transaction This was replaced with is_success flag. 
+ // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 + if chain_spec.is_byzantium_active_at_block(block.header.number) { + verify_receipts( + block.header.receipts_root, + block.header.logs_bloom, + receipts.iter(), + chain_spec, + block.timestamp, + )?; + } + + // Check if gas used matches the value set in header. + let cumulative_gas_used = + receipts.last().map(|receipt| receipt.cumulative_gas_used).unwrap_or(0); + if block.gas_used != cumulative_gas_used { + return Err(ConsensusError::BlockGasUsed { + gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, + gas_spent_by_tx: gas_spent_by_transactions(receipts), + }) + } + + Ok(()) +} + +/// Verify the calculated receipts root against the expected receipts root. +fn verify_receipts<'a>( + expected_receipts_root: B256, + expected_logs_bloom: Bloom, + receipts: impl Iterator + Clone, + chain_spec: &ChainSpec, + timestamp: u64, +) -> Result<(), ConsensusError> { + // Calculate receipts root. + let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); + let receipts_root = + calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); + + // Create header log bloom. + let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + + compare_receipts_root_and_logs_bloom( + receipts_root, + logs_bloom, + expected_receipts_root, + expected_logs_bloom, + )?; + + Ok(()) +} + +/// Compare the calculated receipts root with the expected receipts root, also compare +/// the calculated logs bloom with the expected logs bloom. 
+fn compare_receipts_root_and_logs_bloom( + calculated_receipts_root: B256, + calculated_logs_bloom: Bloom, + expected_receipts_root: B256, + expected_logs_bloom: Bloom, +) -> Result<(), ConsensusError> { + if calculated_receipts_root != expected_receipts_root { + return Err(ConsensusError::BodyReceiptRootDiff( + GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(), + )) + } + + if calculated_logs_bloom != expected_logs_bloom { + return Err(ConsensusError::BodyBloomLogDiff( + GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(), + )) + } + + Ok(()) +} diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index a1c3a168bdab2..5af74476117e8 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -17,8 +17,12 @@ reth-primitives.workspace = true reth-revm.workspace = true reth-interfaces.workspace = true reth-provider.workspace = true +reth-consensus-common.workspace = true # Optimism +reth-optimism-consensus.workspace = true + +# revm revm.workspace = true revm-primitives.workspace = true @@ -35,4 +39,5 @@ optimism = [ "reth-provider/optimism", "reth-interfaces/optimism", "revm-primitives/optimism", + "reth-optimism-consensus/optimism", ] diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index f729ceda1c744..44bef823dc3fb 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -1,9 +1,6 @@ //! Optimism block executor. 
-use crate::{ - l1::ensure_create2_deployer, verify::verify_receipts, OptimismBlockExecutionError, - OptimismEvmConfig, -}; +use crate::{l1::ensure_create2_deployer, OptimismBlockExecutionError, OptimismEvmConfig}; use reth_evm::{ execute::{ BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, @@ -15,9 +12,10 @@ use reth_interfaces::{ executor::{BlockExecutionError, BlockValidationError}, provider::ProviderError, }; +use reth_optimism_consensus::validate_block_post_execution; use reth_primitives::{ - BlockNumber, BlockWithSenders, ChainSpec, GotExpected, Hardfork, Header, PruneModes, Receipt, - Receipts, TxType, Withdrawals, U256, + BlockNumber, BlockWithSenders, ChainSpec, Hardfork, Header, PruneModes, Receipt, Receipts, + TxType, Withdrawals, U256, }; use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, @@ -30,7 +28,7 @@ use revm_primitives::{ BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, }; use std::sync::Arc; -use tracing::{debug, trace}; +use tracing::trace; /// Provides executors to execute regular ethereum blocks #[derive(Debug, Clone)] @@ -157,12 +155,12 @@ where transaction_gas_limit: transaction.gas_limit(), block_available_gas, } - .into()); + .into()) } // An optimism block should never contain blob transactions. if matches!(transaction.tx_type(), TxType::Eip4844) { - return Err(OptimismBlockExecutionError::BlobTransactionRejected.into()); + return Err(OptimismBlockExecutionError::BlobTransactionRejected.into()) } // Cache the depositor account prior to the state transition for the deposit nonce. @@ -221,16 +219,6 @@ where } drop(evm); - // Check if gas used matches the value set in header. 
- if block.gas_used != cumulative_gas_used { - let receipts = Receipts::from_block_receipt(receipts); - return Err(BlockValidationError::BlockGasUsed { - gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, - gas_spent_by_tx: receipts.gas_spent_by_tx()?, - } - .into()); - } - Ok((receipts, cumulative_gas_used)) } } @@ -292,8 +280,8 @@ where /// /// Returns the receipts of the transactions in the block and the total gas used. /// - /// Returns an error if execution fails or receipt verification fails. - fn execute_and_verify( + /// Returns an error if execution fails. + fn execute_without_verification( &mut self, block: &BlockWithSenders, total_difficulty: U256, @@ -312,23 +300,6 @@ where // 3. apply post execution changes self.post_execution(block, total_difficulty)?; - // Before Byzantium, receipts contained state root that would mean that expensive - // operation as hashing that is required for state root got calculated in every - // transaction This was replaced with is_success flag. - // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 - if self.chain_spec().is_byzantium_active_at_block(block.header.number) { - if let Err(error) = verify_receipts( - block.header.receipts_root, - block.header.logs_bloom, - receipts.iter(), - self.chain_spec(), - block.timestamp, - ) { - debug!(target: "evm", %error, ?receipts, "receipts verification failed"); - return Err(error); - }; - } - Ok((receipts, gas_used)) } @@ -383,7 +354,7 @@ where /// State changes are committed to the database. 
fn execute(mut self, input: Self::Input<'_>) -> Result { let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, gas_used) = self.execute_and_verify(block, total_difficulty)?; + let (receipts, gas_used) = self.execute_without_verification(block, total_difficulty)?; // NOTE: we need to merge keep the reverts for the bundle retention self.state.merge_transitions(BundleRetention::Reverts); @@ -426,9 +397,12 @@ where type Output = BatchBlockExecutionOutput; type Error = BlockExecutionError; - fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { + fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, _gas_used) = self.executor.execute_and_verify(block, total_difficulty)?; + let (receipts, _gas_used) = + self.executor.execute_without_verification(block, total_difficulty)?; + + validate_block_post_execution(block, self.executor.chain_spec(), &receipts)?; // prepare the state according to the prune mode let retention = self.batch_record.bundle_retention(block.number); @@ -557,7 +531,7 @@ mod tests { // Attempt to execute a block with one deposit and one non-deposit transaction executor - .execute_one( + .execute_and_verify_one( ( &BlockWithSenders { block: Block { @@ -638,7 +612,7 @@ mod tests { // attempt to execute an empty block with parent beacon block root, this should not fail executor - .execute_one( + .execute_and_verify_one( ( &BlockWithSenders { block: Block { diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 31d39fcb6ac4b..be3897ef389e8 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -23,7 +23,6 @@ pub mod l1; pub use l1::*; mod error; -pub mod verify; pub use error::OptimismBlockExecutionError; /// Optimism-related EVM configuration. 
diff --git a/crates/optimism/evm/src/verify.rs b/crates/optimism/evm/src/verify.rs deleted file mode 100644 index d96965d03b5fd..0000000000000 --- a/crates/optimism/evm/src/verify.rs +++ /dev/null @@ -1,58 +0,0 @@ -//! Helpers for verifying the receipts. - -use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; -use reth_primitives::{ - proofs::calculate_receipt_root_optimism, Bloom, ChainSpec, GotExpected, Receipt, - ReceiptWithBloom, B256, -}; - -/// Verify the calculated receipts root against the expected receipts root. -pub fn verify_receipts<'a>( - expected_receipts_root: B256, - expected_logs_bloom: Bloom, - receipts: impl Iterator + Clone, - chain_spec: &ChainSpec, - timestamp: u64, -) -> Result<(), BlockExecutionError> { - // Calculate receipts root. - let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); - let receipts_root = - calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); - - // Create header log bloom. - let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); - - compare_receipts_root_and_logs_bloom( - receipts_root, - logs_bloom, - expected_receipts_root, - expected_logs_bloom, - )?; - - Ok(()) -} - -/// Compare the calculated receipts root with the expected receipts root, also compare -/// the calculated logs bloom with the expected logs bloom. 
-pub fn compare_receipts_root_and_logs_bloom( - calculated_receipts_root: B256, - calculated_logs_bloom: Bloom, - expected_receipts_root: B256, - expected_logs_bloom: Bloom, -) -> Result<(), BlockExecutionError> { - if calculated_receipts_root != expected_receipts_root { - return Err(BlockValidationError::ReceiptRootDiff( - GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(), - ) - .into()) - } - - if calculated_logs_bloom != expected_logs_bloom { - return Err(BlockValidationError::BloomLogDiff( - GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(), - ) - .into()) - } - - Ok(()) -} diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index 6b95b0425763b..7f85a0177ae6a 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -155,7 +155,7 @@ impl ExecutionPayloadValidator { let shanghai_active = self.is_shanghai_active_at_timestamp(sealed_block.timestamp); if !shanghai_active && sealed_block.withdrawals.is_some() { // shanghai not active but withdrawals present - return Err(PayloadError::PreShanghaiBlockWithWitdrawals); + return Err(PayloadError::PreShanghaiBlockWithWitdrawals) } // EIP-4844 checks diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index be8144e901200..2cdaee72db4de 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -115,7 +115,7 @@ impl TryFrom for Transaction { return Err(ConversionError::Eip2718Error( RlpError::Custom("EIP-1559 fields are present in a legacy transaction") .into(), - )); + )) } Ok(Transaction::Legacy(TxLegacy { chain_id: tx.chain_id, diff --git a/crates/primitives/src/compression/mod.rs b/crates/primitives/src/compression/mod.rs index 200b6bc4360f0..b0a3fd2fe509b 100644 --- a/crates/primitives/src/compression/mod.rs +++ b/crates/primitives/src/compression/mod.rs @@ -69,7 +69,7 @@ impl ReusableDecompressor { 
reserved_upper_bound = true; if let Some(upper_bound) = Decompressor::upper_bound(src) { if let Some(additional) = upper_bound.checked_sub(self.buf.capacity()) { - break 'b additional; + break 'b additional } } } diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 3c57158f1a3c4..35dfc14915a23 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -82,7 +82,9 @@ pub use prune::{ PrunePurpose, PruneSegment, PruneSegmentError, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE, }; -pub use receipt::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts}; +pub use receipt::{ + gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, +}; pub use static_file::StaticFileSegment; pub use storage::StorageEntry; diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 63955a1d13b1f..74e90363daee7 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -1,6 +1,6 @@ #[cfg(feature = "zstd-codec")] use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; -use crate::{logs_bloom, Bloom, Bytes, PruneSegmentError, TxType, B256}; +use crate::{logs_bloom, Bloom, Bytes, TxType, B256}; use alloy_primitives::Log; use alloy_rlp::{length_of_length, Decodable, Encodable, RlpDecodable, RlpEncodable}; use bytes::{Buf, BufMut}; @@ -117,22 +117,6 @@ impl Receipts { timestamp, )) } - - /// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used). 
- pub fn gas_spent_by_tx(&self) -> Result, PruneSegmentError> { - let Some(block_r) = self.last() else { - return Ok(vec![]); - }; - let mut out = Vec::with_capacity(block_r.len()); - for (id, tx_r) in block_r.iter().enumerate() { - if let Some(receipt) = tx_r.as_ref() { - out.push((id as u64, receipt.cumulative_gas_used)); - } else { - return Err(PruneSegmentError::ReceiptsPruned); - } - } - Ok(out) - } } impl Deref for Receipts { @@ -203,6 +187,17 @@ impl ReceiptWithBloom { } } +/// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used). +pub fn gas_spent_by_transactions>( + receipts: impl IntoIterator, +) -> Vec<(u64, u64)> { + receipts + .into_iter() + .enumerate() + .map(|(id, receipt)| (id as u64, receipt.deref().cumulative_gas_used)) + .collect() +} + #[cfg(any(test, feature = "arbitrary"))] impl proptest::arbitrary::Arbitrary for Receipt { type Parameters = (); @@ -312,7 +307,7 @@ impl ReceiptWithBloom { let b = &mut &**buf; let rlp_head = alloy_rlp::Header::decode(b)?; if !rlp_head.list { - return Err(alloy_rlp::Error::UnexpectedString); + return Err(alloy_rlp::Error::UnexpectedString) } let started_len = b.len(); @@ -357,7 +352,7 @@ impl ReceiptWithBloom { return Err(alloy_rlp::Error::ListLengthMismatch { expected: rlp_head.payload_length, got: consumed, - }); + }) } *buf = *b; Ok(this) @@ -510,7 +505,7 @@ impl<'a> ReceiptWithBloomEncoder<'a> { fn encode_inner(&self, out: &mut dyn BufMut, with_header: bool) { if matches!(self.receipt.tx_type, TxType::Legacy) { self.encode_fields(out); - return; + return } let mut payload = Vec::new(); diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index fd9af4631170f..c823a21577873 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1424,7 +1424,7 @@ impl Decodable for TransactionSigned { /// header if the first byte is less than `0xf7`. 
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { if buf.is_empty() { - return Err(RlpError::InputTooShort); + return Err(RlpError::InputTooShort) } // decode header diff --git a/crates/rpc/ipc/src/server/ipc.rs b/crates/rpc/ipc/src/server/ipc.rs index c73d9bb93674b..33ed8d2d5531d 100644 --- a/crates/rpc/ipc/src/server/ipc.rs +++ b/crates/rpc/ipc/src/server/ipc.rs @@ -151,7 +151,7 @@ where return Some(batch_response_error( Id::Null, reject_too_big_request(max_request_body_size as u32), - )); + )) } // Single request or notification diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index 04608745484aa..2bec090cc44a6 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -871,7 +871,7 @@ mod tests { // and you might want to do something smarter if it's // critical that "the most recent item" must be sent when it is produced. if sink.send(notif).await.is_err() { - break Ok(()); + break Ok(()) } closed = c; diff --git a/crates/rpc/rpc-types/src/mev.rs b/crates/rpc/rpc-types/src/mev.rs index 9126c09635dbe..5da5a5667daa3 100644 --- a/crates/rpc/rpc-types/src/mev.rs +++ b/crates/rpc/rpc-types/src/mev.rs @@ -755,7 +755,7 @@ mod u256_numeric_string { match val { serde_json::Value::String(s) => { if let Ok(val) = s.parse::() { - return Ok(U256::from(val)); + return Ok(U256::from(val)) } U256::from_str(&s).map_err(de::Error::custom) } diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 2f62e66a31d59..1682f6f88d7ab 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -129,7 +129,7 @@ where if tx_len != receipts.len() { return Err(internal_rpc_err( "the number of transactions does not match the number of receipts", - )); + )) } // make sure the block is full diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 0f933cea78221..e16b9e8b6d0c6 100644 --- a/crates/stages/src/stages/execution.rs +++ 
b/crates/stages/src/stages/execution.rs @@ -240,9 +240,11 @@ where // Execute the block let execute_start = Instant::now(); - executor.execute_one((&block, td).into()).map_err(|error| StageError::Block { - block: Box::new(block.header.clone().seal_slow()), - error: BlockErrorKind::Execution(error), + executor.execute_and_verify_one((&block, td).into()).map_err(|error| { + StageError::Block { + block: Box::new(block.header.clone().seal_slow()), + error: BlockErrorKind::Execution(error), + } })?; execution_duration += execute_start.elapsed(); diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index ad2eb19a8db7c..ac9e771b1e76a 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -534,7 +534,7 @@ impl DataReader { let offset_end = index + self.offset_size as usize; if offset_end > self.offset_mmap.len() { - return Err(NippyJarError::OffsetOutOfBounds { index }); + return Err(NippyJarError::OffsetOutOfBounds { index }) } buffer[..self.offset_size as usize].copy_from_slice(&self.offset_mmap[index..offset_end]); diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 3a0f2d03174d3..a81ef5b005ba4 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -522,7 +522,7 @@ impl StaticFileProviderRW { if self.prune_on_commit.is_some() { return Err(ProviderError::NippyJar( "Pruning should be comitted before appending or pruning more data".to_string(), - )); + )) } Ok(()) } From d0386b8166aaf80a828643f5661b06d3bf71320c Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 22 May 2024 19:36:51 +0200 Subject: [PATCH 590/700] feat: use broadcast channel for event listeners (#8193) Co-authored-by: Emilia Hane --- Cargo.lock | 7 +- bin/reth/src/commands/import.rs | 2 +- crates/consensus/auto-seal/Cargo.toml | 1 + 
crates/consensus/auto-seal/src/task.rs | 8 +- crates/consensus/beacon/src/engine/handle.rs | 29 +++--- .../beacon/src/engine/hooks/static_file.rs | 3 +- crates/consensus/beacon/src/engine/message.rs | 9 +- crates/consensus/beacon/src/engine/mod.rs | 30 +++--- crates/consensus/beacon/src/engine/sync.rs | 21 ++--- crates/e2e-test-utils/Cargo.toml | 1 + crates/e2e-test-utils/src/network.rs | 4 +- crates/net/network/src/manager.rs | 22 ++--- crates/net/network/src/network.rs | 20 ++-- crates/net/network/src/test_utils/testnet.rs | 8 +- crates/net/network/src/transactions/mod.rs | 16 ++-- crates/node-core/src/engine/engine_store.rs | 3 +- crates/node/builder/src/launch/mod.rs | 8 +- crates/node/events/src/node.rs | 6 ++ crates/prune/Cargo.toml | 1 - crates/prune/src/pruner.rs | 15 ++- crates/rpc/rpc-builder/Cargo.toml | 1 + crates/rpc/rpc-builder/tests/it/utils.rs | 3 +- crates/rpc/rpc-engine-api/Cargo.toml | 1 + crates/rpc/rpc-engine-api/src/engine_api.rs | 6 +- crates/stages-api/Cargo.toml | 2 +- crates/stages-api/src/error.rs | 5 +- crates/stages-api/src/pipeline/builder.rs | 2 +- crates/stages-api/src/pipeline/mod.rs | 39 ++++---- crates/static-file/Cargo.toml | 1 + .../static-file/src/static_file_producer.rs | 26 +++--- crates/tokio-util/Cargo.toml | 4 + crates/tokio-util/src/event_listeners.rs | 46 ---------- crates/tokio-util/src/event_sender.rs | 42 +++++++++ crates/tokio-util/src/event_stream.rs | 92 +++++++++++++++++++ crates/tokio-util/src/lib.rs | 6 +- 35 files changed, 293 insertions(+), 197 deletions(-) delete mode 100644 crates/tokio-util/src/event_listeners.rs create mode 100644 crates/tokio-util/src/event_sender.rs create mode 100644 crates/tokio-util/src/event_stream.rs diff --git a/Cargo.lock b/Cargo.lock index 6edf8be156442..e34086b13275a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6450,6 +6450,7 @@ dependencies = [ "reth-revm", "reth-rpc-types", "reth-stages-api", + "reth-tokio-util", "reth-transaction-pool", "tokio", "tokio-stream", @@ -6785,6 
+6786,7 @@ dependencies = [ "reth-provider", "reth-rpc", "reth-rpc-layer", + "reth-tokio-util", "reth-tracing", "secp256k1 0.28.2", "serde_json", @@ -7631,7 +7633,6 @@ dependencies = [ "reth-tracing", "thiserror", "tokio", - "tokio-stream", "tracing", ] @@ -7758,6 +7759,7 @@ dependencies = [ "reth-rpc-types", "reth-rpc-types-compat", "reth-tasks", + "reth-tokio-util", "reth-tracing", "reth-transaction-pool", "serde", @@ -7792,6 +7794,7 @@ dependencies = [ "reth-rpc-types", "reth-rpc-types-compat", "reth-tasks", + "reth-tokio-util", "serde", "thiserror", "tokio", @@ -7924,6 +7927,7 @@ dependencies = [ "reth-stages", "reth-tokio-util", "tempfile", + "tokio", "tokio-stream", "tracing", ] @@ -7959,6 +7963,7 @@ version = "0.2.0-beta.7" dependencies = [ "tokio", "tokio-stream", + "tracing", ] [[package]] diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 235ada848549c..1108f8aa7856a 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -257,7 +257,7 @@ where let max_block = file_client.max_block().unwrap_or(0); - let mut pipeline = Pipeline::builder() + let pipeline = Pipeline::builder() .with_tip_sender(tip_tx) // we want to sync all blocks the file client provides or 0 if empty .with_max_block(max_block) diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml index 435ade53db32c..ccbc1e06a32a7 100644 --- a/crates/consensus/auto-seal/Cargo.toml +++ b/crates/consensus/auto-seal/Cargo.toml @@ -25,6 +25,7 @@ reth-engine-primitives.workspace = true reth-consensus.workspace = true reth-rpc-types.workspace = true reth-network-types.workspace = true +reth-tokio-util.workspace = true # async futures-util.workspace = true diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index 42f1268f33125..2a5ec4433e494 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -9,6 +9,7 @@ use 
reth_primitives::{ use reth_provider::{CanonChainTracker, CanonStateNotificationSender, Chain, StateProviderFactory}; use reth_rpc_types::engine::ForkchoiceState; use reth_stages_api::PipelineEvent; +use reth_tokio_util::EventStream; use reth_transaction_pool::{TransactionPool, ValidPoolTransaction}; use std::{ collections::VecDeque, @@ -18,7 +19,6 @@ use std::{ task::{Context, Poll}, }; use tokio::sync::{mpsc::UnboundedSender, oneshot}; -use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, error, warn}; /// A Future that listens for new ready transactions and puts new blocks into storage @@ -30,7 +30,7 @@ pub struct MiningTask>>>, + insert_task: Option>>>, /// Shared storage to insert new blocks storage: Storage, /// Pool where transactions are stored @@ -42,7 +42,7 @@ pub struct MiningTask>, + pipe_line_events: Option>, /// The type used for block execution block_executor: Executor, } @@ -80,7 +80,7 @@ impl } /// Sets the pipeline events to listen on. - pub fn set_pipeline_events(&mut self, events: UnboundedReceiverStream) { + pub fn set_pipeline_events(&mut self, events: EventStream) { self.pipe_line_events = Some(events); } } diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index 121a8fac0703b..bec289bf4a7fa 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -10,28 +10,20 @@ use reth_interfaces::RethResult; use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; -use tokio::sync::{mpsc, mpsc::UnboundedSender, oneshot}; -use tokio_stream::wrappers::UnboundedReceiverStream; +use reth_tokio_util::{EventSender, EventStream}; +use tokio::sync::{mpsc::UnboundedSender, oneshot}; /// A _shareable_ beacon consensus frontend type. Used to interact with the spawned beacon consensus /// engine task. 
/// /// See also `BeaconConsensusEngine` -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct BeaconConsensusEngineHandle where Engine: EngineTypes, { pub(crate) to_engine: UnboundedSender>, -} - -impl Clone for BeaconConsensusEngineHandle -where - Engine: EngineTypes, -{ - fn clone(&self) -> Self { - Self { to_engine: self.to_engine.clone() } - } + event_sender: EventSender, } // === impl BeaconConsensusEngineHandle === @@ -41,8 +33,11 @@ where Engine: EngineTypes, { /// Creates a new beacon consensus engine handle. - pub fn new(to_engine: UnboundedSender>) -> Self { - Self { to_engine } + pub fn new( + to_engine: UnboundedSender>, + event_sender: EventSender, + ) -> Self { + Self { to_engine, event_sender } } /// Sends a new payload message to the beacon consensus engine and waits for a response. @@ -97,9 +92,7 @@ where } /// Creates a new [`BeaconConsensusEngineEvent`] listener stream. - pub fn event_listener(&self) -> UnboundedReceiverStream { - let (tx, rx) = mpsc::unbounded_channel(); - let _ = self.to_engine.send(BeaconEngineMessage::EventListener(tx)); - UnboundedReceiverStream::new(rx) + pub fn event_listener(&self) -> EventStream { + self.event_sender.new_listener() } } diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index 2cff68e1d26c3..01b7056c37f02 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -91,8 +91,7 @@ impl StaticFileHook { return Ok(None) }; - let Some(mut locked_static_file_producer) = static_file_producer.try_lock_arc() - else { + let Some(locked_static_file_producer) = static_file_producer.try_lock_arc() else { trace!(target: "consensus::engine::hooks::static_file", "StaticFileProducer lock is already taken"); return Ok(None) }; diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index f9f1a84d46f49..108dab41eb0f4 100644 
--- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -1,7 +1,4 @@ -use crate::{ - engine::{error::BeaconOnNewPayloadError, forkchoice::ForkchoiceStatus}, - BeaconConsensusEngineEvent, -}; +use crate::engine::{error::BeaconOnNewPayloadError, forkchoice::ForkchoiceStatus}; use futures::{future::Either, FutureExt}; use reth_engine_primitives::EngineTypes; use reth_interfaces::RethResult; @@ -15,7 +12,7 @@ use std::{ pin::Pin, task::{ready, Context, Poll}, }; -use tokio::sync::{mpsc::UnboundedSender, oneshot}; +use tokio::sync::oneshot; /// Represents the outcome of forkchoice update. /// @@ -162,6 +159,4 @@ pub enum BeaconEngineMessage { }, /// Message with exchanged transition configuration. TransitionConfigurationExchanged, - /// Add a new listener for [`BeaconEngineMessage`]. - EventListener(UnboundedSender), } diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 1057457c77985..8139a0c577314 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -29,7 +29,7 @@ use reth_rpc_types::engine::{ }; use reth_stages_api::{ControlFlow, Pipeline}; use reth_tasks::TaskSpawner; -use reth_tokio_util::EventListeners; +use reth_tokio_util::EventSender; use std::{ pin::Pin, sync::Arc, @@ -202,8 +202,8 @@ where /// be used to download and execute the missing blocks. pipeline_run_threshold: u64, hooks: EngineHooksController, - /// Listeners for engine events. - listeners: EventListeners, + /// Sender for engine events. + event_sender: EventSender, /// Consensus engine metrics. 
metrics: EngineMetrics, } @@ -282,8 +282,8 @@ where engine_message_stream: BoxStream<'static, BeaconEngineMessage>, hooks: EngineHooks, ) -> RethResult<(Self, BeaconConsensusEngineHandle)> { - let handle = BeaconConsensusEngineHandle { to_engine }; - let listeners = EventListeners::default(); + let event_sender = EventSender::default(); + let handle = BeaconConsensusEngineHandle::new(to_engine, event_sender.clone()); let sync = EngineSyncController::new( pipeline, client, @@ -291,7 +291,7 @@ where run_pipeline_continuously, max_block, blockchain.chain_spec(), - listeners.clone(), + event_sender.clone(), ); let mut this = Self { sync, @@ -306,7 +306,7 @@ where blockchain_tree_action: None, pipeline_run_threshold, hooks: EngineHooksController::new(hooks), - listeners, + event_sender, metrics: EngineMetrics::default(), }; @@ -406,7 +406,7 @@ where if should_update_head { let head = outcome.header(); let _ = self.update_head(head.clone()); - self.listeners.notify(BeaconConsensusEngineEvent::CanonicalChainCommitted( + self.event_sender.notify(BeaconConsensusEngineEvent::CanonicalChainCommitted( Box::new(head.clone()), elapsed, )); @@ -543,7 +543,7 @@ where } // notify listeners about new processed FCU - self.listeners.notify(BeaconConsensusEngineEvent::ForkchoiceUpdated(state, status)); + self.event_sender.notify(BeaconConsensusEngineEvent::ForkchoiceUpdated(state, status)); } /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less @@ -597,13 +597,6 @@ where self.handle.clone() } - /// Pushes an [UnboundedSender] to the engine's listeners. Also pushes an [UnboundedSender] to - /// the sync controller's listeners. - pub(crate) fn push_listener(&mut self, listener: UnboundedSender) { - self.listeners.push_listener(listener.clone()); - self.sync.push_listener(listener); - } - /// Returns true if the distance from the local tip to the block is greater than the configured /// threshold. 
/// @@ -1255,7 +1248,7 @@ where } else { BeaconConsensusEngineEvent::ForkBlockAdded(block) }; - self.listeners.notify(event); + self.event_sender.notify(event); PayloadStatusEnum::Valid } InsertPayloadOk::AlreadySeen(BlockStatus::Valid(_)) => { @@ -1429,7 +1422,7 @@ where match make_canonical_result { Ok(outcome) => { if let CanonicalOutcome::Committed { head } = &outcome { - self.listeners.notify(BeaconConsensusEngineEvent::CanonicalChainCommitted( + self.event_sender.notify(BeaconConsensusEngineEvent::CanonicalChainCommitted( Box::new(head.clone()), elapsed, )); @@ -1878,7 +1871,6 @@ where BeaconEngineMessage::TransitionConfigurationExchanged => { this.blockchain.on_transition_configuration_exchanged(); } - BeaconEngineMessage::EventListener(tx) => this.push_listener(tx), } continue } diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 09c6d208b6e55..441c3ce0362cd 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -14,14 +14,14 @@ use reth_interfaces::p2p::{ use reth_primitives::{stage::PipelineTarget, BlockNumber, ChainSpec, SealedBlock, B256}; use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineWithResult}; use reth_tasks::TaskSpawner; -use reth_tokio_util::EventListeners; +use reth_tokio_util::EventSender; use std::{ cmp::{Ordering, Reverse}, collections::{binary_heap::PeekMut, BinaryHeap}, sync::Arc, task::{ready, Context, Poll}, }; -use tokio::sync::{mpsc::UnboundedSender, oneshot}; +use tokio::sync::oneshot; use tracing::trace; /// Manages syncing under the control of the engine. @@ -49,8 +49,8 @@ where inflight_full_block_requests: Vec>, /// In-flight full block _range_ requests in progress. inflight_block_range_requests: Vec>, - /// Listeners for engine events. - listeners: EventListeners, + /// Sender for engine events. 
+ event_sender: EventSender, /// Buffered blocks from downloads - this is a min-heap of blocks, using the block number for /// ordering. This means the blocks will be popped from the heap with ascending block numbers. range_buffered_blocks: BinaryHeap>, @@ -76,7 +76,7 @@ where run_pipeline_continuously: bool, max_block: Option, chain_spec: Arc, - listeners: EventListeners, + event_sender: EventSender, ) -> Self { Self { full_block_client: FullBlockClient::new( @@ -90,7 +90,7 @@ where inflight_block_range_requests: Vec::new(), range_buffered_blocks: BinaryHeap::new(), run_pipeline_continuously, - listeners, + event_sender, max_block, metrics: EngineSyncMetrics::default(), } @@ -127,11 +127,6 @@ where self.run_pipeline_continuously } - /// Pushes an [UnboundedSender] to the sync controller's listeners. - pub(crate) fn push_listener(&mut self, listener: UnboundedSender) { - self.listeners.push_listener(listener); - } - /// Returns `true` if a pipeline target is queued and will be triggered on the next `poll`. 
#[allow(dead_code)] pub(crate) fn is_pipeline_sync_pending(&self) -> bool { @@ -169,7 +164,7 @@ where ); // notify listeners that we're downloading a block range - self.listeners.notify(BeaconConsensusEngineEvent::LiveSyncProgress( + self.event_sender.notify(BeaconConsensusEngineEvent::LiveSyncProgress( ConsensusEngineLiveSyncProgress::DownloadingBlocks { remaining_blocks: count, target: hash, @@ -198,7 +193,7 @@ where ); // notify listeners that we're downloading a block - self.listeners.notify(BeaconConsensusEngineEvent::LiveSyncProgress( + self.event_sender.notify(BeaconConsensusEngineEvent::LiveSyncProgress( ConsensusEngineLiveSyncProgress::DownloadingBlocks { remaining_blocks: 1, target: hash, diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 59424cac98fda..4165044ae2e59 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -20,6 +20,7 @@ reth-rpc-layer.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-provider.workspace = true reth-node-builder.workspace = true +reth-tokio-util.workspace = true jsonrpsee.workspace = true diff --git a/crates/e2e-test-utils/src/network.rs b/crates/e2e-test-utils/src/network.rs index 92e9b316a9a46..5b148b09f55ca 100644 --- a/crates/e2e-test-utils/src/network.rs +++ b/crates/e2e-test-utils/src/network.rs @@ -1,12 +1,12 @@ use futures_util::StreamExt; use reth::network::{NetworkEvent, NetworkEvents, NetworkHandle, PeersInfo}; use reth_primitives::NodeRecord; +use reth_tokio_util::EventStream; use reth_tracing::tracing::info; -use tokio_stream::wrappers::UnboundedReceiverStream; /// Helper for network operations pub struct NetworkTestContext { - network_events: UnboundedReceiverStream, + network_events: EventStream, network: NetworkHandle, } diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index d516625c64074..b6b1d4d1ecbc7 100644 --- a/crates/net/network/src/manager.rs +++ 
b/crates/net/network/src/manager.rs @@ -49,7 +49,7 @@ use reth_primitives::{ForkId, NodeRecord}; use reth_provider::{BlockNumReader, BlockReader}; use reth_rpc_types::{admin::EthProtocolInfo, NetworkStatus}; use reth_tasks::shutdown::GracefulShutdown; -use reth_tokio_util::EventListeners; +use reth_tokio_util::EventSender; use secp256k1::SecretKey; use std::{ net::SocketAddr, @@ -84,8 +84,8 @@ pub struct NetworkManager { from_handle_rx: UnboundedReceiverStream, /// Handles block imports according to the `eth` protocol. block_import: Box, - /// All listeners for high level network events. - event_listeners: EventListeners, + /// Sender for high level network events. + event_sender: EventSender, /// Sender half to send events to the /// [`TransactionsManager`](crate::transactions::TransactionsManager) task, if configured. to_transactions_manager: Option>, @@ -246,6 +246,8 @@ where let (to_manager_tx, from_handle_rx) = mpsc::unbounded_channel(); + let event_sender: EventSender = Default::default(); + let handle = NetworkHandle::new( Arc::clone(&num_active_peers), listener_address, @@ -258,6 +260,7 @@ where Arc::new(AtomicU64::new(chain_spec.chain.id())), tx_gossip_disabled, discv4, + event_sender.clone(), ); Ok(Self { @@ -265,7 +268,7 @@ where handle, from_handle_rx: UnboundedReceiverStream::new(from_handle_rx), block_import, - event_listeners: Default::default(), + event_sender, to_transactions_manager: None, to_eth_request_handler: None, num_active_peers, @@ -528,9 +531,6 @@ where /// Handler for received messages from a handle fn on_handle_message(&mut self, msg: NetworkHandleMessage) { match msg { - NetworkHandleMessage::EventListener(tx) => { - self.event_listeners.push_listener(tx); - } NetworkHandleMessage::DiscoveryListener(tx) => { self.swarm.state_mut().discovery_mut().add_listener(tx); } @@ -690,7 +690,7 @@ where self.update_active_connection_metrics(); - self.event_listeners.notify(NetworkEvent::SessionEstablished { + 
self.event_sender.notify(NetworkEvent::SessionEstablished { peer_id, remote_addr, client_version, @@ -702,12 +702,12 @@ where } SwarmEvent::PeerAdded(peer_id) => { trace!(target: "net", ?peer_id, "Peer added"); - self.event_listeners.notify(NetworkEvent::PeerAdded(peer_id)); + self.event_sender.notify(NetworkEvent::PeerAdded(peer_id)); self.metrics.tracked_peers.set(self.swarm.state().peers().num_known_peers() as f64); } SwarmEvent::PeerRemoved(peer_id) => { trace!(target: "net", ?peer_id, "Peer dropped"); - self.event_listeners.notify(NetworkEvent::PeerRemoved(peer_id)); + self.event_sender.notify(NetworkEvent::PeerRemoved(peer_id)); self.metrics.tracked_peers.set(self.swarm.state().peers().num_known_peers() as f64); } SwarmEvent::SessionClosed { peer_id, remote_addr, error } => { @@ -750,7 +750,7 @@ where .saturating_sub(1) as f64, ); - self.event_listeners.notify(NetworkEvent::SessionClosed { peer_id, reason }); + self.event_sender.notify(NetworkEvent::SessionClosed { peer_id, reason }); } SwarmEvent::IncomingPendingSessionClosed { remote_addr, error } => { trace!( diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 86669bf19f489..8d9b277f41918 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -16,6 +16,7 @@ use reth_network_api::{ use reth_network_types::PeerId; use reth_primitives::{Head, NodeRecord, TransactionSigned, B256}; use reth_rpc_types::NetworkStatus; +use reth_tokio_util::{EventSender, EventStream}; use secp256k1::SecretKey; use std::{ net::SocketAddr, @@ -24,7 +25,10 @@ use std::{ Arc, }, }; -use tokio::sync::{mpsc, mpsc::UnboundedSender, oneshot}; +use tokio::sync::{ + mpsc::{self, UnboundedSender}, + oneshot, +}; use tokio_stream::wrappers::UnboundedReceiverStream; /// A _shareable_ network frontend. Used to interact with the network. 
@@ -53,6 +57,7 @@ impl NetworkHandle { chain_id: Arc, tx_gossip_disabled: bool, discv4: Option, + event_sender: EventSender, ) -> Self { let inner = NetworkInner { num_active_peers, @@ -68,6 +73,7 @@ impl NetworkHandle { chain_id, tx_gossip_disabled, discv4, + event_sender, }; Self { inner: Arc::new(inner) } } @@ -196,10 +202,8 @@ impl NetworkHandle { // === API Implementations === impl NetworkEvents for NetworkHandle { - fn event_listener(&self) -> UnboundedReceiverStream { - let (tx, rx) = mpsc::unbounded_channel(); - let _ = self.manager().send(NetworkHandleMessage::EventListener(tx)); - UnboundedReceiverStream::new(rx) + fn event_listener(&self) -> EventStream { + self.inner.event_sender.new_listener() } fn discovery_listener(&self) -> UnboundedReceiverStream { @@ -401,12 +405,14 @@ struct NetworkInner { tx_gossip_disabled: bool, /// The instance of the discv4 service discv4: Option, + /// Sender for high level network events. + event_sender: EventSender, } /// Provides event subscription for the network. pub trait NetworkEvents: Send + Sync { /// Creates a new [`NetworkEvent`] listener channel. - fn event_listener(&self) -> UnboundedReceiverStream; + fn event_listener(&self) -> EventStream; /// Returns a new [`DiscoveryEvent`] stream. /// /// This stream yields [`DiscoveryEvent`]s for each peer that is discovered. @@ -430,8 +436,6 @@ pub(crate) enum NetworkHandleMessage { RemovePeer(PeerId, PeerKind), /// Disconnects a connection to a peer if it exists, optionally providing a disconnect reason. DisconnectPeer(PeerId, Option), - /// Adds a new listener for `NetworkEvent`. - EventListener(UnboundedSender), /// Broadcasts an event to announce a new block to all nodes. AnnounceBlock(NewBlock, B256), /// Sends a list of transactions to the given peer. 
diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index a92934c0cbcee..99c98db55d5dc 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -20,6 +20,7 @@ use reth_provider::{ test_utils::NoopProvider, BlockReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory, }; use reth_tasks::TokioTaskExecutor; +use reth_tokio_util::EventStream; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, test_utils::{TestPool, TestPoolBuilder}, @@ -40,7 +41,6 @@ use tokio::{ }, task::JoinHandle, }; -use tokio_stream::wrappers::UnboundedReceiverStream; /// A test network consisting of multiple peers. pub struct Testnet { @@ -503,7 +503,7 @@ impl PeerHandle { } /// Creates a new [`NetworkEvent`] listener channel. - pub fn event_listener(&self) -> UnboundedReceiverStream { + pub fn event_listener(&self) -> EventStream { self.network.event_listener() } @@ -591,14 +591,14 @@ impl Default for PeerConfig { /// This makes it easier to await established connections #[derive(Debug)] pub struct NetworkEventStream { - inner: UnboundedReceiverStream, + inner: EventStream, } // === impl NetworkEventStream === impl NetworkEventStream { /// Create a new [`NetworkEventStream`] from the given network event receiver stream. 
- pub fn new(inner: UnboundedReceiverStream) -> Self { + pub fn new(inner: EventStream) -> Self { Self { inner } } diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 070b9c7a1478e..b6b2328e4f8be 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -30,6 +30,7 @@ use reth_network_types::PeerId; use reth_primitives::{ FromRecoveredPooledTransaction, PooledTransactionsElement, TransactionSigned, TxHash, B256, }; +use reth_tokio_util::EventStream; use reth_transaction_pool::{ error::{PoolError, PoolResult}, GetPooledTransactionLimit, PoolTransaction, PropagateKind, PropagatedTransactions, @@ -197,7 +198,7 @@ pub struct TransactionsManager { /// Subscriptions to all network related events. /// /// From which we get all new incoming transaction related messages. - network_events: UnboundedReceiverStream, + network_events: EventStream, /// Transaction fetcher to handle inflight and missing transaction requests. transaction_fetcher: TransactionFetcher, /// All currently pending transactions grouped by peers. @@ -880,8 +881,8 @@ where } /// Handles a received event related to common network events. - fn on_network_event(&mut self, event: NetworkEvent) { - match event { + fn on_network_event(&mut self, event_result: NetworkEvent) { + match event_result { NetworkEvent::SessionClosed { peer_id, .. 
} => { // remove the peer self.peers.remove(&peer_id); @@ -1626,6 +1627,7 @@ mod tests { use secp256k1::SecretKey; use std::{fmt, future::poll_fn, hash}; use tests::fetcher::TxFetchMetadata; + use tracing::error; async fn new_tx_manager() -> TransactionsManager { let secret_key = SecretKey::new(&mut rand::thread_rng()); @@ -1734,7 +1736,7 @@ mod tests { } NetworkEvent::PeerAdded(_peer_id) => continue, ev => { - panic!("unexpected event {ev:?}") + error!("unexpected event {ev:?}") } } } @@ -1820,7 +1822,7 @@ mod tests { } NetworkEvent::PeerAdded(_peer_id) => continue, ev => { - panic!("unexpected event {ev:?}") + error!("unexpected event {ev:?}") } } } @@ -1904,7 +1906,7 @@ mod tests { } NetworkEvent::PeerAdded(_peer_id) => continue, ev => { - panic!("unexpected event {ev:?}") + error!("unexpected event {ev:?}") } } } @@ -1992,7 +1994,7 @@ mod tests { }), NetworkEvent::PeerAdded(_peer_id) => continue, ev => { - panic!("unexpected event {ev:?}") + error!("unexpected event {ev:?}") } } } diff --git a/crates/node-core/src/engine/engine_store.rs b/crates/node-core/src/engine/engine_store.rs index 2a1ffc3b0ed20..d59651ce9ca31 100644 --- a/crates/node-core/src/engine/engine_store.rs +++ b/crates/node-core/src/engine/engine_store.rs @@ -89,8 +89,7 @@ impl EngineMessageStore { )?; } // noop - BeaconEngineMessage::TransitionConfigurationExchanged | - BeaconEngineMessage::EventListener(_) => (), + BeaconEngineMessage::TransitionConfigurationExchanged => (), }; Ok(()) } diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index ece149e31dc2f..4987586bc9f3f 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -282,7 +282,7 @@ where // Configure the pipeline let pipeline_exex_handle = exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); - let (mut pipeline, client) = if ctx.is_dev() { + let (pipeline, client) = if ctx.is_dev() { info!(target: "reth::cli", "Starting Reth in dev 
mode"); for (idx, (address, alloc)) in ctx.chain_spec().genesis.alloc.iter().enumerate() { @@ -305,7 +305,7 @@ where ) .build(); - let mut pipeline = crate::setup::build_networked_pipeline( + let pipeline = crate::setup::build_networked_pipeline( ctx.node_config(), &ctx.toml_config().stages, client.clone(), @@ -358,7 +358,7 @@ where pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); } - let mut pruner = pruner_builder.build(ctx.provider_factory().clone()); + let pruner = pruner_builder.build(ctx.provider_factory().clone()); let pruner_events = pruner.events(); info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); @@ -395,7 +395,7 @@ where Either::Right(stream::empty()) }, pruner_events.map(Into::into), - static_file_producer_events.map(Into::into) + static_file_producer_events.map(Into::into), ); ctx.task_executor().spawn_critical( "events task", diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index ba7ae8da4600e..383da986b4a14 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -392,6 +392,9 @@ pub enum NodeEvent { Pruner(PrunerEvent), /// A static_file_producer event StaticFileProducer(StaticFileProducerEvent), + /// Used to encapsulate various conditions or situations that do not + /// naturally fit into the other more specific variants. 
+ Other(String), } impl From for NodeEvent { @@ -575,6 +578,9 @@ where NodeEvent::StaticFileProducer(event) => { this.state.handle_static_file_producer_event(event); } + NodeEvent::Other(event_description) => { + warn!("{event_description}"); + } } } diff --git a/crates/prune/Cargo.toml b/crates/prune/Cargo.toml index cc24e68b8341d..65b4ba19c6c1f 100644 --- a/crates/prune/Cargo.toml +++ b/crates/prune/Cargo.toml @@ -30,7 +30,6 @@ thiserror.workspace = true itertools.workspace = true rayon.workspace = true tokio.workspace = true -tokio-stream.workspace = true [dev-dependencies] # reth diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index 55a998709d8e1..f4111f131a506 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -13,13 +13,12 @@ use reth_primitives::{ use reth_provider::{ DatabaseProviderRW, ProviderFactory, PruneCheckpointReader, StaticFileProviderFactory, }; -use reth_tokio_util::EventListeners; +use reth_tokio_util::{EventSender, EventStream}; use std::{ collections::BTreeMap, time::{Duration, Instant}, }; use tokio::sync::watch; -use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::debug; /// Result of [Pruner::run] execution. @@ -53,7 +52,7 @@ pub struct Pruner { finished_exex_height: watch::Receiver, #[doc(hidden)] metrics: Metrics, - listeners: EventListeners, + event_sender: EventSender, } impl Pruner { @@ -77,13 +76,13 @@ impl Pruner { timeout, finished_exex_height, metrics: Metrics::default(), - listeners: Default::default(), + event_sender: Default::default(), } } /// Listen for events on the pruner. 
- pub fn events(&mut self) -> UnboundedReceiverStream { - self.listeners.new_listener() + pub fn events(&self) -> EventStream { + self.event_sender.new_listener() } /// Run the pruner @@ -100,7 +99,7 @@ impl Pruner { return Ok(PruneProgress::Finished) } - self.listeners.notify(PrunerEvent::Started { tip_block_number }); + self.event_sender.notify(PrunerEvent::Started { tip_block_number }); debug!(target: "pruner", %tip_block_number, "Pruner started"); let start = Instant::now(); @@ -154,7 +153,7 @@ impl Pruner { "{message}", ); - self.listeners.notify(PrunerEvent::Finished { tip_block_number, elapsed, stats }); + self.event_sender.notify(PrunerEvent::Finished { tip_block_number, elapsed, stats }); Ok(progress) } diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 9087ff7c7ffc5..e3b5f4766d905 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -56,6 +56,7 @@ reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true reth-tracing.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } +reth-tokio-util.workspace = true tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } serde_json.workspace = true diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index a3272ac026a61..dd58bf2de299d 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -26,7 +26,8 @@ pub fn test_address() -> SocketAddr { pub async fn launch_auth(secret: JwtSecret) -> AuthServerHandle { let config = AuthServerConfig::builder(secret).socket_addr(test_address()).build(); let (tx, _rx) = unbounded_channel(); - let beacon_engine_handle = BeaconConsensusEngineHandle::::new(tx); + let beacon_engine_handle = + BeaconConsensusEngineHandle::::new(tx, Default::default()); let engine_api = EngineApi::new( NoopProvider::default(), MAINNET.clone(), diff --git 
a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 5fe782a6ef539..83a5f85fcfaec 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -43,6 +43,7 @@ reth-ethereum-engine-primitives.workspace = true reth-interfaces = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-payload-builder = { workspace = true, features = ["test-utils"] } +reth-tokio-util.workspace = true alloy-rlp.workspace = true diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 0e4476bb71b09..a2275281e63ae 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -770,7 +770,7 @@ where mod tests { use super::*; use assert_matches::assert_matches; - use reth_beacon_consensus::BeaconEngineMessage; + use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage}; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_interfaces::test_utils::generators::random_block; use reth_payload_builder::test_utils::spawn_test_payload_service; @@ -778,6 +778,7 @@ mod tests { use reth_provider::test_utils::MockEthProvider; use reth_rpc_types_compat::engine::payload::execution_payload_from_sealed_block; use reth_tasks::TokioTaskExecutor; + use reth_tokio_util::EventSender; use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver}; fn setup_engine_api() -> (EngineApiTestHandle, EngineApi, EthEngineTypes>) @@ -786,11 +787,12 @@ mod tests { let provider = Arc::new(MockEthProvider::default()); let payload_store = spawn_test_payload_service(); let (to_engine, engine_rx) = unbounded_channel(); + let event_sender: EventSender = Default::default(); let task_executor = Box::::default(); let api = EngineApi::new( provider.clone(), chain_spec.clone(), - BeaconConsensusEngineHandle::new(to_engine), + BeaconConsensusEngineHandle::new(to_engine, event_sender), 
payload_store.into(), task_executor, ); diff --git a/crates/stages-api/Cargo.toml b/crates/stages-api/Cargo.toml index 2101961fd2d83..32c4258538ad1 100644 --- a/crates/stages-api/Cargo.toml +++ b/crates/stages-api/Cargo.toml @@ -27,7 +27,6 @@ metrics.workspace = true # async tokio = { workspace = true, features = ["sync"] } -tokio-stream.workspace = true futures-util.workspace = true # misc @@ -40,6 +39,7 @@ auto_impl.workspace = true assert_matches.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-interfaces = { workspace = true, features = ["test-utils"] } +tokio-stream.workspace = true [features] test-utils = [] diff --git a/crates/stages-api/src/error.rs b/crates/stages-api/src/error.rs index 37fe2b3fdbc2d..f6e528ca75426 100644 --- a/crates/stages-api/src/error.rs +++ b/crates/stages-api/src/error.rs @@ -1,3 +1,4 @@ +use crate::PipelineEvent; use reth_consensus::ConsensusError; use reth_interfaces::{ db::DatabaseError as DbError, executor, p2p::error::DownloadError, RethError, @@ -5,9 +6,7 @@ use reth_interfaces::{ use reth_primitives::{BlockNumber, SealedHeader, StaticFileSegment, TxNumber}; use reth_provider::ProviderError; use thiserror::Error; - -use crate::PipelineEvent; -use tokio::sync::mpsc::error::SendError; +use tokio::sync::broadcast::error::SendError; /// Represents the specific error type within a block error. 
#[derive(Error, Debug)] diff --git a/crates/stages-api/src/pipeline/builder.rs b/crates/stages-api/src/pipeline/builder.rs index e76f76c604c88..c059067259f88 100644 --- a/crates/stages-api/src/pipeline/builder.rs +++ b/crates/stages-api/src/pipeline/builder.rs @@ -80,7 +80,7 @@ where max_block, static_file_producer, tip_tx, - listeners: Default::default(), + event_sender: Default::default(), progress: Default::default(), metrics_tx, } diff --git a/crates/stages-api/src/pipeline/mod.rs b/crates/stages-api/src/pipeline/mod.rs index 5aceb515b791a..66a87a0f8a457 100644 --- a/crates/stages-api/src/pipeline/mod.rs +++ b/crates/stages-api/src/pipeline/mod.rs @@ -17,10 +17,9 @@ use reth_provider::{ }; use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; -use reth_tokio_util::EventListeners; +use reth_tokio_util::{EventSender, EventStream}; use std::pin::Pin; use tokio::sync::watch; -use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::*; mod builder; @@ -75,8 +74,8 @@ pub struct Pipeline { /// The maximum block number to sync to. max_block: Option, static_file_producer: StaticFileProducer, - /// All listeners for events the pipeline emits. - listeners: EventListeners, + /// Sender for events the pipeline emits. + event_sender: EventSender, /// Keeps track of the progress of the pipeline. progress: PipelineProgress, /// A receiver for the current chain tip to sync to. @@ -108,8 +107,8 @@ where } /// Listen for events on the pipeline. - pub fn events(&mut self) -> UnboundedReceiverStream { - self.listeners.new_listener() + pub fn events(&self) -> EventStream { + self.event_sender.new_listener() } /// Registers progress metrics for each registered stage @@ -251,7 +250,7 @@ where /// CAUTION: This method locks the static file producer Mutex, hence can block the thread if the /// lock is occupied. 
pub fn move_to_static_files(&self) -> RethResult<()> { - let mut static_file_producer = self.static_file_producer.lock(); + let static_file_producer = self.static_file_producer.lock(); // Copies data from database to static files let lowest_static_file_height = { @@ -312,7 +311,8 @@ where %to, "Unwind point too far for stage" ); - self.listeners.notify(PipelineEvent::Skipped { stage_id }); + self.event_sender.notify(PipelineEvent::Skipped { stage_id }); + continue } @@ -325,7 +325,7 @@ where ); while checkpoint.block_number > to { let input = UnwindInput { checkpoint, unwind_to: to, bad_block }; - self.listeners.notify(PipelineEvent::Unwind { stage_id, input }); + self.event_sender.notify(PipelineEvent::Unwind { stage_id, input }); let output = stage.unwind(&provider_rw, input); match output { @@ -350,7 +350,7 @@ where } provider_rw.save_stage_checkpoint(stage_id, checkpoint)?; - self.listeners + self.event_sender .notify(PipelineEvent::Unwound { stage_id, result: unwind_output }); self.provider_factory.static_file_provider().commit()?; @@ -359,7 +359,8 @@ where provider_rw = self.provider_factory.provider_rw()?; } Err(err) => { - self.listeners.notify(PipelineEvent::Error { stage_id }); + self.event_sender.notify(PipelineEvent::Error { stage_id }); + return Err(PipelineError::Stage(StageError::Fatal(Box::new(err)))) } } @@ -395,7 +396,7 @@ where prev_block = prev_checkpoint.map(|progress| progress.block_number), "Stage reached target block, skipping." 
); - self.listeners.notify(PipelineEvent::Skipped { stage_id }); + self.event_sender.notify(PipelineEvent::Skipped { stage_id }); // We reached the maximum block, so we skip the stage return Ok(ControlFlow::NoProgress { @@ -405,7 +406,7 @@ where let exec_input = ExecInput { target, checkpoint: prev_checkpoint }; - self.listeners.notify(PipelineEvent::Prepare { + self.event_sender.notify(PipelineEvent::Prepare { pipeline_stages_progress: PipelineStagesProgress { current: stage_index + 1, total: total_stages, @@ -416,14 +417,15 @@ where }); if let Err(err) = stage.execute_ready(exec_input).await { - self.listeners.notify(PipelineEvent::Error { stage_id }); + self.event_sender.notify(PipelineEvent::Error { stage_id }); + match on_stage_error(&self.provider_factory, stage_id, prev_checkpoint, err)? { Some(ctrl) => return Ok(ctrl), None => continue, }; } - self.listeners.notify(PipelineEvent::Run { + self.event_sender.notify(PipelineEvent::Run { pipeline_stages_progress: PipelineStagesProgress { current: stage_index + 1, total: total_stages, @@ -448,7 +450,7 @@ where } provider_rw.save_stage_checkpoint(stage_id, checkpoint)?; - self.listeners.notify(PipelineEvent::Ran { + self.event_sender.notify(PipelineEvent::Ran { pipeline_stages_progress: PipelineStagesProgress { current: stage_index + 1, total: total_stages, @@ -471,7 +473,8 @@ where } Err(err) => { drop(provider_rw); - self.listeners.notify(PipelineEvent::Error { stage_id }); + self.event_sender.notify(PipelineEvent::Error { stage_id }); + if let Some(ctrl) = on_stage_error(&self.provider_factory, stage_id, prev_checkpoint, err)? 
{ @@ -575,7 +578,7 @@ impl std::fmt::Debug for Pipeline { f.debug_struct("Pipeline") .field("stages", &self.stages.iter().map(|stage| stage.id()).collect::>()) .field("max_block", &self.max_block) - .field("listeners", &self.listeners) + .field("event_sender", &self.event_sender) .finish() } } diff --git a/crates/static-file/Cargo.toml b/crates/static-file/Cargo.toml index 1345b2f232fac..0f6608c8084df 100644 --- a/crates/static-file/Cargo.toml +++ b/crates/static-file/Cargo.toml @@ -21,6 +21,7 @@ reth-nippy-jar.workspace = true reth-tokio-util.workspace = true # async +tokio.workspace = true tokio-stream.workspace = true # misc diff --git a/crates/static-file/src/static_file_producer.rs b/crates/static-file/src/static_file_producer.rs index c7a365c9afab2..4eb0825611423 100644 --- a/crates/static-file/src/static_file_producer.rs +++ b/crates/static-file/src/static_file_producer.rs @@ -10,13 +10,12 @@ use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, ProviderFactory, }; -use reth_tokio_util::EventListeners; +use reth_tokio_util::{EventSender, EventStream}; use std::{ ops::{Deref, RangeInclusive}, sync::Arc, time::Instant, }; -use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, trace}; /// Result of [StaticFileProducerInner::run] execution. @@ -64,7 +63,7 @@ pub struct StaticFileProducerInner { /// needed in [StaticFileProducerInner] to prevent attempting to move prunable data to static /// files. See [StaticFileProducerInner::get_static_file_targets]. prune_modes: PruneModes, - listeners: EventListeners, + event_sender: EventSender, } /// Static File targets, per data part, measured in [`BlockNumber`]. 
@@ -107,12 +106,17 @@ impl StaticFileProducerInner { static_file_provider: StaticFileProvider, prune_modes: PruneModes, ) -> Self { - Self { provider_factory, static_file_provider, prune_modes, listeners: Default::default() } + Self { + provider_factory, + static_file_provider, + prune_modes, + event_sender: Default::default(), + } } /// Listen for events on the static_file_producer. - pub fn events(&mut self) -> UnboundedReceiverStream { - self.listeners.new_listener() + pub fn events(&self) -> EventStream { + self.event_sender.new_listener() } /// Run the static_file_producer. @@ -123,7 +127,7 @@ impl StaticFileProducerInner { /// /// NOTE: it doesn't delete the data from database, and the actual deleting (aka pruning) logic /// lives in the `prune` crate. - pub fn run(&mut self, targets: StaticFileTargets) -> StaticFileProducerResult { + pub fn run(&self, targets: StaticFileTargets) -> StaticFileProducerResult { // If there are no targets, do not produce any static files and return early if !targets.any() { return Ok(targets) @@ -133,7 +137,7 @@ impl StaticFileProducerInner { self.static_file_provider.get_highest_static_files() )); - self.listeners.notify(StaticFileProducerEvent::Started { targets: targets.clone() }); + self.event_sender.notify(StaticFileProducerEvent::Started { targets: targets.clone() }); debug!(target: "static_file", ?targets, "StaticFileProducer started"); let start = Instant::now(); @@ -173,7 +177,7 @@ impl StaticFileProducerInner { let elapsed = start.elapsed(); // TODO(alexey): track in metrics debug!(target: "static_file", ?targets, ?elapsed, "StaticFileProducer finished"); - self.listeners + self.event_sender .notify(StaticFileProducerEvent::Finished { targets: targets.clone(), elapsed }); Ok(targets) @@ -304,7 +308,7 @@ mod tests { fn run() { let (provider_factory, static_file_provider, _temp_static_files_dir) = setup(); - let mut static_file_producer = StaticFileProducerInner::new( + let static_file_producer = 
StaticFileProducerInner::new( provider_factory, static_file_provider.clone(), PruneModes::default(), @@ -392,7 +396,7 @@ mod tests { let tx = tx.clone(); std::thread::spawn(move || { - let mut locked_producer = producer.lock(); + let locked_producer = producer.lock(); if i == 0 { // Let other threads spawn as well. std::thread::sleep(Duration::from_millis(100)); diff --git a/crates/tokio-util/Cargo.toml b/crates/tokio-util/Cargo.toml index e8c21e0fa05a4..ccace030c0f78 100644 --- a/crates/tokio-util/Cargo.toml +++ b/crates/tokio-util/Cargo.toml @@ -12,7 +12,11 @@ description = "Additional utilities for working with Tokio in reth." workspace = true [dependencies] +tracing.workspace = true # async tokio = { workspace = true, features = ["sync"] } tokio-stream = { workspace = true, features = ["sync"] } + +[dev-dependencies] +tokio = { workspace = true, features = ["full", "macros"] } \ No newline at end of file diff --git a/crates/tokio-util/src/event_listeners.rs b/crates/tokio-util/src/event_listeners.rs deleted file mode 100644 index 3c940e28022ad..0000000000000 --- a/crates/tokio-util/src/event_listeners.rs +++ /dev/null @@ -1,46 +0,0 @@ -use tokio::sync::mpsc; -use tokio_stream::wrappers::UnboundedReceiverStream; - -/// A collection of event listeners for a task. -#[derive(Clone, Debug)] -pub struct EventListeners { - /// All listeners for events - listeners: Vec>, -} - -impl Default for EventListeners { - fn default() -> Self { - Self { listeners: Vec::new() } - } -} - -impl EventListeners { - /// Send an event to all listeners. - /// - /// Channels that were closed are removed. - pub fn notify(&mut self, event: T) { - self.listeners.retain(|listener| listener.send(event.clone()).is_ok()) - } - - /// Add a new event listener. - pub fn new_listener(&mut self) -> UnboundedReceiverStream { - let (sender, receiver) = mpsc::unbounded_channel(); - self.listeners.push(sender); - UnboundedReceiverStream::new(receiver) - } - - /// Push new event listener. 
- pub fn push_listener(&mut self, listener: mpsc::UnboundedSender) { - self.listeners.push(listener); - } - - /// Returns the number of registered listeners. - pub fn len(&self) -> usize { - self.listeners.len() - } - - /// Returns true if there are no registered listeners. - pub fn is_empty(&self) -> bool { - self.listeners.is_empty() - } -} diff --git a/crates/tokio-util/src/event_sender.rs b/crates/tokio-util/src/event_sender.rs new file mode 100644 index 0000000000000..3ed6e85910d50 --- /dev/null +++ b/crates/tokio-util/src/event_sender.rs @@ -0,0 +1,42 @@ +use crate::EventStream; +use tokio::sync::broadcast::{self, Sender}; +use tracing::error; + +const DEFAULT_SIZE_BROADCAST_CHANNEL: usize = 2000; + +/// A bounded broadcast channel for a task. +#[derive(Debug, Clone)] +pub struct EventSender { + /// The sender part of the broadcast channel + sender: Sender, +} + +impl Default for EventSender +where + T: Clone + Send + Sync + 'static, +{ + fn default() -> Self { + Self::new(DEFAULT_SIZE_BROADCAST_CHANNEL) + } +} + +impl EventSender { + /// Creates a new `EventSender`. + pub fn new(events_channel_size: usize) -> Self { + let (sender, _) = broadcast::channel(events_channel_size); + Self { sender } + } + + /// Broadcasts an event to all listeners. + pub fn notify(&self, event: T) { + if self.sender.send(event).is_err() { + error!("channel closed"); + } + } + + /// Creates a new event stream with a subscriber to the sender as the + /// receiver. + pub fn new_listener(&self) -> EventStream { + EventStream::new(self.sender.subscribe()) + } +} diff --git a/crates/tokio-util/src/event_stream.rs b/crates/tokio-util/src/event_stream.rs new file mode 100644 index 0000000000000..fc7e56a13bbe5 --- /dev/null +++ b/crates/tokio-util/src/event_stream.rs @@ -0,0 +1,92 @@ +//! Event streams related functionality. 
+ +use std::{ + pin::Pin, + task::{Context, Poll}, +}; +use tokio_stream::Stream; +use tracing::warn; + +/// Thin wrapper around tokio's BroadcastStream to allow skipping broadcast errors. +#[derive(Debug)] +pub struct EventStream { + inner: tokio_stream::wrappers::BroadcastStream, +} + +impl EventStream +where + T: Clone + Send + 'static, +{ + /// Creates a new `EventStream`. + pub fn new(receiver: tokio::sync::broadcast::Receiver) -> Self { + let inner = tokio_stream::wrappers::BroadcastStream::new(receiver); + EventStream { inner } + } +} + +impl Stream for EventStream +where + T: Clone + Send + 'static, +{ + type Item = T; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + loop { + match Pin::new(&mut self.inner).poll_next(cx) { + Poll::Ready(Some(Ok(item))) => return Poll::Ready(Some(item)), + Poll::Ready(Some(Err(e))) => { + warn!("BroadcastStream lagged: {e:?}"); + continue; + } + Poll::Ready(None) => return Poll::Ready(None), + Poll::Pending => return Poll::Pending, + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tokio::sync::broadcast; + use tokio_stream::StreamExt; + + #[tokio::test] + async fn test_event_stream_yields_items() { + let (tx, _) = broadcast::channel(16); + let my_stream = EventStream::new(tx.subscribe()); + + tx.send(1).unwrap(); + tx.send(2).unwrap(); + tx.send(3).unwrap(); + + // drop the sender to terminate the stream and allow collect to work. + drop(tx); + + let items: Vec = my_stream.collect().await; + + assert_eq!(items, vec![1, 2, 3]); + } + + #[tokio::test] + async fn test_event_stream_skips_lag_errors() { + let (tx, _) = broadcast::channel(2); + let my_stream = EventStream::new(tx.subscribe()); + + let mut _rx2 = tx.subscribe(); + let mut _rx3 = tx.subscribe(); + + tx.send(1).unwrap(); + tx.send(2).unwrap(); + tx.send(3).unwrap(); + tx.send(4).unwrap(); // This will cause lag for the first subscriber + + // drop the sender to terminate the stream and allow collect to work. 
+ drop(tx); + + // Ensure lag errors are skipped and only valid items are collected + let items: Vec = my_stream.collect().await; + + assert_eq!(items, vec![3, 4]); + } +} diff --git a/crates/tokio-util/src/lib.rs b/crates/tokio-util/src/lib.rs index 7db8dcfba16af..2053bf60bc56a 100644 --- a/crates/tokio-util/src/lib.rs +++ b/crates/tokio-util/src/lib.rs @@ -8,5 +8,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod event_listeners; -pub use event_listeners::EventListeners; +mod event_sender; +mod event_stream; +pub use event_sender::EventSender; +pub use event_stream::EventStream; From 7653e81d6f14f060ac5c84c5860c8b5cc5105bfb Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Wed, 22 May 2024 22:00:06 +0300 Subject: [PATCH 591/700] perf(primitives): avoid cloning receipts when calculating the root (#8350) --- Cargo.lock | 8 ++++---- crates/ethereum/consensus/src/validation.rs | 15 +++++++------- crates/optimism/consensus/src/validation.rs | 12 +++++------ crates/primitives/src/log.rs | 6 +----- crates/primitives/src/proofs.rs | 20 ++++++++++--------- crates/primitives/src/receipt.rs | 10 ++++++++-- .../bundle_state_with_receipts.rs | 5 ++--- 7 files changed, 39 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e34086b13275a..ce125ced72b09 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -342,9 +342,9 @@ dependencies = [ [[package]] name = "alloy-rlp" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d58d9f5da7b40e9bfff0b7e7816700be4019db97d4b6359fe7f94a9e22e42ac" +checksum = "b155716bab55763c95ba212806cf43d05bcc70e5f35b02bad20cf5ec7fe11fed" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -353,9 +353,9 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" +checksum = "8037e03c7f462a063f28daec9fda285a9a89da003c552f8637a80b9c8fd96241" dependencies = [ "proc-macro2", "quote", diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs index 30ff6fee26455..11fe54406af86 100644 --- a/crates/ethereum/consensus/src/validation.rs +++ b/crates/ethereum/consensus/src/validation.rs @@ -1,7 +1,6 @@ use reth_consensus::ConsensusError; use reth_primitives::{ - gas_spent_by_transactions, BlockWithSenders, Bloom, ChainSpec, GotExpected, Receipt, - ReceiptWithBloom, B256, + gas_spent_by_transactions, BlockWithSenders, Bloom, ChainSpec, GotExpected, Receipt, B256, }; /// Validate a block with regard to execution results: @@ -18,7 +17,7 @@ pub fn validate_block_post_execution( // transaction This was replaced with is_success flag. // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 if chain_spec.is_byzantium_active_at_block(block.header.number) { - verify_receipts(block.header.receipts_root, block.header.logs_bloom, receipts.iter())?; + verify_receipts(block.header.receipts_root, block.header.logs_bloom, receipts)?; } // Check if gas used matches the value set in header. @@ -36,16 +35,16 @@ pub fn validate_block_post_execution( /// Calculate the receipts root, and compare it against against the expected receipts root and logs /// bloom. -fn verify_receipts<'a>( +fn verify_receipts( expected_receipts_root: B256, expected_logs_bloom: Bloom, - receipts: impl Iterator + Clone, + receipts: &[Receipt], ) -> Result<(), ConsensusError> { // Calculate receipts root. 
- let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); - let receipts_root = reth_primitives::proofs::calculate_receipt_root(&receipts_with_bloom); + let receipts_with_bloom = receipts.iter().map(Receipt::with_bloom_ref).collect::>(); + let receipts_root = reth_primitives::proofs::calculate_receipt_root_ref(&receipts_with_bloom); - // Create header log bloom. + // Calculate header logs bloom. let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); compare_receipts_root_and_logs_bloom( diff --git a/crates/optimism/consensus/src/validation.rs b/crates/optimism/consensus/src/validation.rs index 0998cf1b8a0f2..cf9b849af47ec 100644 --- a/crates/optimism/consensus/src/validation.rs +++ b/crates/optimism/consensus/src/validation.rs @@ -1,7 +1,7 @@ use reth_consensus::ConsensusError; use reth_primitives::{ gas_spent_by_transactions, proofs::calculate_receipt_root_optimism, BlockWithSenders, Bloom, - ChainSpec, GotExpected, Receipt, ReceiptWithBloom, B256, + ChainSpec, GotExpected, Receipt, B256, }; /// Validate a block with regard to execution results: @@ -21,7 +21,7 @@ pub fn validate_block_post_execution( verify_receipts( block.header.receipts_root, block.header.logs_bloom, - receipts.iter(), + receipts, chain_spec, block.timestamp, )?; @@ -41,19 +41,19 @@ pub fn validate_block_post_execution( } /// Verify the calculated receipts root against the expected receipts root. -fn verify_receipts<'a>( +fn verify_receipts( expected_receipts_root: B256, expected_logs_bloom: Bloom, - receipts: impl Iterator + Clone, + receipts: &[Receipt], chain_spec: &ChainSpec, timestamp: u64, ) -> Result<(), ConsensusError> { // Calculate receipts root. 
- let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); + let receipts_with_bloom = receipts.iter().cloned().map(Receipt::with_bloom).collect::>(); let receipts_root = calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); - // Create header log bloom. + // Calculate header logs bloom. let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); compare_receipts_root_and_logs_bloom( diff --git a/crates/primitives/src/log.rs b/crates/primitives/src/log.rs index 79227d4f9bd8e..628a20f831c2f 100644 --- a/crates/primitives/src/log.rs +++ b/crates/primitives/src/log.rs @@ -1,13 +1,9 @@ use crate::Bloom; -/// Re-export `Log` from `alloy_primitives`. pub use alloy_primitives::Log; /// Calculate receipt logs bloom. -pub fn logs_bloom<'a, It>(logs: It) -> Bloom -where - It: IntoIterator, -{ +pub fn logs_bloom<'a>(logs: impl IntoIterator) -> Bloom { let mut bloom = Bloom::ZERO; for log in logs { bloom.m3_2048(log.address.as_slice()); diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index d08fc10a63c86..b16fa68793820 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -8,7 +8,6 @@ use crate::{ B256, U256, }; use alloy_rlp::Encodable; -use bytes::BufMut; use itertools::Itertools; /// Adjust the index of an item for rlp encoding. @@ -30,9 +29,8 @@ pub fn ordered_trie_root(items: &[T]) -> B256 { /// Compute a trie root of the collection of items with a custom encoder. 
pub fn ordered_trie_root_with_encoder(items: &[T], mut encode: F) -> B256 where - F: FnMut(&T, &mut dyn BufMut), + F: FnMut(&T, &mut Vec), { - let mut index_buffer = Vec::new(); let mut value_buffer = Vec::new(); let mut hb = HashBuilder::default(); @@ -40,8 +38,7 @@ where for i in 0..items_len { let index = adjust_index_for_rlp(i, items_len); - index_buffer.clear(); - index.encode(&mut index_buffer); + let index_buffer = alloy_rlp::encode_fixed_size(&index); value_buffer.clear(); encode(&items[index], &mut value_buffer); @@ -104,10 +101,15 @@ pub fn calculate_receipt_root_optimism( ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) } +/// Calculates the receipt root for a header. +pub fn calculate_receipt_root_ref(receipts: &[ReceiptWithBloomRef<'_>]) -> B256 { + ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) +} + /// Calculates the receipt root for a header for the reference type of [Receipt]. /// -/// NOTE: Prefer [calculate_receipt_root] if you have log blooms memoized. -pub fn calculate_receipt_root_ref(receipts: &[&Receipt]) -> B256 { +/// NOTE: Prefer [`calculate_receipt_root`] if you have log blooms memoized. +pub fn calculate_receipt_root_no_memo(receipts: &[&Receipt]) -> B256 { ordered_trie_root_with_encoder(receipts, |r, buf| { ReceiptWithBloomRef::from(*r).encode_inner(buf, false) }) @@ -115,9 +117,9 @@ pub fn calculate_receipt_root_ref(receipts: &[&Receipt]) -> B256 { /// Calculates the receipt root for a header for the reference type of [Receipt]. /// -/// NOTE: Prefer [calculate_receipt_root] if you have log blooms memoized. +/// NOTE: Prefer [`calculate_receipt_root_optimism`] if you have log blooms memoized. 
#[cfg(feature = "optimism")] -pub fn calculate_receipt_root_ref_optimism( +pub fn calculate_receipt_root_no_memo_optimism( receipts: &[&Receipt], chain_spec: &crate::ChainSpec, timestamp: u64, diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 74e90363daee7..85470cb2e81f3 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -56,6 +56,12 @@ impl Receipt { pub fn with_bloom(self) -> ReceiptWithBloom { self.into() } + + /// Calculates the bloom filter for the receipt and returns the [ReceiptWithBloomRef] container + /// type. + pub fn with_bloom_ref(&self) -> ReceiptWithBloomRef<'_> { + self.into() + } } /// A collection of receipts organized as a two-dimensional vector. @@ -98,7 +104,7 @@ impl Receipts { /// Retrieves the receipt root for all recorded receipts from index. pub fn root_slow(&self, index: usize) -> Option { - Some(crate::proofs::calculate_receipt_root_ref( + Some(crate::proofs::calculate_receipt_root_no_memo( &self.receipt_vec[index].iter().map(Option::as_ref).collect::>>()?, )) } @@ -111,7 +117,7 @@ impl Receipts { chain_spec: &crate::ChainSpec, timestamp: u64, ) -> Option { - Some(crate::proofs::calculate_receipt_root_ref_optimism( + Some(crate::proofs::calculate_receipt_root_no_memo_optimism( &self.receipt_vec[index].iter().map(Option::as_ref).collect::>>()?, chain_spec, timestamp, diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index 947c6609b961d..fe76714d3045a 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -182,12 +182,11 @@ impl BundleStateWithReceipts { /// Returns the receipt root for all recorded receipts. /// Note: this function calculated Bloom filters for every receipt and created merkle trees /// of receipt. This is a expensive operation. 
- #[allow(unused_variables)] - pub fn receipts_root_slow(&self, block_number: BlockNumber) -> Option { + pub fn receipts_root_slow(&self, _block_number: BlockNumber) -> Option { #[cfg(feature = "optimism")] panic!("This should not be called in optimism mode. Use `optimism_receipts_root_slow` instead."); #[cfg(not(feature = "optimism"))] - self.receipts.root_slow(self.block_number_to_index(block_number)?) + self.receipts.root_slow(self.block_number_to_index(_block_number)?) } /// Returns the receipt root for all recorded receipts. From dbc65ad694f012250788e2bdcf7ecd01e47eb813 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 22 May 2024 21:01:27 +0200 Subject: [PATCH 592/700] style: small refactor for `txpool_inspect` (#8348) --- crates/rpc/rpc/src/txpool.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index e6f7d66b51e8a..202d69b575a7f 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -87,10 +87,9 @@ where inspect: &mut BTreeMap>, ) { let entry = inspect.entry(tx.sender()).or_default(); - let key = tx.nonce().to_string(); let tx = tx.to_recovered_transaction(); entry.insert( - key, + tx.nonce().to_string(), TxpoolInspectSummary { to: tx.to(), value: tx.value(), @@ -100,17 +99,18 @@ where ); } - let mut inspect = TxpoolInspect::default(); let AllPoolTransactions { pending, queued } = self.pool.all_transactions(); - for pending in pending { - insert(&pending.transaction, &mut inspect.pending); - } - for queued in queued { - insert(&queued.transaction, &mut inspect.queued); - } - - Ok(inspect) + Ok(TxpoolInspect { + pending: pending.iter().fold(Default::default(), |mut acc, tx| { + insert(&tx.transaction, &mut acc); + acc + }), + queued: queued.iter().fold(Default::default(), |mut acc, tx| { + insert(&tx.transaction, &mut acc); + acc + }), + }) } /// Retrieves the transactions 
contained within the txpool, returning pending as well as queued From bc914a64d93a552f589e955e3bb0c800a8ed1071 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 22 May 2024 21:11:57 +0200 Subject: [PATCH 593/700] fix: check for files in is_database_empty (#8351) --- crates/storage/db/src/utils.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/crates/storage/db/src/utils.rs b/crates/storage/db/src/utils.rs index d3e760f3d9988..cf6a0341ef7a9 100644 --- a/crates/storage/db/src/utils.rs +++ b/crates/storage/db/src/utils.rs @@ -23,9 +23,25 @@ pub fn is_database_empty>(path: P) -> bool { if !path.exists() { true + } else if path.is_file() { + false } else if let Ok(dir) = path.read_dir() { dir.count() == 0 } else { true } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn is_database_empty_false_if_db_path_is_a_file() { + let db_file = tempfile::NamedTempFile::new().unwrap(); + + let result = is_database_empty(&db_file); + + assert!(!result); + } +} From 3eddaf31d0bb13721ca321c36fc074697af0526e Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Thu, 23 May 2024 05:47:20 +0800 Subject: [PATCH 594/700] feat: implement table range checksums for reth db checksum (#7623) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- bin/reth/src/commands/db/checksum.rs | 79 +++++++++++++++++++++++++--- bin/reth/src/commands/db/get.rs | 4 +- 2 files changed, 74 insertions(+), 9 deletions(-) diff --git a/bin/reth/src/commands/db/checksum.rs b/bin/reth/src/commands/db/checksum.rs index 689b6ca5a94f8..9562c983923af 100644 --- a/bin/reth/src/commands/db/checksum.rs +++ b/bin/reth/src/commands/db/checksum.rs @@ -1,12 +1,15 @@ -use crate::utils::DbTool; -use ahash::AHasher; +use crate::{ + commands::db::get::{maybe_json_value_parser, table_key}, + utils::DbTool, +}; +use ahash::RandomState; use clap::Parser; use reth_db::{ cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx, DatabaseEnv, RawKey, RawTable, 
RawValue, TableViewer, Tables, }; use std::{ - hash::Hasher, + hash::{BuildHasher, Hasher}, time::{Duration, Instant}, }; use tracing::{info, warn}; @@ -16,35 +19,81 @@ use tracing::{info, warn}; pub struct Command { /// The table name table: Tables, + + /// The start of the range to checksum. + #[arg(long, value_parser = maybe_json_value_parser)] + start_key: Option, + + /// The end of the range to checksum. + #[arg(long, value_parser = maybe_json_value_parser)] + end_key: Option, + + /// The maximum number of records that are queried and used to compute the + /// checksum. + #[arg(long)] + limit: Option, } impl Command { /// Execute `db checksum` command pub fn execute(self, tool: &DbTool) -> eyre::Result<()> { warn!("This command should be run without the node running!"); - self.table.view(&ChecksumViewer { tool }) + self.table.view(&ChecksumViewer { + tool, + start_key: self.start_key, + end_key: self.end_key, + limit: self.limit, + }) } } pub(crate) struct ChecksumViewer<'a, DB: Database> { tool: &'a DbTool, + start_key: Option, + end_key: Option, + limit: Option, } impl ChecksumViewer<'_, DB> { pub(crate) fn new(tool: &'_ DbTool) -> ChecksumViewer<'_, DB> { - ChecksumViewer { tool } + ChecksumViewer { tool, start_key: None, end_key: None, limit: None } } pub(crate) fn get_checksum(&self) -> Result<(u64, Duration), eyre::Report> { let provider = self.tool.provider_factory.provider()?.disable_long_read_transaction_safety(); let tx = provider.tx_ref(); + info!( + "Start computing checksum, start={:?}, end={:?}, limit={:?}", + self.start_key, self.end_key, self.limit + ); let mut cursor = tx.cursor_read::>()?; - let walker = cursor.walk(None)?; + let walker = match (self.start_key.as_deref(), self.end_key.as_deref()) { + (Some(start), Some(end)) => { + let start_key = table_key::(start).map(RawKey::::new)?; + let end_key = table_key::(end).map(RawKey::::new)?; + cursor.walk_range(start_key..=end_key)? 
+ } + (None, Some(end)) => { + let end_key = table_key::(end).map(RawKey::::new)?; + + cursor.walk_range(..=end_key)? + } + (Some(start), None) => { + let start_key = table_key::(start).map(RawKey::::new)?; + cursor.walk_range(start_key..)? + } + (None, None) => cursor.walk_range(..)?, + }; let start_time = Instant::now(); - let mut hasher = AHasher::default(); + let mut hasher = RandomState::with_seeds(1, 2, 3, 4).build_hasher(); + let mut total = 0; + + let limit = self.limit.unwrap_or(usize::MAX); + let mut enumerate_start_key = None; + let mut enumerate_end_key = None; for (index, entry) in walker.enumerate() { let (k, v): (RawKey, RawValue) = entry?; @@ -54,6 +103,22 @@ impl ChecksumViewer<'_, DB> { hasher.write(k.raw_key()); hasher.write(v.raw_value()); + + if enumerate_start_key.is_none() { + enumerate_start_key = Some(k.clone()); + } + enumerate_end_key = Some(k); + + total = index + 1; + if total >= limit { + break + } + } + + info!("Hashed {total} entries."); + if let (Some(s), Some(e)) = (enumerate_start_key, enumerate_end_key) { + info!("start-key: {}", serde_json::to_string(&s.key()?).unwrap_or_default()); + info!("end-key: {}", serde_json::to_string(&e.key()?).unwrap_or_default()); } let checksum = hasher.finish(); diff --git a/bin/reth/src/commands/db/get.rs b/bin/reth/src/commands/db/get.rs index 80e3ae393d1b0..f1f6b963cf1d6 100644 --- a/bin/reth/src/commands/db/get.rs +++ b/bin/reth/src/commands/db/get.rs @@ -125,7 +125,7 @@ impl Command { } /// Get an instance of key for given table -fn table_key(key: &str) -> Result { +pub(crate) fn table_key(key: &str) -> Result { serde_json::from_str::(key).map_err(|e| eyre::eyre!(e)) } @@ -188,7 +188,7 @@ impl TableViewer<()> for GetValueViewer<'_, DB> { } /// Map the user input value to json -fn maybe_json_value_parser(value: &str) -> Result { +pub(crate) fn maybe_json_value_parser(value: &str) -> Result { if serde_json::from_str::(value).is_ok() { Ok(value.to_string()) } else { From 
8158c284bcb47c192c5de1f4f00872b61682054d Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Thu, 23 May 2024 12:49:41 +0200 Subject: [PATCH 595/700] doc: remove missing link (#8363) --- docs/repo/ci.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/repo/ci.md b/docs/repo/ci.md index a1102b4a77695..18356ddb73200 100644 --- a/docs/repo/ci.md +++ b/docs/repo/ci.md @@ -7,7 +7,6 @@ The CI runs a couple of workflows: - **[ci]**: A catch-all for small jobs. Currently only runs lints (rustfmt, clippy etc.) - **[unit]**: Runs unit tests (tests in `src/`) and doc tests - **[integration]**: Runs integration tests (tests in `tests/` and sync tests) -- **[fuzz]**: Runs fuzz tests - **[bench]**: Runs benchmarks ### Docs @@ -23,10 +22,8 @@ The CI runs a couple of workflows: [ci]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/ci.yml [unit]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/unit.yml [integration]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/integration.yml -[fuzz]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/fuzz.yml [bench]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/bench.yml [book]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/book.yml [deny]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/deny.yml [sanity]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/sanity.yml [release]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/release.yml -[gh-projects]: https://docs.github.com/en/issues/planning-and-tracking-with-projects/automating-your-project/automating-projects-using-actions \ No newline at end of file From c5bc960d31acc65d852fd3a2bbf3672fd1897573 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Thu, 23 May 2024 13:16:13 +0200 Subject: [PATCH 596/700] fix(storage): use u8 for NippiJar's DataReader::offset_size (#8360) --- crates/storage/nippy-jar/src/error.rs | 2 +- 
crates/storage/nippy-jar/src/lib.rs | 9 +++++---- crates/storage/nippy-jar/src/writer.rs | 22 +++++++++++----------- 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index 2f7bcf804cf83..d59500842c7a6 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -40,7 +40,7 @@ pub enum NippyJarError { #[error("the size of an offset must be at most 8 bytes, got {offset_size}")] OffsetSizeTooBig { /// The read offset size in number of bytes. - offset_size: u64, + offset_size: u8, }, #[error("attempted to read an out of bounds offset: {index}")] OffsetOutOfBounds { diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index ac9e771b1e76a..2eafe68c409e8 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -476,7 +476,7 @@ pub struct DataReader { /// Mmap handle for offsets. offset_mmap: Mmap, /// Number of bytes that represent one offset. - offset_size: u64, + offset_size: u8, } impl DataReader { @@ -491,7 +491,7 @@ impl DataReader { let offset_mmap = unsafe { Mmap::map(&offset_file)? }; // First byte is the size of one offset in bytes - let offset_size = offset_mmap[0] as u64; + let offset_size = offset_mmap[0]; // Ensure that the size of an offset is at most 8 bytes. if offset_size > 8 { @@ -525,7 +525,8 @@ impl DataReader { /// Returns total number of offsets in the file. /// The size of one offset is determined by the file itself. pub fn offsets_count(&self) -> Result { - Ok((self.offset_file.metadata()?.len().saturating_sub(1) / self.offset_size) as usize) + Ok((self.offset_file.metadata()?.len().saturating_sub(1) / self.offset_size as u64) + as usize) } /// Reads one offset-sized (determined by the offset file) u64 at the provided index. @@ -542,7 +543,7 @@ impl DataReader { } /// Returns number of bytes that represent one offset. 
- pub fn offset_size(&self) -> u64 { + pub fn offset_size(&self) -> u8 { self.offset_size } diff --git a/crates/storage/nippy-jar/src/writer.rs b/crates/storage/nippy-jar/src/writer.rs index 6417e60076cf1..bd56b4a6b386b 100644 --- a/crates/storage/nippy-jar/src/writer.rs +++ b/crates/storage/nippy-jar/src/writer.rs @@ -7,7 +7,7 @@ use std::{ }; /// Size of one offset in bytes. -const OFFSET_SIZE_BYTES: u64 = 8; +const OFFSET_SIZE_BYTES: u8 = 8; /// Writer of [`NippyJar`]. Handles table data and offsets only. /// @@ -112,7 +112,7 @@ impl NippyJarWriter { let mut offsets_file = OpenOptions::new().read(true).write(true).open(offsets)?; // First byte of the offset file is the size of one offset in bytes - offsets_file.write_all(&[OFFSET_SIZE_BYTES as u8])?; + offsets_file.write_all(&[OFFSET_SIZE_BYTES])?; offsets_file.seek(SeekFrom::End(0))?; Ok((data_file, offsets_file, is_created)) @@ -133,9 +133,9 @@ impl NippyJarWriter { return Err(NippyJarError::FrozenJar) } - let expected_offsets_file_size = 1 + // first byte is the size of one offset - OFFSET_SIZE_BYTES * self.jar.rows as u64 * self.jar.columns as u64 + // `offset size * num rows * num columns` - OFFSET_SIZE_BYTES; // expected size of the data file + let expected_offsets_file_size: u64 = (1 + // first byte is the size of one offset + OFFSET_SIZE_BYTES as usize* self.jar.rows * self.jar.columns + // `offset size * num rows * num columns` + OFFSET_SIZE_BYTES as usize) as u64; // expected size of the data file let actual_offsets_file_size = self.offsets_file.get_ref().metadata()?.len(); // Offsets configuration wasn't properly committed @@ -151,9 +151,9 @@ impl NippyJarWriter { // `num rows = (file size - 1 - size of one offset) / num columns` self.jar.rows = ((actual_offsets_file_size. saturating_sub(1). 
// first byte is the size of one offset - saturating_sub(OFFSET_SIZE_BYTES) / // expected size of the data file + saturating_sub(OFFSET_SIZE_BYTES as u64) / // expected size of the data file (self.jar.columns as u64)) / - OFFSET_SIZE_BYTES) as usize; + OFFSET_SIZE_BYTES as u64) as usize; // Freeze row count changed self.jar.freeze_config()?; @@ -183,7 +183,7 @@ impl NippyJarWriter { .get_ref() .metadata()? .len() - .saturating_sub(OFFSET_SIZE_BYTES * (index as u64 + 1)); + .saturating_sub(OFFSET_SIZE_BYTES as u64 * (index as u64 + 1)); self.offsets_file.get_mut().set_len(new_len)?; drop(reader); @@ -318,7 +318,7 @@ impl NippyJarWriter { // Handle non-empty offset file if length > 1 { // first byte is reserved for `bytes_per_offset`, which is 8 initially. - let num_offsets = (length - 1) / OFFSET_SIZE_BYTES; + let num_offsets = (length - 1) / OFFSET_SIZE_BYTES as u64; if remaining_to_prune as u64 > num_offsets { return Err(NippyJarError::InvalidPruning( @@ -336,10 +336,10 @@ impl NippyJarWriter { self.data_file.get_mut().set_len(0)?; } else { // Calculate the new length for the on-disk offset list - let new_len = 1 + new_num_offsets * OFFSET_SIZE_BYTES; + let new_len = 1 + new_num_offsets * OFFSET_SIZE_BYTES as u64; // Seek to the position of the last offset self.offsets_file - .seek(SeekFrom::Start(new_len.saturating_sub(OFFSET_SIZE_BYTES)))?; + .seek(SeekFrom::Start(new_len.saturating_sub(OFFSET_SIZE_BYTES as u64)))?; // Read the last offset value let mut last_offset = [0u8; OFFSET_SIZE_BYTES as usize]; self.offsets_file.get_ref().read_exact(&mut last_offset)?; From 155876d28c91e172d692155b658e15514d733469 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 23 May 2024 07:17:48 -0400 Subject: [PATCH 597/700] chore: improve HaveNotReceivedUpdatesForAWhile warning (#8356) --- crates/node/events/src/node.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/node/events/src/node.rs 
b/crates/node/events/src/node.rs index 383da986b4a14..8c01c0a737a76 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -315,7 +315,7 @@ impl NodeState { warn!("Beacon client online, but never received consensus updates. Please ensure your beacon client is operational to follow the chain!") } ConsensusLayerHealthEvent::HaveNotReceivedUpdatesForAWhile(period) => { - warn!(?period, "Beacon client online, but no consensus updates received for a while. Please fix your beacon client to follow the chain!") + warn!(?period, "Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs!") } } } From 4250c33da1db2b0c1925ca6d8a4e10b4c7bb754a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 23 May 2024 13:24:20 +0200 Subject: [PATCH 598/700] chore: clippy happy (#8362) Co-authored-by: Alexey Shekhirin --- Cargo.toml | 2 +- Makefile | 6 +++--- bin/reth/src/commands/db/stats.rs | 2 +- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- .../consensus/beacon/src/engine/hooks/prune.rs | 8 ++++---- .../beacon/src/engine/hooks/static_file.rs | 14 +++++++------- crates/consensus/beacon/src/engine/mod.rs | 10 +++++----- crates/exex/src/manager.rs | 2 +- crates/interfaces/src/blockchain_tree/mod.rs | 1 + crates/net/ecies/src/algorithm.rs | 4 ++-- crates/net/eth-wire-types/src/message.rs | 2 +- crates/node-core/src/args/utils.rs | 2 +- crates/node-core/src/dirs.rs | 1 + crates/payload/validator/src/lib.rs | 4 ++-- crates/primitives/src/prune/target.rs | 4 ++-- crates/primitives/src/revm/env.rs | 4 ++-- crates/primitives/src/transaction/mod.rs | 2 +- crates/stages/src/stages/execution.rs | 3 +-- crates/stages/src/stages/hashing_account.rs | 9 ++++----- crates/stages/src/stages/sender_recovery.rs | 5 ++--- crates/stages/src/stages/tx_lookup.rs | 5 ++--- .../db/src/implementation/mdbx/cursor.rs | 3 +-- 
.../src/providers/database/provider.rs | 18 +++++++++--------- crates/tokio-util/src/event_stream.rs | 2 +- crates/transaction-pool/src/pool/mod.rs | 16 ++++++++-------- crates/transaction-pool/src/pool/pending.rs | 6 +++--- crates/transaction-pool/src/pool/txpool.rs | 1 + examples/node-event-hooks/src/main.rs | 8 ++------ 28 files changed, 70 insertions(+), 76 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index bf7ea50bde171..a44a474e092c9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -105,7 +105,7 @@ rust.missing_debug_implementations = "warn" rust.missing_docs = "warn" rust.unreachable_pub = "warn" rust.unused_must_use = "deny" -rust.rust_2018_idioms = "deny" +rust.rust_2018_idioms = { level = "deny", priority = -1 } rustdoc.all = "warn" [workspace.lints.clippy] diff --git a/Makefile b/Makefile index bfa56011c1a0c..f62ef19f33463 100644 --- a/Makefile +++ b/Makefile @@ -413,9 +413,9 @@ fix-lint-other-targets: -- -D warnings fix-lint: - make lint-reth && \ - make lint-op-reth && \ - make lint-other-targets && \ + make fix-lint-reth && \ + make fix-lint-op-reth && \ + make fix-lint-other-targets && \ make fmt .PHONY: rustdocs diff --git a/bin/reth/src/commands/db/stats.rs b/bin/reth/src/commands/db/stats.rs index a59a904eb7424..03c384b2ffc4d 100644 --- a/bin/reth/src/commands/db/stats.rs +++ b/bin/reth/src/commands/db/stats.rs @@ -377,7 +377,7 @@ impl Command { let max_widths = table.column_max_content_widths(); let mut separator = Row::new(); for width in max_widths { - separator.add_cell(Cell::new(&"-".repeat(width as usize))); + separator.add_cell(Cell::new("-".repeat(width as usize))); } table.add_row(separator); diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index ef8fe65e72a97..e6694447bbca9 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -307,7 +307,7 @@ where *key_value.0 } else { debug!(target: "blockchain_tree", ?chain_id, "No 
blockhashes stored"); - return None; + return None }; let canonical_chain = canonical_chain .iter() diff --git a/crates/consensus/beacon/src/engine/hooks/prune.rs b/crates/consensus/beacon/src/engine/hooks/prune.rs index a9bb4f05bd427..d2c2e2d33a1e9 100644 --- a/crates/consensus/beacon/src/engine/hooks/prune.rs +++ b/crates/consensus/beacon/src/engine/hooks/prune.rs @@ -78,10 +78,10 @@ impl PruneHook { /// This will try to spawn the pruner if it is idle: /// 1. Check if pruning is needed through [Pruner::is_pruning_needed]. - /// 2. - /// 1. If pruning is needed, pass tip block number to the [Pruner::run] and spawn it in a - /// separate task. Set pruner state to [PrunerState::Running]. - /// 2. If pruning is not needed, set pruner state back to [PrunerState::Idle]. + /// + /// 2.1. If pruning is needed, pass tip block number to the [Pruner::run] and spawn it in a + /// separate task. Set pruner state to [PrunerState::Running]. + /// 2.2. If pruning is not needed, set pruner state back to [PrunerState::Idle]. /// /// If pruner is already running, do nothing. fn try_spawn_pruner(&mut self, tip_block_number: BlockNumber) -> Option { diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index 01b7056c37f02..3786e29f87f33 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -71,13 +71,13 @@ impl StaticFileHook { /// 1. Check if producing static files is needed through /// [StaticFileProducer::get_static_file_targets](reth_static_file::StaticFileProducerInner::get_static_file_targets) /// and then [StaticFileTargets::any](reth_static_file::StaticFileTargets::any). - /// 2. - /// 1. If producing static files is needed, pass static file request to the - /// [StaticFileProducer::run](reth_static_file::StaticFileProducerInner::run) and spawn - /// it in a separate task. 
Set static file producer state to - /// [StaticFileProducerState::Running]. - /// 2. If producing static files is not needed, set static file producer state back to - /// [StaticFileProducerState::Idle]. + /// + /// 2.1. If producing static files is needed, pass static file request to the + /// [StaticFileProducer::run](reth_static_file::StaticFileProducerInner::run) and + /// spawn it in a separate task. Set static file producer state to + /// [StaticFileProducerState::Running]. + /// 2.2. If producing static files is not needed, set static file producer state back to + /// [StaticFileProducerState::Idle]. /// /// If static_file_producer is already running, do nothing. fn try_spawn_static_file_producer( diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 8139a0c577314..5f7f583902dd0 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -703,13 +703,13 @@ where /// If validation fails, the response MUST contain the latest valid hash: /// /// - The block hash of the ancestor of the invalid payload satisfying the following two - /// conditions: + /// conditions: /// - It is fully validated and deemed VALID /// - Any other ancestor of the invalid payload with a higher blockNumber is INVALID /// - 0x0000000000000000000000000000000000000000000000000000000000000000 if the above - /// conditions are satisfied by a PoW block. + /// conditions are satisfied by a PoW block. /// - null if client software cannot determine the ancestor of the invalid payload satisfying - /// the above conditions. + /// the above conditions. 
fn latest_valid_hash_for_invalid_payload( &mut self, parent_hash: B256, @@ -1103,8 +1103,8 @@ where /// - invalid extra data /// - invalid transactions /// - incorrect hash - /// - the versioned hashes passed with the payload do not exactly match transaction - /// versioned hashes + /// - the versioned hashes passed with the payload do not exactly match transaction versioned + /// hashes /// - the block does not contain blob transactions if it is pre-cancun /// /// This validates the following engine API rule: diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 1de8c102e3be8..088328c860556 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -160,7 +160,7 @@ pub struct ExExManagerMetrics { /// The manager is responsible for: /// /// - Receiving relevant events from the rest of the node, and sending these to the execution -/// extensions +/// extensions /// - Backpressure /// - Error handling /// - Monitoring diff --git a/crates/interfaces/src/blockchain_tree/mod.rs b/crates/interfaces/src/blockchain_tree/mod.rs index 7d2b50e418e1c..0c1a9553dc30b 100644 --- a/crates/interfaces/src/blockchain_tree/mod.rs +++ b/crates/interfaces/src/blockchain_tree/mod.rs @@ -210,6 +210,7 @@ pub enum BlockStatus { /// This is required to: /// - differentiate whether trie state updates should be cached. /// - inform other +/// /// This is required because the state root check can only be performed if the targeted block can be /// traced back to the canonical __head__. #[derive(Debug, Clone, Copy, PartialEq, Eq)] diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index 52398de4fe42e..65d74627e27f9 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -45,8 +45,8 @@ fn ecdh_x(public_key: &PublicKey, secret_key: &SecretKey) -> B256 { /// # Panics /// * If the `dest` is empty /// * If the `dest` len is greater than or equal to the hash output len * the max counter value. 
In -/// this case, the hash output len is 32 bytes, and the max counter value is 2^32 - 1. So the dest -/// cannot have a len greater than 32 * 2^32 - 1. +/// this case, the hash output len is 32 bytes, and the max counter value is 2^32 - 1. So the dest +/// cannot have a len greater than 32 * 2^32 - 1. fn kdf(secret: B256, s1: &[u8], dest: &mut [u8]) { concat_kdf::derive_key_into::(secret.as_slice(), s1, dest).unwrap(); } diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index dc8011879ba1f..c4101e852d2cb 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -169,7 +169,7 @@ impl From for ProtocolBroadcastMessage { /// The ethereum wire protocol is a set of messages that are broadcast to the network in two /// styles: /// * A request message sent by a peer (such as [`GetPooledTransactions`]), and an associated -/// response message (such as [`PooledTransactions`]). +/// response message (such as [`PooledTransactions`]). /// * A message that is broadcast to the network, without a corresponding request. /// /// The newer `eth/66` is an efficiency upgrade on top of `eth/65`, introducing a request id to diff --git a/crates/node-core/src/args/utils.rs b/crates/node-core/src/args/utils.rs index 72b84914f5ba0..4f49bf1349e75 100644 --- a/crates/node-core/src/args/utils.rs +++ b/crates/node-core/src/args/utils.rs @@ -141,7 +141,7 @@ pub enum SocketAddressParsingError { /// The following formats are checked: /// /// - If the value can be parsed as a `u16` or starts with `:` it is considered a port, and the -/// hostname is set to `localhost`. +/// hostname is set to `localhost`. 
/// - If the value contains `:` it is assumed to be the format `:` /// - Otherwise it is assumed to be a hostname /// diff --git a/crates/node-core/src/dirs.rs b/crates/node-core/src/dirs.rs index 75919f6f0fcae..b33df18f26f34 100644 --- a/crates/node-core/src/dirs.rs +++ b/crates/node-core/src/dirs.rs @@ -257,6 +257,7 @@ impl From for MaybePlatformPath { /// * mainnet: `/mainnet` /// * goerli: `/goerli` /// * sepolia: `/sepolia` +/// /// Otherwise, the path will be dependent on the chain ID: /// * `/` #[derive(Clone, Debug, PartialEq, Eq)] diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index 7f85a0177ae6a..13c20541f03ab 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -84,8 +84,8 @@ impl ExecutionPayloadValidator { /// - invalid extra data /// - invalid transactions /// - incorrect hash - /// - the versioned hashes passed with the payload do not exactly match transaction - /// versioned hashes + /// - the versioned hashes passed with the payload do not exactly match transaction versioned + /// hashes /// - the block does not contain blob transactions if it is pre-cancun /// /// The checks are done in the order that conforms with the engine-API specification. diff --git a/crates/primitives/src/prune/target.rs b/crates/primitives/src/prune/target.rs index 1300b9b0b9694..7f39c8d74ce3f 100644 --- a/crates/primitives/src/prune/target.rs +++ b/crates/primitives/src/prune/target.rs @@ -70,8 +70,8 @@ impl PruneModes { /// /// 1. For [PruneMode::Full], it fails if `MIN_BLOCKS > 0`. /// 2. For [PruneMode::Distance(distance)], it fails if `distance < MIN_BLOCKS + 1`. `+ 1` is needed -/// because `PruneMode::Distance(0)` means that we leave zero blocks from the latest, meaning we -/// have one block in the database. +/// because `PruneMode::Distance(0)` means that we leave zero blocks from the latest, meaning we +/// have one block in the database. 
fn deserialize_opt_prune_mode_with_min_blocks<'de, const MIN_BLOCKS: u64, D: Deserializer<'de>>( deserializer: D, ) -> Result, D::Error> { diff --git a/crates/primitives/src/revm/env.rs b/crates/primitives/src/revm/env.rs index 0c16f5482f8d1..49b9f609c2b20 100644 --- a/crates/primitives/src/revm/env.rs +++ b/crates/primitives/src/revm/env.rs @@ -138,8 +138,8 @@ pub fn tx_env_with_recovered(transaction: &TransactionSignedEcRecovered) -> TxEn /// and therefore: /// * the call must execute to completion /// * the call does not count against the block’s gas limit -/// * the call does not follow the EIP-1559 burn semantics - no value should be transferred as -/// part of the call +/// * the call does not follow the EIP-1559 burn semantics - no value should be transferred as part +/// of the call /// * if no code exists at `BEACON_ROOTS_ADDRESS`, the call must fail silently pub fn fill_tx_env_with_beacon_root_contract_call(env: &mut Env, parent_beacon_block_root: B256) { env.tx = TxEnv { diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index c823a21577873..d481bed166f2a 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1346,7 +1346,7 @@ impl TransactionSigned { }; if !input_data.is_empty() { - return Err(RlpError::UnexpectedLength); + return Err(RlpError::UnexpectedLength) } Ok(output_data) diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index e16b9e8b6d0c6..d3bcfba171191 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -59,8 +59,7 @@ use tracing::*; /// - [tables::BlockBodyIndices] get tx index to know what needs to be unwinded /// - [tables::AccountsHistory] to remove change set and apply old values to /// - [tables::PlainAccountState] [tables::StoragesHistory] to remove change set and apply old -/// values -/// to [tables::PlainStorageState] +/// values to 
[tables::PlainStorageState] // false positive, we cannot derive it if !DB: Debug. #[allow(missing_debug_implementations)] pub struct ExecutionStage { diff --git a/crates/stages/src/stages/hashing_account.rs b/crates/stages/src/stages/hashing_account.rs index 1a63f6d893c65..db0e3ca625828 100644 --- a/crates/stages/src/stages/hashing_account.rs +++ b/crates/stages/src/stages/hashing_account.rs @@ -70,11 +70,10 @@ impl Default for AccountHashingStage { /// /// In order to check the "full hashing" mode of the stage you want to generate more /// transitions than `AccountHashingStage.clean_threshold`. This requires: -/// 1. Creating enough blocks so there's enough transactions to generate -/// the required transition keys in the `BlockTransitionIndex` (which depends on the -/// `TxTransitionIndex` internally) -/// 2. Setting `blocks.len() > clean_threshold` so that there's enough diffs to actually -/// take the 2nd codepath +/// 1. Creating enough blocks so there's enough transactions to generate the required transition +/// keys in the `BlockTransitionIndex` (which depends on the `TxTransitionIndex` internally) +/// 2. Setting `blocks.len() > clean_threshold` so that there's enough diffs to actually take the +/// 2nd codepath #[derive(Clone, Debug)] pub struct SeedOpts { /// The range of blocks to be generated diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index 0bb05e0c40e02..2695fb074c27e 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -507,10 +507,9 @@ mod tests { /// # Panics /// /// 1. If there are any entries in the [tables::TransactionSenders] table above a given - /// block number. - /// + /// block number. /// 2. If the is no requested block entry in the bodies table, but - /// [tables::TransactionSenders] is not empty. + /// [tables::TransactionSenders] is not empty. 
fn ensure_no_senders_by_block(&self, block: BlockNumber) -> Result<(), TestRunnerError> { let body_result = self .db diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index fae08e854dc02..332bcf8e70008 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -428,10 +428,9 @@ mod tests { /// # Panics /// /// 1. If there are any entries in the [tables::TransactionHashNumbers] table above a given - /// block number. - /// + /// block number. /// 2. If the is no requested block entry in the bodies table, but - /// [tables::TransactionHashNumbers] is not empty. + /// [tables::TransactionHashNumbers] is not empty. fn ensure_no_hash_by_block(&self, number: BlockNumber) -> Result<(), TestRunnerError> { let body_result = self .db diff --git a/crates/storage/db/src/implementation/mdbx/cursor.rs b/crates/storage/db/src/implementation/mdbx/cursor.rs index 43adc249272f8..3d1a8815291ba 100644 --- a/crates/storage/db/src/implementation/mdbx/cursor.rs +++ b/crates/storage/db/src/implementation/mdbx/cursor.rs @@ -191,8 +191,7 @@ impl DbDupCursorRO for Cursor { /// - Some(key), Some(subkey): a `key` item whose data is >= than `subkey` /// - Some(key), None: first item of a specified `key` /// - None, Some(subkey): like first case, but in the first key - /// - None, None: first item in the table - /// of a DUPSORT table. + /// - None, None: first item in the table of a DUPSORT table. fn walk_dup( &mut self, key: Option, diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 6e07b7c46a1b7..643bc23e65e4e 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -375,20 +375,20 @@ impl DatabaseProvider { /// /// If UNWIND is false we will just read the state/blocks and return them. /// - /// 1. 
Iterate over the [BlockBodyIndices][tables::BlockBodyIndices] table to get all - /// the transaction ids. - /// 2. Iterate over the [StorageChangeSets][tables::StorageChangeSets] table - /// and the [AccountChangeSets][tables::AccountChangeSets] tables in reverse order to - /// reconstruct the changesets. - /// - In order to have both the old and new values in the changesets, we also access the - /// plain state tables. + /// 1. Iterate over the [BlockBodyIndices][tables::BlockBodyIndices] table to get all the + /// transaction ids. + /// 2. Iterate over the [StorageChangeSets][tables::StorageChangeSets] table and the + /// [AccountChangeSets][tables::AccountChangeSets] tables in reverse order to reconstruct + /// the changesets. + /// - In order to have both the old and new values in the changesets, we also access the + /// plain state tables. /// 3. While iterating over the changeset tables, if we encounter a new account or storage slot, - /// we: + /// we: /// 1. Take the old value from the changeset /// 2. Take the new value from the plain state /// 3. Save the old value to the local state /// 4. While iterating over the changeset tables, if we encounter an account/storage slot we - /// have seen before we: + /// have seen before we: /// 1. Take the old value from the changeset /// 2. Take the new value from the local state /// 3. 
Set the local state to the value in the changeset diff --git a/crates/tokio-util/src/event_stream.rs b/crates/tokio-util/src/event_stream.rs index fc7e56a13bbe5..67bc72a97d07d 100644 --- a/crates/tokio-util/src/event_stream.rs +++ b/crates/tokio-util/src/event_stream.rs @@ -36,7 +36,7 @@ where Poll::Ready(Some(Ok(item))) => return Poll::Ready(Some(item)), Poll::Ready(Some(Err(e))) => { warn!("BroadcastStream lagged: {e:?}"); - continue; + continue } Poll::Ready(None) => return Poll::Ready(None), Poll::Pending => return Poll::Pending, diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index a27b9d02167bb..163f30ea6b498 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -34,16 +34,16 @@ //! //! In essence the transaction pool is made of three separate sub-pools: //! -//! - Pending Pool: Contains all transactions that are valid on the current state and satisfy -//! (3. a)(1): _No_ nonce gaps. A _pending_ transaction is considered _ready_ when it has the lowest -//! nonce of all transactions from the same sender. Once a _ready_ transaction with nonce `n` has -//! been executed, the next highest transaction from the same sender `n + 1` becomes ready. +//! - Pending Pool: Contains all transactions that are valid on the current state and satisfy (3. +//! a)(1): _No_ nonce gaps. A _pending_ transaction is considered _ready_ when it has the lowest +//! nonce of all transactions from the same sender. Once a _ready_ transaction with nonce `n` has +//! been executed, the next highest transaction from the same sender `n + 1` becomes ready. //! -//! - Queued Pool: Contains all transactions that are currently blocked by missing -//! transactions: (3. a)(2): _With_ nonce gaps or due to lack of funds. +//! - Queued Pool: Contains all transactions that are currently blocked by missing transactions: +//! (3. a)(2): _With_ nonce gaps or due to lack of funds. //! -//! 
- Basefee Pool: To account for the dynamic base fee requirement (3. b) which could render -//! an EIP-1559 and all subsequent transactions of the sender currently invalid. +//! - Basefee Pool: To account for the dynamic base fee requirement (3. b) which could render an +//! EIP-1559 and all subsequent transactions of the sender currently invalid. //! //! The classification of transactions is always dependent on the current state that is changed as //! soon as a new block is mined. Once a new block is mined, the account changeset must be applied diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 7e733a6593c9a..d78af79085b9d 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -90,9 +90,9 @@ impl PendingPool { /// Returns an iterator over all transactions that are _currently_ ready. /// /// 1. The iterator _always_ returns transaction in order: It never returns a transaction with - /// an unsatisfied dependency and only returns them if dependency transaction were yielded - /// previously. In other words: The nonces of transactions with the same sender will _always_ - /// increase by exactly 1. + /// an unsatisfied dependency and only returns them if dependency transaction were yielded + /// previously. In other words: The nonces of transactions with the same sender will _always_ + /// increase by exactly 1. /// /// The order of transactions which satisfy (1.) is determent by their computed priority: A /// transaction with a higher priority is returned before a transaction with a lower priority. 
diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index bcad71edbd4a0..4e35733d4bc63 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1002,6 +1002,7 @@ impl AllTransactions { /// For all transactions: /// - decreased basefee: promotes from `basefee` to `pending` sub-pool. /// - increased basefee: demotes from `pending` to `basefee` sub-pool. + /// /// Individually: /// - decreased sender allowance: demote from (`basefee`|`pending`) to `queued`. /// - increased sender allowance: promote from `queued` to diff --git a/examples/node-event-hooks/src/main.rs b/examples/node-event-hooks/src/main.rs index b9cd53298b4b8..e8a751840e0da 100644 --- a/examples/node-event-hooks/src/main.rs +++ b/examples/node-event-hooks/src/main.rs @@ -8,12 +8,8 @@ //! ``` //! //! This launch the regular reth node and also print: -//! -//! > "All components initialized" -//! once all components have been initialized and -//! -//! > "Node started" -//! once the node has been started. +//! > "All components initialized" – once all components have been initialized +//! > "Node started" – once the node has been started. 
use reth::cli::Cli; use reth_node_ethereum::EthereumNode; From f447db1eee20c04f76d30e32e027179e4f59a416 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 23 May 2024 13:39:11 +0200 Subject: [PATCH 599/700] feat: add static-file-types (#8361) --- Cargo.lock | 14 +++++++++-- Cargo.toml | 2 ++ crates/primitives/Cargo.toml | 5 ++-- crates/primitives/src/lib.rs | 2 +- crates/static-file-types/Cargo.toml | 23 +++++++++++++++++++ .../src}/compression.rs | 0 .../src}/filters.rs | 0 .../mod.rs => static-file-types/src/lib.rs} | 10 +++++++- .../src}/segment.rs | 12 ++++------ 9 files changed, 54 insertions(+), 14 deletions(-) create mode 100644 crates/static-file-types/Cargo.toml rename crates/{primitives/src/static_file => static-file-types/src}/compression.rs (100%) rename crates/{primitives/src/static_file => static-file-types/src}/filters.rs (100%) rename crates/{primitives/src/static_file/mod.rs => static-file-types/src/lib.rs} (84%) rename crates/{primitives/src/static_file => static-file-types/src}/segment.rs (97%) diff --git a/Cargo.lock b/Cargo.lock index ce125ced72b09..413d83fc9f20c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7548,7 +7548,6 @@ dependencies = [ "byteorder", "bytes", "c-kzg", - "clap", "criterion", "derive_more", "hash-db", @@ -7565,13 +7564,13 @@ dependencies = [ "reth-codecs", "reth-ethereum-forks", "reth-network-types", + "reth-static-file-types", "revm", "revm-primitives", "roaring", "secp256k1 0.28.2", "serde", "serde_json", - "strum", "sucds", "tempfile", "test-fuzz", @@ -7932,6 +7931,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-static-file-types" +version = "0.2.0-beta.7" +dependencies = [ + "alloy-primitives", + "clap", + "derive_more", + "serde", + "strum", +] + [[package]] name = "reth-tasks" version = "0.2.0-beta.7" diff --git a/Cargo.toml b/Cargo.toml index a44a474e092c9..a779905e6978e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -60,6 +60,7 @@ members = [ "crates/stages/", "crates/stages-api", 
"crates/static-file/", + "crates/static-file-types/", "crates/storage/codecs/", "crates/storage/codecs/derive/", "crates/storage/db/", @@ -271,6 +272,7 @@ reth-rpc-layer = { path = "crates/rpc/rpc-layer" } reth-stages = { path = "crates/stages" } reth-stages-api = { path = "crates/stages-api" } reth-static-file = { path = "crates/static-file" } +reth-static-file-types = { path = "crates/static-file-types" } reth-tasks = { path = "crates/tasks" } reth-tokio-util = { path = "crates/tokio-util" } reth-tracing = { path = "crates/tracing" } diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 34100b24b70e1..f44db3cae1e2e 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -16,6 +16,7 @@ workspace = true reth-codecs.workspace = true reth-ethereum-forks.workspace = true reth-network-types.workspace = true +reth-static-file-types.workspace = true revm.workspace = true revm-primitives = { workspace = true, features = ["serde"] } @@ -41,7 +42,6 @@ c-kzg = { workspace = true, features = ["serde"], optional = true } # misc bytes.workspace = true byteorder = "1" -clap = { workspace = true, features = ["derive"], optional = true } derive_more.workspace = true itertools.workspace = true modular-bitfield.workspace = true @@ -62,7 +62,6 @@ plain_hasher = { version = "0.2", optional = true } arbitrary = { workspace = true, features = ["derive"], optional = true } proptest = { workspace = true, optional = true } proptest-derive = { workspace = true, optional = true } -strum = { workspace = true, features = ["derive"] } [dev-dependencies] # eth @@ -116,7 +115,7 @@ c-kzg = [ "alloy-eips/kzg", ] zstd-codec = ["dep:zstd"] -clap = ["dep:clap"] +clap = ["reth-static-file-types/clap"] optimism = [ "reth-codecs/optimism", "reth-ethereum-forks/optimism", diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 35dfc14915a23..b10582cf9e522 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ 
-42,7 +42,7 @@ mod receipt; /// Helpers for working with revm pub mod revm; pub mod stage; -pub mod static_file; +pub use reth_static_file_types as static_file; mod storage; /// Helpers for working with transactions pub mod transaction; diff --git a/crates/static-file-types/Cargo.toml b/crates/static-file-types/Cargo.toml new file mode 100644 index 0000000000000..63ba40c8f5256 --- /dev/null +++ b/crates/static-file-types/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "reth-static-file-types" +version.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true +description = "Commonly used types for static file usage in reth." + +[lints] +workspace = true + +[dependencies] +alloy-primitives.workspace = true + +clap = { workspace = true, features = ["derive"], optional = true } +derive_more.workspace = true +serde = { workspace = true, features = ["derive"] } +strum = { workspace = true, features = ["derive"] } + +[features] +clap = ["dep:clap"] \ No newline at end of file diff --git a/crates/primitives/src/static_file/compression.rs b/crates/static-file-types/src/compression.rs similarity index 100% rename from crates/primitives/src/static_file/compression.rs rename to crates/static-file-types/src/compression.rs diff --git a/crates/primitives/src/static_file/filters.rs b/crates/static-file-types/src/filters.rs similarity index 100% rename from crates/primitives/src/static_file/filters.rs rename to crates/static-file-types/src/filters.rs diff --git a/crates/primitives/src/static_file/mod.rs b/crates/static-file-types/src/lib.rs similarity index 84% rename from crates/primitives/src/static_file/mod.rs rename to crates/static-file-types/src/lib.rs index e7e9e47fd2588..26d2496948b97 100644 --- a/crates/primitives/src/static_file/mod.rs +++ b/crates/static-file-types/src/lib.rs @@ -1,4 +1,12 @@ -//! StaticFile primitives. +//! Commonly used types for static file usage. 
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod compression; mod filters; diff --git a/crates/primitives/src/static_file/segment.rs b/crates/static-file-types/src/segment.rs similarity index 97% rename from crates/primitives/src/static_file/segment.rs rename to crates/static-file-types/src/segment.rs index a9ad2a075f988..82b937f29442c 100644 --- a/crates/primitives/src/static_file/segment.rs +++ b/crates/static-file-types/src/segment.rs @@ -1,7 +1,5 @@ -use crate::{ - static_file::{Compression, Filters, InclusionFilter}, - BlockNumber, TxNumber, -}; +use crate::{BlockNumber, Compression, Filters, InclusionFilter}; +use alloy_primitives::TxNumber; use derive_more::Display; use serde::{Deserialize, Serialize}; use std::{ops::RangeInclusive, str::FromStr}; @@ -385,7 +383,7 @@ mod tests { Compression::Lz4, Filters::WithFilters( InclusionFilter::Cuckoo, - crate::static_file::PerfectHashingFunction::Fmph, + crate::PerfectHashingFunction::Fmph, ), )), ), @@ -397,7 +395,7 @@ mod tests { Compression::Zstd, Filters::WithFilters( InclusionFilter::Cuckoo, - crate::static_file::PerfectHashingFunction::Fmph, + crate::PerfectHashingFunction::Fmph, ), )), ), @@ -409,7 +407,7 @@ mod tests { Compression::ZstdWithDictionary, Filters::WithFilters( InclusionFilter::Cuckoo, - crate::static_file::PerfectHashingFunction::Fmph, + crate::PerfectHashingFunction::Fmph, ), )), ), From 3312dc26cbb881b9b8a78dcb6b17c6e204ec6ac4 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 23 May 2024 08:27:21 -0400 Subject: [PATCH 600/700] chore: make unknown block error lowercase (#8355) --- crates/rpc/rpc/src/eth/error.rs | 6 +++--- 1 
file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 77bffee400b78..90ed87facc0c1 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -40,11 +40,11 @@ pub enum EthApiError { /// Thrown when querying for `finalized` or `safe` block before the merge transition is /// finalized, /// - /// op-node uses case sensitive string comparison to parse this error: - /// + /// op-node now checks for either `Unknown block` OR `unknown block`: + /// /// /// TODO(#8045): Temporary, until a version of is pushed through that doesn't require this to figure out the EL sync status. - #[error("Unknown block")] + #[error("unknown block")] UnknownSafeOrFinalizedBlock, /// Thrown when an unknown block or transaction index is encountered #[error("unknown block or tx index")] From b8eebbd3d42a1bcfc1f05cce91b4fc9dc5c4d4e5 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 23 May 2024 08:27:34 -0400 Subject: [PATCH 601/700] chore: add docs for using personal tag in kurtosis (#8354) --- book/run/private-testnet.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/book/run/private-testnet.md b/book/run/private-testnet.md index 958d769e348d3..c85ff7d547862 100644 --- a/book/run/private-testnet.md +++ b/book/run/private-testnet.md @@ -35,6 +35,10 @@ First, in your home directory, create a file with the name `network_params.json` "launch_additional_services": false } ``` + +> [!TIP] +> If you would like to use a modified reth node, you can build an image locally with a custom tag. The tag can then be used in the `el_image` field in the `network_params.json` file. 
+ ### Step 2: Spin up your network Next, run the following command from your command line: From bc4dd37872e4aad257831d10aa3fe59f0ac41701 Mon Sep 17 00:00:00 2001 From: Delweng Date: Thu, 23 May 2024 20:51:25 +0800 Subject: [PATCH 602/700] chore(editor): set indent size=2 for the yaml files (#8366) Signed-off-by: jsvisa --- .editorconfig | 3 +++ .github/ISSUE_TEMPLATE/config.yml | 2 +- .github/ISSUE_TEMPLATE/feature.yml | 2 +- .github/workflows/assertoor.yml | 9 ++++----- .github/workflows/label-pr.yml | 1 - 5 files changed, 9 insertions(+), 8 deletions(-) diff --git a/.editorconfig b/.editorconfig index d53c0e8dded96..19fe6f5ad6eae 100644 --- a/.editorconfig +++ b/.editorconfig @@ -15,6 +15,9 @@ indent_size = 4 [*.rs] max_line_length = 100 +[*.{yml,yaml}] +indent_size = 2 + [*.md] # double whitespace at end of line # denotes a line break in Markdown diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index c096920040757..ee2646490db50 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -5,4 +5,4 @@ contact_links: about: Please ask and answer questions here to keep the issue tracker clean. - name: Security url: mailto:georgios@paradigm.xyz - about: Please report security vulnerabilities here. \ No newline at end of file + about: Please report security vulnerabilities here. diff --git a/.github/ISSUE_TEMPLATE/feature.yml b/.github/ISSUE_TEMPLATE/feature.yml index 2e33e3bc6ee3d..005c33ae3faa8 100644 --- a/.github/ISSUE_TEMPLATE/feature.yml +++ b/.github/ISSUE_TEMPLATE/feature.yml @@ -11,7 +11,7 @@ body: label: Describe the feature description: | Please describe the feature and what it is aiming to solve, if relevant. - + If the feature is for a crate, please include a proposed API surface. 
validations: required: true diff --git a/.github/workflows/assertoor.yml b/.github/workflows/assertoor.yml index 3d1ee58c48000..becbf4a3a59bb 100644 --- a/.github/workflows/assertoor.yml +++ b/.github/workflows/assertoor.yml @@ -29,7 +29,7 @@ jobs: id: services run: | export github_sha=${{ github.sha }} - export github_repository=${{ github.repository }} + export github_repository=${{ github.repository }} cat etc/assertoor/assertoor-template.yaml | envsubst > etc/assertoor/assertoor.yaml @@ -92,7 +92,7 @@ jobs: elif [ "$task_result" == "failure" ]; then task_result="${RED}failure${NC}" fi - + echo -e " $(printf '%-4s' "$task_id")\t$task_status\t$task_result\t$(printf '%-50s' "$task_graph$task_name") \t$task_title" done <<< $(echo "$tasks") } @@ -153,7 +153,7 @@ jobs: echo "$task_lines" fi - if [ $failed_tests -gt 0 ]; then + if [ $failed_tests -gt 0 ]; then final_test_result="failure" break fi @@ -197,7 +197,7 @@ jobs: with: name: "kurtosis-enclave-dump-${{ github.run_id }}" path: ./temp/dump - + - name: Return test result shell: bash run: | @@ -227,4 +227,3 @@ jobs: exit 1 # fail action fi - diff --git a/.github/workflows/label-pr.yml b/.github/workflows/label-pr.yml index e52721b9cc832..857d354a8fb8c 100644 --- a/.github/workflows/label-pr.yml +++ b/.github/workflows/label-pr.yml @@ -21,4 +21,3 @@ jobs: script: | const label_pr = require('./.github/scripts/label_pr.js') await label_pr({github, context}) - From c73af6298ecfc869e04c33f269c8a5109d42deaf Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 23 May 2024 14:59:06 +0200 Subject: [PATCH 603/700] chore: remove network setup from config (#8364) Co-authored-by: DaniPopes <57450786+DaniPopes@users.noreply.github.com> --- Cargo.lock | 3 --- bin/reth/src/commands/p2p/mod.rs | 6 ++++-- crates/config/Cargo.toml | 5 ----- crates/config/src/config.rs | 28 +++++++++------------------- crates/config/src/lib.rs | 1 + crates/net/network/src/config.rs | 15 ++++++++++++++- crates/node-core/src/args/network.rs | 7 
+++++-- 7 files changed, 33 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 413d83fc9f20c..ec86a945a966e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6591,11 +6591,8 @@ version = "0.2.0-beta.7" dependencies = [ "confy", "humantime-serde", - "reth-discv4", - "reth-net-nat", "reth-network", "reth-primitives", - "secp256k1 0.28.2", "serde", "tempfile", "toml", diff --git a/bin/reth/src/commands/p2p/mod.rs b/bin/reth/src/commands/p2p/mod.rs index 8ad8fadf1d34f..b6710a363a991 100644 --- a/bin/reth/src/commands/p2p/mod.rs +++ b/bin/reth/src/commands/p2p/mod.rs @@ -15,6 +15,7 @@ use discv5::ListenConfig; use reth_config::Config; use reth_db::create_db; use reth_interfaces::p2p::bodies::client::BodiesClient; +use reth_network::NetworkConfigBuilder; use reth_primitives::{BlockHashOrNumber, ChainSpec}; use reth_provider::ProviderFactory; use std::{ @@ -112,8 +113,9 @@ impl Command { let rlpx_socket = (self.network.addr, self.network.port).into(); let boot_nodes = self.chain.bootnodes().unwrap_or_default(); - let mut network_config_builder = config - .network_config(self.network.nat, None, p2p_secret_key) + let mut network_config_builder = NetworkConfigBuilder::new(p2p_secret_key) + .peer_config(config.peers_config_with_basic_nodes_from_file(None)) + .external_ip_resolver(self.network.nat) .chain_spec(self.chain.clone()) .disable_discv4_discovery_if(self.chain.chain.is_optimism()) .boot_nodes(boot_nodes.clone()); diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml index d9147d7b7a7cb..9e9aa48064c81 100644 --- a/crates/config/Cargo.toml +++ b/crates/config/Cargo.toml @@ -13,17 +13,12 @@ workspace = true [dependencies] # reth reth-network.workspace = true -reth-net-nat.workspace = true -reth-discv4.workspace = true reth-primitives.workspace = true # serde serde.workspace = true humantime-serde.workspace = true -# crypto -secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } - # toml confy.workspace = true 
diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 4215f89a438c7..7847ae202e3c3 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -1,9 +1,7 @@ //! Configuration files. -use reth_discv4::Discv4Config; -use reth_network::{NetworkConfigBuilder, PeersConfig, SessionsConfig}; +use reth_network::{PeersConfig, SessionsConfig}; use reth_primitives::PruneModes; -use secp256k1::SecretKey; use serde::{Deserialize, Deserializer, Serialize}; use std::{ ffi::OsStr, @@ -30,25 +28,17 @@ pub struct Config { } impl Config { - /// Initializes network config from read data - pub fn network_config( + /// Returns the [PeersConfig] for the node. + /// + /// If a peers file is provided, the basic nodes from the file are added to the configuration. + pub fn peers_config_with_basic_nodes_from_file( &self, - nat_resolution_method: reth_net_nat::NatResolver, - peers_file: Option, - secret_key: SecretKey, - ) -> NetworkConfigBuilder { - let peer_config = self - .peers + peers_file: Option<&Path>, + ) -> PeersConfig { + self.peers .clone() .with_basic_nodes_from_file(peers_file) - .unwrap_or_else(|_| self.peers.clone()); - - let discv4 = - Discv4Config::builder().external_ip_resolver(Some(nat_resolution_method)).clone(); - NetworkConfigBuilder::new(secret_key) - .sessions_config(self.sessions.clone()) - .peer_config(peer_config) - .discovery(discv4) + .unwrap_or_else(|_| self.peers.clone()) } /// Save the configuration to toml file. 
diff --git a/crates/config/src/lib.rs b/crates/config/src/lib.rs index 362e814632e3a..1e81e18ec4291 100644 --- a/crates/config/src/lib.rs +++ b/crates/config/src/lib.rs @@ -5,6 +5,7 @@ html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] pub mod config; diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 4bd1dab8835e2..368f958b2a309 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -8,7 +8,7 @@ use crate::{ transactions::TransactionsManagerConfig, NetworkHandle, NetworkManager, }; -use reth_discv4::{Discv4Config, Discv4ConfigBuilder, DEFAULT_DISCOVERY_ADDRESS}; +use reth_discv4::{Discv4Config, Discv4ConfigBuilder, NatResolver, DEFAULT_DISCOVERY_ADDRESS}; use reth_discv5::NetworkStackId; use reth_dns_discovery::DnsDiscoveryConfig; use reth_eth_wire::{HelloMessage, HelloMessageWithProtocols, Status}; @@ -314,6 +314,19 @@ impl NetworkConfigBuilder { self } + /// Sets the external ip resolver to use for discovery v4. + /// + /// If no [Discv4ConfigBuilder] is set via [Self::discovery], this will create a new one. + /// + /// This is a convenience function for setting the external ip resolver on the default + /// [Discv4Config] config. + pub fn external_ip_resolver(mut self, resolver: NatResolver) -> Self { + self.discovery_v4_builder + .get_or_insert_with(Discv4Config::builder) + .external_ip_resolver(Some(resolver)); + self + } + /// Sets the discv4 config to use. 
pub fn discovery(mut self, builder: Discv4ConfigBuilder) -> Self { self.discovery_v4_builder = Some(builder); diff --git a/crates/node-core/src/args/network.rs b/crates/node-core/src/args/network.rs index 350e7c4a1b1a4..115ec8517ab40 100644 --- a/crates/node-core/src/args/network.rs +++ b/crates/node-core/src/args/network.rs @@ -145,8 +145,11 @@ impl NetworkArgs { ), }; // Configure basic network stack - let mut network_config_builder = config - .network_config(self.nat, self.persistent_peers_file(peers_file), secret_key) + let mut network_config_builder = NetworkConfigBuilder::new(secret_key) + .peer_config(config.peers_config_with_basic_nodes_from_file( + self.persistent_peers_file(peers_file).as_deref(), + )) + .external_ip_resolver(self.nat) .sessions_config( SessionsConfig::default().with_upscaled_event_buffer(peers_config.max_peers()), ) From a4933e72a3d55b085e7771d10ada43c1efd85afc Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 23 May 2024 16:02:28 +0200 Subject: [PATCH 604/700] chore: remove peer types dep (#8368) --- Cargo.lock | 2 +- crates/net/common/Cargo.toml | 4 ++-- crates/net/common/src/ban_list.rs | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ec86a945a966e..1f8160c450f54 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7128,8 +7128,8 @@ dependencies = [ name = "reth-net-common" version = "0.2.0-beta.7" dependencies = [ + "alloy-primitives", "pin-project", - "reth-network-types", "tokio", ] diff --git a/crates/net/common/Cargo.toml b/crates/net/common/Cargo.toml index 0c3b253a50ad0..3d73f480f5704 100644 --- a/crates/net/common/Cargo.toml +++ b/crates/net/common/Cargo.toml @@ -12,8 +12,8 @@ description = "Types shared across network code" workspace = true [dependencies] -# reth -reth-network-types.workspace = true +# ethereum +alloy-primitives.workspace = true # async pin-project.workspace = true diff --git a/crates/net/common/src/ban_list.rs b/crates/net/common/src/ban_list.rs index 
11d4c6049b40f..1cde15ef2b98f 100644 --- a/crates/net/common/src/ban_list.rs +++ b/crates/net/common/src/ban_list.rs @@ -1,6 +1,7 @@ //! Support for banning peers. -use reth_network_types::PeerId; +type PeerId = alloy_primitives::B512; + use std::{collections::HashMap, net::IpAddr, time::Instant}; /// Determines whether or not the IP is globally routable. From 39d24b495f7799eed6d4d184516deaa3e75165e8 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 23 May 2024 10:37:39 -0400 Subject: [PATCH 605/700] feat: return parent beacon block root in payload conversion (#8349) --- .../ethereum/engine-primitives/src/payload.rs | 2 +- crates/optimism/payload/src/payload.rs | 2 +- crates/rpc/rpc-engine-api/tests/it/payload.rs | 1 + .../rpc-types-compat/src/engine/payload.rs | 25 +++++++++++-------- 4 files changed, 18 insertions(+), 12 deletions(-) diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index ed3f484b8d0c0..6e753dac9626a 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -112,7 +112,7 @@ impl From for ExecutionPayloadEnvelopeV3 { let EthBuiltPayload { block, fees, sidecars, .. 
} = value; ExecutionPayloadEnvelopeV3 { - execution_payload: block_to_payload_v3(block), + execution_payload: block_to_payload_v3(block).0, block_value: fees, // From the engine API spec: // diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 182dadfed9b2e..41a3eec9be102 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -256,7 +256,7 @@ impl From for OptimismExecutionPayloadEnvelopeV3 { B256::ZERO }; OptimismExecutionPayloadEnvelopeV3 { - execution_payload: block_to_payload_v3(block), + execution_payload: block_to_payload_v3(block).0, block_value: fees, // From the engine API spec: // diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 22219584c7e14..e2b39691afea3 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -29,6 +29,7 @@ fn transform_block Block>(src: SealedBlock, f: F) -> Executi ommers: transformed.ommers, withdrawals: transformed.withdrawals, }) + .0 } #[test] diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index f3478d189ab33..9f968a1a4ebd6 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -97,18 +97,20 @@ pub fn try_payload_v4_to_block(payload: ExecutionPayloadV4) -> Result ExecutionPayload { +/// Converts [SealedBlock] to [ExecutionPayload], returning additional data (the parent beacon block +/// root) if the block is a V3 payload +pub fn block_to_payload(value: SealedBlock) -> (ExecutionPayload, Option) { // todo(onbjerg): check for requests_root here and return payload v4 if value.header.parent_beacon_block_root.is_some() { // block with parent beacon block root: V3 - ExecutionPayload::V3(block_to_payload_v3(value)) + let (payload, beacon_block_root) = block_to_payload_v3(value); + 
(ExecutionPayload::V3(payload), beacon_block_root) } else if value.withdrawals.is_some() { // block with withdrawals: V2 - ExecutionPayload::V2(block_to_payload_v2(value)) + (ExecutionPayload::V2(block_to_payload_v2(value)), None) } else { // otherwise V1 - ExecutionPayload::V1(block_to_payload_v1(value)) + (ExecutionPayload::V1(block_to_payload_v1(value)), None) } } @@ -158,11 +160,12 @@ pub fn block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { } } -/// Converts [SealedBlock] to [ExecutionPayloadV3] -pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { +/// Converts [SealedBlock] to [ExecutionPayloadV3], and returns the parent beacon block root. +pub fn block_to_payload_v3(value: SealedBlock) -> (ExecutionPayloadV3, Option) { let transactions = value.raw_transactions(); - ExecutionPayloadV3 { + let parent_beacon_block_root = value.header.parent_beacon_block_root; + let payload = ExecutionPayloadV3 { blob_gas_used: value.blob_gas_used.unwrap_or_default(), excess_blob_gas: value.excess_blob_gas.unwrap_or_default(), payload_inner: ExecutionPayloadV2 { @@ -184,7 +187,9 @@ pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { }, withdrawals: value.withdrawals.unwrap_or_default().into_inner(), }, - } + }; + + (payload, parent_beacon_block_root) } /// Converts [SealedBlock] to [ExecutionPayloadFieldV2] @@ -374,7 +379,7 @@ mod tests { let converted_payload = block_to_payload_v3(block.seal_slow()); // ensure the payloads are the same - assert_eq!(new_payload, converted_payload); + assert_eq!((new_payload, Some(parent_beacon_block_root.into())), converted_payload); } #[test] From 62e05b505a82170f48ba78f4b9d41a193fe3b0e3 Mon Sep 17 00:00:00 2001 From: William Law Date: Thu, 23 May 2024 11:17:09 -0700 Subject: [PATCH 606/700] fix: disable timeout on DbTool level (#8357) --- bin/reth/src/commands/db/list.rs | 2 -- bin/reth/src/utils.rs | 3 +++ 2 files changed, 3 insertions(+), 2 deletions(-) diff --git 
a/bin/reth/src/commands/db/list.rs b/bin/reth/src/commands/db/list.rs index 1c1839188d247..df05638bc9e95 100644 --- a/bin/reth/src/commands/db/list.rs +++ b/bin/reth/src/commands/db/list.rs @@ -90,8 +90,6 @@ impl TableViewer<()> for ListTableViewer<'_> { fn view(&self) -> Result<(), Self::Error> { self.tool.provider_factory.db_ref().view(|tx| { - // Disable timeout because we are entering a TUI which might read for a long time - tx.inner.disable_timeout(); let table_db = tx.inner.open_db(Some(self.args.table.name())).wrap_err("Could not open db.")?; let stats = tx.inner.db_stat(&table_db).wrap_err(format!("Could not find table: {}", stringify!($table)))?; let total_entries = stats.entries(); diff --git a/bin/reth/src/utils.rs b/bin/reth/src/utils.rs index 025b059bcef12..f312b2d1b777f 100644 --- a/bin/reth/src/utils.rs +++ b/bin/reth/src/utils.rs @@ -36,6 +36,9 @@ pub struct DbTool { impl DbTool { /// Takes a DB where the tables have already been created. pub fn new(provider_factory: ProviderFactory, chain: Arc) -> eyre::Result { + // Disable timeout because we are entering a TUI which might read for a long time. We + // disable on the [`DbTool`] level since it's only used in the CLI. 
+ provider_factory.provider()?.disable_long_read_transaction_safety(); Ok(Self { provider_factory, chain }) } From 0e87ff4da66bb383d6211006498c47848a893141 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 23 May 2024 20:59:58 +0200 Subject: [PATCH 607/700] chore: sort workspace members (#8374) --- Cargo.toml | 94 +++++++++++++++++++++++++++--------------------------- 1 file changed, 47 insertions(+), 47 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a779905e6978e..b7d65b0a7be11 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,14 +6,17 @@ members = [ "crates/config/", "crates/consensus/auto-seal/", "crates/consensus/beacon/", - "crates/ethereum/consensus/", "crates/consensus/common/", "crates/consensus/consensus/", - "crates/ethereum-forks/", "crates/e2e-test-utils/", + "crates/engine-primitives/", + "crates/ethereum-forks/", + "crates/ethereum/consensus/", + "crates/ethereum/engine-primitives/", + "crates/ethereum/evm", + "crates/ethereum/node", "crates/etl/", "crates/evm/", - "crates/ethereum/evm", "crates/exex/", "crates/interfaces/", "crates/metrics/", @@ -24,12 +27,20 @@ members = [ "crates/net/dns/", "crates/net/downloaders/", "crates/net/ecies/", - "crates/net/eth-wire/", "crates/net/eth-wire-types", + "crates/net/eth-wire/", "crates/net/nat/", - "crates/net/network/", "crates/net/network-api/", + "crates/net/network/", "crates/net/types/", + "crates/node-core/", + "crates/node/api/", + "crates/node/builder/", + "crates/node/events/", + "crates/optimism/consensus", + "crates/optimism/evm/", + "crates/optimism/node/", + "crates/optimism/payload/", "crates/payload/basic/", "crates/payload/builder/", "crates/payload/ethereum/", @@ -37,30 +48,19 @@ members = [ "crates/primitives/", "crates/prune/", "crates/revm/", - "crates/node/events/", "crates/rpc/ipc/", - "crates/rpc/rpc/", "crates/rpc/rpc-api/", "crates/rpc/rpc-builder/", "crates/rpc/rpc-engine-api/", + "crates/rpc/rpc-layer", "crates/rpc/rpc-testing-util/", - "crates/rpc/rpc-types/", 
"crates/rpc/rpc-types-compat/", - "crates/rpc/rpc-layer", - "crates/engine-primitives/", - "crates/ethereum/engine-primitives/", - "crates/ethereum/node", - "crates/node/builder/", - "crates/optimism/consensus", - "crates/optimism/evm/", - "crates/optimism/node/", - "crates/optimism/payload/", - "crates/node-core/", - "crates/node/api/", - "crates/stages/", + "crates/rpc/rpc-types/", + "crates/rpc/rpc/", "crates/stages-api", - "crates/static-file/", + "crates/stages/", "crates/static-file-types/", + "crates/static-file/", "crates/storage/codecs/", "crates/storage/codecs/derive/", "crates/storage/db/", @@ -72,26 +72,26 @@ members = [ "crates/tokio-util/", "crates/tracing/", "crates/transaction-pool/", - "crates/trie/", "crates/trie-parallel/", - "examples/node-custom-rpc/", + "crates/trie/", "examples/beacon-api-sse/", - "examples/node-event-hooks/", - "examples/custom-evm/", + "examples/bsc-p2p", + "examples/custom-dev-node/", "examples/custom-engine-types/", + "examples/custom-evm/", + "examples/custom-inspector/", "examples/custom-node-components/", - "examples/custom-dev-node/", "examples/custom-payload-builder/", + "examples/db-access", + "examples/exex/*", "examples/manual-p2p/", - "examples/network/", "examples/network-txpool/", + "examples/network/", + "examples/node-custom-rpc/", + "examples/node-event-hooks/", + "examples/polygon-p2p/", "examples/rpc-db/", "examples/txpool-tracing/", - "examples/polygon-p2p/", - "examples/custom-inspector/", - "examples/exex/*", - "examples/db-access", - "examples/bsc-p2p", "testing/ef-tests/", "testing/testing-utils", ] @@ -211,7 +211,6 @@ reth = { path = "bin/reth" } reth-auto-seal-consensus = { path = "crates/consensus/auto-seal" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-beacon-consensus = { path = "crates/consensus/beacon" } -reth-ethereum-consensus = { path = "crates/ethereum/consensus" } reth-blockchain-tree = { path = "crates/blockchain-tree" } reth-cli-runner = { path = 
"crates/cli/runner" } reth-codecs = { path = "crates/storage/codecs" } @@ -222,27 +221,22 @@ reth-db = { path = "crates/storage/db" } reth-discv4 = { path = "crates/net/discv4" } reth-discv5 = { path = "crates/net/discv5" } reth-dns-discovery = { path = "crates/net/dns" } -reth-e2e-test-utils = { path = "crates/e2e-test-utils" } -reth-engine-primitives = { path = "crates/engine-primitives" } -reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } -reth-node-builder = { path = "crates/node/builder" } -reth-node-ethereum = { path = "crates/ethereum/node" } -reth-node-optimism = { path = "crates/optimism/node" } -reth-evm-optimism = { path = "crates/optimism/evm" } -reth-node-core = { path = "crates/node-core" } -reth-node-api = { path = "crates/node/api" } reth-downloaders = { path = "crates/net/downloaders" } +reth-e2e-test-utils = { path = "crates/e2e-test-utils" } reth-ecies = { path = "crates/net/ecies" } +reth-engine-primitives = { path = "crates/engine-primitives" } reth-eth-wire = { path = "crates/net/eth-wire" } reth-eth-wire-types = { path = "crates/net/eth-wire-types" } +reth-ethereum-consensus = { path = "crates/ethereum/consensus" } +reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } reth-ethereum-forks = { path = "crates/ethereum-forks" } reth-ethereum-payload-builder = { path = "crates/payload/ethereum" } reth-etl = { path = "crates/etl" } reth-evm = { path = "crates/evm" } reth-evm-ethereum = { path = "crates/ethereum/evm" } +reth-evm-optimism = { path = "crates/optimism/evm" } reth-exex = { path = "crates/exex" } reth-fs-util = { path = "crates/fs-util" } -reth-optimism-payload-builder = { path = "crates/optimism/payload" } reth-interfaces = { path = "crates/interfaces" } reth-ipc = { path = "crates/rpc/ipc" } reth-libmdbx = { path = "crates/storage/libmdbx-rs" } @@ -255,6 +249,14 @@ reth-network = { path = "crates/net/network" } reth-network-api = { path = "crates/net/network-api" } 
reth-network-types = { path = "crates/net/types" } reth-nippy-jar = { path = "crates/storage/nippy-jar" } +reth-node-api = { path = "crates/node/api" } +reth-node-builder = { path = "crates/node/builder" } +reth-node-core = { path = "crates/node-core" } +reth-node-ethereum = { path = "crates/ethereum/node" } +reth-node-events = { path = "crates/node/events" } +reth-node-optimism = { path = "crates/optimism/node" } +reth-optimism-consensus = { path = "crates/optimism/consensus" } +reth-optimism-payload-builder = { path = "crates/optimism/payload" } reth-payload-builder = { path = "crates/payload/builder" } reth-payload-validator = { path = "crates/payload/validator" } reth-primitives = { path = "crates/primitives" } @@ -266,22 +268,20 @@ reth-rpc-api = { path = "crates/rpc/rpc-api" } reth-rpc-api-testing-util = { path = "crates/rpc/rpc-testing-util" } reth-rpc-builder = { path = "crates/rpc/rpc-builder" } reth-rpc-engine-api = { path = "crates/rpc/rpc-engine-api" } +reth-rpc-layer = { path = "crates/rpc/rpc-layer" } reth-rpc-types = { path = "crates/rpc/rpc-types" } reth-rpc-types-compat = { path = "crates/rpc/rpc-types-compat" } -reth-rpc-layer = { path = "crates/rpc/rpc-layer" } reth-stages = { path = "crates/stages" } reth-stages-api = { path = "crates/stages-api" } reth-static-file = { path = "crates/static-file" } reth-static-file-types = { path = "crates/static-file-types" } reth-tasks = { path = "crates/tasks" } +reth-testing-utils = { path = "testing/testing-utils" } reth-tokio-util = { path = "crates/tokio-util" } reth-tracing = { path = "crates/tracing" } reth-transaction-pool = { path = "crates/transaction-pool" } reth-trie = { path = "crates/trie" } reth-trie-parallel = { path = "crates/trie-parallel" } -reth-optimism-consensus = { path = "crates/optimism/consensus" } -reth-node-events = { path = "crates/node/events" } -reth-testing-utils = { path = "testing/testing-utils" } # revm revm = { version = "9.0.0", features = [ From 
8263480a72fdbc622f258a597fc9158d57bc27bc Mon Sep 17 00:00:00 2001 From: Delweng Date: Fri, 24 May 2024 03:20:40 +0800 Subject: [PATCH 608/700] chore(github): run update-book-cli in the lint workflow (#8335) Signed-off-by: jsvisa --- .github/workflows/lint.yml | 21 ++++++++++++++++++++- book/cli/help.py | 16 ++++++++++++++-- book/cli/reth/db/checksum.md | 9 +++++++++ book/cli/reth/node.md | 2 +- 4 files changed, 44 insertions(+), 4 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 4f3632875afcc..bd73e09e84154 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -107,6 +107,25 @@ jobs: components: rustfmt - run: cargo fmt --all --check + book: + name: book + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: "1.76" # MSRV + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - run: cargo build --bin reth --workspace --features ethereum + env: + RUSTFLAGS: -D warnings + - run: ./book/cli/update.sh target/debug/reth + - name: Check book changes + run: git diff --exit-code + codespell: runs-on: ubuntu-latest timeout-minutes: 30 @@ -127,7 +146,7 @@ jobs: name: lint success runs-on: ubuntu-latest if: always() - needs: [clippy-binaries, clippy, crate-checks, docs, fmt, codespell, grafana] + needs: [clippy-binaries, clippy, crate-checks, docs, fmt, book, codespell, grafana] timeout-minutes: 30 steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/book/cli/help.py b/book/cli/help.py index 26ce5e69198e3..3f40a5e0b566f 100755 --- a/book/cli/help.py +++ b/book/cli/help.py @@ -262,9 +262,13 @@ def preprocess_help(s: str): "default: ", s, ) - # Remove the commit SHA and target architecture triple + # Remove the commit SHA and target architecture triple or fourth + # rustup available targets: + # aarch64-apple-darwin + # x86_64-unknown-linux-gnu + # x86_64-pc-windows-gnu s = re.sub( - 
r"default: reth/.*-[0-9A-Fa-f]{6,10}/\w+-\w*-\w+", + r"default: reth/.*-[0-9A-Fa-f]{6,10}/([_\w]+)-(\w+)-(\w+)(-\w+)?", "default: reth/-/", s, ) @@ -275,6 +279,14 @@ def preprocess_help(s: str): s, ) + # Remove rpc.max-tracing-requests default value + s = re.sub( + r"(rpc.max-tracing-requests \n.*\n.*\n.*)\[default: \d+\]", + r"\1[default: ]", + s, + flags=re.MULTILINE, + ) + return s diff --git a/book/cli/reth/db/checksum.md b/book/cli/reth/db/checksum.md index a8147b04a4d12..66f0a86a9572f 100644 --- a/book/cli/reth/db/checksum.md +++ b/book/cli/reth/db/checksum.md @@ -22,6 +22,9 @@ Options: [default: default] + --start-key + The start of the range to checksum + --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. @@ -31,6 +34,12 @@ Options: [default: mainnet] + --end-key + The end of the range to checksum + + --limit + The maximum number of records that are queried and used to compute the checksum + --instance Add a new instance of a node. diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index c73b7dd32e6e4..999601f044bd1 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -287,7 +287,7 @@ RPC: --rpc.max-tracing-requests Maximum number of concurrent tracing requests - [default: 6] + [default: ] --rpc.max-blocks-per-filter Maximum number of blocks that could be scanned per filter request. 
(0 = entire chain) From 1287bbcac77bbd3c5e5f8803c5e1ee4e579ecb3c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 23 May 2024 21:24:23 +0200 Subject: [PATCH 609/700] chore: extract provider types (#8372) --- Cargo.lock | 12 ++++++++++- Cargo.toml | 2 ++ crates/interfaces/Cargo.toml | 4 ++-- crates/interfaces/src/lib.rs | 7 ++----- crates/storage/errors/Cargo.toml | 21 +++++++++++++++++++ .../{interfaces => storage/errors}/src/db.rs | 0 crates/storage/errors/src/lib.rs | 15 +++++++++++++ .../errors}/src/provider.rs | 0 8 files changed, 53 insertions(+), 8 deletions(-) create mode 100644 crates/storage/errors/Cargo.toml rename crates/{interfaces => storage/errors}/src/db.rs (100%) create mode 100644 crates/storage/errors/src/lib.rs rename crates/{interfaces => storage/errors}/src/provider.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 1f8160c450f54..fa7dc00775e68 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7030,7 +7030,6 @@ name = "reth-interfaces" version = "0.2.0-beta.7" dependencies = [ "auto_impl", - "clap", "futures", "parking_lot 0.12.2", "rand 0.8.5", @@ -7040,6 +7039,7 @@ dependencies = [ "reth-network-api", "reth-network-types", "reth-primitives", + "reth-storage-errors", "secp256k1 0.28.2", "thiserror", "tokio", @@ -7939,6 +7939,16 @@ dependencies = [ "strum", ] +[[package]] +name = "reth-storage-errors" +version = "0.2.0-beta.7" +dependencies = [ + "clap", + "reth-fs-util", + "reth-primitives", + "thiserror", +] + [[package]] name = "reth-tasks" version = "0.2.0-beta.7" diff --git a/Cargo.toml b/Cargo.toml index b7d65b0a7be11..7b872c7956044 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -64,6 +64,7 @@ members = [ "crates/storage/codecs/", "crates/storage/codecs/derive/", "crates/storage/db/", + "crates/storage/errors/", "crates/storage/libmdbx-rs/", "crates/storage/libmdbx-rs/mdbx-sys/", "crates/storage/nippy-jar/", @@ -275,6 +276,7 @@ reth-stages = { path = "crates/stages" } reth-stages-api = { path = "crates/stages-api" } reth-static-file = { 
path = "crates/static-file" } reth-static-file-types = { path = "crates/static-file-types" } +reth-storage-errors = { path = "crates/storage/errors" } reth-tasks = { path = "crates/tasks" } reth-testing-utils = { path = "testing/testing-utils" } reth-tokio-util = { path = "crates/tokio-util" } diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index 27e2d8f390eca..4dd845314b778 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -17,6 +17,7 @@ reth-network-api.workspace = true reth-eth-wire-types.workspace = true reth-consensus.workspace = true reth-network-types.workspace = true +reth-storage-errors.workspace = true # async futures.workspace = true @@ -31,7 +32,6 @@ secp256k1 = { workspace = true, default-features = false, features = [ "recovery", "rand", ], optional = true } -clap = { workspace = true, features = ["derive"], optional = true } parking_lot = { workspace = true, optional = true } rand = { workspace = true, optional = true } @@ -45,5 +45,5 @@ secp256k1 = { workspace = true, features = ["alloc", "recovery", "rand"] } [features] test-utils = ["reth-consensus/test-utils", "secp256k1", "rand", "parking_lot"] -cli = ["clap"] +clap = ["reth-storage-errors/clap"] optimism = ["reth-eth-wire-types/optimism"] diff --git a/crates/interfaces/src/lib.rs b/crates/interfaces/src/lib.rs index e60d4a621648a..a5d38965e2b38 100644 --- a/crates/interfaces/src/lib.rs +++ b/crates/interfaces/src/lib.rs @@ -12,8 +12,8 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -/// Database error -pub mod db; +/// Storage error types +pub use reth_storage_errors::{db, provider}; /// Block Execution traits. pub mod executor; @@ -28,9 +28,6 @@ pub mod p2p; /// Trie error pub mod trie; -/// Provider error -pub mod provider; - /// Syncing related traits. 
pub mod sync; diff --git a/crates/storage/errors/Cargo.toml b/crates/storage/errors/Cargo.toml new file mode 100644 index 0000000000000..c1ce595ea927d --- /dev/null +++ b/crates/storage/errors/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "reth-storage-errors" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +reth-primitives.workspace = true +reth-fs-util.workspace = true + +thiserror.workspace = true +clap = { workspace = true, features = ["derive"], optional = true } + +[features] +clap = ["dep:clap"] \ No newline at end of file diff --git a/crates/interfaces/src/db.rs b/crates/storage/errors/src/db.rs similarity index 100% rename from crates/interfaces/src/db.rs rename to crates/storage/errors/src/db.rs diff --git a/crates/storage/errors/src/lib.rs b/crates/storage/errors/src/lib.rs new file mode 100644 index 0000000000000..6bab8f051afd7 --- /dev/null +++ b/crates/storage/errors/src/lib.rs @@ -0,0 +1,15 @@ +//! Commonly used error types used when interacting with storage. 
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +/// Database error +pub mod db; + +/// Provider error +pub mod provider; diff --git a/crates/interfaces/src/provider.rs b/crates/storage/errors/src/provider.rs similarity index 100% rename from crates/interfaces/src/provider.rs rename to crates/storage/errors/src/provider.rs From d7172b6605c43b5847ce0d3429a5ee85bf88b0b2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 23 May 2024 23:05:00 +0200 Subject: [PATCH 610/700] chore(deps): shrink some deps (#8376) --- Cargo.lock | 3 +-- crates/net/network-api/Cargo.toml | 7 ++++--- crates/net/network-api/src/lib.rs | 11 ++++++----- crates/net/network-api/src/noop.rs | 11 +++++------ 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fa7dc00775e68..85508ee1b3530 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7208,11 +7208,10 @@ dependencies = [ name = "reth-network-api" version = "0.2.0-beta.7" dependencies = [ + "alloy-primitives", "enr", - "reth-discv4", "reth-eth-wire", "reth-network-types", - "reth-primitives", "reth-rpc-types", "serde", "thiserror", diff --git a/crates/net/network-api/Cargo.toml b/crates/net/network-api/Cargo.toml index 81536aad985ed..3e8ed584fc3e6 100644 --- a/crates/net/network-api/Cargo.toml +++ b/crates/net/network-api/Cargo.toml @@ -6,19 +6,20 @@ rust-version.workspace = true license.workspace = true homepage.workspace = true repository.workspace = true -description = "Network interfaces" +description = "Network interfaces and commonly used types" [lints] workspace = true [dependencies] # reth -reth-primitives.workspace = true reth-eth-wire.workspace = true 
reth-rpc-types.workspace = true -reth-discv4.workspace = true reth-network-types.workspace = true +# ethereum +alloy-primitives.workspace = true + # eth enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index 6c3040bd9b82f..417525b7d4a37 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -13,15 +13,16 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use reth_eth_wire::{DisconnectReason, EthVersion, Status}; -use reth_network_types::PeerId; -use reth_primitives::NodeRecord; +use reth_eth_wire::{capability::Capabilities, DisconnectReason, EthVersion, Status}; +use reth_rpc_types::NetworkStatus; use std::{future::Future, net::SocketAddr, sync::Arc, time::Instant}; pub use error::NetworkError; pub use reputation::{Reputation, ReputationChangeKind}; -use reth_eth_wire::capability::Capabilities; -use reth_rpc_types::NetworkStatus; +use reth_network_types::NodeRecord; + +/// The PeerId type. +pub type PeerId = alloy_primitives::B512; /// Network Error pub mod error; diff --git a/crates/net/network-api/src/noop.rs b/crates/net/network-api/src/noop.rs index 2ace603e348e0..b022ced4bc3b2 100644 --- a/crates/net/network-api/src/noop.rs +++ b/crates/net/network-api/src/noop.rs @@ -4,14 +4,12 @@ //! generic over it. 
use crate::{ - NetworkError, NetworkInfo, PeerInfo, PeerKind, Peers, PeersInfo, Reputation, + NetworkError, NetworkInfo, PeerId, PeerInfo, PeerKind, Peers, PeersInfo, Reputation, ReputationChangeKind, }; use enr::{secp256k1::SecretKey, Enr}; -use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_eth_wire::{DisconnectReason, ProtocolVersion}; -use reth_network_types::PeerId; -use reth_primitives::{Chain, NodeRecord}; +use reth_network_types::NodeRecord; use reth_rpc_types::{admin::EthProtocolInfo, NetworkStatus}; use std::net::{IpAddr, SocketAddr}; @@ -24,7 +22,7 @@ pub struct NoopNetwork; impl NetworkInfo for NoopNetwork { fn local_addr(&self) -> SocketAddr { - (IpAddr::from(std::net::Ipv4Addr::UNSPECIFIED), DEFAULT_DISCOVERY_PORT).into() + (IpAddr::from(std::net::Ipv4Addr::UNSPECIFIED), 30303).into() } async fn network_status(&self) -> Result { @@ -42,7 +40,8 @@ impl NetworkInfo for NoopNetwork { } fn chain_id(&self) -> u64 { - Chain::mainnet().into() + // mainnet + 1 } fn is_syncing(&self) -> bool { From df7c9ee3101f89b4ed10f2f78686bb16bf31d6f3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 24 May 2024 10:34:09 +0200 Subject: [PATCH 611/700] chore: extract p2p types from interfaces (#8382) --- Cargo.lock | 29 +++++++---- Cargo.toml | 2 + bin/reth/Cargo.toml | 1 - crates/blockchain-tree/Cargo.toml | 2 +- crates/consensus/beacon/Cargo.toml | 1 - crates/interfaces/Cargo.toml | 27 ++--------- crates/interfaces/src/lib.rs | 6 +-- crates/net/network-api/src/lib.rs | 2 +- crates/net/p2p/Cargo.toml | 48 +++++++++++++++++++ .../src/p2p => net/p2p/src}/bodies/client.rs | 2 +- .../p2p => net/p2p/src}/bodies/downloader.rs | 4 +- .../src/p2p => net/p2p/src}/bodies/mod.rs | 0 .../p2p => net/p2p/src}/bodies/response.rs | 0 .../src/p2p => net/p2p/src}/download.rs | 0 .../src/p2p => net/p2p/src}/either.rs | 2 +- .../src/p2p => net/p2p/src}/error.rs | 2 +- .../src/p2p => net/p2p/src}/full_block.rs | 5 +- .../src/p2p => net/p2p/src}/headers/client.rs | 2 +- .../p2p => 
net/p2p/src}/headers/downloader.rs | 4 +- .../src/p2p => net/p2p/src}/headers/error.rs | 0 .../src/p2p => net/p2p/src}/headers/mod.rs | 0 .../src/p2p/mod.rs => net/p2p/src/lib.rs} | 19 +++++++- .../src/p2p => net/p2p/src}/priority.rs | 0 .../p2p}/src/test_utils/bodies.rs | 2 +- .../p2p}/src/test_utils/full_block.rs | 2 +- .../p2p}/src/test_utils/generators.rs | 9 +--- .../p2p}/src/test_utils/headers.rs | 26 +++++----- .../p2p}/src/test_utils/mod.rs | 0 crates/node-core/Cargo.toml | 1 - crates/optimism/evm/Cargo.toml | 1 - crates/storage/provider/Cargo.toml | 2 +- 31 files changed, 124 insertions(+), 77 deletions(-) create mode 100644 crates/net/p2p/Cargo.toml rename crates/{interfaces/src/p2p => net/p2p/src}/bodies/client.rs (95%) rename crates/{interfaces/src/p2p => net/p2p/src}/bodies/downloader.rs (82%) rename crates/{interfaces/src/p2p => net/p2p/src}/bodies/mod.rs (100%) rename crates/{interfaces/src/p2p => net/p2p/src}/bodies/response.rs (100%) rename crates/{interfaces/src/p2p => net/p2p/src}/download.rs (100%) rename crates/{interfaces/src/p2p => net/p2p/src}/either.rs (99%) rename crates/{interfaces/src/p2p => net/p2p/src}/error.rs (99%) rename crates/{interfaces/src/p2p => net/p2p/src}/full_block.rs (99%) rename crates/{interfaces/src/p2p => net/p2p/src}/headers/client.rs (96%) rename crates/{interfaces/src/p2p => net/p2p/src}/headers/downloader.rs (95%) rename crates/{interfaces/src/p2p => net/p2p/src}/headers/error.rs (100%) rename crates/{interfaces/src/p2p => net/p2p/src}/headers/mod.rs (100%) rename crates/{interfaces/src/p2p/mod.rs => net/p2p/src/lib.rs} (53%) rename crates/{interfaces/src/p2p => net/p2p/src}/priority.rs (100%) rename crates/{interfaces => net/p2p}/src/test_utils/bodies.rs (98%) rename crates/{interfaces => net/p2p}/src/test_utils/full_block.rs (99%) rename crates/{interfaces => net/p2p}/src/test_utils/generators.rs (98%) rename crates/{interfaces => net/p2p}/src/test_utils/headers.rs (99%) rename crates/{interfaces => 
net/p2p}/src/test_utils/mod.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 85508ee1b3530..385bc912f4f96 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7030,20 +7030,13 @@ name = "reth-interfaces" version = "0.2.0-beta.7" dependencies = [ "auto_impl", - "futures", - "parking_lot 0.12.2", - "rand 0.8.5", "reth-consensus", - "reth-eth-wire-types", "reth-fs-util", "reth-network-api", - "reth-network-types", + "reth-network-p2p", "reth-primitives", "reth-storage-errors", - "secp256k1 0.28.2", "thiserror", - "tokio", - "tracing", ] [[package]] @@ -7218,6 +7211,26 @@ dependencies = [ "tokio", ] +[[package]] +name = "reth-network-p2p" +version = "0.2.0-beta.7" +dependencies = [ + "auto_impl", + "futures", + "parking_lot 0.12.2", + "rand 0.8.5", + "reth-consensus", + "reth-eth-wire-types", + "reth-network-api", + "reth-network-types", + "reth-primitives", + "reth-storage-errors", + "secp256k1 0.28.2", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "reth-network-types" version = "0.2.0-beta.7" diff --git a/Cargo.toml b/Cargo.toml index 7b872c7956044..a3ac684920a01 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,6 +32,7 @@ members = [ "crates/net/nat/", "crates/net/network-api/", "crates/net/network/", + "crates/net/p2p/", "crates/net/types/", "crates/node-core/", "crates/node/api/", @@ -249,6 +250,7 @@ reth-net-nat = { path = "crates/net/nat" } reth-network = { path = "crates/net/network" } reth-network-api = { path = "crates/net/network-api" } reth-network-types = { path = "crates/net/types" } +reth-network-p2p = { path = "crates/net/p2p" } reth-nippy-jar = { path = "crates/storage/nippy-jar" } reth-node-api = { path = "crates/node/api" } reth-node-builder = { path = "crates/node/builder" } diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index b95140aadfa4a..0bde1f4de8b56 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -134,7 +134,6 @@ min-trace-logs = ["tracing/release_max_level_trace"] optimism = [ "reth-primitives/optimism", - 
"reth-interfaces/optimism", "reth-rpc/optimism", "reth-provider/optimism", "reth-beacon-consensus/optimism", diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index dc9e13866e3ef..fae2b37358909 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -50,4 +50,4 @@ assert_matches.workspace = true [features] test-utils = [] -optimism = ["reth-primitives/optimism", "reth-interfaces/optimism", "reth-provider/optimism"] +optimism = ["reth-primitives/optimism", "reth-provider/optimism"] diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 659ef02c175f0..a5cef8e342777 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -67,7 +67,6 @@ assert_matches.workspace = true [features] optimism = [ "reth-primitives/optimism", - "reth-interfaces/optimism", "reth-provider/optimism", "reth-blockchain-tree/optimism", "reth-ethereum-consensus/optimism", diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index 4dd845314b778..6aec8a4137f81 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -14,36 +14,15 @@ workspace = true reth-primitives.workspace = true reth-fs-util.workspace = true reth-network-api.workspace = true -reth-eth-wire-types.workspace = true reth-consensus.workspace = true -reth-network-types.workspace = true reth-storage-errors.workspace = true - -# async -futures.workspace = true -tokio = { workspace = true, features = ["sync"] } +reth-network-p2p.workspace = true # misc auto_impl.workspace = true thiserror.workspace = true -tracing.workspace = true -secp256k1 = { workspace = true, default-features = false, features = [ - "alloc", - "recovery", - "rand", -], optional = true } -parking_lot = { workspace = true, optional = true } -rand = { workspace = true, optional = true } - -[dev-dependencies] -reth-consensus = { workspace = true, features = ["test-utils"] } 
-parking_lot.workspace = true -rand.workspace = true -tokio = { workspace = true, features = ["full"] } -secp256k1 = { workspace = true, features = ["alloc", "recovery", "rand"] } [features] -test-utils = ["reth-consensus/test-utils", "secp256k1", "rand", "parking_lot"] -clap = ["reth-storage-errors/clap"] -optimism = ["reth-eth-wire-types/optimism"] +test-utils = ["reth-consensus/test-utils", "reth-network-p2p/test-utils"] +clap = ["reth-storage-errors/clap"] \ No newline at end of file diff --git a/crates/interfaces/src/lib.rs b/crates/interfaces/src/lib.rs index a5d38965e2b38..bd92f9d8df013 100644 --- a/crates/interfaces/src/lib.rs +++ b/crates/interfaces/src/lib.rs @@ -23,7 +23,7 @@ mod error; pub use error::{RethError, RethResult}; /// P2P traits. -pub mod p2p; +pub use reth_network_p2p as p2p; /// Trie error pub mod trie; @@ -34,6 +34,6 @@ pub mod sync; /// BlockchainTree related traits. pub mod blockchain_tree; -#[cfg(any(test, feature = "test-utils"))] /// Common test helpers for mocking out Consensus, Downloaders and Header Clients. -pub mod test_utils; +#[cfg(feature = "test-utils")] +pub use reth_network_p2p::test_utils; diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index 417525b7d4a37..10ffddb6a3074 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -1,4 +1,4 @@ -//! Reth network interface definitions. +//! Reth interface definitions and commonly used types for the reth-network crate. //! //! Provides abstractions for the reth-network crate. //! 
diff --git a/crates/net/p2p/Cargo.toml b/crates/net/p2p/Cargo.toml new file mode 100644 index 0000000000000..34705d78e5225 --- /dev/null +++ b/crates/net/p2p/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "reth-network-p2p" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "traits and commonly used types for p2p and network communication" + +[lints] +workspace = true + +[dependencies] +reth-primitives.workspace = true +reth-network-api.workspace = true +reth-eth-wire-types.workspace = true +reth-consensus.workspace = true +reth-network-types.workspace = true +reth-storage-errors.workspace = true + +# async +futures.workspace = true +tokio = { workspace = true, features = ["sync"] } + +# misc +auto_impl.workspace = true +thiserror.workspace = true +tracing.workspace = true + +secp256k1 = { workspace = true, default-features = false, features = [ + "alloc", + "recovery", + "rand", +], optional = true } +parking_lot = { workspace = true, optional = true } +rand = { workspace = true, optional = true } + +[dev-dependencies] +reth-consensus = { workspace = true, features = ["test-utils"] } + +parking_lot.workspace = true +rand.workspace = true +tokio = { workspace = true, features = ["full"] } +secp256k1 = { workspace = true, features = ["alloc", "recovery", "rand"] } + +[features] +test-utils = ["reth-consensus/test-utils", "secp256k1", "rand", "parking_lot"] diff --git a/crates/interfaces/src/p2p/bodies/client.rs b/crates/net/p2p/src/bodies/client.rs similarity index 95% rename from crates/interfaces/src/p2p/bodies/client.rs rename to crates/net/p2p/src/bodies/client.rs index 4b7f3366a2490..3a36da50016e5 100644 --- a/crates/interfaces/src/p2p/bodies/client.rs +++ b/crates/net/p2p/src/bodies/client.rs @@ -3,7 +3,7 @@ use std::{ task::{ready, Context, Poll}, }; -use crate::p2p::{download::DownloadClient, error::PeerRequestResult, 
priority::Priority}; +use crate::{download::DownloadClient, error::PeerRequestResult, priority::Priority}; use futures::{Future, FutureExt}; use reth_primitives::{BlockBody, B256}; diff --git a/crates/interfaces/src/p2p/bodies/downloader.rs b/crates/net/p2p/src/bodies/downloader.rs similarity index 82% rename from crates/interfaces/src/p2p/bodies/downloader.rs rename to crates/net/p2p/src/bodies/downloader.rs index 86a7698ae84a6..f7f5e9c92ebc8 100644 --- a/crates/interfaces/src/p2p/bodies/downloader.rs +++ b/crates/net/p2p/src/bodies/downloader.rs @@ -1,5 +1,5 @@ use super::response::BlockResponse; -use crate::p2p::error::DownloadResult; +use crate::error::DownloadResult; use futures::Stream; use reth_primitives::BlockNumber; use std::ops::RangeInclusive; @@ -10,7 +10,7 @@ pub type BodyDownloaderResult = DownloadResult>; /// A downloader capable of fetching and yielding block bodies from block headers. /// /// A downloader represents a distinct strategy for submitting requests to download block bodies, -/// while a [BodiesClient][crate::p2p::bodies::client::BodiesClient] represents a client capable of +/// while a [BodiesClient][crate::bodies::client::BodiesClient] represents a client capable of /// fulfilling these requests. pub trait BodyDownloader: Send + Sync + Stream + Unpin { /// Method for setting the download range. 
diff --git a/crates/interfaces/src/p2p/bodies/mod.rs b/crates/net/p2p/src/bodies/mod.rs similarity index 100% rename from crates/interfaces/src/p2p/bodies/mod.rs rename to crates/net/p2p/src/bodies/mod.rs diff --git a/crates/interfaces/src/p2p/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs similarity index 100% rename from crates/interfaces/src/p2p/bodies/response.rs rename to crates/net/p2p/src/bodies/response.rs diff --git a/crates/interfaces/src/p2p/download.rs b/crates/net/p2p/src/download.rs similarity index 100% rename from crates/interfaces/src/p2p/download.rs rename to crates/net/p2p/src/download.rs diff --git a/crates/interfaces/src/p2p/either.rs b/crates/net/p2p/src/either.rs similarity index 99% rename from crates/interfaces/src/p2p/either.rs rename to crates/net/p2p/src/either.rs index ed9d50c736f13..36e95d487a6d8 100644 --- a/crates/interfaces/src/p2p/either.rs +++ b/crates/net/p2p/src/either.rs @@ -1,6 +1,6 @@ //! Support for different download types. -use crate::p2p::{ +use crate::{ bodies::client::BodiesClient, download::DownloadClient, headers::client::{HeadersClient, HeadersRequest}, diff --git a/crates/interfaces/src/p2p/error.rs b/crates/net/p2p/src/error.rs similarity index 99% rename from crates/interfaces/src/p2p/error.rs rename to crates/net/p2p/src/error.rs index 1a847b6494920..3bd469e605649 100644 --- a/crates/interfaces/src/p2p/error.rs +++ b/crates/net/p2p/src/error.rs @@ -1,11 +1,11 @@ use super::headers::client::HeadersRequest; -use crate::{db::DatabaseError, provider::ProviderError}; use reth_consensus::ConsensusError; use reth_network_api::ReputationChangeKind; use reth_network_types::WithPeerId; use reth_primitives::{ BlockHashOrNumber, BlockNumber, GotExpected, GotExpectedBoxed, Header, B256, }; +use reth_storage_errors::{db::DatabaseError, provider::ProviderError}; use std::ops::RangeInclusive; use thiserror::Error; use tokio::sync::{mpsc, oneshot}; diff --git a/crates/interfaces/src/p2p/full_block.rs 
b/crates/net/p2p/src/full_block.rs similarity index 99% rename from crates/interfaces/src/p2p/full_block.rs rename to crates/net/p2p/src/full_block.rs index dd8cfff4d4cf9..997ab74bb8961 100644 --- a/crates/interfaces/src/p2p/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -1,5 +1,5 @@ use super::headers::client::HeadersRequest; -use crate::p2p::{ +use crate::{ bodies::client::{BodiesClient, SingleBodyRequest}, error::PeerRequestResult, headers::client::{HeadersClient, SingleHeaderRequest}, @@ -727,11 +727,10 @@ enum RangeResponseResult { #[cfg(test)] mod tests { - use std::ops::Range; - use super::*; use crate::test_utils::TestFullBlockClient; use futures::StreamExt; + use std::ops::Range; #[tokio::test] async fn download_single_full_block() { diff --git a/crates/interfaces/src/p2p/headers/client.rs b/crates/net/p2p/src/headers/client.rs similarity index 96% rename from crates/interfaces/src/p2p/headers/client.rs rename to crates/net/p2p/src/headers/client.rs index cf5355308695b..5b70aa1e52827 100644 --- a/crates/interfaces/src/p2p/headers/client.rs +++ b/crates/net/p2p/src/headers/client.rs @@ -1,4 +1,4 @@ -use crate::p2p::{download::DownloadClient, error::PeerRequestResult, priority::Priority}; +use crate::{download::DownloadClient, error::PeerRequestResult, priority::Priority}; use futures::{Future, FutureExt}; pub use reth_eth_wire_types::BlockHeaders; use reth_primitives::{BlockHashOrNumber, Header, HeadersDirection}; diff --git a/crates/interfaces/src/p2p/headers/downloader.rs b/crates/net/p2p/src/headers/downloader.rs similarity index 95% rename from crates/interfaces/src/p2p/headers/downloader.rs rename to crates/net/p2p/src/headers/downloader.rs index 500a1a1bc844b..b52a8487710fa 100644 --- a/crates/interfaces/src/p2p/headers/downloader.rs +++ b/crates/net/p2p/src/headers/downloader.rs @@ -1,12 +1,12 @@ use super::error::HeadersDownloaderResult; -use crate::p2p::error::{DownloadError, DownloadResult}; +use crate::error::{DownloadError, 
DownloadResult}; use futures::Stream; use reth_consensus::Consensus; use reth_primitives::{BlockHashOrNumber, SealedHeader, B256}; /// A downloader capable of fetching and yielding block headers. /// /// A downloader represents a distinct strategy for submitting requests to download block headers, -/// while a [HeadersClient][crate::p2p::headers::client::HeadersClient] represents a client capable +/// while a [HeadersClient][crate::headers::client::HeadersClient] represents a client capable /// of fulfilling these requests. /// /// A [HeaderDownloader] is a [Stream] that returns batches of headers. diff --git a/crates/interfaces/src/p2p/headers/error.rs b/crates/net/p2p/src/headers/error.rs similarity index 100% rename from crates/interfaces/src/p2p/headers/error.rs rename to crates/net/p2p/src/headers/error.rs diff --git a/crates/interfaces/src/p2p/headers/mod.rs b/crates/net/p2p/src/headers/mod.rs similarity index 100% rename from crates/interfaces/src/p2p/headers/mod.rs rename to crates/net/p2p/src/headers/mod.rs diff --git a/crates/interfaces/src/p2p/mod.rs b/crates/net/p2p/src/lib.rs similarity index 53% rename from crates/interfaces/src/p2p/mod.rs rename to crates/net/p2p/src/lib.rs index 75f3a8fc4c5fb..ed20ab849d8c3 100644 --- a/crates/interfaces/src/p2p/mod.rs +++ b/crates/net/p2p/src/lib.rs @@ -1,3 +1,16 @@ +//! Provides abstractions and commonly used types for p2p. +//! +//! ## Feature Flags +//! +//! - `test-utils`: Export utilities for testing +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + /// Shared abstractions for downloader implementations. pub mod download; @@ -15,7 +28,7 @@ pub mod full_block; /// [`HeadersClient`]. 
/// /// [`Consensus`]: reth_consensus::Consensus -/// [`HeadersClient`]: crate::p2p::headers::client::HeadersClient +/// [`HeadersClient`]: crate::headers::client::HeadersClient pub mod headers; /// Error types broadly used by p2p interfaces for any operation which may produce an error when @@ -24,3 +37,7 @@ pub mod error; /// Priority enum for BlockHeader and BlockBody requests pub mod priority; + +/// Common test helpers for mocking out Consensus, Downloaders and Header Clients. +#[cfg(any(test, feature = "test-utils"))] +pub mod test_utils; diff --git a/crates/interfaces/src/p2p/priority.rs b/crates/net/p2p/src/priority.rs similarity index 100% rename from crates/interfaces/src/p2p/priority.rs rename to crates/net/p2p/src/priority.rs diff --git a/crates/interfaces/src/test_utils/bodies.rs b/crates/net/p2p/src/test_utils/bodies.rs similarity index 98% rename from crates/interfaces/src/test_utils/bodies.rs rename to crates/net/p2p/src/test_utils/bodies.rs index 8f0bfcef09fec..46bd3ec9b8889 100644 --- a/crates/interfaces/src/test_utils/bodies.rs +++ b/crates/net/p2p/src/test_utils/bodies.rs @@ -1,4 +1,4 @@ -use crate::p2p::{ +use crate::{ bodies::client::{BodiesClient, BodiesFut}, download::DownloadClient, error::PeerRequestResult, diff --git a/crates/interfaces/src/test_utils/full_block.rs b/crates/net/p2p/src/test_utils/full_block.rs similarity index 99% rename from crates/interfaces/src/test_utils/full_block.rs rename to crates/net/p2p/src/test_utils/full_block.rs index 95c1c2b3a0fbc..c0a26539f87ec 100644 --- a/crates/interfaces/src/test_utils/full_block.rs +++ b/crates/net/p2p/src/test_utils/full_block.rs @@ -1,4 +1,4 @@ -use crate::p2p::{ +use crate::{ bodies::client::BodiesClient, download::DownloadClient, error::PeerRequestResult, diff --git a/crates/interfaces/src/test_utils/generators.rs b/crates/net/p2p/src/test_utils/generators.rs similarity index 98% rename from crates/interfaces/src/test_utils/generators.rs rename to 
crates/net/p2p/src/test_utils/generators.rs index 506358276c740..9da1429ea5193 100644 --- a/crates/interfaces/src/test_utils/generators.rs +++ b/crates/net/p2p/src/test_utils/generators.rs @@ -14,9 +14,6 @@ use std::{ ops::{Range, RangeInclusive}, }; -// TODO(onbjerg): Maybe we should split this off to its own crate, or move the helpers to the -// relevant crates? - /// Returns a random number generator that can be seeded using the `SEED` environment variable. /// /// If `SEED` is not set, a random seed is used. @@ -353,6 +350,7 @@ pub fn random_receipt( ) -> Receipt { let success = rng.gen::(); let logs_count = logs_count.unwrap_or_else(|| rng.gen::()); + #[allow(clippy::needless_update)] // side-effect of optimism fields Receipt { tx_type: transaction.tx_type(), success, @@ -362,10 +360,7 @@ pub fn random_receipt( } else { vec![] }, - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, + ..Default::default() } } diff --git a/crates/interfaces/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs similarity index 99% rename from crates/interfaces/src/test_utils/headers.rs rename to crates/net/p2p/src/test_utils/headers.rs index 0272c68d3048f..354732c2d2f0b 100644 --- a/crates/interfaces/src/test_utils/headers.rs +++ b/crates/net/p2p/src/test_utils/headers.rs @@ -1,19 +1,6 @@ //! Testing support for headers related interfaces. 
-use std::{ - fmt, - pin::Pin, - sync::{ - atomic::{AtomicU64, Ordering}, - Arc, - }, - task::{ready, Context, Poll}, -}; - -use futures::{Future, FutureExt, Stream, StreamExt}; -use tokio::sync::Mutex; - -use crate::p2p::{ +use crate::{ download::DownloadClient, error::{DownloadError, DownloadResult, PeerRequestResult, RequestError}, headers::{ @@ -23,9 +10,20 @@ use crate::p2p::{ }, priority::Priority, }; +use futures::{Future, FutureExt, Stream, StreamExt}; use reth_consensus::{test_utils::TestConsensus, Consensus}; use reth_network_types::{PeerId, WithPeerId}; use reth_primitives::{Header, HeadersDirection, SealedHeader}; +use std::{ + fmt, + pin::Pin, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + task::{ready, Context, Poll}, +}; +use tokio::sync::Mutex; /// A test downloader which just returns the values that have been pushed to it. #[derive(Debug)] diff --git a/crates/interfaces/src/test_utils/mod.rs b/crates/net/p2p/src/test_utils/mod.rs similarity index 100% rename from crates/interfaces/src/test_utils/mod.rs rename to crates/net/p2p/src/test_utils/mod.rs diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index b0ed1fae159cd..c24060943a832 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -104,7 +104,6 @@ assert_matches.workspace = true [features] optimism = [ "reth-primitives/optimism", - "reth-interfaces/optimism", "reth-rpc/optimism", "reth-rpc-engine-api/optimism", "reth-provider/optimism", diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 5af74476117e8..f9008dc37f363 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -37,7 +37,6 @@ reth-revm = { workspace = true, features = ["test-utils"] } optimism = [ "reth-primitives/optimism", "reth-provider/optimism", - "reth-interfaces/optimism", "revm-primitives/optimism", "reth-optimism-consensus/optimism", ] diff --git a/crates/storage/provider/Cargo.toml 
b/crates/storage/provider/Cargo.toml index 4fe4ffbb95994..52f41c7f80c2f 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -65,4 +65,4 @@ rand.workspace = true [features] test-utils = ["alloy-rlp", "reth-db/test-utils", "reth-nippy-jar/test-utils"] -optimism = ["reth-primitives/optimism", "reth-interfaces/optimism"] +optimism = ["reth-primitives/optimism"] From 05fddd3454f595224762962e250dfb7d52deecc6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 24 May 2024 11:41:39 +0200 Subject: [PATCH 612/700] chore: replace interfaces dep with storage-errors (#8384) --- Cargo.lock | 1 + crates/storage/db/Cargo.toml | 2 +- crates/storage/db/src/implementation/mdbx/cursor.rs | 2 +- crates/storage/db/src/implementation/mdbx/mod.rs | 4 ++-- crates/storage/db/src/implementation/mdbx/tx.rs | 4 ++-- crates/storage/db/src/lib.rs | 2 +- crates/storage/db/src/static_file/cursor.rs | 2 +- crates/storage/db/src/static_file/generation.rs | 2 +- 8 files changed, 10 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 385bc912f4f96..82542633fce76 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6646,6 +6646,7 @@ dependencies = [ "reth-metrics", "reth-nippy-jar", "reth-primitives", + "reth-storage-errors", "reth-tracing", "rustc-hash", "serde", diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index a764f270d618e..34de306f69534 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -15,7 +15,7 @@ workspace = true # reth reth-primitives.workspace = true reth-fs-util.workspace = true -reth-interfaces.workspace = true +reth-storage-errors.workspace = true reth-codecs.workspace = true reth-libmdbx = { workspace = true, optional = true, features = [ "return-borrowed", diff --git a/crates/storage/db/src/implementation/mdbx/cursor.rs b/crates/storage/db/src/implementation/mdbx/cursor.rs index 3d1a8815291ba..a34525f20e0e4 100644 --- a/crates/storage/db/src/implementation/mdbx/cursor.rs 
+++ b/crates/storage/db/src/implementation/mdbx/cursor.rs @@ -11,8 +11,8 @@ use crate::{ tables::utils::*, DatabaseError, }; -use reth_interfaces::db::{DatabaseErrorInfo, DatabaseWriteError, DatabaseWriteOperation}; use reth_libmdbx::{Error as MDBXError, TransactionKind, WriteFlags, RO, RW}; +use reth_storage_errors::db::{DatabaseErrorInfo, DatabaseWriteError, DatabaseWriteOperation}; use std::{borrow::Cow, collections::Bound, marker::PhantomData, ops::RangeBounds, sync::Arc}; /// Read only Cursor. diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 1db86bc54f423..58977811f2387 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -13,11 +13,11 @@ use crate::{ }; use eyre::Context; use metrics::{gauge, Label}; -use reth_interfaces::db::LogLevel; use reth_libmdbx::{ DatabaseFlags, Environment, EnvironmentFlags, Geometry, MaxReadTransactionDuration, Mode, PageSize, SyncMode, RO, RW, }; +use reth_storage_errors::db::LogLevel; use reth_tracing::tracing::error; use std::{ ops::Deref, @@ -455,9 +455,9 @@ mod tests { test_utils::*, AccountChangeSets, }; - use reth_interfaces::db::{DatabaseWriteError, DatabaseWriteOperation}; use reth_libmdbx::Error; use reth_primitives::{Account, Address, Header, IntegerList, StorageEntry, B256, U256}; + use reth_storage_errors::db::{DatabaseWriteError, DatabaseWriteOperation}; use std::str::FromStr; use tempfile::TempDir; diff --git a/crates/storage/db/src/implementation/mdbx/tx.rs b/crates/storage/db/src/implementation/mdbx/tx.rs index 62380861980b6..184ca4d1cc38a 100644 --- a/crates/storage/db/src/implementation/mdbx/tx.rs +++ b/crates/storage/db/src/implementation/mdbx/tx.rs @@ -9,8 +9,8 @@ use crate::{ DatabaseError, }; use once_cell::sync::OnceCell; -use reth_interfaces::db::{DatabaseWriteError, DatabaseWriteOperation}; use reth_libmdbx::{ffi::DBI, CommitLatency, Transaction, TransactionKind, 
WriteFlags, RW}; +use reth_storage_errors::db::{DatabaseWriteError, DatabaseWriteOperation}; use reth_tracing::tracing::{debug, trace, warn}; use std::{ backtrace::Backtrace, @@ -395,8 +395,8 @@ mod tests { database::Database, mdbx::DatabaseArguments, models::client_version::ClientVersion, tables, transaction::DbTx, DatabaseEnv, DatabaseEnvKind, }; - use reth_interfaces::db::DatabaseError; use reth_libmdbx::MaxReadTransactionDuration; + use reth_storage_errors::db::DatabaseError; use std::{sync::atomic::Ordering, thread::sleep, time::Duration}; use tempfile::tempdir; diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index 6b6a22319f84f..102374c3bc344 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -81,7 +81,7 @@ pub mod mdbx { } pub use abstraction::*; -pub use reth_interfaces::db::{DatabaseError, DatabaseWriteOperation}; +pub use reth_storage_errors::db::{DatabaseError, DatabaseWriteOperation}; pub use tables::*; pub use utils::is_database_empty; diff --git a/crates/storage/db/src/static_file/cursor.rs b/crates/storage/db/src/static_file/cursor.rs index 89337b56e12ef..ac08430d4541e 100644 --- a/crates/storage/db/src/static_file/cursor.rs +++ b/crates/storage/db/src/static_file/cursor.rs @@ -1,9 +1,9 @@ use super::mask::{ColumnSelectorOne, ColumnSelectorThree, ColumnSelectorTwo}; use crate::table::Decompress; use derive_more::{Deref, DerefMut}; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_nippy_jar::{DataReader, NippyJar, NippyJarCursor}; use reth_primitives::{static_file::SegmentHeader, B256}; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::sync::Arc; /// Cursor of a static file segment. 
diff --git a/crates/storage/db/src/static_file/generation.rs b/crates/storage/db/src/static_file/generation.rs index 50db32adb2aeb..b663f14626844 100644 --- a/crates/storage/db/src/static_file/generation.rs +++ b/crates/storage/db/src/static_file/generation.rs @@ -5,8 +5,8 @@ use crate::{ RawKey, RawTable, }; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_nippy_jar::{ColumnResult, NippyJar, NippyJarHeader, PHFKey}; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_tracing::tracing::*; use std::{error::Error as StdError, ops::RangeInclusive}; From 76d7f4e1b9ecd10cca4cf1ceaa59ef105eaab07f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 24 May 2024 11:41:48 +0200 Subject: [PATCH 613/700] chore: import codecs directly (#8385) --- Cargo.lock | 2 +- Cargo.toml | 1 + crates/net/eth-wire-types/Cargo.toml | 2 +- crates/net/eth-wire-types/src/blocks.rs | 2 +- crates/net/eth-wire-types/src/broadcast.rs | 2 +- crates/net/eth-wire-types/src/receipts.rs | 2 +- crates/net/eth-wire-types/src/state.rs | 2 +- crates/net/eth-wire-types/src/status.rs | 2 +- crates/net/eth-wire-types/src/transactions.rs | 2 +- 9 files changed, 9 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 82542633fce76..bde7478c0eca7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6882,7 +6882,7 @@ dependencies = [ "proptest", "proptest-derive", "rand 0.8.5", - "reth-codecs", + "reth-codecs-derive", "reth-net-common", "reth-primitives", "reth-tracing", diff --git a/Cargo.toml b/Cargo.toml index a3ac684920a01..2fd78aeebd82b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -216,6 +216,7 @@ reth-beacon-consensus = { path = "crates/consensus/beacon" } reth-blockchain-tree = { path = "crates/blockchain-tree" } reth-cli-runner = { path = "crates/cli/runner" } reth-codecs = { path = "crates/storage/codecs" } +reth-codecs-derive = { path = "crates/storage/codecs/derive" } reth-config = { path = "crates/config" } reth-consensus = { path = 
"crates/consensus/consensus" } reth-consensus-common = { path = "crates/consensus/common" } diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index d68fbbd1f0c46..713744d5554c8 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth -reth-codecs.workspace = true +reth-codecs-derive.workspace = true reth-primitives.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index 36b8e6e8ca9eb..d8c13062d2693 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -2,7 +2,7 @@ //! types. use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; -use reth_codecs::{add_arbitrary_tests, derive_arbitrary}; +use reth_codecs_derive::{add_arbitrary_tests, derive_arbitrary}; use reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection, B256}; #[cfg(any(test, feature = "arbitrary"))] diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index 693625239220a..b648f5a22d609 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -6,7 +6,7 @@ use alloy_rlp::{ }; use derive_more::{Constructor, Deref, DerefMut, From, IntoIterator}; -use reth_codecs::derive_arbitrary; +use reth_codecs_derive::derive_arbitrary; use reth_primitives::{ Block, Bytes, PooledTransactionsElement, TransactionSigned, TxHash, B256, U128, }; diff --git a/crates/net/eth-wire-types/src/receipts.rs b/crates/net/eth-wire-types/src/receipts.rs index 87a0e10deac54..ea2c21dd090ee 100644 --- a/crates/net/eth-wire-types/src/receipts.rs +++ b/crates/net/eth-wire-types/src/receipts.rs @@ -1,7 +1,7 @@ //! Implements the `GetReceipts` and `Receipts` message types. 
use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; -use reth_codecs::derive_arbitrary; +use reth_codecs_derive::derive_arbitrary; use reth_primitives::{ReceiptWithBloom, B256}; #[cfg(feature = "serde")] diff --git a/crates/net/eth-wire-types/src/state.rs b/crates/net/eth-wire-types/src/state.rs index 334184b3b72b2..5f3dc833950f8 100644 --- a/crates/net/eth-wire-types/src/state.rs +++ b/crates/net/eth-wire-types/src/state.rs @@ -1,7 +1,7 @@ //! Implements the `GetNodeData` and `NodeData` message types. use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; -use reth_codecs::derive_arbitrary; +use reth_codecs_derive::derive_arbitrary; use reth_primitives::{Bytes, B256}; #[cfg(feature = "serde")] diff --git a/crates/net/eth-wire-types/src/status.rs b/crates/net/eth-wire-types/src/status.rs index 6dd3f7eb42a05..fc6f7fd2c7d7b 100644 --- a/crates/net/eth-wire-types/src/status.rs +++ b/crates/net/eth-wire-types/src/status.rs @@ -1,6 +1,6 @@ use crate::EthVersion; use alloy_rlp::{RlpDecodable, RlpEncodable}; -use reth_codecs::derive_arbitrary; +use reth_codecs_derive::derive_arbitrary; use reth_primitives::{ hex, Chain, ChainSpec, ForkId, Genesis, Hardfork, Head, NamedChain, B256, MAINNET, U256, }; diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index f19bbdcc74431..2a7313ad1f31c 100644 --- a/crates/net/eth-wire-types/src/transactions.rs +++ b/crates/net/eth-wire-types/src/transactions.rs @@ -2,7 +2,7 @@ use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; use derive_more::{Constructor, Deref, IntoIterator}; -use reth_codecs::derive_arbitrary; +use reth_codecs_derive::derive_arbitrary; use reth_primitives::{ transaction::TransactionConversionError, PooledTransactionsElement, TransactionSigned, B256, }; From 6df2b1c1e9dfcbc36500a38579543b571a2363be Mon Sep 17 00:00:00 2001 From: bsh98 <31482749+bsh98@users.noreply.github.com> Date: Fri, 24 May 2024 02:54:32 -0700 Subject: [PATCH 614/700] fix(book): 
rethdb usage with op-node (#8375) --- book/run/optimism.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/book/run/optimism.md b/book/run/optimism.md index 8a5392d631e47..004cc8abb75ee 100644 --- a/book/run/optimism.md +++ b/book/run/optimism.md @@ -92,7 +92,7 @@ op-node \ --l1.trustrpc ``` -If you opted to build the `op-node` with the `rethdb` build tag, this "`RPCKind`" can be enabled via appending two extra flags to the `op-node` invocation: +If you opted to build the `op-node` with the `rethdb` build tag, this feature can be enabled by appending one extra flag to the `op-node` invocation: > Note, the `reth_db_path` is the path to the `db` folder inside of the reth datadir, not the `mdbx.dat` file itself. This can be fetched from `op-reth db path [--chain ]`, or if you are using a custom datadir location via the `--datadir` flag, > by appending `/db` to the end of the path. @@ -100,7 +100,6 @@ If you opted to build the `op-node` with the `rethdb` build tag, this "`RPCKind` ```sh op-node \ # ... 
- --l1.rpckind=reth_db \ --l1.rethdb= ``` From 9f61d1856f3c8ae69066802e4c22880e9ba56c74 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 24 May 2024 12:11:08 +0200 Subject: [PATCH 615/700] chore: extract block execution errors (#8386) --- Cargo.lock | 15 +++++++++++++-- Cargo.toml | 2 ++ crates/ethereum/evm/Cargo.toml | 1 - crates/ethereum/evm/src/execute.rs | 8 ++------ crates/evm/Cargo.toml | 4 +++- crates/evm/execution-errors/Cargo.toml | 18 ++++++++++++++++++ .../execution-errors/src/lib.rs} | 15 ++++++++++++++- .../execution-errors/src/trie.rs} | 6 ++++-- crates/evm/src/either.rs | 3 ++- crates/evm/src/execute.rs | 4 +++- crates/evm/src/test_utils.rs | 3 ++- crates/interfaces/Cargo.toml | 7 ++++--- crates/interfaces/src/blockchain_tree/error.rs | 8 +++----- crates/interfaces/src/error.rs | 9 +++------ crates/interfaces/src/lib.rs | 4 ++-- crates/optimism/evm/src/error.rs | 2 +- crates/optimism/evm/src/execute.rs | 8 ++------ 17 files changed, 78 insertions(+), 39 deletions(-) create mode 100644 crates/evm/execution-errors/Cargo.toml rename crates/{interfaces/src/executor.rs => evm/execution-errors/src/lib.rs} (90%) rename crates/{interfaces/src/trie/mod.rs => evm/execution-errors/src/trie.rs} (86%) diff --git a/Cargo.lock b/Cargo.lock index bde7478c0eca7..a32af6233007e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6962,8 +6962,9 @@ version = "0.2.0-beta.7" dependencies = [ "futures-util", "parking_lot 0.12.2", - "reth-interfaces", + "reth-execution-errors", "reth-primitives", + "reth-storage-errors", "revm", "revm-primitives", ] @@ -6975,7 +6976,6 @@ dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "reth-ethereum-consensus", "reth-evm", - "reth-interfaces", "reth-primitives", "reth-revm", "revm-primitives", @@ -6998,6 +6998,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-execution-errors" +version = "0.2.0-beta.7" +dependencies = [ + "reth-consensus", + "reth-primitives", + "reth-storage-errors", + 
"thiserror", +] + [[package]] name = "reth-exex" version = "0.2.0-beta.7" @@ -7032,6 +7042,7 @@ version = "0.2.0-beta.7" dependencies = [ "auto_impl", "reth-consensus", + "reth-execution-errors", "reth-fs-util", "reth-network-api", "reth-network-p2p", diff --git a/Cargo.toml b/Cargo.toml index 2fd78aeebd82b..b1ab9e1eb9573 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,6 +17,7 @@ members = [ "crates/ethereum/node", "crates/etl/", "crates/evm/", + "crates/evm/execution-errors", "crates/exex/", "crates/interfaces/", "crates/metrics/", @@ -238,6 +239,7 @@ reth-etl = { path = "crates/etl" } reth-evm = { path = "crates/evm" } reth-evm-ethereum = { path = "crates/ethereum/evm" } reth-evm-optimism = { path = "crates/optimism/evm" } +reth-execution-errors = { path = "crates/evm/execution-errors" } reth-exex = { path = "crates/exex" } reth-fs-util = { path = "crates/fs-util" } reth-interfaces = { path = "crates/interfaces" } diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index c4811b59f481b..88e5967e5d8d4 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -15,7 +15,6 @@ workspace = true reth-evm.workspace = true reth-primitives.workspace = true reth-revm.workspace = true -reth-interfaces.workspace = true reth-ethereum-consensus.workspace = true # Ethereum diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 5addc45ac7d1b..9998927061bcb 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -7,15 +7,11 @@ use crate::{ use reth_ethereum_consensus::validate_block_post_execution; use reth_evm::{ execute::{ - BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, - BlockExecutorProvider, Executor, + BatchBlockExecutionOutput, BatchExecutor, BlockExecutionError, BlockExecutionInput, + BlockExecutionOutput, BlockExecutorProvider, BlockValidationError, Executor, ProviderError, }, ConfigureEvm, }; -use 
reth_interfaces::{ - executor::{BlockExecutionError, BlockValidationError}, - provider::ProviderError, -}; use reth_primitives::{ BlockNumber, BlockWithSenders, ChainSpec, Hardfork, Header, PruneModes, Receipt, Withdrawals, MAINNET, U256, diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index 854dcd95a20f6..183d9f694c553 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -12,10 +12,12 @@ workspace = true [dependencies] # reth +reth-execution-errors.workspace = true reth-primitives.workspace = true revm-primitives.workspace = true +reth-storage-errors.workspace = true + revm.workspace = true -reth-interfaces.workspace = true futures-util.workspace = true parking_lot = { workspace = true, optional = true } diff --git a/crates/evm/execution-errors/Cargo.toml b/crates/evm/execution-errors/Cargo.toml new file mode 100644 index 0000000000000..c04b2b1224fd6 --- /dev/null +++ b/crates/evm/execution-errors/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "reth-execution-errors" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +reth-consensus.workspace = true +reth-primitives.workspace = true +reth-storage-errors.workspace = true + +thiserror.workspace = true diff --git a/crates/interfaces/src/executor.rs b/crates/evm/execution-errors/src/lib.rs similarity index 90% rename from crates/interfaces/src/executor.rs rename to crates/evm/execution-errors/src/lib.rs index 0620d032a9ba0..5013e5387595e 100644 --- a/crates/interfaces/src/executor.rs +++ b/crates/evm/execution-errors/src/lib.rs @@ -1,8 +1,21 @@ -use crate::{provider::ProviderError, trie::StateRootError}; +//! Commonly used error types used when doing block execution. 
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + use reth_consensus::ConsensusError; use reth_primitives::{revm_primitives::EVMError, BlockNumHash, PruneSegmentError, B256}; +use reth_storage_errors::provider::ProviderError; use thiserror::Error; +pub mod trie; +pub use trie::{StateRootError, StorageRootError}; + /// Transaction validation errors #[derive(Error, Debug, Clone, PartialEq, Eq)] pub enum BlockValidationError { diff --git a/crates/interfaces/src/trie/mod.rs b/crates/evm/execution-errors/src/trie.rs similarity index 86% rename from crates/interfaces/src/trie/mod.rs rename to crates/evm/execution-errors/src/trie.rs index d2dba7c272a7e..146f72f481427 100644 --- a/crates/interfaces/src/trie/mod.rs +++ b/crates/evm/execution-errors/src/trie.rs @@ -1,7 +1,9 @@ -use crate::db::DatabaseError; +//! Errors when computing the state root. + +use reth_storage_errors::db::DatabaseError; use thiserror::Error; -/// State root error. +/// State root errors. #[derive(Error, Debug, PartialEq, Eq, Clone)] pub enum StateRootError { /// Internal database error. 
diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs index ae1c95461be8c..7d8320c315c0a 100644 --- a/crates/evm/src/either.rs +++ b/crates/evm/src/either.rs @@ -4,8 +4,9 @@ use crate::execute::{ BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, }; -use reth_interfaces::{executor::BlockExecutionError, provider::ProviderError}; +use reth_execution_errors::BlockExecutionError; use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt}; +use reth_storage_errors::provider::ProviderError; use revm_primitives::db::Database; // re-export Either diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 69351226868ec..6fdd6ebfd0cf6 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -1,10 +1,12 @@ //! Traits for execution. -use reth_interfaces::{executor::BlockExecutionError, provider::ProviderError}; use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt, Receipts, U256}; use revm::db::BundleState; use revm_primitives::db::Database; +pub use reth_execution_errors::{BlockExecutionError, BlockValidationError}; +pub use reth_storage_errors::provider::ProviderError; + /// A general purpose executor trait that executes an input (e.g. block) and produces an output /// (e.g. state changes and receipts). 
/// diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index 8d5b526827407..910f9d08b2e82 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -5,8 +5,9 @@ use crate::execute::{ BlockExecutorProvider, Executor, }; use parking_lot::Mutex; -use reth_interfaces::{executor::BlockExecutionError, provider::ProviderError}; +use reth_execution_errors::BlockExecutionError; use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt}; +use reth_storage_errors::provider::ProviderError; use revm_primitives::db::Database; use std::sync::Arc; diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index 6aec8a4137f81..67891f5c71666 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -11,12 +11,13 @@ repository.workspace = true workspace = true [dependencies] -reth-primitives.workspace = true +reth-consensus.workspace = true +reth-execution-errors.workspace = true reth-fs-util.workspace = true reth-network-api.workspace = true -reth-consensus.workspace = true -reth-storage-errors.workspace = true reth-network-p2p.workspace = true +reth-primitives.workspace = true +reth-storage-errors.workspace = true # misc auto_impl.workspace = true diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index 379b9f141a708..122b857437e25 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -1,12 +1,10 @@ //! Error handling for the blockchain tree -use crate::{ - executor::{BlockExecutionError, BlockValidationError}, - provider::ProviderError, - RethError, -}; +use crate::RethError; use reth_consensus::ConsensusError; +use reth_execution_errors::{BlockExecutionError, BlockValidationError}; use reth_primitives::{BlockHash, BlockNumber, SealedBlock}; +use reth_storage_errors::provider::ProviderError; /// Various error cases that can occur when a block violates tree assumptions. 
#[derive(Debug, Clone, Copy, thiserror::Error, Eq, PartialEq)] diff --git a/crates/interfaces/src/error.rs b/crates/interfaces/src/error.rs index ec3da8ad01b76..f38742ab51fbb 100644 --- a/crates/interfaces/src/error.rs +++ b/crates/interfaces/src/error.rs @@ -1,12 +1,9 @@ -use crate::{ - blockchain_tree::error::{BlockchainTreeError, CanonicalError}, - db::DatabaseError, - executor::BlockExecutionError, - provider::ProviderError, -}; +use crate::blockchain_tree::error::{BlockchainTreeError, CanonicalError}; use reth_consensus::ConsensusError; +use reth_execution_errors::BlockExecutionError; use reth_fs_util::FsPathError; use reth_network_api::NetworkError; +use reth_storage_errors::{db::DatabaseError, provider::ProviderError}; /// Result alias for [`RethError`]. pub type RethResult = Result; diff --git a/crates/interfaces/src/lib.rs b/crates/interfaces/src/lib.rs index bd92f9d8df013..dd96e2358085a 100644 --- a/crates/interfaces/src/lib.rs +++ b/crates/interfaces/src/lib.rs @@ -16,7 +16,7 @@ pub use reth_storage_errors::{db, provider}; /// Block Execution traits. -pub mod executor; +pub use reth_execution_errors as executor; /// Possible errors when interacting with the chain. mod error; @@ -26,7 +26,7 @@ pub use error::{RethError, RethResult}; pub use reth_network_p2p as p2p; /// Trie error -pub mod trie; +pub use reth_execution_errors::trie; /// Syncing related traits. pub mod sync; diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs index de923d44ca4b8..1041f30c8112d 100644 --- a/crates/optimism/evm/src/error.rs +++ b/crates/optimism/evm/src/error.rs @@ -1,6 +1,6 @@ //! Error types for the Optimism EVM module. 
-use reth_interfaces::executor::BlockExecutionError; +use reth_evm::execute::BlockExecutionError; /// Optimism Block Executor Errors #[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 44bef823dc3fb..7df033dc55fe9 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -3,15 +3,11 @@ use crate::{l1::ensure_create2_deployer, OptimismBlockExecutionError, OptimismEvmConfig}; use reth_evm::{ execute::{ - BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, - BlockExecutorProvider, Executor, + BatchBlockExecutionOutput, BatchExecutor, BlockExecutionError, BlockExecutionInput, + BlockExecutionOutput, BlockExecutorProvider, BlockValidationError, Executor, ProviderError, }, ConfigureEvm, }; -use reth_interfaces::{ - executor::{BlockExecutionError, BlockValidationError}, - provider::ProviderError, -}; use reth_optimism_consensus::validate_block_post_execution; use reth_primitives::{ BlockNumber, BlockWithSenders, ChainSpec, Hardfork, Header, PruneModes, Receipt, Receipts, From e0a93193f0acf8c3da7261b72f3c597a049cb7ad Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 24 May 2024 13:17:12 +0200 Subject: [PATCH 616/700] chore(deps): rm reth-interfaces dep from reth-revm (#8387) --- Cargo.lock | 3 ++- crates/revm/Cargo.toml | 3 ++- crates/revm/src/batch.rs | 2 +- crates/revm/src/database.rs | 3 ++- crates/revm/src/state_change.rs | 2 +- crates/revm/src/test_utils.rs | 2 +- 6 files changed, 9 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a32af6233007e..7acc4841539a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7662,9 +7662,10 @@ version = "0.2.0-beta.7" dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "reth-consensus-common", - "reth-interfaces", + "reth-execution-errors", "reth-primitives", "reth-provider", + "reth-storage-errors", "reth-trie", 
"revm", "tracing", diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 87d30ca6f2959..fe93edd506f4b 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -14,8 +14,9 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true -reth-interfaces.workspace = true reth-provider.workspace = true +reth-storage-errors.workspace = true +reth-execution-errors.workspace = true reth-consensus-common.workspace = true reth-trie = { workspace = true, optional = true } diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index 544a74a5c0968..77f747cd9a85c 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -1,7 +1,7 @@ //! Helper for handling execution of multiple blocks. use crate::{precompile::Address, primitives::alloy_primitives::BlockNumber}; -use reth_interfaces::executor::BlockExecutionError; +use reth_execution_errors::BlockExecutionError; use reth_primitives::{ PruneMode, PruneModes, PruneSegmentError, Receipt, Receipts, MINIMUM_PRUNING_DISTANCE, }; diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index 93a22a06834d5..b7cf362fee11d 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -1,5 +1,6 @@ use reth_primitives::{Address, B256, KECCAK_EMPTY, U256}; -use reth_provider::{ProviderError, StateProvider}; +use reth_provider::StateProvider; +use reth_storage_errors::provider::ProviderError; use revm::{ db::DatabaseRef, primitives::{AccountInfo, Bytecode}, diff --git a/crates/revm/src/state_change.rs b/crates/revm/src/state_change.rs index 2799734254730..00f135490e2b1 100644 --- a/crates/revm/src/state_change.rs +++ b/crates/revm/src/state_change.rs @@ -1,5 +1,5 @@ use reth_consensus_common::calc; -use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; +use reth_execution_errors::{BlockExecutionError, BlockValidationError}; use reth_primitives::{ revm::env::fill_tx_env_with_beacon_root_contract_call, Address, ChainSpec, Header, 
Withdrawal, B256, U256, diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 8c4d1894c5d06..d2045c45932b9 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -1,9 +1,9 @@ -use reth_interfaces::provider::ProviderResult; use reth_primitives::{ keccak256, trie::AccountProof, Account, Address, BlockNumber, Bytecode, Bytes, StorageKey, B256, U256, }; use reth_provider::{AccountReader, BlockHashReader, StateProvider, StateRootProvider}; +use reth_storage_errors::provider::ProviderResult; use reth_trie::updates::TrieUpdates; use revm::db::BundleState; use std::collections::HashMap; From 91f288d485f522fd7ef7f15768521a0ed9910c97 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 24 May 2024 13:55:12 +0200 Subject: [PATCH 617/700] chore: remove wire-types optimism feature (#8383) --- crates/net/eth-wire-types/Cargo.toml | 1 - crates/net/eth-wire-types/src/lib.rs | 1 + crates/net/eth-wire-types/src/receipts.rs | 21 +++------------------ 3 files changed, 4 insertions(+), 19 deletions(-) diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index 713744d5554c8..9954dba10fc00 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -55,4 +55,3 @@ arbitrary = [ "dep:proptest", "dep:proptest-derive", ] -optimism = ["reth-primitives/optimism"] diff --git a/crates/net/eth-wire-types/src/lib.rs b/crates/net/eth-wire-types/src/lib.rs index 18c1bd59fddec..a60fa4c8c1e96 100644 --- a/crates/net/eth-wire-types/src/lib.rs +++ b/crates/net/eth-wire-types/src/lib.rs @@ -8,6 +8,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] // TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged #![allow(unknown_lints, non_local_definitions)] +#![allow(clippy::needless_lifetimes)] // side effect of optimism fields #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod status; diff --git a/crates/net/eth-wire-types/src/receipts.rs 
b/crates/net/eth-wire-types/src/receipts.rs index ea2c21dd090ee..3d653b594befb 100644 --- a/crates/net/eth-wire-types/src/receipts.rs +++ b/crates/net/eth-wire-types/src/receipts.rs @@ -41,16 +41,7 @@ mod tests { #[test] fn roundtrip_eip1559() { let receipts = Receipts(vec![vec![ReceiptWithBloom { - receipt: Receipt { - tx_type: TxType::Eip1559, - success: false, - cumulative_gas_used: 0, - logs: vec![], - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - }, + receipt: Receipt { tx_type: TxType::Eip1559, ..Default::default() }, bloom: Default::default(), }]]); @@ -119,10 +110,7 @@ mod tests { ), ], success: false, - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, + ..Default::default() }, bloom: hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(), }, @@ -158,10 +146,7 @@ mod tests { ), ], success: false, - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, + ..Default::default() }, bloom: 
hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(), }, From 4ee75d57ad1a1869418066a237dbebdd9a9749c7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 24 May 2024 14:14:23 +0200 Subject: [PATCH 618/700] chore: move sync to p2p crate (#8389) --- Cargo.lock | 1 - crates/interfaces/Cargo.toml | 2 -- crates/interfaces/src/lib.rs | 2 +- crates/net/p2p/src/lib.rs | 3 +++ crates/{interfaces => net/p2p}/src/sync.rs | 0 5 files changed, 4 insertions(+), 4 deletions(-) rename crates/{interfaces => net/p2p}/src/sync.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 7acc4841539a1..773067f9d4f6d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7040,7 +7040,6 @@ dependencies = [ name = "reth-interfaces" version = "0.2.0-beta.7" dependencies = [ - "auto_impl", "reth-consensus", "reth-execution-errors", "reth-fs-util", diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index 67891f5c71666..1d7483691bf48 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -20,10 +20,8 @@ reth-primitives.workspace = true reth-storage-errors.workspace = true # misc -auto_impl.workspace = true thiserror.workspace = true - [features] test-utils = ["reth-consensus/test-utils", "reth-network-p2p/test-utils"] clap = ["reth-storage-errors/clap"] \ No newline at end of file diff --git a/crates/interfaces/src/lib.rs b/crates/interfaces/src/lib.rs index dd96e2358085a..461413a1e2f1c 100644 --- a/crates/interfaces/src/lib.rs +++ b/crates/interfaces/src/lib.rs @@ -29,7 +29,7 @@ pub use 
reth_network_p2p as p2p; pub use reth_execution_errors::trie; /// Syncing related traits. -pub mod sync; +pub use reth_network_p2p::sync; /// BlockchainTree related traits. pub mod blockchain_tree; diff --git a/crates/net/p2p/src/lib.rs b/crates/net/p2p/src/lib.rs index ed20ab849d8c3..310afc7998138 100644 --- a/crates/net/p2p/src/lib.rs +++ b/crates/net/p2p/src/lib.rs @@ -38,6 +38,9 @@ pub mod error; /// Priority enum for BlockHeader and BlockBody requests pub mod priority; +/// Syncing related traits. +pub mod sync; + /// Common test helpers for mocking out Consensus, Downloaders and Header Clients. #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; diff --git a/crates/interfaces/src/sync.rs b/crates/net/p2p/src/sync.rs similarity index 100% rename from crates/interfaces/src/sync.rs rename to crates/net/p2p/src/sync.rs From 789260416d6209ccd02ebd6d08dedf50f873773f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 24 May 2024 16:41:01 +0200 Subject: [PATCH 619/700] chore: simplify tree result types (#8390) --- Cargo.lock | 2 ++ crates/blockchain-tree/Cargo.toml | 2 ++ crates/blockchain-tree/src/blockchain_tree.rs | 27 +++++++++---------- crates/blockchain-tree/src/chain.rs | 12 ++++----- crates/blockchain-tree/src/externals.rs | 4 +-- crates/blockchain-tree/src/shareable.rs | 6 ++--- 6 files changed, 26 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 773067f9d4f6d..4689de9884be1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6534,6 +6534,7 @@ dependencies = [ "reth-db", "reth-evm", "reth-evm-ethereum", + "reth-execution-errors", "reth-interfaces", "reth-metrics", "reth-network", @@ -6541,6 +6542,7 @@ dependencies = [ "reth-provider", "reth-revm", "reth-stages-api", + "reth-storage-errors", "reth-trie", "reth-trie-parallel", "tokio", diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index fae2b37358909..1b8a53394b636 100644 --- a/crates/blockchain-tree/Cargo.toml +++ 
b/crates/blockchain-tree/Cargo.toml @@ -14,6 +14,8 @@ workspace = true # reth reth-primitives.workspace = true reth-interfaces.workspace = true +reth-storage-errors.workspace = true +reth-execution-errors.workspace = true reth-db.workspace = true reth-evm.workspace = true reth-revm.workspace = true diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index e6694447bbca9..c031a5749bfd6 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -8,14 +8,10 @@ use crate::{ use reth_consensus::{Consensus, ConsensusError}; use reth_db::database::Database; use reth_evm::execute::BlockExecutorProvider; -use reth_interfaces::{ - blockchain_tree::{ - error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, - BlockAttachment, BlockStatus, BlockValidationKind, CanonicalOutcome, InsertPayloadOk, - }, - executor::{BlockExecutionError, BlockValidationError}, - provider::RootMismatch, - RethResult, +use reth_execution_errors::{BlockExecutionError, BlockValidationError}; +use reth_interfaces::blockchain_tree::{ + error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, + BlockAttachment, BlockStatus, BlockValidationKind, CanonicalOutcome, InsertPayloadOk, }; use reth_primitives::{ BlockHash, BlockNumHash, BlockNumber, ForkBlock, GotExpected, Hardfork, PruneModes, Receipt, @@ -29,6 +25,7 @@ use reth_provider::{ StaticFileProviderFactory, }; use reth_stages_api::{MetricEvent, MetricEventsSender}; +use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use std::{ collections::{btree_map::Entry, BTreeMap, HashSet}, sync::Arc, @@ -120,7 +117,7 @@ where externals: TreeExternals, config: BlockchainTreeConfig, prune_modes: Option, - ) -> RethResult { + ) -> ProviderResult { let max_reorg_depth = config.max_reorg_depth() as usize; // The size of the broadcast is twice the maximum reorg depth, because at maximum 
reorg // depth at least N blocks must be sent at once. @@ -843,7 +840,7 @@ where pub fn connect_buffered_blocks_to_canonical_hashes_and_finalize( &mut self, last_finalized_block: BlockNumber, - ) -> RethResult<()> { + ) -> ProviderResult<()> { self.finalize_block(last_finalized_block); let last_canonical_hashes = self.update_block_hashes()?; @@ -855,7 +852,7 @@ where /// Update all block hashes. iterate over present and new list of canonical hashes and compare /// them. Remove all mismatches, disconnect them and removes all chains. - pub fn update_block_hashes(&mut self) -> RethResult> { + pub fn update_block_hashes(&mut self) -> ProviderResult> { let last_canonical_hashes = self .externals .fetch_latest_canonical_hashes(self.config.num_of_canonical_hashes() as usize)?; @@ -878,7 +875,7 @@ where /// blocks before the tip. pub fn update_block_hashes_and_clear_buffered( &mut self, - ) -> RethResult> { + ) -> ProviderResult> { let chain = self.update_block_hashes()?; if let Some((block, _)) = chain.last_key_value() { @@ -893,7 +890,7 @@ where /// /// `N` is the maximum of `max_reorg_depth` and the number of block hashes needed to satisfy the /// `BLOCKHASH` opcode in the EVM. 
- pub fn connect_buffered_blocks_to_canonical_hashes(&mut self) -> RethResult<()> { + pub fn connect_buffered_blocks_to_canonical_hashes(&mut self) -> ProviderResult<()> { let last_canonical_hashes = self .externals .fetch_latest_canonical_hashes(self.config.num_of_canonical_hashes() as usize)?; @@ -905,7 +902,7 @@ where fn connect_buffered_blocks_to_hashes( &mut self, hashes: impl IntoIterator>, - ) -> RethResult<()> { + ) -> ProviderResult<()> { // check unconnected block buffer for children of the canonical hashes for added_block in hashes.into_iter() { self.try_connect_buffered_blocks(added_block.into()) @@ -1264,7 +1261,7 @@ where } /// Unwind tables and put it inside state - pub fn unwind(&mut self, unwind_to: BlockNumber) -> RethResult<()> { + pub fn unwind(&mut self, unwind_to: BlockNumber) -> Result<(), CanonicalError> { // nothing to be done if unwind_to is higher then the tip if self.block_indices().canonical_tip().number <= unwind_to { return Ok(()) diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 9b3c52cf82cb8..e73b1757666ea 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -8,12 +8,10 @@ use crate::BundleStateDataRef; use reth_consensus::{Consensus, ConsensusError}; use reth_db::database::Database; use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; -use reth_interfaces::{ - blockchain_tree::{ - error::{BlockchainTreeError, InsertBlockErrorKind}, - BlockAttachment, BlockValidationKind, - }, - RethResult, +use reth_execution_errors::BlockExecutionError; +use reth_interfaces::blockchain_tree::{ + error::{BlockchainTreeError, InsertBlockErrorKind}, + BlockAttachment, BlockValidationKind, }; use reth_primitives::{ BlockHash, BlockNumber, ForkBlock, GotExpected, Receipts, SealedBlockWithSenders, SealedHeader, @@ -176,7 +174,7 @@ impl AppendableChain { externals: &TreeExternals, block_attachment: BlockAttachment, block_validation_kind: 
BlockValidationKind, - ) -> RethResult<(BundleStateWithReceipts, Option)> + ) -> Result<(BundleStateWithReceipts, Option), BlockExecutionError> where BSDP: FullBundleStateDataProvider, DB: Database + Clone, diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index a311281c94253..439b9d4a9b0b9 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -4,9 +4,9 @@ use reth_consensus::Consensus; use reth_db::{ cursor::DbCursorRO, database::Database, static_file::HeaderMask, tables, transaction::DbTx, }; -use reth_interfaces::RethResult; use reth_primitives::{BlockHash, BlockNumber, StaticFileSegment}; use reth_provider::{ProviderFactory, StaticFileProviderFactory, StatsReader}; +use reth_storage_errors::provider::ProviderResult; use std::{collections::BTreeMap, sync::Arc}; /// A container for external components. @@ -46,7 +46,7 @@ impl TreeExternals { pub(crate) fn fetch_latest_canonical_hashes( &self, num_hashes: usize, - ) -> RethResult> { + ) -> ProviderResult> { // Fetch the latest canonical hashes from the database let mut hashes = self .provider_factory diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 66f76b0916f1b..624dfd0e3afba 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -80,7 +80,7 @@ where let res = tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(last_finalized_block); tree.update_chains_metrics(); - res + Ok(res?) } fn update_block_hashes_and_clear_buffered( @@ -89,7 +89,7 @@ where let mut tree = self.tree.write(); let res = tree.update_block_hashes_and_clear_buffered(); tree.update_chains_metrics(); - res + Ok(res?) 
} fn connect_buffered_blocks_to_canonical_hashes(&self) -> RethResult<()> { @@ -97,7 +97,7 @@ where let mut tree = self.tree.write(); let res = tree.connect_buffered_blocks_to_canonical_hashes(); tree.update_chains_metrics(); - res + Ok(res?) } fn make_canonical(&self, block_hash: BlockHash) -> Result { From b06433c9e2507b29b47484113469d222a806525c Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 24 May 2024 18:29:21 +0200 Subject: [PATCH 620/700] chore(trie): account specific hashed storage cursor (#8377) --- crates/trie/src/hashed_cursor/default.rs | 45 ++++++++--- crates/trie/src/hashed_cursor/mod.rs | 14 ++-- crates/trie/src/hashed_cursor/post_state.rs | 88 +++++++++------------ crates/trie/src/node_iter.rs | 8 +- crates/trie/src/proof.rs | 8 +- crates/trie/src/trie.rs | 8 +- 6 files changed, 87 insertions(+), 84 deletions(-) diff --git a/crates/trie/src/hashed_cursor/default.rs b/crates/trie/src/hashed_cursor/default.rs index 298c5ce2e7565..1e5068870d7d6 100644 --- a/crates/trie/src/hashed_cursor/default.rs +++ b/crates/trie/src/hashed_cursor/default.rs @@ -8,14 +8,21 @@ use reth_primitives::{Account, StorageEntry, B256}; impl<'a, TX: DbTx> HashedCursorFactory for &'a TX { type AccountCursor = ::Cursor; - type StorageCursor = ::DupCursor; + type StorageCursor = + DatabaseHashedStorageCursor<::DupCursor>; fn hashed_account_cursor(&self) -> Result { self.cursor_read::() } - fn hashed_storage_cursor(&self) -> Result { - self.cursor_dup_read::() + fn hashed_storage_cursor( + &self, + hashed_address: B256, + ) -> Result { + Ok(DatabaseHashedStorageCursor::new( + self.cursor_dup_read::()?, + hashed_address, + )) } } @@ -32,23 +39,35 @@ where } } -impl HashedStorageCursor for C +/// The structure wrapping a database cursor for hashed storage and +/// a target hashed address. 
Implements [HashedStorageCursor] for iterating +/// hashed state +#[derive(Debug)] +pub struct DatabaseHashedStorageCursor { + cursor: C, + hashed_address: B256, +} + +impl DatabaseHashedStorageCursor { + /// Create new [DatabaseHashedStorageCursor]. + pub fn new(cursor: C, hashed_address: B256) -> Self { + Self { cursor, hashed_address } + } +} + +impl HashedStorageCursor for DatabaseHashedStorageCursor where C: DbCursorRO + DbDupCursorRO, { - fn is_storage_empty(&mut self, key: B256) -> Result { - Ok(self.seek_exact(key)?.is_none()) + fn is_storage_empty(&mut self) -> Result { + Ok(self.cursor.seek_exact(self.hashed_address)?.is_none()) } - fn seek( - &mut self, - key: B256, - subkey: B256, - ) -> Result, reth_db::DatabaseError> { - self.seek_by_key_subkey(key, subkey) + fn seek(&mut self, subkey: B256) -> Result, reth_db::DatabaseError> { + self.cursor.seek_by_key_subkey(self.hashed_address, subkey) } fn next(&mut self) -> Result, reth_db::DatabaseError> { - self.next_dup_val() + self.cursor.next_dup_val() } } diff --git a/crates/trie/src/hashed_cursor/mod.rs b/crates/trie/src/hashed_cursor/mod.rs index 72caee26aaa29..916dd6f424174 100644 --- a/crates/trie/src/hashed_cursor/mod.rs +++ b/crates/trie/src/hashed_cursor/mod.rs @@ -2,6 +2,7 @@ use reth_primitives::{Account, StorageEntry, B256}; /// Default implementation of the hashed state cursor traits. mod default; +pub use default::DatabaseHashedStorageCursor; /// Implementation of hashed state cursor traits for the post state. mod post_state; @@ -18,7 +19,10 @@ pub trait HashedCursorFactory { fn hashed_account_cursor(&self) -> Result; /// Returns a cursor for iterating over all hashed storage entries in the state. - fn hashed_storage_cursor(&self) -> Result; + fn hashed_storage_cursor( + &self, + hashed_address: B256, + ) -> Result; } /// The cursor for iterating over hashed accounts. @@ -33,14 +37,10 @@ pub trait HashedAccountCursor { /// The cursor for iterating over hashed storage entries. 
pub trait HashedStorageCursor { /// Returns `true` if there are no entries for a given key. - fn is_storage_empty(&mut self, key: B256) -> Result; + fn is_storage_empty(&mut self) -> Result; /// Seek an entry greater or equal to the given key/subkey and position the cursor there. - fn seek( - &mut self, - key: B256, - subkey: B256, - ) -> Result, reth_db::DatabaseError>; + fn seek(&mut self, subkey: B256) -> Result, reth_db::DatabaseError>; /// Move the cursor to the next entry and return it. fn next(&mut self) -> Result, reth_db::DatabaseError>; diff --git a/crates/trie/src/hashed_cursor/post_state.rs b/crates/trie/src/hashed_cursor/post_state.rs index be623da741d36..379b08c2cb6b0 100644 --- a/crates/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/src/hashed_cursor/post_state.rs @@ -25,9 +25,12 @@ impl<'a, CF: HashedCursorFactory> HashedCursorFactory for HashedPostStateCursorF Ok(HashedPostStateAccountCursor::new(cursor, self.post_state)) } - fn hashed_storage_cursor(&self) -> Result { - let cursor = self.cursor_factory.hashed_storage_cursor()?; - Ok(HashedPostStateStorageCursor::new(cursor, self.post_state)) + fn hashed_storage_cursor( + &self, + hashed_address: B256, + ) -> Result { + let cursor = self.cursor_factory.hashed_storage_cursor(hashed_address)?; + Ok(HashedPostStateStorageCursor::new(cursor, self.post_state, hashed_address)) } } @@ -179,10 +182,10 @@ pub struct HashedPostStateStorageCursor<'b, C> { cursor: C, /// The reference to the post state. post_state: &'b HashedPostStateSorted, + /// The current hashed account key. + hashed_address: B256, /// The post state index where the cursor is currently at. post_state_storage_index: usize, - /// The current hashed account key. - account: Option, /// The last slot that has been returned by the cursor. /// De facto, this is the cursor's position for the given account key. 
last_slot: Option, @@ -190,14 +193,14 @@ pub struct HashedPostStateStorageCursor<'b, C> { impl<'b, C> HashedPostStateStorageCursor<'b, C> { /// Create new instance of [HashedPostStateStorageCursor]. - pub fn new(cursor: C, post_state: &'b HashedPostStateSorted) -> Self { - Self { cursor, post_state, account: None, last_slot: None, post_state_storage_index: 0 } + pub fn new(cursor: C, post_state: &'b HashedPostStateSorted, hashed_address: B256) -> Self { + Self { cursor, post_state, hashed_address, last_slot: None, post_state_storage_index: 0 } } /// Returns `true` if the storage for the given /// The database is not checked since it already has no wiped storage entries. - fn is_db_storage_wiped(&self, account: &B256) -> bool { - match self.post_state.storages.get(account) { + fn is_db_storage_wiped(&self) -> bool { + match self.post_state.storages.get(&self.hashed_address) { Some(storage) => storage.wiped, None => false, } @@ -205,10 +208,10 @@ impl<'b, C> HashedPostStateStorageCursor<'b, C> { /// Check if the slot was zeroed out in the post state. /// The database is not checked since it already has no zero-valued slots. - fn is_slot_zero_valued(&self, account: &B256, slot: &B256) -> bool { + fn is_slot_zero_valued(&self, slot: &B256) -> bool { self.post_state .storages - .get(account) + .get(&self.hashed_address) .map(|storage| storage.zero_valued_slots.contains(slot)) .unwrap_or_default() } @@ -247,34 +250,24 @@ where /// /// This function should be called before attempting to call [HashedStorageCursor::seek] or /// [HashedStorageCursor::next]. 
- fn is_storage_empty(&mut self, key: B256) -> Result { - let is_empty = match self.post_state.storages.get(&key) { + fn is_storage_empty(&mut self) -> Result { + let is_empty = match self.post_state.storages.get(&self.hashed_address) { Some(storage) => { // If the storage has been wiped at any point storage.wiped && // and the current storage does not contain any non-zero values storage.non_zero_valued_slots.is_empty() } - None => self.cursor.is_storage_empty(key)?, + None => self.cursor.is_storage_empty()?, }; Ok(is_empty) } /// Seek the next account storage entry for a given hashed key pair. - fn seek( - &mut self, - account: B256, - subkey: B256, - ) -> Result, reth_db::DatabaseError> { - if self.account.map_or(true, |acc| acc != account) { - self.account = Some(account); - self.last_slot = None; - self.post_state_storage_index = 0; - } - + fn seek(&mut self, subkey: B256) -> Result, reth_db::DatabaseError> { // Attempt to find the account's storage in post state. let mut post_state_entry = None; - if let Some(storage) = self.post_state.storages.get(&account) { + if let Some(storage) = self.post_state.storages.get(&self.hashed_address) { post_state_entry = storage.non_zero_valued_slots.get(self.post_state_storage_index); while post_state_entry.map(|(slot, _)| slot < &subkey).unwrap_or_default() { @@ -293,14 +286,14 @@ where } // It's not an exact match, reposition to the first greater or equal account. - let db_entry = if self.is_db_storage_wiped(&account) { + let db_entry = if self.is_db_storage_wiped() { None } else { - let mut db_entry = self.cursor.seek(account, subkey)?; + let mut db_entry = self.cursor.seek(subkey)?; while db_entry .as_ref() - .map(|entry| self.is_slot_zero_valued(&account, &entry.key)) + .map(|entry| self.is_slot_zero_valued(&entry.key)) .unwrap_or_default() { db_entry = self.cursor.next()?; @@ -322,25 +315,21 @@ where /// If the account key is not set. [HashedStorageCursor::seek] must be called first in order to /// position the cursor. 
fn next(&mut self) -> Result, reth_db::DatabaseError> { - let account = self.account.expect("`seek` must be called first"); - let last_slot = match self.last_slot.as_ref() { Some(slot) => slot, None => return Ok(None), // no previous entry was found }; - let db_entry = if self.is_db_storage_wiped(&account) { + let db_entry = if self.is_db_storage_wiped() { None } else { // If post state was given precedence, move the cursor forward. - let mut db_entry = self.cursor.seek(account, *last_slot)?; + let mut db_entry = self.cursor.seek(*last_slot)?; // If the entry was already returned or is zero-values, move to the next. while db_entry .as_ref() - .map(|entry| { - &entry.key == last_slot || self.is_slot_zero_valued(&account, &entry.key) - }) + .map(|entry| &entry.key == last_slot || self.is_slot_zero_valued(&entry.key)) .unwrap_or_default() { db_entry = self.cursor.next()?; @@ -351,7 +340,7 @@ where // Attempt to find the account's storage in post state. let mut post_state_entry = None; - if let Some(storage) = self.post_state.storages.get(&account) { + if let Some(storage) = self.post_state.storages.get(&self.hashed_address) { post_state_entry = storage.non_zero_valued_slots.get(self.post_state_storage_index); while post_state_entry.map(|(slot, _)| slot <= last_slot).unwrap_or_default() { self.post_state_storage_index += 1; @@ -397,12 +386,11 @@ mod tests { factory: &impl HashedCursorFactory, expected: impl Iterator)>, ) { - let mut cursor = factory.hashed_storage_cursor().unwrap(); - for (account, storage) in expected { + let mut cursor = factory.hashed_storage_cursor(account).unwrap(); let mut expected_storage = storage.into_iter(); - let first_storage = cursor.seek(account, B256::default()).unwrap(); + let first_storage = cursor.seek(B256::default()).unwrap(); assert_eq!(first_storage.map(|e| (e.key, e.value)), expected_storage.next()); for expected_entry in expected_storage { @@ -577,8 +565,8 @@ mod tests { let sorted = HashedPostState::default().into_sorted(); let 
tx = db.tx().unwrap(); let factory = HashedPostStateCursorFactory::new(&tx, &sorted); - let mut cursor = factory.hashed_storage_cursor().unwrap(); - assert!(cursor.is_storage_empty(address).unwrap()); + let mut cursor = factory.hashed_storage_cursor(address).unwrap(); + assert!(cursor.is_storage_empty().unwrap()); } let db_storage = @@ -600,8 +588,8 @@ mod tests { let sorted = HashedPostState::default().into_sorted(); let tx = db.tx().unwrap(); let factory = HashedPostStateCursorFactory::new(&tx, &sorted); - let mut cursor = factory.hashed_storage_cursor().unwrap(); - assert!(!cursor.is_storage_empty(address).unwrap()); + let mut cursor = factory.hashed_storage_cursor(address).unwrap(); + assert!(!cursor.is_storage_empty().unwrap()); } // wiped storage, must be empty @@ -615,8 +603,8 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); let factory = HashedPostStateCursorFactory::new(&tx, &sorted); - let mut cursor = factory.hashed_storage_cursor().unwrap(); - assert!(cursor.is_storage_empty(address).unwrap()); + let mut cursor = factory.hashed_storage_cursor(address).unwrap(); + assert!(cursor.is_storage_empty().unwrap()); } // wiped storage, but post state has zero-value entries @@ -631,8 +619,8 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); let factory = HashedPostStateCursorFactory::new(&tx, &sorted); - let mut cursor = factory.hashed_storage_cursor().unwrap(); - assert!(cursor.is_storage_empty(address).unwrap()); + let mut cursor = factory.hashed_storage_cursor(address).unwrap(); + assert!(cursor.is_storage_empty().unwrap()); } // wiped storage, but post state has non-zero entries @@ -647,8 +635,8 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); let factory = HashedPostStateCursorFactory::new(&tx, &sorted); - let mut cursor = factory.hashed_storage_cursor().unwrap(); - assert!(!cursor.is_storage_empty(address).unwrap()); + let mut cursor = 
factory.hashed_storage_cursor(address).unwrap(); + assert!(!cursor.is_storage_empty().unwrap()); } } diff --git a/crates/trie/src/node_iter.rs b/crates/trie/src/node_iter.rs index 742896140bc0b..3a621a38b1f59 100644 --- a/crates/trie/src/node_iter.rs +++ b/crates/trie/src/node_iter.rs @@ -166,8 +166,6 @@ pub struct StorageNodeIter { pub walker: TrieWalker, /// The cursor for the hashed storage entries. pub hashed_storage_cursor: H, - /// The hashed address this storage trie belongs to. - hashed_address: B256, /// Current hashed storage entry. current_hashed_entry: Option, @@ -177,11 +175,10 @@ pub struct StorageNodeIter { impl StorageNodeIter { /// Creates a new instance of StorageNodeIter. - pub fn new(walker: TrieWalker, hashed_storage_cursor: H, hashed_address: B256) -> Self { + pub fn new(walker: TrieWalker, hashed_storage_cursor: H) -> Self { Self { walker, hashed_storage_cursor, - hashed_address, current_walker_key_checked: false, current_hashed_entry: None, } @@ -238,8 +235,7 @@ where // Attempt to get the next unprocessed key from the walker. if let Some(seek_key) = self.walker.next_unprocessed_key() { // Seek and update the current hashed entry based on the new seek key. - self.current_hashed_entry = - self.hashed_storage_cursor.seek(self.hashed_address, seek_key)?; + self.current_hashed_entry = self.hashed_storage_cursor.seek(seek_key)?; self.walker.advance()?; } else { // No more keys to process, break the loop. 
diff --git a/crates/trie/src/proof.rs b/crates/trie/src/proof.rs index 80f0c552e3892..094372a851fa2 100644 --- a/crates/trie/src/proof.rs +++ b/crates/trie/src/proof.rs @@ -109,12 +109,13 @@ where hashed_address: B256, slots: &[B256], ) -> Result<(B256, Vec), StorageRootError> { - let mut hashed_storage_cursor = self.hashed_cursor_factory.hashed_storage_cursor()?; + let mut hashed_storage_cursor = + self.hashed_cursor_factory.hashed_storage_cursor(hashed_address)?; let mut proofs = slots.iter().copied().map(StorageProof::new).collect::>(); // short circuit on empty storage - if hashed_storage_cursor.is_storage_empty(hashed_address)? { + if hashed_storage_cursor.is_storage_empty()? { return Ok((EMPTY_ROOT_HASH, proofs)) } @@ -128,8 +129,7 @@ where let retainer = ProofRetainer::from_iter(target_nibbles); let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); - let mut storage_node_iter = - StorageNodeIter::new(walker, hashed_storage_cursor, hashed_address); + let mut storage_node_iter = StorageNodeIter::new(walker, hashed_storage_cursor); while let Some(node) = storage_node_iter.try_next()? { match node { StorageNode::Branch(node) => { diff --git a/crates/trie/src/trie.rs b/crates/trie/src/trie.rs index faf693f3398db..55ee1bebdbed6 100644 --- a/crates/trie/src/trie.rs +++ b/crates/trie/src/trie.rs @@ -483,10 +483,11 @@ where ) -> Result<(B256, usize, TrieUpdates), StorageRootError> { trace!(target: "trie::storage_root", hashed_address = ?self.hashed_address, "calculating storage root"); - let mut hashed_storage_cursor = self.hashed_cursor_factory.hashed_storage_cursor()?; + let mut hashed_storage_cursor = + self.hashed_cursor_factory.hashed_storage_cursor(self.hashed_address)?; // short circuit on empty storage - if hashed_storage_cursor.is_storage_empty(self.hashed_address)? { + if hashed_storage_cursor.is_storage_empty()? 
{ return Ok(( EMPTY_ROOT_HASH, 0, @@ -500,8 +501,7 @@ where let mut hash_builder = HashBuilder::default().with_updates(retain_updates); - let mut storage_node_iter = - StorageNodeIter::new(walker, hashed_storage_cursor, self.hashed_address); + let mut storage_node_iter = StorageNodeIter::new(walker, hashed_storage_cursor); while let Some(node) = storage_node_iter.try_next()? { match node { StorageNode::Branch(node) => { From 59622dbd481792e24629ba2629af1c3840f954e6 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 24 May 2024 13:32:34 -0400 Subject: [PATCH 621/700] chore: add 0BSD to deny.toml (#8391) --- deny.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/deny.toml b/deny.toml index 38994d1974557..99b2c8d4f97f5 100644 --- a/deny.toml +++ b/deny.toml @@ -38,6 +38,7 @@ allow = [ "Apache-2.0 WITH LLVM-exception", "BSD-2-Clause", "BSD-3-Clause", + "0BSD", "ISC", "Unicode-DFS-2016", "Unlicense", From a8e5eb6186bcd5ea9e63044ebf19e87a7c531c3e Mon Sep 17 00:00:00 2001 From: guha-rahul <52607971+guha-rahul@users.noreply.github.com> Date: Fri, 24 May 2024 23:08:06 +0530 Subject: [PATCH 622/700] feat: implement clientVersionV1 in engine API (#8016) --- Cargo.lock | 1 + crates/node-core/src/version.rs | 8 +++- crates/node/builder/Cargo.toml | 2 +- crates/node/builder/src/launch/mod.rs | 9 ++++ crates/rpc/rpc-api/src/engine.rs | 21 +++++++- crates/rpc/rpc-builder/tests/it/utils.rs | 9 ++++ crates/rpc/rpc-engine-api/src/engine_api.rs | 53 +++++++++++++++++++-- 7 files changed, 96 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4689de9884be1..a1a91c6b50115 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7325,6 +7325,7 @@ dependencies = [ "reth-rpc", "reth-rpc-engine-api", "reth-rpc-layer", + "reth-rpc-types", "reth-stages", "reth-static-file", "reth-tasks", diff --git a/crates/node-core/src/version.rs b/crates/node-core/src/version.rs index 868fa933ea4c8..79190531b300f 100644 --- 
a/crates/node-core/src/version.rs +++ b/crates/node-core/src/version.rs @@ -1,6 +1,12 @@ //! Version information for reth. - use reth_db::models::client_version::ClientVersion; +use reth_rpc_types::engine::ClientCode; + +/// The client code for Reth +pub const CLIENT_CODE: ClientCode = ClientCode::RH; + +/// The human readable name of the client +pub const NAME_CLIENT: &str = "Reth"; /// The latest version from Cargo.toml. pub const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION"); diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index e36ac2e2c396a..77f2cd2e8856a 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -39,7 +39,7 @@ reth-config.workspace = true reth-downloaders.workspace = true reth-node-events.workspace = true reth-consensus.workspace = true - +reth-rpc-types.workspace = true ## async futures.workspace = true tokio = { workspace = true, features = [ diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 4987586bc9f3f..fec043c7fa0ea 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -25,11 +25,13 @@ use reth_node_core::{ dirs::{ChainPath, DataDirPath}, engine::EngineMessageStreamExt, exit::NodeExitFuture, + version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; use reth_primitives::format_ether; use reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions}; use reth_rpc_engine_api::EngineApi; +use reth_rpc_types::engine::ClientVersionV1; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::TransactionPool; @@ -407,12 +409,19 @@ where ), ); + let client = ClientVersionV1 { + code: CLIENT_CODE, + name: NAME_CLIENT.to_string(), + version: CARGO_PKG_VERSION.to_string(), + commit: VERGEN_GIT_SHA.to_string(), + }; let engine_api = EngineApi::new( blockchain_db.clone(), 
ctx.chain_spec(), beacon_engine_handle, node_adapter.components.payload_builder().clone().into(), Box::new(ctx.task_executor().clone()), + client, ); info!(target: "reth::cli", "Engine API handler initialized"); diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index d320c74601d08..be20a4fbe088d 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -8,8 +8,9 @@ use reth_engine_primitives::EngineTypes; use reth_primitives::{Address, BlockHash, BlockId, BlockNumberOrTag, Bytes, B256, U256, U64}; use reth_rpc_types::{ engine::{ - ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, - ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, + ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadV1, + ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, + TransitionConfiguration, }, state::StateOverride, BlockOverrides, Filter, Log, RichBlock, SyncStatus, TransactionRequest, @@ -154,6 +155,22 @@ pub trait EngineApi { transition_configuration: TransitionConfiguration, ) -> RpcResult; + /// This function will return the ClientVersionV1 object. + /// See also: + /// make fmt + /// + /// + /// - When connected to a single execution client, the consensus client **MUST** receive an + /// array with a single `ClientVersionV1` object. + /// - When connected to multiple execution clients via a multiplexer, the multiplexer **MUST** + /// concatenate the responses from each execution client into a single, + /// flat array before returning the response to the consensus client. 
+ #[method(name = "getClientVersionV1")] + async fn get_client_version_v1( + &self, + client_version: ClientVersionV1, + ) -> RpcResult>; + /// See also #[method(name = "exchangeCapabilities")] async fn exchange_capabilities(&self, capabilities: Vec) -> RpcResult>; diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index dd58bf2de299d..819a3a863acbd 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -12,6 +12,7 @@ use reth_rpc_builder::{ }; use reth_rpc_engine_api::EngineApi; use reth_rpc_layer::JwtSecret; +use reth_rpc_types::engine::{ClientCode, ClientVersionV1}; use reth_tasks::TokioTaskExecutor; use reth_transaction_pool::test_utils::{TestPool, TestPoolBuilder}; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; @@ -28,12 +29,20 @@ pub async fn launch_auth(secret: JwtSecret) -> AuthServerHandle { let (tx, _rx) = unbounded_channel(); let beacon_engine_handle = BeaconConsensusEngineHandle::::new(tx, Default::default()); + let client = ClientVersionV1 { + code: ClientCode::RH, + name: "Reth".to_string(), + version: "v0.2.0-beta.5".to_string(), + commit: "defa64b2".to_string(), + }; + let engine_api = EngineApi::new( NoopProvider::default(), MAINNET.clone(), beacon_engine_handle, spawn_test_payload_service().into(), Box::::default(), + client, ); let module = AuthRpcModule::new(engine_api); module.start_server(config).await.unwrap() diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index a2275281e63ae..8d51884d59887 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -11,9 +11,10 @@ use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Hard use reth_provider::{BlockReader, EvmEnvProvider, HeaderProvider, StateProviderFactory}; use reth_rpc_api::EngineApiServer; use reth_rpc_types::engine::{ - CancunPayloadFields, ExecutionPayload, 
ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, - ExecutionPayloadV1, ExecutionPayloadV3, ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, - PayloadId, PayloadStatus, TransitionConfiguration, CAPABILITIES, + CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, + ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, ExecutionPayloadV4, + ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, + CAPABILITIES, }; use reth_rpc_types_compat::engine::payload::{ convert_payload_input_v2_to_payload, convert_to_payload_body_v1, @@ -48,6 +49,8 @@ struct EngineApiInner { task_spawner: Box, /// The latency and response type metrics for engine api calls metrics: EngineApiMetrics, + /// Identification of the execution client used by the consensus client + client: ClientVersionV1, } impl EngineApi @@ -62,6 +65,7 @@ where beacon_consensus: BeaconConsensusEngineHandle, payload_store: PayloadStore, task_spawner: Box, + client: ClientVersionV1, ) -> Self { let inner = Arc::new(EngineApiInner { provider, @@ -70,10 +74,18 @@ where payload_store, task_spawner, metrics: EngineApiMetrics::default(), + client, }); Self { inner } } + /// Fetches the client version. + async fn get_client_version_v1( + &self, + _client: ClientVersionV1, + ) -> EngineApiResult> { + Ok(vec![self.inner.client.clone()]) + } /// Fetches the attributes for the payload with the given id. async fn get_payload_attributes( &self, @@ -749,6 +761,18 @@ where self.inner.metrics.latency.exchange_transition_configuration.record(start.elapsed()); Ok(res?) } + /// Handler for `engine_getClientVersionV1` + /// + /// See also + async fn get_client_version_v1( + &self, + client: ClientVersionV1, + ) -> RpcResult> { + trace!(target: "rpc::engine", "Serving engine_getClientVersionV1"); + let res = EngineApi::get_client_version_v1(self, client).await; + + Ok(res?) 
+ } /// Handler for `engine_exchangeCapabilitiesV1` /// See also @@ -773,9 +797,11 @@ mod tests { use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage}; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_interfaces::test_utils::generators::random_block; + use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::{SealedBlock, B256, MAINNET}; use reth_provider::test_utils::MockEthProvider; + use reth_rpc_types::engine::{ClientCode, ClientVersionV1}; use reth_rpc_types_compat::engine::payload::execution_payload_from_sealed_block; use reth_tasks::TokioTaskExecutor; use reth_tokio_util::EventSender; @@ -783,6 +809,13 @@ mod tests { fn setup_engine_api() -> (EngineApiTestHandle, EngineApi, EthEngineTypes>) { + let client = ClientVersionV1 { + code: ClientCode::RH, + name: "Reth".to_string(), + version: "v0.2.0-beta.5".to_string(), + commit: "defa64b2".to_string(), + }; + let chain_spec: Arc = MAINNET.clone(); let provider = Arc::new(MockEthProvider::default()); let payload_store = spawn_test_payload_service(); @@ -795,11 +828,25 @@ mod tests { BeaconConsensusEngineHandle::new(to_engine, event_sender), payload_store.into(), task_executor, + client, ); let handle = EngineApiTestHandle { chain_spec, provider, from_api: engine_rx }; (handle, api) } + #[tokio::test] + async fn engine_client_version_v1() { + let client = ClientVersionV1 { + code: ClientCode::RH, + name: "Reth".to_string(), + version: "v0.2.0-beta.5".to_string(), + commit: "defa64b2".to_string(), + }; + let (_, api) = setup_engine_api(); + let res = api.get_client_version_v1(client.clone()).await; + assert_eq!(res.unwrap(), vec![client]); + } + struct EngineApiTestHandle { chain_spec: Arc, provider: Arc, From 46fd454d80b4fe544cc471007b7048b33f695e2c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 25 May 2024 10:58:42 +0200 Subject: [PATCH 623/700] chore: reorder struct account hashing struct defs (#8392) --- 
crates/stages/src/stages/hashing_account.rs | 62 ++++++++++----------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/crates/stages/src/stages/hashing_account.rs b/crates/stages/src/stages/hashing_account.rs index db0e3ca625828..6ae7fc5221b94 100644 --- a/crates/stages/src/stages/hashing_account.rs +++ b/crates/stages/src/stages/hashing_account.rs @@ -53,37 +53,6 @@ impl AccountHashingStage { } } -impl Default for AccountHashingStage { - fn default() -> Self { - Self { - clean_threshold: 500_000, - commit_threshold: 100_000, - etl_config: EtlConfig::default(), - } - } -} - -// TODO: Rewrite this -/// `SeedOpts` provides configuration parameters for calling `AccountHashingStage::seed` -/// in unit tests or benchmarks to generate an initial database state for running the -/// stage. -/// -/// In order to check the "full hashing" mode of the stage you want to generate more -/// transitions than `AccountHashingStage.clean_threshold`. This requires: -/// 1. Creating enough blocks so there's enough transactions to generate the required transition -/// keys in the `BlockTransitionIndex` (which depends on the `TxTransitionIndex` internally) -/// 2. Setting `blocks.len() > clean_threshold` so that there's enough diffs to actually take the -/// 2nd codepath -#[derive(Clone, Debug)] -pub struct SeedOpts { - /// The range of blocks to be generated - pub blocks: RangeInclusive, - /// The number of accounts to be generated - pub accounts: usize, - /// The range of transactions to be generated per block. 
- pub txs: Range, -} - #[cfg(any(test, feature = "test-utils"))] impl AccountHashingStage { /// Initializes the `PlainAccountState` table with `num_accounts` having some random state @@ -144,6 +113,16 @@ impl AccountHashingStage { } } +impl Default for AccountHashingStage { + fn default() -> Self { + Self { + clean_threshold: 500_000, + commit_threshold: 100_000, + etl_config: EtlConfig::default(), + } + } +} + impl Stage for AccountHashingStage { /// Return the id of the stage fn id(&self) -> StageId { @@ -280,6 +259,27 @@ fn collect( Ok(()) } +// TODO: Rewrite this +/// `SeedOpts` provides configuration parameters for calling `AccountHashingStage::seed` +/// in unit tests or benchmarks to generate an initial database state for running the +/// stage. +/// +/// In order to check the "full hashing" mode of the stage you want to generate more +/// transitions than `AccountHashingStage.clean_threshold`. This requires: +/// 1. Creating enough blocks so there's enough transactions to generate the required transition +/// keys in the `BlockTransitionIndex` (which depends on the `TxTransitionIndex` internally) +/// 2. Setting `blocks.len() > clean_threshold` so that there's enough diffs to actually take the +/// 2nd codepath +#[derive(Clone, Debug)] +pub struct SeedOpts { + /// The range of blocks to be generated + pub blocks: RangeInclusive, + /// The number of accounts to be generated + pub accounts: usize, + /// The range of transactions to be generated per block. 
+ pub txs: Range, +} + fn stage_checkpoint_progress( provider: &DatabaseProviderRW, ) -> ProviderResult { From 0056f2f097f572a6089da5d733cfea8bcbd83537 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 25 May 2024 12:01:29 +0200 Subject: [PATCH 624/700] chore(deps): use error imports directly (#8388) --- Cargo.lock | 2 ++ crates/storage/provider/Cargo.toml | 2 ++ .../bundle_state/bundle_state_with_receipts.rs | 2 +- .../provider/src/bundle_state/state_changes.rs | 2 +- .../provider/src/bundle_state/state_reverts.rs | 2 +- crates/storage/provider/src/chain.rs | 7 +++---- crates/storage/provider/src/lib.rs | 2 +- .../src/providers/bundle_state_provider.rs | 2 +- .../provider/src/providers/consistent_view.rs | 4 ++-- .../provider/src/providers/database/mod.rs | 5 +++-- .../provider/src/providers/state/historical.rs | 4 ++-- .../provider/src/providers/state/latest.rs | 2 +- .../provider/src/providers/state/macros.rs | 16 ++++++++-------- .../provider/src/providers/static_file/jar.rs | 2 +- .../src/providers/static_file/manager.rs | 2 +- .../provider/src/providers/static_file/mod.rs | 2 +- .../provider/src/providers/static_file/writer.rs | 2 +- crates/storage/provider/src/test_utils/mock.rs | 2 +- crates/storage/provider/src/test_utils/noop.rs | 2 +- crates/storage/provider/src/traits/account.rs | 2 +- crates/storage/provider/src/traits/block.rs | 2 +- crates/storage/provider/src/traits/block_hash.rs | 2 +- crates/storage/provider/src/traits/block_id.rs | 2 +- .../provider/src/traits/database_provider.rs | 2 +- crates/storage/provider/src/traits/evm_env.rs | 2 +- crates/storage/provider/src/traits/hashing.rs | 2 +- crates/storage/provider/src/traits/header.rs | 2 +- crates/storage/provider/src/traits/history.rs | 2 +- .../provider/src/traits/prune_checkpoint.rs | 2 +- crates/storage/provider/src/traits/receipts.rs | 2 +- .../provider/src/traits/stage_checkpoint.rs | 2 +- crates/storage/provider/src/traits/state.rs | 2 +- 
crates/storage/provider/src/traits/stats.rs | 2 +- crates/storage/provider/src/traits/storage.rs | 2 +- .../storage/provider/src/traits/transactions.rs | 2 +- crates/storage/provider/src/traits/trie.rs | 2 +- .../storage/provider/src/traits/withdrawals.rs | 2 +- 37 files changed, 52 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a1a91c6b50115..a1e9d1de31e18 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7621,11 +7621,13 @@ dependencies = [ "reth-codecs", "reth-db", "reth-evm", + "reth-execution-errors", "reth-fs-util", "reth-interfaces", "reth-metrics", "reth-nippy-jar", "reth-primitives", + "reth-storage-errors", "reth-trie", "revm", "strum", diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 52f41c7f80c2f..d9a555161988a 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -13,8 +13,10 @@ workspace = true [dependencies] # reth +reth-execution-errors.workspace = true reth-primitives.workspace = true reth-fs-util.workspace = true +reth-storage-errors.workspace = true reth-interfaces.workspace = true reth-db.workspace = true reth-trie = { workspace = true, features = ["metrics"] } diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index fe76714d3045a..52c9366fde89a 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -8,13 +8,13 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, }; use reth_evm::execute::BatchBlockExecutionOutput; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ logs_bloom, revm::compat::{into_reth_acc, into_revm_acc}, Account, Address, BlockHash, BlockNumber, Bloom, Bytecode, Log, Receipt, Receipts, StaticFileSegment, StorageEntry, B256, U256, }; +use reth_storage_errors::provider::{ProviderError, 
ProviderResult}; use reth_trie::HashedPostState; pub use revm::db::states::OriginalValuesKnown; use revm::{ diff --git a/crates/storage/provider/src/bundle_state/state_changes.rs b/crates/storage/provider/src/bundle_state/state_changes.rs index 7f7bde79e3f8b..71551fe69234d 100644 --- a/crates/storage/provider/src/bundle_state/state_changes.rs +++ b/crates/storage/provider/src/bundle_state/state_changes.rs @@ -4,8 +4,8 @@ use reth_db::{ tables, transaction::{DbTx, DbTxMut}, }; -use reth_interfaces::db::DatabaseError; use reth_primitives::{revm::compat::into_reth_acc, Bytecode, StorageEntry, U256}; +use reth_storage_errors::db::DatabaseError; use revm::db::states::{PlainStorageChangeset, StateChangeset}; /// A change to the state of the world. diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index cc16a50ccabd4..1fe7a348198fd 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -5,8 +5,8 @@ use reth_db::{ tables, transaction::{DbTx, DbTxMut}, }; -use reth_interfaces::db::DatabaseError; use reth_primitives::{revm::compat::into_reth_acc, BlockNumber, StorageEntry, B256, U256}; +use reth_storage_errors::db::DatabaseError; use revm::db::states::{PlainStateReverts, PlainStorageRevert, RevertToSlot}; use std::iter::Peekable; diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index 2ff70bc4add74..bc419aa3f0d4e 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -1,7 +1,7 @@ //! Contains [Chain], a chain of blocks and their final state. 
use crate::bundle_state::BundleStateWithReceipts; -use reth_interfaces::{executor::BlockExecutionError, RethResult}; +use reth_execution_errors::BlockExecutionError; use reth_primitives::{ Address, BlockHash, BlockNumHash, BlockNumber, ForkBlock, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, TxHash, @@ -235,15 +235,14 @@ impl Chain { /// Merge two chains by appending the given chain into the current one. /// /// The state of accounts for this chain is set to the state of the newest chain. - pub fn append_chain(&mut self, other: Chain) -> RethResult<()> { + pub fn append_chain(&mut self, other: Chain) -> Result<(), BlockExecutionError> { let chain_tip = self.tip(); let other_fork_block = other.fork_block(); if chain_tip.hash() != other_fork_block.hash { return Err(BlockExecutionError::AppendChainDoesntConnect { chain_tip: Box::new(chain_tip.num_hash()), other_chain_fork: Box::new(other_fork_block), - } - .into()) + }) } // Insert blocks from other chain diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 2b146245efb82..864a962414e94 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -29,7 +29,7 @@ pub use providers::{ pub mod test_utils; /// Re-export provider error. 
-pub use reth_interfaces::provider::ProviderError; +pub use reth_storage_errors::provider::ProviderError; pub mod chain; pub use chain::{Chain, DisplayBlocksChain}; diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index e3364cadb28d8..bc042b32764b8 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -1,8 +1,8 @@ use crate::{ AccountReader, BlockHashReader, BundleStateDataProvider, StateProvider, StateRootProvider, }; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{trie::AccountProof, Account, Address, BlockNumber, Bytecode, B256}; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::updates::TrieUpdates; use revm::db::BundleState; diff --git a/crates/storage/provider/src/providers/consistent_view.rs b/crates/storage/provider/src/providers/consistent_view.rs index 07d3614efe78a..b956768130d69 100644 --- a/crates/storage/provider/src/providers/consistent_view.rs +++ b/crates/storage/provider/src/providers/consistent_view.rs @@ -1,10 +1,10 @@ use crate::{BlockNumReader, DatabaseProviderFactory, DatabaseProviderRO, HeaderProvider}; use reth_db::database::Database; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{GotExpected, B256}; +use reth_storage_errors::provider::ProviderResult; use std::marker::PhantomData; -pub use reth_interfaces::provider::ConsistentViewError; +pub use reth_storage_errors::provider::ConsistentViewError; /// A consistent view over state in the database. 
/// diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index c84e9d8cec239..60dc635eb3af2 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -9,7 +9,7 @@ use crate::{ }; use reth_db::{database::Database, init_db, models::StoredBlockBodyIndices, DatabaseEnv}; use reth_evm::ConfigureEvmEnv; -use reth_interfaces::{provider::ProviderResult, RethError, RethResult}; +use reth_interfaces::{RethError, RethResult}; use reth_primitives::{ stage::{StageCheckpoint, StageId}, Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, ChainInfo, @@ -30,6 +30,7 @@ mod provider; pub use provider::{DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW}; use reth_db::mdbx::DatabaseArguments; +use reth_storage_errors::provider::ProviderResult; /// A common provider that fetches data from a database or static file. /// @@ -583,7 +584,6 @@ mod tests { test_utils::{create_test_static_files_dir, ERROR_TEMPDIR}, }; use reth_interfaces::{ - provider::ProviderError, test_utils::{ generators, generators::{random_block, random_header}, @@ -594,6 +594,7 @@ mod tests { hex_literal::hex, ChainSpecBuilder, PruneMode, PruneModes, SealedBlock, StaticFileSegment, TxNumber, B256, U256, }; + use reth_storage_errors::provider::ProviderError; use std::{ops::RangeInclusive, sync::Arc}; use tokio::sync::watch; diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index ed64314aa2ba3..f815c282d5635 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -10,11 +10,11 @@ use reth_db::{ transaction::DbTx, BlockNumberList, }; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{ constants::EPOCH_SLOTS, trie::AccountProof, Account, Address, BlockNumber, Bytecode, 
StaticFileSegment, StorageKey, StorageValue, B256, }; +use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostState}; use revm::db::BundleState; use std::fmt::Debug; @@ -413,8 +413,8 @@ mod tests { transaction::{DbTx, DbTxMut}, BlockNumberList, }; - use reth_interfaces::provider::ProviderError; use reth_primitives::{address, b256, Account, Address, StorageEntry, B256, U256}; + use reth_storage_errors::provider::ProviderError; const ADDRESS: Address = address!("0000000000000000000000000000000000000001"); const HIGHER_ADDRESS: Address = address!("0000000000000000000000000000000000000005"); diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index d3c8af6b7fc20..5079a15d75932 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -7,11 +7,11 @@ use reth_db::{ tables, transaction::DbTx, }; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ trie::AccountProof, Account, Address, BlockNumber, Bytecode, StaticFileSegment, StorageKey, StorageValue, B256, }; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::{proof::Proof, updates::TrieUpdates, HashedPostState}; use revm::db::BundleState; diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index 0e2f088bfeb69..0efd8d9c7d920 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -31,20 +31,20 @@ macro_rules! delegate_provider_impls { $crate::providers::state::macros::delegate_impls_to_as_ref!( for $target => StateRootProvider $(where [$($generics)*])? 
{ - fn state_root(&self, state: &revm::db::BundleState) -> reth_interfaces::provider::ProviderResult; - fn state_root_with_updates(&self, state: &revm::db::BundleState) -> reth_interfaces::provider::ProviderResult<(reth_primitives::B256, reth_trie::updates::TrieUpdates)>; + fn state_root(&self, state: &revm::db::BundleState) -> reth_storage_errors::provider::ProviderResult; + fn state_root_with_updates(&self, state: &revm::db::BundleState) -> reth_storage_errors::provider::ProviderResult<(reth_primitives::B256, reth_trie::updates::TrieUpdates)>; } AccountReader $(where [$($generics)*])? { - fn basic_account(&self, address: reth_primitives::Address) -> reth_interfaces::provider::ProviderResult>; + fn basic_account(&self, address: reth_primitives::Address) -> reth_storage_errors::provider::ProviderResult>; } BlockHashReader $(where [$($generics)*])? { - fn block_hash(&self, number: u64) -> reth_interfaces::provider::ProviderResult>; - fn canonical_hashes_range(&self, start: reth_primitives::BlockNumber, end: reth_primitives::BlockNumber) -> reth_interfaces::provider::ProviderResult>; + fn block_hash(&self, number: u64) -> reth_storage_errors::provider::ProviderResult>; + fn canonical_hashes_range(&self, start: reth_primitives::BlockNumber, end: reth_primitives::BlockNumber) -> reth_storage_errors::provider::ProviderResult>; } StateProvider $(where [$($generics)*])?{ - fn storage(&self, account: reth_primitives::Address, storage_key: reth_primitives::StorageKey) -> reth_interfaces::provider::ProviderResult>; - fn proof(&self, address: reth_primitives::Address, keys: &[reth_primitives::B256]) -> reth_interfaces::provider::ProviderResult; - fn bytecode_by_hash(&self, code_hash: reth_primitives::B256) -> reth_interfaces::provider::ProviderResult>; + fn storage(&self, account: reth_primitives::Address, storage_key: reth_primitives::StorageKey) -> reth_storage_errors::provider::ProviderResult>; + fn proof(&self, address: reth_primitives::Address, keys: 
&[reth_primitives::B256]) -> reth_storage_errors::provider::ProviderResult; + fn bytecode_by_hash(&self, code_hash: reth_primitives::B256) -> reth_storage_errors::provider::ProviderResult>; } ); } diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 6dc75e3074db3..7f4b14fee37b4 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -10,11 +10,11 @@ use reth_db::{ codecs::CompactU256, static_file::{HeaderMask, ReceiptMask, StaticFileCursor, TransactionMask}, }; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ Address, BlockHash, BlockHashOrNumber, BlockNumber, ChainInfo, Header, Receipt, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, B256, U256, }; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ ops::{Deref, RangeBounds}, sync::Arc, diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 610021d70cf09..275c8935e1af3 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -16,7 +16,6 @@ use reth_db::{ table::Table, tables, }; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_nippy_jar::NippyJar; use reth_primitives::{ keccak256, @@ -26,6 +25,7 @@ use reth_primitives::{ TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, }; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, ops::{Deref, Range, RangeBounds, RangeInclusive}, diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 
46a1b7453ec19..cb9f879dde6af 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -9,9 +9,9 @@ pub use writer::{StaticFileProviderRW, StaticFileProviderRWRefMut}; mod metrics; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_nippy_jar::NippyJar; use reth_primitives::{static_file::SegmentHeader, StaticFileSegment}; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ops::Deref, sync::Arc}; const BLOCKS_PER_STATIC_FILE: u64 = 500_000; diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index a81ef5b005ba4..2a2fcf12ad88d 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -6,13 +6,13 @@ use super::{ use dashmap::mapref::one::RefMut; use reth_codecs::Compact; use reth_db::codecs::CompactU256; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_nippy_jar::{NippyJar, NippyJarError, NippyJarWriter}; use reth_primitives::{ static_file::{find_fixed_range, SegmentHeader, SegmentRangeInclusive}, BlockHash, BlockNumber, Header, Receipt, StaticFileSegment, TransactionSignedNoHash, TxNumber, U256, }; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ path::{Path, PathBuf}, sync::{Arc, Weak}, diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 893bd052d0b31..7dd7c5b4dc5b4 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -8,7 +8,6 @@ use crate::{ use parking_lot::Mutex; use reth_db::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ keccak256, trie::AccountProof, Account, Address, 
Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumber, BlockWithSenders, Bytecode, Bytes, ChainInfo, ChainSpec, Header, Receipt, @@ -16,6 +15,7 @@ use reth_primitives::{ TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, }; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::updates::TrieUpdates; use revm::{ db::BundleState, diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 02890eaf1935a..6593b74ccfa54 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -8,7 +8,6 @@ use crate::{ }; use reth_db::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{ stage::{StageCheckpoint, StageId}, trie::AccountProof, @@ -18,6 +17,7 @@ use reth_primitives::{ TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, MAINNET, U256, }; +use reth_storage_errors::provider::ProviderResult; use reth_trie::updates::TrieUpdates; use revm::{ db::BundleState, diff --git a/crates/storage/provider/src/traits/account.rs b/crates/storage/provider/src/traits/account.rs index 16042bce122f6..09161d31bb6d3 100644 --- a/crates/storage/provider/src/traits/account.rs +++ b/crates/storage/provider/src/traits/account.rs @@ -1,7 +1,7 @@ use auto_impl::auto_impl; use reth_db::models::AccountBeforeTx; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{Account, Address, BlockNumber}; +use reth_storage_errors::provider::ProviderResult; use std::{ collections::{BTreeMap, BTreeSet}, ops::{RangeBounds, RangeInclusive}, diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 1b767f350a39c..99984d346b8fc 100644 --- a/crates/storage/provider/src/traits/block.rs +++ 
b/crates/storage/provider/src/traits/block.rs @@ -4,11 +4,11 @@ use crate::{ }; use auto_impl::auto_impl; use reth_db::models::StoredBlockBodyIndices; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{ Block, BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, BlockWithSenders, Header, PruneModes, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, B256, }; +use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostState}; use std::ops::RangeInclusive; diff --git a/crates/storage/provider/src/traits/block_hash.rs b/crates/storage/provider/src/traits/block_hash.rs index 8bb334c8b8460..7413bb09c2191 100644 --- a/crates/storage/provider/src/traits/block_hash.rs +++ b/crates/storage/provider/src/traits/block_hash.rs @@ -1,6 +1,6 @@ use auto_impl::auto_impl; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{BlockHashOrNumber, BlockNumber, B256}; +use reth_storage_errors::provider::ProviderResult; /// Client trait for fetching block hashes by number. #[auto_impl(&, Arc, Box)] diff --git a/crates/storage/provider/src/traits/block_id.rs b/crates/storage/provider/src/traits/block_id.rs index fd52f6c326b2d..8ca2c98f8b06a 100644 --- a/crates/storage/provider/src/traits/block_id.rs +++ b/crates/storage/provider/src/traits/block_id.rs @@ -1,6 +1,6 @@ use super::BlockHashReader; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, ChainInfo, B256}; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; /// Client trait for getting important block numbers (such as the latest block number), converting /// block hashes to numbers, and fetching a block hash from its block number. 
diff --git a/crates/storage/provider/src/traits/database_provider.rs b/crates/storage/provider/src/traits/database_provider.rs index 4335917cc1a06..152c4935fd0b8 100644 --- a/crates/storage/provider/src/traits/database_provider.rs +++ b/crates/storage/provider/src/traits/database_provider.rs @@ -1,6 +1,6 @@ use crate::DatabaseProviderRO; use reth_db::database::Database; -use reth_interfaces::provider::ProviderResult; +use reth_storage_errors::provider::ProviderResult; /// Database provider factory. pub trait DatabaseProviderFactory { diff --git a/crates/storage/provider/src/traits/evm_env.rs b/crates/storage/provider/src/traits/evm_env.rs index 8c821984601d9..cecedad0c912d 100644 --- a/crates/storage/provider/src/traits/evm_env.rs +++ b/crates/storage/provider/src/traits/evm_env.rs @@ -1,6 +1,6 @@ use reth_evm::ConfigureEvmEnv; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{BlockHashOrNumber, Header}; +use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; /// A provider type that knows chain specific information required to configure an diff --git a/crates/storage/provider/src/traits/hashing.rs b/crates/storage/provider/src/traits/hashing.rs index 7978a4b19406b..4e0375c7c4dec 100644 --- a/crates/storage/provider/src/traits/hashing.rs +++ b/crates/storage/provider/src/traits/hashing.rs @@ -1,7 +1,7 @@ use auto_impl::auto_impl; use reth_db::models::BlockNumberAddress; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{Account, Address, BlockNumber, StorageEntry, B256}; +use reth_storage_errors::provider::ProviderResult; use std::{ collections::{BTreeMap, BTreeSet, HashMap}, ops::{Range, RangeInclusive}, diff --git a/crates/storage/provider/src/traits/header.rs b/crates/storage/provider/src/traits/header.rs index ad04f52ac957f..4719470a7767c 100644 --- a/crates/storage/provider/src/traits/header.rs +++ b/crates/storage/provider/src/traits/header.rs @@ -1,6 
+1,6 @@ use auto_impl::auto_impl; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, Header, SealedHeader, U256}; +use reth_storage_errors::provider::ProviderResult; use std::ops::RangeBounds; /// Client trait for fetching `Header` related data. diff --git a/crates/storage/provider/src/traits/history.rs b/crates/storage/provider/src/traits/history.rs index daef02b9f36b2..ec9625bdcdd4b 100644 --- a/crates/storage/provider/src/traits/history.rs +++ b/crates/storage/provider/src/traits/history.rs @@ -1,7 +1,7 @@ use auto_impl::auto_impl; use reth_db::models::BlockNumberAddress; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{Address, BlockNumber, B256}; +use reth_storage_errors::provider::ProviderResult; use std::{ collections::BTreeMap, ops::{Range, RangeInclusive}, diff --git a/crates/storage/provider/src/traits/prune_checkpoint.rs b/crates/storage/provider/src/traits/prune_checkpoint.rs index 60470bfecde02..a872e27b33ab2 100644 --- a/crates/storage/provider/src/traits/prune_checkpoint.rs +++ b/crates/storage/provider/src/traits/prune_checkpoint.rs @@ -1,5 +1,5 @@ -use reth_interfaces::provider::ProviderResult; use reth_primitives::{PruneCheckpoint, PruneSegment}; +use reth_storage_errors::provider::ProviderResult; /// The trait for fetching prune checkpoint related data. 
#[auto_impl::auto_impl(&, Arc)] diff --git a/crates/storage/provider/src/traits/receipts.rs b/crates/storage/provider/src/traits/receipts.rs index 8ac2917d77df5..138adcfa779cf 100644 --- a/crates/storage/provider/src/traits/receipts.rs +++ b/crates/storage/provider/src/traits/receipts.rs @@ -1,7 +1,7 @@ use std::ops::RangeBounds; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{BlockHashOrNumber, BlockId, BlockNumberOrTag, Receipt, TxHash, TxNumber}; +use reth_storage_errors::provider::ProviderResult; use crate::BlockIdReader; diff --git a/crates/storage/provider/src/traits/stage_checkpoint.rs b/crates/storage/provider/src/traits/stage_checkpoint.rs index ff58fa3eafbf3..1eca807638db3 100644 --- a/crates/storage/provider/src/traits/stage_checkpoint.rs +++ b/crates/storage/provider/src/traits/stage_checkpoint.rs @@ -1,8 +1,8 @@ -use reth_interfaces::provider::ProviderResult; use reth_primitives::{ stage::{StageCheckpoint, StageId}, BlockNumber, }; +use reth_storage_errors::provider::ProviderResult; /// The trait for fetching stage checkpoint related data. #[auto_impl::auto_impl(&, Arc)] diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index ac72d52f9f449..f31469a3def89 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -5,11 +5,11 @@ use crate::{ }; use auto_impl::auto_impl; use reth_db::transaction::{DbTx, DbTxMut}; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ trie::AccountProof, Address, BlockHash, BlockId, BlockNumHash, BlockNumber, BlockNumberOrTag, Bytecode, StorageKey, StorageValue, B256, KECCAK_EMPTY, U256, }; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use revm::db::OriginalValuesKnown; /// Type alias of boxed [StateProvider]. 
diff --git a/crates/storage/provider/src/traits/stats.rs b/crates/storage/provider/src/traits/stats.rs index dece75e287ba2..97052cf594ed6 100644 --- a/crates/storage/provider/src/traits/stats.rs +++ b/crates/storage/provider/src/traits/stats.rs @@ -1,5 +1,5 @@ use reth_db::table::Table; -use reth_interfaces::provider::ProviderResult; +use reth_storage_errors::provider::ProviderResult; /// The trait for fetching provider statistics. #[auto_impl::auto_impl(&, Arc)] diff --git a/crates/storage/provider/src/traits/storage.rs b/crates/storage/provider/src/traits/storage.rs index 302acad8b1873..04cb3a0d2ddf3 100644 --- a/crates/storage/provider/src/traits/storage.rs +++ b/crates/storage/provider/src/traits/storage.rs @@ -4,8 +4,8 @@ use std::{ }; use auto_impl::auto_impl; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{Address, BlockNumber, StorageEntry, B256}; +use reth_storage_errors::provider::ProviderResult; /// Storage reader #[auto_impl(&, Arc, Box)] diff --git a/crates/storage/provider/src/traits/transactions.rs b/crates/storage/provider/src/traits/transactions.rs index 3e798bb419c68..d693c52f80ed5 100644 --- a/crates/storage/provider/src/traits/transactions.rs +++ b/crates/storage/provider/src/traits/transactions.rs @@ -1,9 +1,9 @@ use crate::{BlockNumReader, BlockReader}; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ Address, BlockHashOrNumber, BlockNumber, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, }; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::ops::{Range, RangeBounds, RangeInclusive}; /// Client trait for fetching [TransactionSigned] related data. 
diff --git a/crates/storage/provider/src/traits/trie.rs b/crates/storage/provider/src/traits/trie.rs index 1fa5d780ba247..52f3317a3f305 100644 --- a/crates/storage/provider/src/traits/trie.rs +++ b/crates/storage/provider/src/traits/trie.rs @@ -1,6 +1,6 @@ use auto_impl::auto_impl; -use reth_interfaces::provider::ProviderResult; use reth_primitives::B256; +use reth_storage_errors::provider::ProviderResult; use reth_trie::updates::TrieUpdates; use revm::db::BundleState; diff --git a/crates/storage/provider/src/traits/withdrawals.rs b/crates/storage/provider/src/traits/withdrawals.rs index a54dc7db81601..b79cd2539766a 100644 --- a/crates/storage/provider/src/traits/withdrawals.rs +++ b/crates/storage/provider/src/traits/withdrawals.rs @@ -1,5 +1,5 @@ -use reth_interfaces::provider::ProviderResult; use reth_primitives::{BlockHashOrNumber, Withdrawal, Withdrawals}; +use reth_storage_errors::provider::ProviderResult; /// Client trait for fetching [Withdrawal] related data. #[auto_impl::auto_impl(&, Arc)] From 7dd787707cae8c330428013efbb650313ebdac85 Mon Sep 17 00:00:00 2001 From: William Law Date: Sat, 25 May 2024 03:20:04 -0700 Subject: [PATCH 625/700] refactor: extract init from node-core (#8373) Co-authored-by: Matthias Seitz --- Cargo.lock | 24 ++++++++++++-- Cargo.toml | 2 ++ bin/reth/Cargo.toml | 1 + bin/reth/src/commands/debug_cmd/execution.rs | 6 ++-- bin/reth/src/commands/import.rs | 2 +- bin/reth/src/commands/import_op.rs | 6 +--- bin/reth/src/commands/init_cmd.rs | 2 +- bin/reth/src/commands/init_state.rs | 2 +- .../src/commands/recover/storage_tries.rs | 3 +- bin/reth/src/commands/stage/drop.rs | 2 +- crates/node-core/Cargo.toml | 3 -- crates/node-core/src/lib.rs | 1 - crates/node/builder/Cargo.toml | 1 + crates/node/builder/src/launch/common.rs | 2 +- crates/storage/db-common/Cargo.toml | 33 +++++++++++++++++++ .../db-common}/src/init.rs | 0 crates/storage/db-common/src/lib.rs | 11 +++++++ 17 files changed, 80 insertions(+), 21 deletions(-) create mode 
100644 crates/storage/db-common/Cargo.toml rename crates/{node-core => storage/db-common}/src/init.rs (100%) create mode 100644 crates/storage/db-common/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index a1e9d1de31e18..7f4dce0685601 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6391,6 +6391,7 @@ dependencies = [ "reth-consensus", "reth-consensus-common", "reth-db", + "reth-db-common", "reth-discv4", "reth-discv5", "reth-downloaders", @@ -6659,6 +6660,25 @@ dependencies = [ "thiserror", ] +[[package]] +name = "reth-db-common" +version = "0.2.0-beta.7" +dependencies = [ + "eyre", + "reth-codecs", + "reth-config", + "reth-db", + "reth-etl", + "reth-interfaces", + "reth-primitives", + "reth-provider", + "reth-trie", + "serde", + "serde_json", + "thiserror", + "tracing", +] + [[package]] name = "reth-discv4" version = "0.2.0-beta.7" @@ -7310,6 +7330,7 @@ dependencies = [ "reth-config", "reth-consensus", "reth-db", + "reth-db-common", "reth-downloaders", "reth-evm", "reth-exex", @@ -7361,14 +7382,12 @@ dependencies = [ "proptest", "rand 0.8.5", "reth-beacon-consensus", - "reth-codecs", "reth-config", "reth-consensus-common", "reth-db", "reth-discv4", "reth-discv5", "reth-engine-primitives", - "reth-etl", "reth-evm", "reth-fs-util", "reth-interfaces", @@ -7388,7 +7407,6 @@ dependencies = [ "reth-tasks", "reth-tracing", "reth-transaction-pool", - "reth-trie", "secp256k1 0.28.2", "serde", "serde_json", diff --git a/Cargo.toml b/Cargo.toml index b1ab9e1eb9573..6f812253ea253 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -66,6 +66,7 @@ members = [ "crates/storage/codecs/", "crates/storage/codecs/derive/", "crates/storage/db/", + "crates/storage/db-common", "crates/storage/errors/", "crates/storage/libmdbx-rs/", "crates/storage/libmdbx-rs/mdbx-sys/", @@ -222,6 +223,7 @@ reth-config = { path = "crates/config" } reth-consensus = { path = "crates/consensus/consensus" } reth-consensus-common = { path = "crates/consensus/common" } reth-db = { path = "crates/storage/db" } 
+reth-db-common = { path = "crates/storage/db-common" } reth-discv4 = { path = "crates/net/discv4" } reth-discv5 = { path = "crates/net/discv5" } reth-dns-discovery = { path = "crates/net/dns" } diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 0bde1f4de8b56..ab1e9927adade 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -54,6 +54,7 @@ reth-node-optimism = { workspace = true, optional = true, features = [ "optimism", ] } reth-node-core.workspace = true +reth-db-common.workspace = true reth-node-builder.workspace = true reth-node-events.workspace = true reth-consensus.workspace = true diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 3e6474236801f..c07efab2b9fe2 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -17,6 +17,7 @@ use reth_cli_runner::CliContext; use reth_config::{config::EtlConfig, Config}; use reth_consensus::Consensus; use reth_db::{database::Database, init_db, DatabaseEnv}; +use reth_db_common::init::init_genesis; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, @@ -26,7 +27,6 @@ use reth_fs_util as fs; use reth_interfaces::p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}; use reth_network::{NetworkEvents, NetworkHandle}; use reth_network_api::NetworkInfo; -use reth_node_core::init::init_genesis; use reth_primitives::{ stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, PruneModes, B256, }; @@ -187,7 +187,7 @@ impl Command { match get_single_header(&client, BlockHashOrNumber::Number(block)).await { Ok(tip_header) => { info!(target: "reth::cli", ?block, "Successfully fetched block"); - return Ok(tip_header.hash()) + return Ok(tip_header.hash()); } Err(error) => { error!(target: "reth::cli", ?block, %error, "Failed to fetch the block. 
Retrying..."); @@ -255,7 +255,7 @@ impl Command { provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); if latest_block_number.unwrap_or_default() >= self.to { info!(target: "reth::cli", latest = latest_block_number, "Nothing to run"); - return Ok(()) + return Ok(()); } let pipeline_events = pipeline.events(); diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 1108f8aa7856a..70a2c339cada1 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -16,6 +16,7 @@ use reth_beacon_consensus::EthBeaconConsensus; use reth_config::{config::EtlConfig, Config}; use reth_consensus::Consensus; use reth_db::{database::Database, init_db, tables, transaction::DbTx}; +use reth_db_common::init::init_genesis; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, @@ -25,7 +26,6 @@ use reth_interfaces::p2p::{ bodies::downloader::BodyDownloader, headers::downloader::{HeaderDownloader, SyncTarget}, }; -use reth_node_core::init::init_genesis; use reth_node_events::node::NodeEvent; use reth_primitives::{stage::StageId, ChainSpec, PruneModes, B256}; use reth_provider::{ diff --git a/bin/reth/src/commands/import_op.rs b/bin/reth/src/commands/import_op.rs index b1ae8e8cb366c..a85fc4e3dcd39 100644 --- a/bin/reth/src/commands/import_op.rs +++ b/bin/reth/src/commands/import_op.rs @@ -13,19 +13,15 @@ use crate::{ use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; use reth_config::{config::EtlConfig, Config}; - use reth_db::{init_db, tables, transaction::DbTx}; +use reth_db_common::init::init_genesis; use reth_downloaders::file_client::{ ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE, }; - -use reth_node_core::init::init_genesis; - use reth_primitives::{op_mainnet::is_dup_tx, stage::StageId, PruneModes}; use reth_provider::{ProviderFactory, StageCheckpointReader, StaticFileProviderFactory}; 
use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; - use tracing::{debug, error, info}; /// Syncs RLP encoded blocks from a file. diff --git a/bin/reth/src/commands/init_cmd.rs b/bin/reth/src/commands/init_cmd.rs index bdd8acb52d1f2..3b900b3f01a2a 100644 --- a/bin/reth/src/commands/init_cmd.rs +++ b/bin/reth/src/commands/init_cmd.rs @@ -9,7 +9,7 @@ use crate::{ }; use clap::Parser; use reth_db::init_db; -use reth_node_core::init::init_genesis; +use reth_db_common::init::init_genesis; use reth_primitives::ChainSpec; use reth_provider::ProviderFactory; use std::sync::Arc; diff --git a/bin/reth/src/commands/init_state.rs b/bin/reth/src/commands/init_state.rs index ef640e01cf120..f5ee0c4b1c159 100644 --- a/bin/reth/src/commands/init_state.rs +++ b/bin/reth/src/commands/init_state.rs @@ -10,7 +10,7 @@ use crate::{ use clap::Parser; use reth_config::config::EtlConfig; use reth_db::{database::Database, init_db}; -use reth_node_core::init::init_from_state_dump; +use reth_db_common::init::init_from_state_dump; use reth_primitives::{ChainSpec, B256}; use reth_provider::ProviderFactory; diff --git a/bin/reth/src/commands/recover/storage_tries.rs b/bin/reth/src/commands/recover/storage_tries.rs index 025a170a035c1..583829bc39bc9 100644 --- a/bin/reth/src/commands/recover/storage_tries.rs +++ b/bin/reth/src/commands/recover/storage_tries.rs @@ -9,7 +9,8 @@ use reth_db::{ init_db, tables, transaction::DbTx, }; -use reth_node_core::{args::DatabaseArgs, init::init_genesis}; +use reth_db_common::init::init_genesis; +use reth_node_core::args::DatabaseArgs; use reth_primitives::ChainSpec; use reth_provider::{BlockNumReader, HeaderProvider, ProviderError, ProviderFactory}; use reth_trie::StateRoot; diff --git a/bin/reth/src/commands/stage/drop.rs b/bin/reth/src/commands/stage/drop.rs index 73ac898c970a0..fc3ef5768da0c 100644 --- a/bin/reth/src/commands/stage/drop.rs +++ b/bin/reth/src/commands/stage/drop.rs @@ -11,8 +11,8 @@ use crate::{ use clap::Parser; 
use itertools::Itertools; use reth_db::{open_db, static_file::iter_static_files, tables, transaction::DbTxMut, DatabaseEnv}; +use reth_db_common::init::{insert_genesis_header, insert_genesis_history, insert_genesis_state}; use reth_fs_util as fs; -use reth_node_core::init::{insert_genesis_header, insert_genesis_history, insert_genesis_state}; use reth_primitives::{ stage::StageId, static_file::find_fixed_range, ChainSpec, StaticFileSegment, }; diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index c24060943a832..b17aa0092e50f 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -35,11 +35,8 @@ reth-network-api.workspace = true reth-evm.workspace = true reth-engine-primitives.workspace = true reth-tasks.workspace = true -reth-trie.workspace = true reth-consensus-common.workspace = true reth-beacon-consensus.workspace = true -reth-etl.workspace = true -reth-codecs.workspace = true # ethereum discv5.workspace = true diff --git a/crates/node-core/src/lib.rs b/crates/node-core/src/lib.rs index 024467ab16c05..956b3ad3c3acc 100644 --- a/crates/node-core/src/lib.rs +++ b/crates/node-core/src/lib.rs @@ -13,7 +13,6 @@ pub mod cli; pub mod dirs; pub mod engine; pub mod exit; -pub mod init; pub mod metrics; pub mod node_config; pub mod utils; diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 77f2cd2e8856a..55b0094a63163 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -16,6 +16,7 @@ workspace = true reth-auto-seal-consensus.workspace = true reth-beacon-consensus.workspace = true reth-blockchain-tree.workspace = true +reth-db-common.workspace = true reth-exex.workspace = true reth-evm.workspace = true reth-provider.workspace = true diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 8a5d8e519005b..b6b0a03c83f71 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ 
-9,11 +9,11 @@ use tokio::sync::mpsc::Receiver; use reth_auto_seal_consensus::MiningMode; use reth_config::{config::EtlConfig, PruneConfig}; use reth_db::{database::Database, database_metrics::DatabaseMetrics}; +use reth_db_common::init::{init_genesis, InitDatabaseError}; use reth_interfaces::p2p::headers::client::HeadersClient; use reth_node_core::{ cli::config::RethRpcConfig, dirs::{ChainPath, DataDirPath}, - init::{init_genesis, InitDatabaseError}, node_config::NodeConfig, }; use reth_primitives::{BlockNumber, Chain, ChainSpec, Head, PruneModes, B256}; diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml new file mode 100644 index 0000000000000..675dde4ba5916 --- /dev/null +++ b/crates/storage/db-common/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "reth-db-common" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +# reth +reth-primitives.workspace = true +reth-db = { workspace = true, features = ["mdbx"] } +reth-interfaces = { workspace = true, features = ["clap"] } +reth-provider.workspace = true +reth-config.workspace = true +reth-trie.workspace = true +reth-etl.workspace = true +reth-codecs.workspace = true + +# misc +eyre.workspace = true +thiserror.workspace = true + +# io +serde.workspace = true +serde_json.workspace = true + +# tracing +tracing.workspace = true + +[lints] +workspace = true \ No newline at end of file diff --git a/crates/node-core/src/init.rs b/crates/storage/db-common/src/init.rs similarity index 100% rename from crates/node-core/src/init.rs rename to crates/storage/db-common/src/init.rs diff --git a/crates/storage/db-common/src/lib.rs b/crates/storage/db-common/src/lib.rs new file mode 100644 index 0000000000000..abcbc62762a4b --- /dev/null +++ b/crates/storage/db-common/src/lib.rs @@ -0,0 +1,11 @@ +//! 
Common db operations + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +pub mod init; From df83befcff788b18518f88e80c42330dac89fc97 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 25 May 2024 13:34:28 +0200 Subject: [PATCH 626/700] chore: replace reth-interface usage (#8394) --- Cargo.lock | 2 +- crates/optimism/evm/Cargo.toml | 2 +- crates/optimism/evm/src/l1.rs | 8 +++----- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f4dce0685601..987c4ad46af55 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7009,7 +7009,7 @@ version = "0.2.0-beta.7" dependencies = [ "reth-consensus-common", "reth-evm", - "reth-interfaces", + "reth-execution-errors", "reth-optimism-consensus", "reth-primitives", "reth-provider", diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index f9008dc37f363..0423f1bd7dd6c 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -15,7 +15,7 @@ workspace = true reth-evm.workspace = true reth-primitives.workspace = true reth-revm.workspace = true -reth-interfaces.workspace = true +reth-execution-errors.workspace = true reth-provider.workspace = true reth-consensus-common.workspace = true diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 66093e857b4ab..82fbb06e921d4 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -1,7 +1,7 @@ //! 
Optimism-specific implementation and utilities for the executor use crate::OptimismBlockExecutionError; -use reth_interfaces::{executor::BlockExecutionError, RethError}; +use reth_execution_errors::BlockExecutionError; use reth_primitives::{address, b256, hex, Address, Block, Bytes, ChainSpec, Hardfork, B256, U256}; use revm::{ primitives::{Bytecode, HashMap, SpecId}, @@ -232,7 +232,7 @@ pub fn ensure_create2_deployer( chain_spec: Arc, timestamp: u64, db: &mut revm::State, -) -> Result<(), RethError> +) -> Result<(), DB::Error> where DB: revm::Database, { @@ -246,9 +246,7 @@ where trace!(target: "evm", "Forcing create2 deployer contract deployment on Canyon transition"); // Load the create2 deployer account from the cache. - let acc = db - .load_cache_account(CREATE_2_DEPLOYER_ADDR) - .map_err(|_| RethError::Custom("Failed to load account".to_string()))?; + let acc = db.load_cache_account(CREATE_2_DEPLOYER_ADDR)?; // Update the account info with the create2 deployer codehash and bytecode. 
let mut acc_info = acc.account_info().unwrap_or_default(); From a6cfa2c089214cf6ee7a54a5c62ffaa0df4df522 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 25 May 2024 14:36:01 +0200 Subject: [PATCH 627/700] chore: remove rpc-layer dep (#8395) --- Cargo.lock | 2 +- crates/node-core/Cargo.toml | 4 +++- crates/node-core/src/args/rpc_server.rs | 2 +- crates/node-core/src/cli/config.rs | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 987c4ad46af55..c4d22bfaa1e3b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7361,6 +7361,7 @@ dependencies = [ name = "reth-node-core" version = "0.2.0-beta.7" dependencies = [ + "alloy-rpc-types-engine", "assert_matches", "clap", "const-str", @@ -7401,7 +7402,6 @@ dependencies = [ "reth-rpc-api", "reth-rpc-builder", "reth-rpc-engine-api", - "reth-rpc-layer", "reth-rpc-types", "reth-rpc-types-compat", "reth-tasks", diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index b17aa0092e50f..d762fcba7f501 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -24,7 +24,6 @@ reth-rpc.workspace = true reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true reth-rpc-api = { workspace = true, features = ["client"] } -reth-rpc-layer.workspace = true reth-transaction-pool.workspace = true reth-tracing.workspace = true reth-config.workspace = true @@ -38,6 +37,9 @@ reth-tasks.workspace = true reth-consensus-common.workspace = true reth-beacon-consensus.workspace = true +# ethereum +alloy-rpc-types-engine.workspace = true + # ethereum discv5.workspace = true diff --git a/crates/node-core/src/args/rpc_server.rs b/crates/node-core/src/args/rpc_server.rs index f67ef6acb74fd..84fcb4ab1f97a 100644 --- a/crates/node-core/src/args/rpc_server.rs +++ b/crates/node-core/src/args/rpc_server.rs @@ -8,6 +8,7 @@ use crate::{ cli::config::RethRpcConfig, utils::get_or_create_jwt_secret_from_path, }; +use alloy_rpc_types_engine::{JwtError, JwtSecret}; use 
clap::{ builder::{PossibleValue, RangedU64ValueParser, TypedValueParser}, Arg, Args, Command, @@ -31,7 +32,6 @@ use reth_rpc_builder::{ RpcServerConfig, RpcServerHandle, ServerBuilder, TransportRpcModuleConfig, }; use reth_rpc_engine_api::EngineApi; -use reth_rpc_layer::{JwtError, JwtSecret}; use reth_tasks::TaskSpawner; use reth_transaction_pool::TransactionPool; use std::{ diff --git a/crates/node-core/src/cli/config.rs b/crates/node-core/src/cli/config.rs index 4583832012834..6e5d1f6a2a8b6 100644 --- a/crates/node-core/src/cli/config.rs +++ b/crates/node-core/src/cli/config.rs @@ -1,5 +1,6 @@ //! Config traits for various node components. +use alloy_rpc_types_engine::{JwtError, JwtSecret}; use reth_network::protocol::IntoRlpxSubProtocol; use reth_primitives::Bytes; use reth_rpc::eth::{cache::EthStateCacheConfig, gas_oracle::GasPriceOracleConfig}; @@ -7,7 +8,6 @@ use reth_rpc_builder::{ auth::AuthServerConfig, error::RpcError, EthConfig, Identity, IpcServerBuilder, RpcServerConfig, ServerBuilder, TransportRpcModuleConfig, }; -use reth_rpc_layer::{JwtError, JwtSecret}; use reth_transaction_pool::PoolConfig; use std::{borrow::Cow, path::PathBuf, time::Duration}; From 72f2f1b850a9454789f3ebb3f6efccaee4125659 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 25 May 2024 14:58:14 +0200 Subject: [PATCH 628/700] chore: rm unused functions (#8396) --- Cargo.lock | 3 - crates/node-core/Cargo.toml | 4 - crates/node-core/src/args/rpc_server.rs | 100 +----------------------- 3 files changed, 3 insertions(+), 104 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c4d22bfaa1e3b..5486a74f38ea0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7389,19 +7389,16 @@ dependencies = [ "reth-discv4", "reth-discv5", "reth-engine-primitives", - "reth-evm", "reth-fs-util", "reth-interfaces", "reth-metrics", "reth-net-nat", "reth-network", - "reth-network-api", "reth-primitives", "reth-provider", "reth-rpc", "reth-rpc-api", "reth-rpc-builder", - "reth-rpc-engine-api", 
"reth-rpc-types", "reth-rpc-types-compat", "reth-tasks", diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index d762fcba7f501..787c68c9d1b66 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -18,7 +18,6 @@ reth-db = { workspace = true, features = ["mdbx"] } reth-interfaces = { workspace = true, features = ["clap"] } reth-provider.workspace = true reth-network = { workspace = true, features = ["serde"] } -reth-rpc-engine-api.workspace = true reth-rpc-builder.workspace = true reth-rpc.workspace = true reth-rpc-types.workspace = true @@ -30,8 +29,6 @@ reth-config.workspace = true reth-discv4.workspace = true reth-discv5.workspace = true reth-net-nat.workspace = true -reth-network-api.workspace = true -reth-evm.workspace = true reth-engine-primitives.workspace = true reth-tasks.workspace = true reth-consensus-common.workspace = true @@ -104,7 +101,6 @@ assert_matches.workspace = true optimism = [ "reth-primitives/optimism", "reth-rpc/optimism", - "reth-rpc-engine-api/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", "reth-beacon-consensus/optimism", diff --git a/crates/node-core/src/args/rpc_server.rs b/crates/node-core/src/args/rpc_server.rs index 84fcb4ab1f97a..9bf433b7aa438 100644 --- a/crates/node-core/src/args/rpc_server.rs +++ b/crates/node-core/src/args/rpc_server.rs @@ -14,26 +14,14 @@ use clap::{ Arg, Args, Command, }; use rand::Rng; -use reth_engine_primitives::EngineTypes; -use reth_evm::ConfigureEvm; -use reth_network_api::{NetworkInfo, Peers}; -use reth_provider::{ - AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - EvmEnvProvider, HeaderProvider, StateProviderFactory, -}; use reth_rpc::eth::{ cache::EthStateCacheConfig, gas_oracle::GasPriceOracleConfig, RPC_DEFAULT_GAS_CAP, }; use reth_rpc_builder::{ - auth::{AuthServerConfig, AuthServerHandle}, - constants, - error::RpcError, - EthConfig, Identity, IpcServerBuilder, RethRpcModule, 
RpcModuleConfig, RpcModuleSelection, - RpcServerConfig, RpcServerHandle, ServerBuilder, TransportRpcModuleConfig, + auth::AuthServerConfig, constants, error::RpcError, EthConfig, Identity, IpcServerBuilder, + RethRpcModule, RpcModuleConfig, RpcModuleSelection, RpcServerConfig, ServerBuilder, + TransportRpcModuleConfig, }; -use reth_rpc_engine_api::EngineApi; -use reth_tasks::TaskSpawner; -use reth_transaction_pool::TransactionPool; use std::{ ffi::OsStr, net::{IpAddr, Ipv4Addr, SocketAddr}, @@ -277,88 +265,6 @@ impl RpcServerArgs { self = self.with_ipc_random_path(); self } - - /// Convenience function for starting a rpc server with configs which extracted from cli args. - pub async fn start_rpc_server( - &self, - provider: Provider, - pool: Pool, - network: Network, - executor: Tasks, - events: Events, - evm_config: EvmConfig, - ) -> Result - where - Provider: BlockReaderIdExt - + AccountReader - + HeaderProvider - + StateProviderFactory - + EvmEnvProvider - + ChainSpecProvider - + ChangeSetReader - + Clone - + Unpin - + 'static, - Pool: TransactionPool + Clone + 'static, - Network: NetworkInfo + Peers + Clone + 'static, - Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm + 'static, - { - reth_rpc_builder::launch( - provider, - pool, - network, - self.transport_rpc_module_config(), - self.rpc_server_config(), - executor, - events, - evm_config, - ) - .await - } - - /// Create Engine API server. 
- #[allow(clippy::too_many_arguments)] - pub async fn start_auth_server( - &self, - provider: Provider, - pool: Pool, - network: Network, - executor: Tasks, - engine_api: EngineApi, - jwt_secret: JwtSecret, - evm_config: EvmConfig, - ) -> Result - where - Provider: BlockReaderIdExt - + ChainSpecProvider - + EvmEnvProvider - + HeaderProvider - + StateProviderFactory - + Clone - + Unpin - + 'static, - Pool: TransactionPool + Clone + 'static, - Network: NetworkInfo + Peers + Clone + 'static, - Tasks: TaskSpawner + Clone + 'static, - EngineT: EngineTypes + 'static, - EvmConfig: ConfigureEvm + 'static, - { - let socket_address = SocketAddr::new(self.auth_addr, self.auth_port); - - reth_rpc_builder::auth::launch( - provider, - pool, - network, - executor, - engine_api, - socket_address, - jwt_secret, - evm_config, - ) - .await - } } impl RethRpcConfig for RpcServerArgs { From 50590aa18c9c4b2ce0f726f55f3aaf8d304817b3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 25 May 2024 18:11:48 +0200 Subject: [PATCH 629/700] chore: rm redundant pin (#8397) --- crates/net/network/src/manager.rs | 23 ++++++++++++----------- crates/node/builder/src/builder/mod.rs | 2 +- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index b6b1d4d1ecbc7..3f7b77b424bf4 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -53,7 +53,7 @@ use reth_tokio_util::EventSender; use secp256k1::SecretKey; use std::{ net::SocketAddr, - pin::{pin, Pin}, + pin::Pin, sync::{ atomic::{AtomicU64, AtomicUsize, Ordering}, Arc, @@ -895,25 +895,26 @@ where { /// Drives the [NetworkManager] future until a [GracefulShutdown] signal is received. /// - /// This also run the given function `shutdown_hook` afterwards. - pub async fn run_until_graceful_shutdown( - self, + /// This invokes the given function `shutdown_hook` while holding the graceful shutdown guard. 
+ pub async fn run_until_graceful_shutdown( + mut self, shutdown: GracefulShutdown, - shutdown_hook: impl FnOnce(&mut Self), - ) { - let network = self; - let mut network = pin!(network); - + shutdown_hook: F, + ) -> R + where + F: FnOnce(Self) -> R, + { let mut graceful_guard = None; tokio::select! { - _ = &mut network => {}, + _ = &mut self => {}, guard = shutdown => { graceful_guard = Some(guard); }, } - shutdown_hook(&mut network); + let res = shutdown_hook(self); drop(graceful_guard); + res } } diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index b6f0a191e3e53..c28e435798497 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -544,7 +544,7 @@ impl BuilderContext { "p2p network task", |shutdown| { network.run_until_graceful_shutdown(shutdown, |network| { - write_peers_to_file(network, known_peers_file) + write_peers_to_file(&network, known_peers_file) }) }, ); From 2d33c17bc002db80e28aa56fcfeda718e393bac4 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 26 May 2024 08:23:52 +0000 Subject: [PATCH 630/700] chore(deps): weekly `cargo update` (#8401) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 250 +++++++++++++++++++++++++++-------------------------- 1 file changed, 127 insertions(+), 123 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5486a74f38ea0..099fe08d50fa6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,7 +146,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#092efd72526cc88b08f930a3e67358283ca11eb6" +source = "git+https://github.com/alloy-rs/alloy#0f9711692743d0444a887a589ca6786df77568be" dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-primitives", @@ -195,7 +195,7 @@ dependencies = [ [[package]] name = 
"alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#092efd72526cc88b08f930a3e67358283ca11eb6" +source = "git+https://github.com/alloy-rs/alloy#0f9711692743d0444a887a589ca6786df77568be" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -220,7 +220,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#092efd72526cc88b08f930a3e67358283ca11eb6" +source = "git+https://github.com/alloy-rs/alloy#0f9711692743d0444a887a589ca6786df77568be" dependencies = [ "alloy-primitives", "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -359,7 +359,7 @@ checksum = "8037e03c7f462a063f28daec9fda285a9a89da003c552f8637a80b9c8fd96241" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -407,7 +407,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#092efd72526cc88b08f930a3e67358283ca11eb6" +source = "git+https://github.com/alloy-rs/alloy#0f9711692743d0444a887a589ca6786df77568be" dependencies = [ "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -488,7 +488,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#092efd72526cc88b08f930a3e67358283ca11eb6" +source = "git+https://github.com/alloy-rs/alloy#0f9711692743d0444a887a589ca6786df77568be" dependencies = [ "alloy-primitives", "serde", @@ -536,7 +536,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -553,7 +553,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", "syn-solidity", "tiny-keccak", ] @@ -571,7 +571,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.64", + "syn 2.0.66", "syn-solidity", ] @@ -736,7 +736,7 @@ dependencies = [ 
"proc-macro-error", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -979,7 +979,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -996,7 +996,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -1034,7 +1034,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -1163,7 +1163,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.64", + "syn 2.0.66", "which", ] @@ -1374,7 +1374,7 @@ checksum = "6be9c93793b60dac381af475b98634d4b451e28336e72218cad9a20176218dbc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", "synstructure", ] @@ -1492,13 +1492,13 @@ dependencies = [ [[package]] name = "bytemuck_derive" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60" +checksum = "369cfaf2a5bed5d8f8202073b2e093c9f508251de1551a0deb4253e4c7d80909" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -1585,9 +1585,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.97" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4" +checksum = "41c270e7540d725e65ac7f1b212ac8ce349719624d7bcff99f8e2e488e8cf03f" dependencies = [ "jobserver", "libc", @@ -1712,7 +1712,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -1946,9 +1946,9 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32fast" 
-version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if", ] @@ -1993,9 +1993,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.12" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" dependencies = [ "crossbeam-utils", ] @@ -2021,9 +2021,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crossterm" @@ -2035,7 +2035,7 @@ dependencies = [ "crossterm_winapi", "libc", "mio", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "signal-hook", "signal-hook-mio", "winapi", @@ -2154,7 +2154,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -2287,7 +2287,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -2309,7 +2309,7 @@ checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" dependencies = [ "darling_core 0.20.9", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -2426,7 +2426,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -2578,7 +2578,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -2775,7 +2775,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -2788,7 +2788,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -2799,7 +2799,7 @@ checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -3114,7 +3114,7 @@ dependencies = [ [[package]] name = "foundry-blob-explorers" version = "0.1.0" -source = "git+https://github.com/foundry-rs/block-explorers#adcb750e8d8e57f7decafca433118bf7836ffd55" +source = "git+https://github.com/foundry-rs/block-explorers#1b024125d8327595f67f18a60ac29c49056c3a6d" dependencies = [ "alloy-chains", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -3220,7 +3220,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -3789,9 +3789,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" +checksum = "3d8d52be92d09acc2e01dddb7fde3ad983fc6489c7db4837e605bc3fca4cb63e" dependencies = [ "bytes", "futures-channel", @@ -3830,7 +3830,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -3912,9 +3912,9 @@ checksum = "545c6c3e8bf9580e2dafee8de6f9ec14826aaf359787789c7724f1f85f47d3dc" [[package]] name = "icu_normalizer" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c183e31ed700f1ecd6b032d104c52fe8b15d028956b73727c97ec176b170e187" +checksum = 
"183072b0ba2f336279c830a3d594a04168494a726c3c94b50c53d788178cf2c2" dependencies = [ "displaydoc", "icu_collections", @@ -3930,15 +3930,15 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22026918a80e6a9a330cb01b60f950e2b4e5284c59528fd0c6150076ef4c8522" +checksum = "e3744fecc0df9ce19999cdaf1f9f3a48c253431ce1d67ef499128fe9d0b607ab" [[package]] name = "icu_properties" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976e296217453af983efa25f287a4c1da04b9a63bf1ed63719455068e4453eb5" +checksum = "3a89401989d8fdf571b829ce1022801367ec89affc7b1e162b79eff4ae029e69" dependencies = [ "displaydoc", "icu_collections", @@ -3951,9 +3951,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6a86c0e384532b06b6c104814f9c1b13bcd5b64409001c0d05713a1f3529d99" +checksum = "e70a8b51ee5dd4ff8f20ee9b1dd1bc07afc110886a3747b1fec04cc6e5a15815" [[package]] name = "icu_provider" @@ -3980,7 +3980,7 @@ checksum = "d2abdd3a62551e8337af119c5899e600ca0c88ec8f23a46c60ba216c803dcf1a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -4124,12 +4124,6 @@ dependencies = [ "serde", ] -[[package]] -name = "indoc" -version = "2.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" - [[package]] name = "infer" version = "0.2.3" @@ -4351,7 +4345,7 @@ dependencies = [ "futures-util", "hyper 0.14.28", "jsonrpsee-types", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "pin-project", "rand 0.8.5", "rustc-hash", @@ -4394,7 +4388,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -4567,7 +4561,7 @@ source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.5", ] [[package]] @@ -4639,7 +4633,7 @@ dependencies = [ "multihash", "multistream-select", "once_cell", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "pin-project", "quick-protobuf", "rand 0.8.5", @@ -4957,7 +4951,7 @@ checksum = "38b4faf00617defe497754acde3024865bc143d44a86799b24e191ecff91354f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -5081,7 +5075,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -5367,7 +5361,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -5503,9 +5497,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core 0.9.10", @@ -5633,7 +5627,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -5662,7 +5656,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -5721,9 +5715,9 @@ checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" [[package]] name = "plotters" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3" dependencies = [ "num-traits", "plotters-backend", @@ 
-5734,15 +5728,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" +checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7" [[package]] name = "plotters-svg" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705" dependencies = [ "plotters-backend", ] @@ -5807,7 +5801,7 @@ dependencies = [ "log", "nix", "once_cell", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "smallvec", "symbolic-demangle", "tempfile", @@ -5853,7 +5847,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -5904,9 +5898,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.82" +version = "1.0.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" +checksum = "ec96c6a92621310b51366f1e28d05ef11489516e93be030060e5fc12024a49d6" dependencies = [ "unicode-ident", ] @@ -6136,21 +6130,21 @@ dependencies = [ [[package]] name = "ratatui" -version = "0.26.2" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a564a852040e82671dc50a37d88f3aa83bbc690dfc6844cfe7a2591620206a80" +checksum = "f44c9e68fd46eda15c646fbb85e1040b657a58cdc8c98db1d97a55930d991eef" dependencies = [ "bitflags 2.5.0", "cassowary", "compact_str", "crossterm", - "indoc", "itertools 0.12.1", "lru", "paste", "stability", "strum", "unicode-segmentation", + "unicode-truncate", "unicode-width", ] @@ -6530,7 
+6524,7 @@ dependencies = [ "assert_matches", "linked_hash_set", "metrics", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "reth-consensus", "reth-db", "reth-evm", @@ -6585,7 +6579,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -6688,7 +6682,7 @@ dependencies = [ "discv5", "enr", "generic-array", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "rand 0.8.5", "reth-net-common", "reth-net-nat", @@ -6735,7 +6729,7 @@ dependencies = [ "data-encoding", "enr", "linked_hash_set", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "rand 0.8.5", "reth-net-common", "reth-network-types", @@ -6983,7 +6977,7 @@ name = "reth-evm" version = "0.2.0-beta.7" dependencies = [ "futures-util", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "reth-execution-errors", "reth-primitives", "reth-storage-errors", @@ -7106,7 +7100,7 @@ dependencies = [ "indexmap 2.2.6", "libc", "libffi", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "pprof", "rand 0.8.5", "rand_xorshift", @@ -7146,7 +7140,7 @@ dependencies = [ "quote", "regex", "serial_test", - "syn 2.0.64", + "syn 2.0.66", "trybuild", ] @@ -7193,7 +7187,7 @@ dependencies = [ "itertools 0.12.1", "linked_hash_set", "metrics", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "pin-project", "pprof", "rand 0.8.5", @@ -7250,7 +7244,7 @@ version = "0.2.0-beta.7" dependencies = [ "auto_impl", "futures", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "rand 0.8.5", "reth-consensus", "reth-eth-wire-types", @@ -7476,7 +7470,7 @@ dependencies = [ "eyre", "hyper 0.14.28", "jsonrpsee", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "reqwest 0.12.4", "reth", "reth-basic-payload-builder", @@ -7629,7 +7623,7 @@ dependencies = [ "dashmap", "itertools 0.12.1", "metrics", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "pin-project", "rand 0.8.5", "rayon", @@ -7709,7 +7703,7 @@ dependencies = [ "jsonrpsee", "jsonwebtoken 8.3.0", "metrics", - "parking_lot 0.12.2", + "parking_lot 0.12.3", 
"pin-project", "rand 0.8.5", "reth-consensus-common", @@ -7957,7 +7951,7 @@ name = "reth-static-file" version = "0.2.0-beta.7" dependencies = [ "assert_matches", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "rayon", "reth-db", "reth-interfaces", @@ -8054,7 +8048,7 @@ dependencies = [ "futures-util", "itertools 0.12.1", "metrics", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "paste", "pprof", "proptest", @@ -8730,9 +8724,9 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.202" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "226b61a0d411b2ba5ff6d7f73a476ac4f8bb900373459cd00fab8512828ba395" +checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" dependencies = [ "serde_derive", ] @@ -8748,13 +8742,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.202" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838" +checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -8828,7 +8822,7 @@ dependencies = [ "darling 0.20.9", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -8840,7 +8834,7 @@ dependencies = [ "futures", "log", "once_cell", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "scc", "serial_test_derive", ] @@ -8853,7 +8847,7 @@ checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -9137,7 +9131,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ff9eaf853dec4c8802325d8b6d3dffa86cc707fd7a1a4cdbf416e13b061787a" dependencies = [ "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -9189,7 +9183,7 @@ 
dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -9257,9 +9251,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.64" +version = "2.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ad3dee41f36859875573074334c200d1add8e4a87bb37113ebd31d926b7b11f" +checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" dependencies = [ "proc-macro2", "quote", @@ -9275,7 +9269,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -9292,7 +9286,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -9384,7 +9378,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -9423,7 +9417,7 @@ checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -9571,7 +9565,7 @@ dependencies = [ "libc", "mio", "num_cpus", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "pin-project-lite", "signal-hook-registry", "socket2 0.5.7", @@ -9587,7 +9581,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -9778,7 +9772,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -9959,7 +9953,7 @@ dependencies = [ "ipconfig", "lru-cache", "once_cell", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "rand 0.8.5", "resolv-conf", "smallvec", @@ -10066,6 +10060,16 @@ version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" 
+[[package]] +name = "unicode-truncate" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5fbabedabe362c618c714dbefda9927b5afc8e2a8102f47f081089a9019226" +dependencies = [ + "itertools 0.12.1", + "unicode-width", +] + [[package]] name = "unicode-width" version = "0.1.12" @@ -10250,7 +10254,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", "wasm-bindgen-shared", ] @@ -10284,7 +10288,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -10637,7 +10641,7 @@ checksum = "9e6936f0cce458098a201c245a11bef556c6a0181129c7034d10d76d1ec3a2b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", "synstructure", ] @@ -10658,7 +10662,7 @@ checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -10678,15 +10682,15 @@ checksum = "e6a647510471d372f2e6c2e6b7219e44d8c574d24fdc11c610a61455782f18c3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", "synstructure", ] [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" dependencies = [ "zeroize_derive", ] @@ -10699,7 +10703,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] @@ -10721,7 +10725,7 @@ checksum = "7b4e5997cbf58990550ef1f0e5124a05e47e1ebd33a84af25739be6031a62c20" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.66", ] [[package]] From 
4dd2ad99f62e03dc08435f879f4641f59f631901 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 27 May 2024 12:22:13 +0200 Subject: [PATCH 631/700] chore: extract blockchaintree types to blockchain-tree-api crate (#8393) --- Cargo.lock | 15 +++++++++- Cargo.toml | 2 ++ crates/blockchain-tree-api/Cargo.toml | 20 +++++++++++++ .../src}/error.rs | 23 +-------------- .../mod.rs => blockchain-tree-api/src/lib.rs} | 21 ++++++++++---- crates/blockchain-tree/Cargo.toml | 2 +- crates/blockchain-tree/src/blockchain_tree.rs | 8 ++--- crates/blockchain-tree/src/chain.rs | 8 ++--- crates/blockchain-tree/src/lib.rs | 3 ++ crates/blockchain-tree/src/noop.rs | 19 +++++------- crates/blockchain-tree/src/shareable.rs | 19 +++++------- crates/consensus/beacon/src/engine/mod.rs | 2 +- crates/interfaces/Cargo.toml | 2 +- crates/interfaces/src/lib.rs | 2 +- crates/storage/provider/Cargo.toml | 1 + crates/storage/provider/src/providers/mod.rs | 29 +++++++++---------- 16 files changed, 98 insertions(+), 78 deletions(-) create mode 100644 crates/blockchain-tree-api/Cargo.toml rename crates/{interfaces/src/blockchain_tree => blockchain-tree-api/src}/error.rs (93%) rename crates/{interfaces/src/blockchain_tree/mod.rs => blockchain-tree-api/src/lib.rs} (95%) diff --git a/Cargo.lock b/Cargo.lock index 099fe08d50fa6..9aaedd5301233 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6525,6 +6525,7 @@ dependencies = [ "linked_hash_set", "metrics", "parking_lot 0.12.3", + "reth-blockchain-tree-api", "reth-consensus", "reth-db", "reth-evm", @@ -6544,6 +6545,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-blockchain-tree-api" +version = "0.2.0-beta.7" +dependencies = [ + "reth-consensus", + "reth-execution-errors", + "reth-primitives", + "reth-storage-errors", + "thiserror", +] + [[package]] name = "reth-cli-runner" version = "0.2.0-beta.7" @@ -7056,12 +7068,12 @@ dependencies = [ name = "reth-interfaces" version = "0.2.0-beta.7" dependencies = [ + "reth-blockchain-tree-api", 
"reth-consensus", "reth-execution-errors", "reth-fs-util", "reth-network-api", "reth-network-p2p", - "reth-primitives", "reth-storage-errors", "thiserror", ] @@ -7627,6 +7639,7 @@ dependencies = [ "pin-project", "rand 0.8.5", "rayon", + "reth-blockchain-tree-api", "reth-codecs", "reth-db", "reth-evm", diff --git a/Cargo.toml b/Cargo.toml index 6f812253ea253..a61d1ca4ba5db 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,6 +2,7 @@ members = [ "bin/reth/", "crates/blockchain-tree/", + "crates/blockchain-tree-api/", "crates/cli/runner/", "crates/config/", "crates/consensus/auto-seal/", @@ -216,6 +217,7 @@ reth-auto-seal-consensus = { path = "crates/consensus/auto-seal" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-beacon-consensus = { path = "crates/consensus/beacon" } reth-blockchain-tree = { path = "crates/blockchain-tree" } +reth-blockchain-tree-api = { path = "crates/blockchain-tree-api" } reth-cli-runner = { path = "crates/cli/runner" } reth-codecs = { path = "crates/storage/codecs" } reth-codecs-derive = { path = "crates/storage/codecs/derive" } diff --git a/crates/blockchain-tree-api/Cargo.toml b/crates/blockchain-tree-api/Cargo.toml new file mode 100644 index 0000000000000..69616209dd1cb --- /dev/null +++ b/crates/blockchain-tree-api/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "reth-blockchain-tree-api" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +reth-consensus.workspace = true +reth-execution-errors.workspace = true +reth-primitives.workspace = true +reth-storage-errors.workspace = true + +# misc +thiserror.workspace = true diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/blockchain-tree-api/src/error.rs similarity index 93% rename from crates/interfaces/src/blockchain_tree/error.rs rename to crates/blockchain-tree-api/src/error.rs index 
122b857437e25..c48a97676983e 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/blockchain-tree-api/src/error.rs @@ -1,10 +1,9 @@ //! Error handling for the blockchain tree -use crate::RethError; use reth_consensus::ConsensusError; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; use reth_primitives::{BlockHash, BlockNumber, SealedBlock}; -use reth_storage_errors::provider::ProviderError; +pub use reth_storage_errors::provider::ProviderError; /// Various error cases that can occur when a block violates tree assumptions. #[derive(Debug, Clone, Copy, thiserror::Error, Eq, PartialEq)] @@ -133,11 +132,6 @@ impl InsertBlockError { Self::new(block, InsertBlockErrorKind::Execution(error)) } - /// Create a new InsertBlockError from a RethError and block. - pub fn from_reth_error(error: RethError, block: SealedBlock) -> Self { - Self::new(block, error.into()) - } - /// Consumes the error and returns the block that resulted in the error #[inline] pub fn into_block(self) -> SealedBlock { @@ -383,18 +377,3 @@ impl InsertBlockErrorKind { } } } - -// This is a convenience impl to convert from crate::Error to InsertBlockErrorKind -impl From for InsertBlockErrorKind { - fn from(err: RethError) -> Self { - match err { - RethError::Execution(err) => InsertBlockErrorKind::Execution(err), - RethError::Consensus(err) => InsertBlockErrorKind::Consensus(err), - RethError::Database(err) => InsertBlockErrorKind::Internal(Box::new(err)), - RethError::Provider(err) => InsertBlockErrorKind::Internal(Box::new(err)), - RethError::Network(err) => InsertBlockErrorKind::Internal(Box::new(err)), - RethError::Custom(err) => InsertBlockErrorKind::Internal(err.into()), - RethError::Canonical(err) => InsertBlockErrorKind::Canonical(err), - } - } -} diff --git a/crates/interfaces/src/blockchain_tree/mod.rs b/crates/blockchain-tree-api/src/lib.rs similarity index 95% rename from crates/interfaces/src/blockchain_tree/mod.rs rename to 
crates/blockchain-tree-api/src/lib.rs index 0c1a9553dc30b..113e951e6a790 100644 --- a/crates/interfaces/src/blockchain_tree/mod.rs +++ b/crates/blockchain-tree-api/src/lib.rs @@ -1,12 +1,21 @@ -use crate::{blockchain_tree::error::InsertBlockError, provider::ProviderError, RethResult}; +//! Interfaces and types for interacting with the blockchain tree. +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +use self::error::CanonicalError; +use crate::error::InsertBlockError; use reth_primitives::{ BlockHash, BlockNumHash, BlockNumber, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, }; +use reth_storage_errors::provider::ProviderError; use std::collections::{BTreeMap, HashSet}; -use self::error::CanonicalError; - pub mod error; /// * [BlockchainTreeEngine::insert_block]: Connect block to chain, execute it and if valid insert @@ -76,21 +85,21 @@ pub trait BlockchainTreeEngine: BlockchainTreeViewer + Send + Sync { fn connect_buffered_blocks_to_canonical_hashes_and_finalize( &self, last_finalized_block: BlockNumber, - ) -> RethResult<()>; + ) -> Result<(), CanonicalError>; /// Update all block hashes. iterate over present and new list of canonical hashes and compare /// them. Remove all mismatches, disconnect them, removes all chains and clears all buffered /// blocks before the tip. fn update_block_hashes_and_clear_buffered( &self, - ) -> RethResult>; + ) -> Result, CanonicalError>; /// Reads the last `N` canonical hashes from the database and updates the block indices of the /// tree by attempting to connect the buffered blocks to canonical hashes. 
/// /// `N` is the maximum of `max_reorg_depth` and the number of block hashes needed to satisfy the /// `BLOCKHASH` opcode in the EVM. - fn connect_buffered_blocks_to_canonical_hashes(&self) -> RethResult<()>; + fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError>; /// Make a block and its parent chain part of the canonical chain by committing it to the /// database. diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 1b8a53394b636..58ee1cda5ac67 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -12,8 +12,8 @@ workspace = true [dependencies] # reth +reth-blockchain-tree-api.workspace = true reth-primitives.workspace = true -reth-interfaces.workspace = true reth-storage-errors.workspace = true reth-execution-errors.workspace = true reth-db.workspace = true diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index c031a5749bfd6..dc256eedf295f 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -5,14 +5,14 @@ use crate::{ state::{BlockchainId, TreeState}, AppendableChain, BlockIndices, BlockchainTreeConfig, BundleStateData, TreeExternals, }; +use reth_blockchain_tree_api::{ + error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, + BlockAttachment, BlockStatus, BlockValidationKind, CanonicalOutcome, InsertPayloadOk, +}; use reth_consensus::{Consensus, ConsensusError}; use reth_db::database::Database; use reth_evm::execute::BlockExecutorProvider; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_interfaces::blockchain_tree::{ - error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, - BlockAttachment, BlockStatus, BlockValidationKind, CanonicalOutcome, InsertPayloadOk, -}; use reth_primitives::{ BlockHash, BlockNumHash, BlockNumber, ForkBlock, GotExpected, 
Hardfork, PruneModes, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, B256, U256, diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index e73b1757666ea..e8e40cb41ef9b 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -5,14 +5,14 @@ use super::externals::TreeExternals; use crate::BundleStateDataRef; +use reth_blockchain_tree_api::{ + error::{BlockchainTreeError, InsertBlockErrorKind}, + BlockAttachment, BlockValidationKind, +}; use reth_consensus::{Consensus, ConsensusError}; use reth_db::database::Database; use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; use reth_execution_errors::BlockExecutionError; -use reth_interfaces::blockchain_tree::{ - error::{BlockchainTreeError, InsertBlockErrorKind}, - BlockAttachment, BlockValidationKind, -}; use reth_primitives::{ BlockHash, BlockNumber, ForkBlock, GotExpected, Receipts, SealedBlockWithSenders, SealedHeader, U256, diff --git a/crates/blockchain-tree/src/lib.rs b/crates/blockchain-tree/src/lib.rs index 1ae44b85a7dcb..6f5717abdd2e3 100644 --- a/crates/blockchain-tree/src/lib.rs +++ b/crates/blockchain-tree/src/lib.rs @@ -18,6 +18,9 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(test), warn(unused_crate_dependencies))] +/// Re-export of the blockchain tree API. 
+pub use reth_blockchain_tree_api::*; + pub mod blockchain_tree; pub use blockchain_tree::BlockchainTree; diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index 18423d3bb7e63..f4d27272f4411 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -1,11 +1,8 @@ -use reth_interfaces::{ - blockchain_tree::{ - error::{BlockchainTreeError, CanonicalError, InsertBlockError}, - BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, - InsertPayloadOk, - }, - provider::ProviderError, - RethResult, +use reth_blockchain_tree_api::{ + self, + error::{BlockchainTreeError, CanonicalError, InsertBlockError, ProviderError}, + BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, + InsertPayloadOk, }; use reth_primitives::{ BlockHash, BlockNumHash, BlockNumber, Receipt, SealedBlock, SealedBlockWithSenders, @@ -57,11 +54,11 @@ impl BlockchainTreeEngine for NoopBlockchainTree { fn connect_buffered_blocks_to_canonical_hashes_and_finalize( &self, _last_finalized_block: BlockNumber, - ) -> RethResult<()> { + ) -> Result<(), CanonicalError> { Ok(()) } - fn connect_buffered_blocks_to_canonical_hashes(&self) -> RethResult<()> { + fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError> { Ok(()) } @@ -71,7 +68,7 @@ impl BlockchainTreeEngine for NoopBlockchainTree { fn update_block_hashes_and_clear_buffered( &self, - ) -> RethResult> { + ) -> Result, CanonicalError> { Ok(BTreeMap::new()) } } diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 624dfd0e3afba..52a98e84dac3e 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -2,16 +2,13 @@ use super::BlockchainTree; use parking_lot::RwLock; +use reth_blockchain_tree_api::{ + error::{CanonicalError, InsertBlockError}, + BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, 
CanonicalOutcome, + InsertPayloadOk, +}; use reth_db::database::Database; use reth_evm::execute::BlockExecutorProvider; -use reth_interfaces::{ - blockchain_tree::{ - error::{CanonicalError, InsertBlockError}, - BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, - InsertPayloadOk, - }, - RethResult, -}; use reth_primitives::{ BlockHash, BlockNumHash, BlockNumber, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, @@ -74,7 +71,7 @@ where fn connect_buffered_blocks_to_canonical_hashes_and_finalize( &self, last_finalized_block: BlockNumber, - ) -> RethResult<()> { + ) -> Result<(), CanonicalError> { trace!(target: "blockchain_tree", last_finalized_block, "Connecting buffered blocks to canonical hashes and finalizing the tree"); let mut tree = self.tree.write(); let res = @@ -85,14 +82,14 @@ where fn update_block_hashes_and_clear_buffered( &self, - ) -> RethResult> { + ) -> Result, CanonicalError> { let mut tree = self.tree.write(); let res = tree.update_block_hashes_and_clear_buffered(); tree.update_chains_metrics(); Ok(res?) 
} - fn connect_buffered_blocks_to_canonical_hashes(&self) -> RethResult<()> { + fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError> { trace!(target: "blockchain_tree", "Connecting buffered blocks to canonical hashes"); let mut tree = self.tree.write(); let res = tree.connect_buffered_blocks_to_canonical_hashes(); diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 5f7f583902dd0..c1ef6228746e4 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1656,7 +1656,7 @@ where self.blockchain.connect_buffered_blocks_to_canonical_hashes() { error!(target: "consensus::engine", %error, "Error connecting buffered blocks to canonical hashes on hook result"); - return Err(error.into()) + return Err(RethError::Canonical(error).into()) } } } diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index 1d7483691bf48..a5c01ecb92c7e 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -11,12 +11,12 @@ repository.workspace = true workspace = true [dependencies] +reth-blockchain-tree-api.workspace = true reth-consensus.workspace = true reth-execution-errors.workspace = true reth-fs-util.workspace = true reth-network-api.workspace = true reth-network-p2p.workspace = true -reth-primitives.workspace = true reth-storage-errors.workspace = true # misc diff --git a/crates/interfaces/src/lib.rs b/crates/interfaces/src/lib.rs index 461413a1e2f1c..651283bb8bd67 100644 --- a/crates/interfaces/src/lib.rs +++ b/crates/interfaces/src/lib.rs @@ -32,7 +32,7 @@ pub use reth_execution_errors::trie; pub use reth_network_p2p::sync; /// BlockchainTree related traits. -pub mod blockchain_tree; +pub use reth_blockchain_tree_api as blockchain_tree; /// Common test helpers for mocking out Consensus, Downloaders and Header Clients. 
#[cfg(feature = "test-utils")] diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index d9a555161988a..c9eb5f7e378d4 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-blockchain-tree-api.workspace = true reth-execution-errors.workspace = true reth-primitives.workspace = true reth-fs-util.workspace = true diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index f9969a9500dcc..d6a7d34c816ac 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -7,20 +7,16 @@ use crate::{ StateProviderBox, StateProviderFactory, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, TreeViewer, WithdrawalsProvider, }; +use reth_blockchain_tree_api::{ + error::{CanonicalError, InsertBlockError}, + BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, + InsertPayloadOk, +}; use reth_db::{ database::Database, models::{AccountBeforeTx, StoredBlockBodyIndices}, }; use reth_evm::ConfigureEvmEnv; -use reth_interfaces::{ - blockchain_tree::{ - error::{CanonicalError, InsertBlockError}, - BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, - InsertPayloadOk, - }, - provider::ProviderResult, - RethResult, -}; use reth_primitives::{ stage::{StageCheckpoint, StageId}, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumber, @@ -29,6 +25,7 @@ use reth_primitives::{ TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, }; +use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ collections::{BTreeMap, HashSet}, @@ -669,18 +666,20 @@ where self.tree.finalize_block(finalized_block) } - fn update_block_hashes_and_clear_buffered(&self) -> 
RethResult> { - self.tree.update_block_hashes_and_clear_buffered() - } - fn connect_buffered_blocks_to_canonical_hashes_and_finalize( &self, last_finalized_block: BlockNumber, - ) -> RethResult<()> { + ) -> Result<(), CanonicalError> { self.tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(last_finalized_block) } - fn connect_buffered_blocks_to_canonical_hashes(&self) -> RethResult<()> { + fn update_block_hashes_and_clear_buffered( + &self, + ) -> Result, CanonicalError> { + self.tree.update_block_hashes_and_clear_buffered() + } + + fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError> { self.tree.connect_buffered_blocks_to_canonical_hashes() } From 89e55c4830a90092bd3a3fb7f9155dfea9d2749b Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 27 May 2024 12:23:12 +0200 Subject: [PATCH 632/700] feat: reset trie updates on make_canonical (#8370) --- crates/blockchain-tree/src/blockchain_tree.rs | 17 +++++++++++++++++ crates/blockchain-tree/src/metrics.rs | 5 +++++ crates/storage/provider/src/chain.rs | 5 +++++ 3 files changed, 27 insertions(+) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index dc256eedf295f..3b51ee6fae317 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1192,6 +1192,23 @@ where "Canonicalization finished" ); + // clear trie updates for other childs + self.block_indices() + .fork_to_child() + .get(&old_tip.hash) + .cloned() + .unwrap_or_default() + .into_iter() + .for_each(|child| { + if let Some(chain_id) = self.block_indices().get_blocks_chain_id(&child) { + if let Some(chain) = self.state.chains.get_mut(&chain_id) { + chain.clear_trie_updates(); + } + } + }); + + durations_recorder.record_relative(MakeCanonicalAction::ClearTrieUpdatesForOtherChilds); + // Send notification about new canonical chain and return outcome of canonicalization. 
let outcome = CanonicalOutcome::Committed { head: chain_notification.tip().header.clone() }; let _ = self.canon_state_notification_sender.send(chain_notification); diff --git a/crates/blockchain-tree/src/metrics.rs b/crates/blockchain-tree/src/metrics.rs index 056544354eb34..71a4475c5c585 100644 --- a/crates/blockchain-tree/src/metrics.rs +++ b/crates/blockchain-tree/src/metrics.rs @@ -85,6 +85,8 @@ pub(crate) enum MakeCanonicalAction { RevertCanonicalChainFromDatabase, /// Inserting an old canonical chain. InsertOldCanonicalChain, + /// Clearing trie updates of other childs chains after fork choice update. + ClearTrieUpdatesForOtherChilds, } impl MakeCanonicalAction { @@ -104,6 +106,9 @@ impl MakeCanonicalAction { "revert canonical chain from database" } MakeCanonicalAction::InsertOldCanonicalChain => "insert old canonical chain", + MakeCanonicalAction::ClearTrieUpdatesForOtherChilds => { + "clear trie updates of other childs chains after fork choice update" + } } } } diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index bc419aa3f0d4e..a1064c4dce0d6 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -81,6 +81,11 @@ impl Chain { self.trie_updates.as_ref() } + /// Remove cached trie updates for this chain. 
+ pub fn clear_trie_updates(&mut self) { + self.trie_updates.take(); + } + /// Get post state of this chain pub fn state(&self) -> &BundleStateWithReceipts { &self.state From f3013e4ea4d497ccdf6463dfc299945eaacffa9c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 27 May 2024 13:22:08 +0200 Subject: [PATCH 633/700] fix: validate received ENR response in discv4 (#8407) --- crates/net/discv4/src/lib.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 2019f58ee1609..2a8990deefa23 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -39,7 +39,7 @@ use discv5::{ use enr::Enr; use parking_lot::Mutex; use proto::{EnrRequest, EnrResponse}; -use reth_network_types::PeerId; +use reth_network_types::{pk2id, PeerId}; use reth_primitives::{bytes::Bytes, hex, ForkId, B256}; use secp256k1::SecretKey; use std::{ @@ -1238,6 +1238,12 @@ impl Discv4Service { fn on_enr_response(&mut self, msg: EnrResponse, remote_addr: SocketAddr, id: PeerId) { trace!(target: "discv4", ?remote_addr, ?msg, "received ENR response"); if let Some(resp) = self.pending_enr_requests.remove(&id) { + // ensure the ENR's public key matches the expected node id + let enr_id = pk2id(&msg.enr.public_key()); + if id != enr_id { + return + } + if resp.echo_hash == msg.request_hash { let key = kad_key(id); let fork_id = msg.eth_fork_id(); From ed926ec9b9c376d580d20c4fa9a24b3b3ded4f01 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 27 May 2024 14:36:39 +0200 Subject: [PATCH 634/700] chore: move `generic-array` to workspace (#8404) --- Cargo.toml | 1 + crates/net/discv4/Cargo.toml | 2 +- crates/net/ecies/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a61d1ca4ba5db..0f924cfcf9da1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -337,6 +337,7 @@ dashmap = "5.5" derive_more = "0.99.17" fdlimit = "0.3.0" eyre = "0.6" +generic-array = 
"0.14" tracing = "0.1.0" tracing-appender = "0.2" thiserror = "1.0" diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index 49e9b4ecc2e0f..13ef81f4408cd 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -37,7 +37,7 @@ tracing.workspace = true thiserror.workspace = true parking_lot.workspace = true rand = { workspace = true, optional = true } -generic-array = "0.14" +generic-array.workspace = true serde = { workspace = true, optional = true } [dev-dependencies] diff --git a/crates/net/ecies/Cargo.toml b/crates/net/ecies/Cargo.toml index d4a4de32aceed..6dbf1d4c52d27 100644 --- a/crates/net/ecies/Cargo.toml +++ b/crates/net/ecies/Cargo.toml @@ -27,7 +27,7 @@ educe = "0.4.19" tracing.workspace = true # HeaderBytes -generic-array = "0.14.6" +generic-array.workspace = true typenum = "1.15.0" byteorder = "1.4.3" From 2e47e9fb0d72c90857c88953aaffcb2cf34b9526 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 27 May 2024 14:27:34 +0200 Subject: [PATCH 635/700] feat: add udp packet ratelimiting (#8406) Co-authored-by: Federico Gimenez --- Cargo.lock | 1 + crates/net/discv4/Cargo.toml | 1 + crates/net/discv4/src/lib.rs | 90 +++++++++++++++++++++++++++++++++- crates/net/discv4/src/proto.rs | 2 +- 4 files changed, 92 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9aaedd5301233..60f18e1d6144f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6701,6 +6701,7 @@ dependencies = [ "reth-network-types", "reth-primitives", "reth-tracing", + "schnellru", "secp256k1 0.28.2", "serde", "thiserror", diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index 13ef81f4408cd..719ec83a6894c 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -33,6 +33,7 @@ tokio = { workspace = true, features = ["io-util", "net", "time"] } tokio-stream.workspace = true # misc +schnellru.workspace = true tracing.workspace = true thiserror.workspace = true parking_lot.workspace = true 
diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 2a8990deefa23..8e2ff1251351d 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -45,7 +45,9 @@ use secp256k1::SecretKey; use std::{ cell::RefCell, collections::{btree_map, hash_map::Entry, BTreeMap, HashMap, VecDeque}, - fmt, io, + fmt, + future::poll_fn, + io, net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4}, pin::Pin, rc::Rc, @@ -1796,7 +1798,13 @@ pub(crate) async fn send_loop(udp: Arc, rx: EgressReceiver) { } } +/// Rate limits the number of incoming packets from individual IPs to 1 packet/second +const MAX_INCOMING_PACKETS_PER_MINUTE_BY_IP: usize = 60usize; + /// Continuously awaits new incoming messages and sends them back through the channel. +/// +/// The receive loop enforce primitive rate limiting for ips to prevent message spams from +/// individual IPs pub(crate) async fn receive_loop(udp: Arc, tx: IngressSender, local_id: PeerId) { let send = |event: IngressEvent| async { let _ = tx.send(event).await.map_err(|err| { @@ -1808,6 +1816,12 @@ pub(crate) async fn receive_loop(udp: Arc, tx: IngressSender, local_i }); }; + let mut cache = ReceiveCache::default(); + + // tick at half the rate of the limit + let tick = MAX_INCOMING_PACKETS_PER_MINUTE_BY_IP / 2; + let mut interval = tokio::time::interval(Duration::from_secs(tick as u64)); + let mut buf = [0; MAX_PACKET_SIZE]; loop { let res = udp.recv_from(&mut buf).await; @@ -1817,6 +1831,12 @@ pub(crate) async fn receive_loop(udp: Arc, tx: IngressSender, local_i send(IngressEvent::RecvError(err)).await; } Ok((read, remote_addr)) => { + // rate limit incoming packets by IP + if cache.inc_ip(remote_addr.ip()) > MAX_INCOMING_PACKETS_PER_MINUTE_BY_IP { + trace!(target: "discv4", ?remote_addr, "Too many incoming packets from IP."); + continue + } + let packet = &buf[..read]; match Message::decode(packet) { Ok(packet) => { @@ -1825,6 +1845,13 @@ pub(crate) async fn receive_loop(udp: Arc, tx: IngressSender, 
local_i debug!(target: "discv4", ?remote_addr, "Received own packet."); continue } + + // skip if we've already received the same packet + if cache.contains_packet(packet.hash) { + debug!(target: "discv4", ?remote_addr, "Received duplicate packet."); + continue + } + send(IngressEvent::Packet(remote_addr, packet)).await; } Err(err) => { @@ -1834,6 +1861,67 @@ pub(crate) async fn receive_loop(udp: Arc, tx: IngressSender, local_i } } } + + // reset the tracked ips if the interval has passed + if poll_fn(|cx| match interval.poll_tick(cx) { + Poll::Ready(_) => Poll::Ready(true), + Poll::Pending => Poll::Ready(false), + }) + .await + { + cache.tick_ips(tick); + } + } +} + +/// A cache for received packets and their source address. +/// +/// This is used to discard duplicated packets and rate limit messages from the same source. +struct ReceiveCache { + /// keeps track of how many messages we've received from a given IP address since the last + /// tick. + /// + /// This is used to count the number of messages received from a given IP address within an + /// interval. + ip_messages: HashMap, + // keeps track of unique packet hashes + unique_packets: schnellru::LruMap, +} + +impl ReceiveCache { + /// Updates the counter for each IP address and removes IPs that have exceeded the limit. + /// + /// This will decrement the counter for each IP address and remove IPs that have reached 0. + fn tick_ips(&mut self, tick: usize) { + self.ip_messages.retain(|_, count| { + if let Some(reset) = count.checked_sub(tick) { + *count = reset; + true + } else { + false + } + }); + } + + /// Increases the counter for the given IP address and returns the new count. 
+ fn inc_ip(&mut self, ip: IpAddr) -> usize { + let ctn = self.ip_messages.entry(ip).or_default(); + *ctn = ctn.saturating_add(1); + *ctn + } + + /// Returns true if we previously received the packet + fn contains_packet(&mut self, hash: B256) -> bool { + !self.unique_packets.insert(hash, ()) + } +} + +impl Default for ReceiveCache { + fn default() -> Self { + Self { + ip_messages: Default::default(), + unique_packets: schnellru::LruMap::new(schnellru::ByLength::new(32)), + } } } diff --git a/crates/net/discv4/src/proto.rs b/crates/net/discv4/src/proto.rs index 62dd9235d0f44..be26487a6907e 100644 --- a/crates/net/discv4/src/proto.rs +++ b/crates/net/discv4/src/proto.rs @@ -72,7 +72,7 @@ pub enum Message { impl Message { /// Returns the id for this type - pub fn msg_type(&self) -> MessageId { + pub const fn msg_type(&self) -> MessageId { match self { Message::Ping(_) => MessageId::Ping, Message::Pong(_) => MessageId::Pong, From 2bf5c930396f92fb8a89b7b84802053ba7216baf Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 27 May 2024 14:31:33 +0200 Subject: [PATCH 636/700] chore(docs): clarify tree canonical chain docs (#8408) --- crates/blockchain-tree/src/block_indices.rs | 20 ++---- crates/blockchain-tree/src/blockchain_tree.rs | 63 ++++++++++--------- crates/blockchain-tree/src/canonical_chain.rs | 28 +++------ crates/blockchain-tree/src/state.rs | 4 +- 4 files changed, 49 insertions(+), 66 deletions(-) diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index 373b419b37536..875b6fbe1010e 100644 --- a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -17,9 +17,9 @@ use std::collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet pub struct BlockIndices { /// Last finalized block. last_finalized_block: BlockNumber, - /// Canonical chain. Contains N number (depends on `finalization_depth`) of blocks. 
- /// These blocks are found in fork_to_child but not inside `blocks_to_chain` or - /// `number_to_block` as those are chain specific indices. + /// Non-finalized canonical chain. Contains N number (depends on `finalization_depth`) of + /// blocks. These blocks are found in fork_to_child but not inside `blocks_to_chain` or + /// `number_to_block` as those are sidechain specific indices. canonical_chain: CanonicalChain, /// Index needed when discarding the chain, so we can remove connected chains from tree. /// @@ -101,14 +101,6 @@ impl BlockIndices { (canonical_tip.number + 1, pending_blocks) } - /// Returns the block number of the canonical block with the given hash. - /// - /// Returns `None` if no block could be found in the canonical chain. - #[inline] - pub(crate) fn get_canonical_block_number(&self, block_hash: &BlockHash) -> Option { - self.canonical_chain.get_canonical_block_number(self.last_finalized_block, block_hash) - } - /// Last finalized block pub fn last_finalized_block(&self) -> BlockNumber { self.last_finalized_block @@ -138,8 +130,8 @@ impl BlockIndices { self.fork_to_child.entry(first.parent_hash).or_default().insert_if_absent(first.hash()); } - /// Get the chain ID the block belongs to - pub(crate) fn get_blocks_chain_id(&self, block: &BlockHash) -> Option { + /// Get the [BlockchainId] the given block belongs to if it exists. + pub(crate) fn get_block_chain_id(&self, block: &BlockHash) -> Option { self.blocks_to_chain.get(block).cloned() } @@ -370,7 +362,7 @@ impl BlockIndices { /// Returns the block number of the canonical block with the given hash. 
#[inline] - pub fn canonical_number(&self, block_hash: BlockHash) -> Option { + pub fn canonical_number(&self, block_hash: &BlockHash) -> Option { self.canonical_chain.canonical_number(block_hash) } diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 3b51ee6fae317..db91023149754 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -215,7 +215,7 @@ where } // is block inside chain - if let Some(attachment) = self.is_block_inside_chain(&block) { + if let Some(attachment) = self.is_block_inside_sidechain(&block) { return Ok(Some(BlockStatus::Valid(attachment))) } @@ -260,7 +260,7 @@ where } /// Returns true if the block is included in a side-chain. - fn is_block_hash_inside_chain(&self, block_hash: BlockHash) -> bool { + fn is_block_hash_inside_sidechain(&self, block_hash: BlockHash) -> bool { self.block_by_hash(block_hash).is_some() } @@ -287,7 +287,7 @@ where let canonical_chain = self.state.block_indices.canonical_chain(); // if it is part of the chain - if let Some(chain_id) = self.block_indices().get_blocks_chain_id(&block_hash) { + if let Some(chain_id) = self.block_indices().get_block_chain_id(&block_hash) { trace!(target: "blockchain_tree", ?block_hash, "Constructing post state data based on non-canonical chain"); // get block state let Some(chain) = self.state.chains.get(&chain_id) else { @@ -318,7 +318,7 @@ where } // check if there is canonical block - if let Some(canonical_number) = canonical_chain.canonical_number(block_hash) { + if let Some(canonical_number) = canonical_chain.canonical_number(&block_hash) { trace!(target: "blockchain_tree", %block_hash, "Constructing post state data based on canonical chain"); return Some(BundleStateData { canonical_fork: ForkBlock { number: canonical_number, hash: block_hash }, @@ -345,7 +345,7 @@ where let parent = block.parent_num_hash(); // check if block parent can be found in any side chain. 
- if let Some(chain_id) = self.block_indices().get_blocks_chain_id(&parent.hash) { + if let Some(chain_id) = self.block_indices().get_block_chain_id(&parent.hash) { // found parent in side tree, try to insert there return self.try_insert_block_into_side_chain(block, chain_id, block_validation_kind) } @@ -358,7 +358,7 @@ where // this is another check to ensure that if the block points to a canonical block its block // is valid if let Some(canonical_parent_number) = - self.block_indices().canonical_number(block.parent_hash) + self.block_indices().canonical_number(&block.parent_hash) { // we found the parent block in canonical chain if canonical_parent_number != parent.number { @@ -458,7 +458,7 @@ where /// Try inserting a block into the given side chain. /// - /// WARNING: This expects a valid side chain id, see [BlockIndices::get_blocks_chain_id] + /// WARNING: This expects a valid side chain id, see [BlockIndices::get_block_chain_id] #[instrument(level = "trace", skip_all, target = "blockchain_tree")] fn try_insert_block_into_side_chain( &mut self, @@ -557,8 +557,7 @@ where } let fork_block = chain.fork_block(); - if let Some(next_chain_id) = self.block_indices().get_blocks_chain_id(&fork_block.hash) - { + if let Some(next_chain_id) = self.block_indices().get_block_chain_id(&fork_block.hash) { chain_id = next_chain_id; } else { // if there is no fork block that point to other chains, break the loop. @@ -582,7 +581,7 @@ where // chain fork block fork = self.state.chains.get(&chain_id)?.fork_block(); // get fork block chain - if let Some(fork_chain_id) = self.block_indices().get_blocks_chain_id(&fork.hash) { + if let Some(fork_chain_id) = self.block_indices().get_block_chain_id(&fork.hash) { chain_id = fork_chain_id; continue } @@ -608,7 +607,7 @@ where while let Some(block) = dependent_block.pop_back() { // Get chain of dependent block. 
- let Some(chain_id) = self.block_indices().get_blocks_chain_id(&block) else { + let Some(chain_id) = self.block_indices().get_block_chain_id(&block) else { debug!(target: "blockchain_tree", ?block, "Block not in tree"); return Default::default(); }; @@ -735,15 +734,15 @@ where Ok(()) } - /// Check if block is found inside chain and its attachment. + /// Check if block is found inside a sidechain and its attachment. /// /// if it is canonical or extends the canonical chain, return [BlockAttachment::Canonical] /// if it does not extend the canonical chain, return [BlockAttachment::HistoricalFork] /// if the block is not in the tree or its chain id is not valid, return None #[track_caller] - fn is_block_inside_chain(&self, block: &BlockNumHash) -> Option { + fn is_block_inside_sidechain(&self, block: &BlockNumHash) -> Option { // check if block known and is already in the tree - if let Some(chain_id) = self.block_indices().get_blocks_chain_id(&block.hash) { + if let Some(chain_id) = self.block_indices().get_block_chain_id(&block.hash) { // find the canonical fork of this chain let Some(canonical_fork) = self.canonical_fork(chain_id) else { debug!(target: "blockchain_tree", chain_id=?chain_id, block=?block.hash, "Chain id not valid"); @@ -981,22 +980,25 @@ where /// /// Returns `Ok(None)` if the block hash is not canonical (block hash does not exist, or is /// included in a sidechain). + /// + /// Note: this does not distinguish between a block that is finalized and a block that is not + /// finalized yet, only whether it is part of the canonical chain or not. pub fn find_canonical_header( &self, hash: &BlockHash, ) -> Result, ProviderError> { // if the indices show that the block hash is not canonical, it's either in a sidechain or - // canonical, but in the db. If it is in a sidechain, it is not canonical. If it is not in - // the db, then it is not canonical. + // canonical, but in the db. If it is in a sidechain, it is not canonical. 
If it is missing + // in the db, then it is also not canonical. let provider = self.externals.provider_factory.provider()?; let mut header = None; - if let Some(num) = self.block_indices().get_canonical_block_number(hash) { + if let Some(num) = self.block_indices().canonical_number(hash) { header = provider.header_by_number(num)?; } - if header.is_none() && self.is_block_hash_inside_chain(*hash) { + if header.is_none() && self.is_block_hash_inside_sidechain(*hash) { return Ok(None) } @@ -1008,6 +1010,9 @@ where } /// Determines whether or not a block is canonical, checking the db if necessary. + /// + /// Note: this does not distinguish between a block that is finalized and a block that is not + /// finalized yet, only whether it is part of the canonical chain or not. pub fn is_block_hash_canonical(&self, hash: &BlockHash) -> Result { self.find_canonical_header(hash).map(|header| header.is_some()) } @@ -1062,7 +1067,7 @@ where return Ok(CanonicalOutcome::AlreadyCanonical { header }) } - let Some(chain_id) = self.block_indices().get_blocks_chain_id(&block_hash) else { + let Some(chain_id) = self.block_indices().get_block_chain_id(&block_hash) else { debug!(target: "blockchain_tree", ?block_hash, "Block hash not found in block indices"); return Err(CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain { block_hash, @@ -1085,7 +1090,7 @@ where let mut chains_to_promote = vec![canonical]; // loop while fork blocks are found in Tree. - while let Some(chain_id) = self.block_indices().get_blocks_chain_id(&fork_block.hash) { + while let Some(chain_id) = self.block_indices().get_block_chain_id(&fork_block.hash) { // canonical chain is lower part of the chain. 
let Some(canonical) = self.remove_and_split_chain(chain_id, ChainSplitTarget::Number(fork_block.number)) @@ -1200,7 +1205,7 @@ where .unwrap_or_default() .into_iter() .for_each(|child| { - if let Some(chain_id) = self.block_indices().get_blocks_chain_id(&child) { + if let Some(chain_id) = self.block_indices().get_block_chain_id(&child) { if let Some(chain) = self.state.chains.get_mut(&chain_id) { chain.clear_trie_updates(); } @@ -1307,8 +1312,8 @@ where // This should only happen when an optimistic sync target was re-orged. // // Static files generally contain finalized data. The blockchain tree only deals - // with unfinalized data. The only scenario where canonical reverts go past the highest - // static file is when an optimistic sync occured and unfinalized data was written to + // with non-finalized data. The only scenario where canonical reverts go past the highest + // static file is when an optimistic sync occurred and non-finalized data was written to // static files. if self .externals @@ -1780,7 +1785,7 @@ mod tests { ); let block3a_chain_id = - tree.state.block_indices.get_blocks_chain_id(&block3a.hash()).unwrap(); + tree.state.block_indices.get_block_chain_id(&block3a.hash()).unwrap(); assert_eq!( tree.all_chain_hashes(block3a_chain_id), BTreeMap::from([ @@ -1820,7 +1825,7 @@ mod tests { tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) ); - let block1_chain_id = tree.state.block_indices.get_blocks_chain_id(&block1.hash()).unwrap(); + let block1_chain_id = tree.state.block_indices.get_block_chain_id(&block1.hash()).unwrap(); let block1_chain = tree.state.chains.get(&block1_chain_id).unwrap(); assert!(block1_chain.trie_updates().is_some()); @@ -1828,7 +1833,7 @@ mod tests { tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) ); - let block2_chain_id = 
tree.state.block_indices.get_blocks_chain_id(&block2.hash()).unwrap(); + let block2_chain_id = tree.state.block_indices.get_block_chain_id(&block2.hash()).unwrap(); let block2_chain = tree.state.chains.get(&block2_chain_id).unwrap(); assert!(block2_chain.trie_updates().is_none()); @@ -1841,7 +1846,7 @@ mod tests { tree.insert_block(block3.clone(), BlockValidationKind::Exhaustive).unwrap(), InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) ); - let block3_chain_id = tree.state.block_indices.get_blocks_chain_id(&block3.hash()).unwrap(); + let block3_chain_id = tree.state.block_indices.get_block_chain_id(&block3.hash()).unwrap(); let block3_chain = tree.state.chains.get(&block3_chain_id).unwrap(); assert!(block3_chain.trie_updates().is_some()); @@ -1854,7 +1859,7 @@ mod tests { tree.insert_block(block4.clone(), BlockValidationKind::Exhaustive).unwrap(), InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) ); - let block4_chain_id = tree.state.block_indices.get_blocks_chain_id(&block4.hash()).unwrap(); + let block4_chain_id = tree.state.block_indices.get_block_chain_id(&block4.hash()).unwrap(); let block4_chain = tree.state.chains.get(&block4_chain_id).unwrap(); assert!(block4_chain.trie_updates().is_some()); @@ -1863,7 +1868,7 @@ mod tests { InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) ); - let block5_chain_id = tree.state.block_indices.get_blocks_chain_id(&block5.hash()).unwrap(); + let block5_chain_id = tree.state.block_indices.get_block_chain_id(&block5.hash()).unwrap(); let block5_chain = tree.state.chains.get(&block5_chain_id).unwrap(); assert!(block5_chain.trie_updates().is_none()); diff --git a/crates/blockchain-tree/src/canonical_chain.rs b/crates/blockchain-tree/src/canonical_chain.rs index e641e455a78b9..0aca1bf945d7b 100644 --- a/crates/blockchain-tree/src/canonical_chain.rs +++ b/crates/blockchain-tree/src/canonical_chain.rs @@ -1,13 +1,13 @@ use reth_primitives::{BlockHash, 
BlockNumHash, BlockNumber}; use std::collections::BTreeMap; -/// This keeps track of all blocks of the canonical chain. +/// This keeps track of (non-finalized) blocks of the canonical chain. /// /// This is a wrapper type around an ordered set of block numbers and hashes that belong to the -/// canonical chain. +/// canonical chain that is not yet finalized. #[derive(Debug, Clone, Default)] pub(crate) struct CanonicalChain { - /// All blocks of the canonical chain in order. + /// All blocks of the canonical chain in order of their block number. chain: BTreeMap, } @@ -22,18 +22,18 @@ impl CanonicalChain { self.chain = chain; } - /// Returns the block hash of the canonical block with the given number. + /// Returns the block hash of the (non-finalized) canonical block with the given number. #[inline] pub(crate) fn canonical_hash(&self, number: &BlockNumber) -> Option { self.chain.get(number).cloned() } - /// Returns the block number of the canonical block with the given hash. + /// Returns the block number of the (non-finalized) canonical block with the given hash. #[inline] - pub(crate) fn canonical_number(&self, block_hash: BlockHash) -> Option { + pub(crate) fn canonical_number(&self, block_hash: &BlockHash) -> Option { self.chain.iter().find_map( |(number, hash)| { - if *hash == block_hash { + if hash == block_hash { Some(*number) } else { None @@ -42,20 +42,6 @@ impl CanonicalChain { ) } - /// Returns the block number of the canonical block with the given hash. - /// - /// Returns `None` if no block could be found in the canonical chain. - #[inline] - pub(crate) fn get_canonical_block_number( - &self, - last_finalized_block: BlockNumber, - block_hash: &BlockHash, - ) -> Option { - self.chain - .range(last_finalized_block..) - .find_map(|(num, &h)| (h == *block_hash).then_some(*num)) - } - /// Extends all items from the given iterator to the chain. 
#[inline] pub(crate) fn extend(&mut self, blocks: impl Iterator) { diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs index 75b6b4a919341..f02890654c3c4 100644 --- a/crates/blockchain-tree/src/state.rs +++ b/crates/blockchain-tree/src/state.rs @@ -68,7 +68,7 @@ impl TreeState { &self, block_hash: BlockHash, ) -> Option<&SealedBlockWithSenders> { - let id = self.block_indices.get_blocks_chain_id(&block_hash)?; + let id = self.block_indices.get_block_chain_id(&block_hash)?; let chain = self.chains.get(&id)?; chain.block_with_senders(block_hash) } @@ -77,7 +77,7 @@ impl TreeState { /// /// Caution: This will not return blocks from the canonical chain. pub(crate) fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { - let id = self.block_indices.get_blocks_chain_id(&block_hash)?; + let id = self.block_indices.get_block_chain_id(&block_hash)?; let chain = self.chains.get(&id)?; chain.receipts_by_block_hash(block_hash) } From b4d7d368a40facc0a490318dde0cbb497cf64599 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 27 May 2024 14:38:39 +0200 Subject: [PATCH 637/700] chore(trie): simplify hashed cursor abstraction (#8380) --- crates/trie/src/hashed_cursor/default.rs | 42 +++++--- crates/trie/src/hashed_cursor/mod.rs | 26 +++-- crates/trie/src/hashed_cursor/post_state.rs | 103 +++++++++++--------- crates/trie/src/node_iter.rs | 28 +++--- 4 files changed, 111 insertions(+), 88 deletions(-) diff --git a/crates/trie/src/hashed_cursor/default.rs b/crates/trie/src/hashed_cursor/default.rs index 1e5068870d7d6..69ce1f4ff3405 100644 --- a/crates/trie/src/hashed_cursor/default.rs +++ b/crates/trie/src/hashed_cursor/default.rs @@ -1,10 +1,10 @@ -use super::{HashedAccountCursor, HashedCursorFactory, HashedStorageCursor}; +use super::{HashedCursor, HashedCursorFactory, HashedStorageCursor}; use reth_db::{ cursor::{DbCursorRO, DbDupCursorRO}, tables, transaction::DbTx, }; -use reth_primitives::{Account, StorageEntry, B256}; +use 
reth_primitives::{Account, B256, U256}; impl<'a, TX: DbTx> HashedCursorFactory for &'a TX { type AccountCursor = ::Cursor; @@ -26,25 +26,29 @@ impl<'a, TX: DbTx> HashedCursorFactory for &'a TX { } } -impl HashedAccountCursor for C +impl HashedCursor for C where C: DbCursorRO, { - fn seek(&mut self, key: B256) -> Result, reth_db::DatabaseError> { + type Value = Account; + + fn seek(&mut self, key: B256) -> Result, reth_db::DatabaseError> { self.seek(key) } - fn next(&mut self) -> Result, reth_db::DatabaseError> { + fn next(&mut self) -> Result, reth_db::DatabaseError> { self.next() } } /// The structure wrapping a database cursor for hashed storage and -/// a target hashed address. Implements [HashedStorageCursor] for iterating -/// hashed state +/// a target hashed address. Implements [HashedCursor] and [HashedStorageCursor] +/// for iterating over hashed storage. #[derive(Debug)] pub struct DatabaseHashedStorageCursor { + /// Database hashed storage cursor. cursor: C, + /// Target hashed address of the account that the storage belongs to. 
hashed_address: B256, } @@ -55,19 +59,29 @@ impl DatabaseHashedStorageCursor { } } -impl HashedStorageCursor for DatabaseHashedStorageCursor +impl HashedCursor for DatabaseHashedStorageCursor where C: DbCursorRO + DbDupCursorRO, { - fn is_storage_empty(&mut self) -> Result { - Ok(self.cursor.seek_exact(self.hashed_address)?.is_none()) + type Value = U256; + + fn seek( + &mut self, + subkey: B256, + ) -> Result, reth_db::DatabaseError> { + Ok(self.cursor.seek_by_key_subkey(self.hashed_address, subkey)?.map(|e| (e.key, e.value))) } - fn seek(&mut self, subkey: B256) -> Result, reth_db::DatabaseError> { - self.cursor.seek_by_key_subkey(self.hashed_address, subkey) + fn next(&mut self) -> Result, reth_db::DatabaseError> { + Ok(self.cursor.next_dup_val()?.map(|e| (e.key, e.value))) } +} - fn next(&mut self) -> Result, reth_db::DatabaseError> { - self.cursor.next_dup_val() +impl HashedStorageCursor for DatabaseHashedStorageCursor +where + C: DbCursorRO + DbDupCursorRO, +{ + fn is_storage_empty(&mut self) -> Result { + Ok(self.cursor.seek_exact(self.hashed_address)?.is_none()) } } diff --git a/crates/trie/src/hashed_cursor/mod.rs b/crates/trie/src/hashed_cursor/mod.rs index 916dd6f424174..edfd0cb765bc8 100644 --- a/crates/trie/src/hashed_cursor/mod.rs +++ b/crates/trie/src/hashed_cursor/mod.rs @@ -1,4 +1,4 @@ -use reth_primitives::{Account, StorageEntry, B256}; +use reth_primitives::{Account, B256, U256}; /// Default implementation of the hashed state cursor traits. mod default; @@ -11,9 +11,9 @@ pub use post_state::*; /// The factory trait for creating cursors over the hashed state. pub trait HashedCursorFactory { /// The hashed account cursor type. - type AccountCursor: HashedAccountCursor; + type AccountCursor: HashedCursor; /// The hashed storage cursor type. - type StorageCursor: HashedStorageCursor; + type StorageCursor: HashedStorageCursor; /// Returns a cursor for iterating over all hashed accounts in the state. 
fn hashed_account_cursor(&self) -> Result; @@ -25,23 +25,21 @@ pub trait HashedCursorFactory { ) -> Result; } -/// The cursor for iterating over hashed accounts. -pub trait HashedAccountCursor { +/// The cursor for iterating over hashed entries. +pub trait HashedCursor { + /// Value returned by the cursor. + type Value; + /// Seek an entry greater or equal to the given key and position the cursor there. - fn seek(&mut self, key: B256) -> Result, reth_db::DatabaseError>; + /// Returns the first entry with the key greater or equal to the sought key. + fn seek(&mut self, key: B256) -> Result, reth_db::DatabaseError>; /// Move the cursor to the next entry and return it. - fn next(&mut self) -> Result, reth_db::DatabaseError>; + fn next(&mut self) -> Result, reth_db::DatabaseError>; } /// The cursor for iterating over hashed storage entries. -pub trait HashedStorageCursor { +pub trait HashedStorageCursor: HashedCursor { /// Returns `true` if there are no entries for a given key. fn is_storage_empty(&mut self) -> Result; - - /// Seek an entry greater or equal to the given key/subkey and position the cursor there. - fn seek(&mut self, subkey: B256) -> Result, reth_db::DatabaseError>; - - /// Move the cursor to the next entry and return it. - fn next(&mut self) -> Result, reth_db::DatabaseError>; } diff --git a/crates/trie/src/hashed_cursor/post_state.rs b/crates/trie/src/hashed_cursor/post_state.rs index 379b08c2cb6b0..ed4fc1e2f84fd 100644 --- a/crates/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/src/hashed_cursor/post_state.rs @@ -1,6 +1,6 @@ -use super::{HashedAccountCursor, HashedCursorFactory, HashedStorageCursor}; +use super::{HashedCursor, HashedCursorFactory, HashedStorageCursor}; use crate::state::HashedPostStateSorted; -use reth_primitives::{Account, StorageEntry, B256, U256}; +use reth_primitives::{Account, B256, U256}; /// The hashed cursor factory for the post state. 
#[derive(Debug, Clone)] @@ -88,10 +88,12 @@ impl<'b, C> HashedPostStateAccountCursor<'b, C> { } } -impl<'b, C> HashedAccountCursor for HashedPostStateAccountCursor<'b, C> +impl<'b, C> HashedCursor for HashedPostStateAccountCursor<'b, C> where - C: HashedAccountCursor, + C: HashedCursor, { + type Value = Account; + /// Seek the next entry for a given hashed account key. /// /// If the post state contains the exact match for the key, return it. @@ -99,8 +101,8 @@ where /// database and the post state. The two entries are compared and the lowest is returned. /// /// The returned account key is memoized and the cursor remains positioned at that key until - /// [HashedAccountCursor::seek] or [HashedAccountCursor::next] are called. - fn seek(&mut self, key: B256) -> Result, reth_db::DatabaseError> { + /// [HashedCursor::seek] or [HashedCursor::next] are called. + fn seek(&mut self, key: B256) -> Result, reth_db::DatabaseError> { self.last_account = None; // Take the next account from the post state with the key greater than or equal to the @@ -142,9 +144,9 @@ where /// If the cursor is positioned at the entry, return the entry with next greater key. /// Returns [None] if the previous memoized or the next greater entries are missing. /// - /// NOTE: This function will not return any entry unless [HashedAccountCursor::seek] has been + /// NOTE: This function will not return any entry unless [HashedCursor::seek] has been /// called. - fn next(&mut self) -> Result, reth_db::DatabaseError> { + fn next(&mut self) -> Result, reth_db::DatabaseError> { let last_account = match self.last_account.as_ref() { Some(account) => account, None => return Ok(None), // no previous entry was found @@ -192,7 +194,7 @@ pub struct HashedPostStateStorageCursor<'b, C> { } impl<'b, C> HashedPostStateStorageCursor<'b, C> { - /// Create new instance of [HashedPostStateStorageCursor]. + /// Create new instance of [HashedPostStateStorageCursor] for the given hashed address. 
pub fn new(cursor: C, post_state: &'b HashedPostStateSorted, hashed_address: B256) -> Self { Self { cursor, post_state, hashed_address, last_slot: None, post_state_storage_index: 0 } } @@ -222,49 +224,35 @@ impl<'b, C> HashedPostStateStorageCursor<'b, C> { /// If the storage keys are the same, the post state entry is given precedence. fn next_slot( post_state_item: Option<&(B256, U256)>, - db_item: Option, - ) -> Option { + db_item: Option<(B256, U256)>, + ) -> Option<(B256, U256)> { match (post_state_item, db_item) { // If both are not empty, return the smallest of the two // Post state is given precedence if keys are equal - (Some((post_state_slot, post_state_value)), Some(db_entry)) => { - if post_state_slot <= &db_entry.key { - Some(StorageEntry { key: *post_state_slot, value: *post_state_value }) + (Some((post_state_slot, post_state_value)), Some((db_slot, db_value))) => { + if post_state_slot <= &db_slot { + Some((*post_state_slot, *post_state_value)) } else { - Some(db_entry) + Some((db_slot, db_value)) } } // Return either non-empty entry - _ => db_item.or_else(|| { - post_state_item.copied().map(|(key, value)| StorageEntry { key, value }) - }), + _ => db_item.or_else(|| post_state_item.copied()), } } } -impl<'b, C> HashedStorageCursor for HashedPostStateStorageCursor<'b, C> +impl<'b, C> HashedCursor for HashedPostStateStorageCursor<'b, C> where - C: HashedStorageCursor, + C: HashedStorageCursor, { - /// Returns `true` if the account has no storage entries. - /// - /// This function should be called before attempting to call [HashedStorageCursor::seek] or - /// [HashedStorageCursor::next]. 
- fn is_storage_empty(&mut self) -> Result { - let is_empty = match self.post_state.storages.get(&self.hashed_address) { - Some(storage) => { - // If the storage has been wiped at any point - storage.wiped && - // and the current storage does not contain any non-zero values - storage.non_zero_valued_slots.is_empty() - } - None => self.cursor.is_storage_empty()?, - }; - Ok(is_empty) - } + type Value = U256; /// Seek the next account storage entry for a given hashed key pair. - fn seek(&mut self, subkey: B256) -> Result, reth_db::DatabaseError> { + fn seek( + &mut self, + subkey: B256, + ) -> Result, reth_db::DatabaseError> { // Attempt to find the account's storage in post state. let mut post_state_entry = None; if let Some(storage) = self.post_state.storages.get(&self.hashed_address) { @@ -281,7 +269,7 @@ where if let Some((slot, value)) = post_state_entry { if slot == &subkey { self.last_slot = Some(*slot); - return Ok(Some(StorageEntry { key: *slot, value: *value })) + return Ok(Some((*slot, *value))) } } @@ -293,7 +281,7 @@ where while db_entry .as_ref() - .map(|entry| self.is_slot_zero_valued(&entry.key)) + .map(|entry| self.is_slot_zero_valued(&entry.0)) .unwrap_or_default() { db_entry = self.cursor.next()?; @@ -304,7 +292,7 @@ where // Compare two entries and return the lowest. let result = Self::next_slot(post_state_entry, db_entry); - self.last_slot = result.as_ref().map(|entry| entry.key); + self.last_slot = result.as_ref().map(|entry| entry.0); Ok(result) } @@ -312,9 +300,9 @@ where /// /// # Panics /// - /// If the account key is not set. [HashedStorageCursor::seek] must be called first in order to + /// If the account key is not set. [HashedCursor::seek] must be called first in order to /// position the cursor. 
- fn next(&mut self) -> Result, reth_db::DatabaseError> { + fn next(&mut self) -> Result, reth_db::DatabaseError> { let last_slot = match self.last_slot.as_ref() { Some(slot) => slot, None => return Ok(None), // no previous entry was found @@ -329,7 +317,7 @@ where // If the entry was already returned or is zero-values, move to the next. while db_entry .as_ref() - .map(|entry| &entry.key == last_slot || self.is_slot_zero_valued(&entry.key)) + .map(|entry| &entry.0 == last_slot || self.is_slot_zero_valued(&entry.0)) .unwrap_or_default() { db_entry = self.cursor.next()?; @@ -350,11 +338,33 @@ where // Compare two entries and return the lowest. let result = Self::next_slot(post_state_entry, db_entry); - self.last_slot = result.as_ref().map(|entry| entry.key); + self.last_slot = result.as_ref().map(|entry| entry.0); Ok(result) } } +impl<'b, C> HashedStorageCursor for HashedPostStateStorageCursor<'b, C> +where + C: HashedStorageCursor, +{ + /// Returns `true` if the account has no storage entries. + /// + /// This function should be called before attempting to call [HashedCursor::seek] or + /// [HashedCursor::next]. 
+ fn is_storage_empty(&mut self) -> Result { + let is_empty = match self.post_state.storages.get(&self.hashed_address) { + Some(storage) => { + // If the storage has been wiped at any point + storage.wiped && + // and the current storage does not contain any non-zero values + storage.non_zero_valued_slots.is_empty() + } + None => self.cursor.is_storage_empty()?, + }; + Ok(is_empty) + } +} + #[cfg(test)] mod tests { use super::*; @@ -363,6 +373,7 @@ mod tests { use reth_db::{ database::Database, tables, test_utils::create_test_rw_db, transaction::DbTxMut, }; + use reth_primitives::StorageEntry; use std::collections::BTreeMap; fn assert_account_cursor_order( @@ -391,11 +402,11 @@ mod tests { let mut expected_storage = storage.into_iter(); let first_storage = cursor.seek(B256::default()).unwrap(); - assert_eq!(first_storage.map(|e| (e.key, e.value)), expected_storage.next()); + assert_eq!(first_storage, expected_storage.next()); for expected_entry in expected_storage { let next_cursor_storage = cursor.next().unwrap(); - assert_eq!(next_cursor_storage.map(|e| (e.key, e.value)), Some(expected_entry)); + assert_eq!(next_cursor_storage, Some(expected_entry)); } assert!(cursor.next().unwrap().is_none()); diff --git a/crates/trie/src/node_iter.rs b/crates/trie/src/node_iter.rs index 3a621a38b1f59..e0faad6c2e170 100644 --- a/crates/trie/src/node_iter.rs +++ b/crates/trie/src/node_iter.rs @@ -1,10 +1,10 @@ use crate::{ - hashed_cursor::{HashedAccountCursor, HashedCursorFactory, HashedStorageCursor}, + hashed_cursor::{HashedCursor, HashedCursorFactory, HashedStorageCursor}, trie_cursor::TrieCursor, walker::TrieWalker, }; use reth_db::DatabaseError; -use reth_primitives::{trie::Nibbles, Account, StorageEntry, B256, U256}; +use reth_primitives::{trie::Nibbles, Account, B256, U256}; /// Represents a branch node in the trie. 
#[derive(Debug)] @@ -90,7 +90,7 @@ impl AccountNodeIter { impl AccountNodeIter where C: TrieCursor, - H: HashedAccountCursor, + H: HashedCursor, { /// Return the next account trie node to be added to the hash builder. /// @@ -168,7 +168,7 @@ pub struct StorageNodeIter { pub hashed_storage_cursor: H, /// Current hashed storage entry. - current_hashed_entry: Option, + current_hashed_entry: Option<(B256, U256)>, /// Flag indicating whether we should check the current walker key. current_walker_key_checked: bool, } @@ -188,7 +188,7 @@ impl StorageNodeIter { impl StorageNodeIter where C: TrieCursor, - H: HashedStorageCursor, + H: HashedStorageCursor, { /// Return the next storage trie node to be added to the hash builder. /// @@ -219,8 +219,7 @@ where } // Check for a current hashed storage entry. - if let Some(StorageEntry { key: hashed_key, value }) = self.current_hashed_entry.take() - { + if let Some((hashed_key, value)) = self.current_hashed_entry.take() { // Compare keys and proceed accordingly. if self.walker.key().map_or(false, |key| key < &Nibbles::unpack(hashed_key)) { self.current_walker_key_checked = false; @@ -233,14 +232,15 @@ where } // Attempt to get the next unprocessed key from the walker. - if let Some(seek_key) = self.walker.next_unprocessed_key() { - // Seek and update the current hashed entry based on the new seek key. - self.current_hashed_entry = self.hashed_storage_cursor.seek(seek_key)?; - self.walker.advance()?; - } else { + match self.walker.next_unprocessed_key() { + Some(seek_key) => { + // Seek and update the current hashed entry based on the new seek key. + self.current_hashed_entry = self.hashed_storage_cursor.seek(seek_key)?; + self.walker.advance()?; + } // No more keys to process, break the loop. - break - } + None => break, + }; } Ok(None) // Return None if no more nodes are available. 
From cfc13444541f1f4cc05fdba0721e3f0d25a3c57d Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 27 May 2024 15:07:48 +0200 Subject: [PATCH 638/700] chore(trie): dedup node iters (#8381) --- crates/trie-parallel/src/async_root.rs | 25 +-- crates/trie-parallel/src/parallel_root.rs | 23 +-- crates/trie/src/hashed_cursor/mod.rs | 2 +- crates/trie/src/node_iter.rs | 199 +++++----------------- crates/trie/src/proof.rs | 14 +- crates/trie/src/trie.rs | 21 ++- 6 files changed, 89 insertions(+), 195 deletions(-) diff --git a/crates/trie-parallel/src/async_root.rs b/crates/trie-parallel/src/async_root.rs index 68595ed951194..9665c09295cd1 100644 --- a/crates/trie-parallel/src/async_root.rs +++ b/crates/trie-parallel/src/async_root.rs @@ -10,8 +10,8 @@ use reth_primitives::{ use reth_provider::{providers::ConsistentDbView, DatabaseProviderFactory, ProviderError}; use reth_tasks::pool::BlockingTaskPool; use reth_trie::{ - hashed_cursor::HashedPostStateCursorFactory, - node_iter::{AccountNode, AccountNodeIter}, + hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, + node_iter::{TrieElement, TrieNodeIter}, trie_cursor::TrieCursorFactory, updates::TrieUpdates, walker::TrieWalker, @@ -131,23 +131,24 @@ where let hashed_cursor_factory = HashedPostStateCursorFactory::new(tx, &hashed_state_sorted); let trie_cursor_factory = tx; - let trie_cursor = - trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?; + let walker = TrieWalker::new( + trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, + prefix_sets.account_prefix_set, + ) + .with_updates(retain_updates); + let mut account_node_iter = TrieNodeIter::new( + walker, + hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, + ); let mut hash_builder = HashBuilder::default().with_updates(retain_updates); - let walker = TrieWalker::new(trie_cursor, prefix_sets.account_prefix_set) - .with_updates(retain_updates); - let mut account_node_iter = - 
AccountNodeIter::from_factory(walker, hashed_cursor_factory.clone()) - .map_err(ProviderError::Database)?; - let mut account_rlp = Vec::with_capacity(128); while let Some(node) = account_node_iter.try_next().map_err(ProviderError::Database)? { match node { - AccountNode::Branch(node) => { + TrieElement::Branch(node) => { hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); } - AccountNode::Leaf(hashed_address, account) => { + TrieElement::Leaf(hashed_address, account) => { let (storage_root, _, updates) = match storage_roots.remove(&hashed_address) { Some(rx) => rx.await.map_err(|_| { AsyncStateRootError::StorageRootChannelClosed { hashed_address } diff --git a/crates/trie-parallel/src/parallel_root.rs b/crates/trie-parallel/src/parallel_root.rs index 9abb8ac2cce9b..58957765201ff 100644 --- a/crates/trie-parallel/src/parallel_root.rs +++ b/crates/trie-parallel/src/parallel_root.rs @@ -10,7 +10,7 @@ use reth_primitives::{ use reth_provider::{providers::ConsistentDbView, DatabaseProviderFactory, ProviderError}; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, - node_iter::{AccountNode, AccountNodeIter}, + node_iter::{TrieElement, TrieNodeIter}, trie_cursor::TrieCursorFactory, updates::TrieUpdates, walker::TrieWalker, @@ -115,23 +115,24 @@ where HashedPostStateCursorFactory::new(provider_ro.tx_ref(), &hashed_state_sorted); let trie_cursor_factory = provider_ro.tx_ref(); - let hashed_account_cursor = - hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?; - let trie_cursor = - trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?; + let walker = TrieWalker::new( + trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, + prefix_sets.account_prefix_set, + ) + .with_updates(retain_updates); + let mut account_node_iter = TrieNodeIter::new( + walker, + hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, + ); - let 
walker = TrieWalker::new(trie_cursor, prefix_sets.account_prefix_set) - .with_updates(retain_updates); - let mut account_node_iter = AccountNodeIter::new(walker, hashed_account_cursor); let mut hash_builder = HashBuilder::default().with_updates(retain_updates); - let mut account_rlp = Vec::with_capacity(128); while let Some(node) = account_node_iter.try_next().map_err(ProviderError::Database)? { match node { - AccountNode::Branch(node) => { + TrieElement::Branch(node) => { hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); } - AccountNode::Leaf(hashed_address, account) => { + TrieElement::Leaf(hashed_address, account) => { let (storage_root, _, updates) = match storage_roots.remove(&hashed_address) { Some(result) => result, // Since we do not store all intermediate nodes in the database, there might diff --git a/crates/trie/src/hashed_cursor/mod.rs b/crates/trie/src/hashed_cursor/mod.rs index edfd0cb765bc8..05de76721d59d 100644 --- a/crates/trie/src/hashed_cursor/mod.rs +++ b/crates/trie/src/hashed_cursor/mod.rs @@ -28,7 +28,7 @@ pub trait HashedCursorFactory { /// The cursor for iterating over hashed entries. pub trait HashedCursor { /// Value returned by the cursor. - type Value; + type Value: std::fmt::Debug; /// Seek an entry greater or equal to the given key and position the cursor there. /// Returns the first entry with the key greater or equal to the sought key. 
diff --git a/crates/trie/src/node_iter.rs b/crates/trie/src/node_iter.rs index e0faad6c2e170..19cbd89964cb5 100644 --- a/crates/trie/src/node_iter.rs +++ b/crates/trie/src/node_iter.rs @@ -1,10 +1,6 @@ -use crate::{ - hashed_cursor::{HashedCursor, HashedCursorFactory, HashedStorageCursor}, - trie_cursor::TrieCursor, - walker::TrieWalker, -}; +use crate::{hashed_cursor::HashedCursor, trie_cursor::TrieCursor, walker::TrieWalker}; use reth_db::DatabaseError; -use reth_primitives::{trie::Nibbles, Account, B256, U256}; +use reth_primitives::{trie::Nibbles, B256}; /// Represents a branch node in the trie. #[derive(Debug)] @@ -24,95 +20,80 @@ impl TrieBranchNode { } } -/// Represents a variant of an account node. +/// Represents variants of trie nodes returned by the iteration. #[derive(Debug)] -pub enum AccountNode { +pub enum TrieElement { /// Branch node. Branch(TrieBranchNode), /// Leaf node. - Leaf(B256, Account), -} - -/// Represents a variant of a storage node. -#[derive(Debug)] -pub enum StorageNode { - /// Branch node. - Branch(TrieBranchNode), - /// Leaf node. - Leaf(B256, U256), + Leaf(B256, Value), } /// An iterator over existing intermediate branch nodes and updated leaf nodes. #[derive(Debug)] -pub struct AccountNodeIter { - /// Underlying walker over intermediate nodes. +pub struct TrieNodeIter { + /// The walker over intermediate nodes. pub walker: TrieWalker, - /// The cursor for the hashed account entries. - pub hashed_account_cursor: H, - /// The previous account key. If the iteration was previously interrupted, this value can be + /// The cursor for the hashed entries. + pub hashed_cursor: H, + /// The previous hashed key. If the iteration was previously interrupted, this value can be /// used to resume iterating from the last returned leaf node. - previous_account_key: Option, + previous_hashed_key: Option, - /// Current hashed account entry. - current_hashed_entry: Option<(B256, Account)>, + /// Current hashed entry. 
+ current_hashed_entry: Option<(B256, ::Value)>, /// Flag indicating whether we should check the current walker key. current_walker_key_checked: bool, } -impl AccountNodeIter { - /// Creates a new `AccountNodeIter`. - pub fn new(walker: TrieWalker, hashed_account_cursor: H) -> Self { +impl TrieNodeIter { + /// Creates a new [TrieNodeIter]. + pub fn new(walker: TrieWalker, hashed_cursor: H) -> Self { Self { walker, - hashed_account_cursor, - previous_account_key: None, + hashed_cursor, + previous_hashed_key: None, current_hashed_entry: None, current_walker_key_checked: false, } } - /// Create new `AccountNodeIter` by creating hashed account cursor from factory. - pub fn from_factory>( - walker: TrieWalker, - factory: F, - ) -> Result { - Ok(Self::new(walker, factory.hashed_account_cursor()?)) - } - - /// Sets the last iterated account key and returns the modified `AccountNodeIter`. + /// Sets the last iterated hashed key and returns the modified [TrieNodeIter]. /// This is used to resume iteration from the last checkpoint. - pub fn with_last_account_key(mut self, previous_account_key: B256) -> Self { - self.previous_account_key = Some(previous_account_key); + pub fn with_last_hashed_key(mut self, previous_hashed_key: B256) -> Self { + self.previous_hashed_key = Some(previous_hashed_key); self } } -impl AccountNodeIter +impl TrieNodeIter where C: TrieCursor, - H: HashedCursor, + H: HashedCursor, { - /// Return the next account trie node to be added to the hash builder. + /// Return the next trie node to be added to the hash builder. /// /// Returns the nodes using this algorithm: /// 1. Return the current intermediate branch node if it hasn't been updated. /// 2. Advance the trie walker to the next intermediate branch node and retrieve next /// unprocessed key. - /// 3. Reposition the hashed account cursor on the next unprocessed key. - /// 4. Return every hashed account entry up to the key of the current intermediate branch node. + /// 3. 
Reposition the hashed cursor on the next unprocessed key. + /// 4. Return every hashed entry up to the key of the current intermediate branch node. /// 5. Repeat. /// /// NOTE: The iteration will start from the key of the previous hashed entry if it was supplied. - pub fn try_next(&mut self) -> Result, DatabaseError> { + pub fn try_next( + &mut self, + ) -> Result::Value>>, DatabaseError> { loop { // If the walker has a key... if let Some(key) = self.walker.key() { - // Check if the current walker key is unchecked and there's no previous account key - if !self.current_walker_key_checked && self.previous_account_key.is_none() { + // Check if the current walker key is unchecked and there's no previous hashed key + if !self.current_walker_key_checked && self.previous_hashed_key.is_none() { self.current_walker_key_checked = true; // If it's possible to skip the current node in the walker, return a branch node if self.walker.can_skip_current_node { - return Ok(Some(AccountNode::Branch(TrieBranchNode::new( + return Ok(Some(TrieElement::Branch(TrieBranchNode::new( key.clone(), self.walker.hash().unwrap(), self.walker.children_are_in_trie(), @@ -121,26 +102,26 @@ where } } - // If there's a hashed address and account... - if let Some((hashed_address, account)) = self.current_hashed_entry.take() { - // If the walker's key is less than the unpacked hashed address, reset the checked - // status and continue - if self.walker.key().map_or(false, |key| key < &Nibbles::unpack(hashed_address)) { + // If there's a hashed entry... 
+ if let Some((hashed_key, value)) = self.current_hashed_entry.take() { + // If the walker's key is less than the unpacked hashed key, + // reset the checked status and continue + if self.walker.key().map_or(false, |key| key < &Nibbles::unpack(hashed_key)) { self.current_walker_key_checked = false; continue } // Set the next hashed entry as a leaf node and return - self.current_hashed_entry = self.hashed_account_cursor.next()?; - return Ok(Some(AccountNode::Leaf(hashed_address, account))) + self.current_hashed_entry = self.hashed_cursor.next()?; + return Ok(Some(TrieElement::Leaf(hashed_key, value))) } - // Handle seeking and advancing based on the previous account key - match self.previous_account_key.take() { - Some(account_key) => { - // Seek to the previous account key and get the next hashed entry - self.hashed_account_cursor.seek(account_key)?; - self.current_hashed_entry = self.hashed_account_cursor.next()?; + // Handle seeking and advancing based on the previous hashed key + match self.previous_hashed_key.take() { + Some(hashed_key) => { + // Seek to the previous hashed key and get the next hashed entry + self.hashed_cursor.seek(hashed_key)?; + self.current_hashed_entry = self.hashed_cursor.next()?; } None => { // Get the seek key and set the current hashed entry based on walker's next @@ -149,7 +130,7 @@ where Some(key) => key, None => break, // no more keys }; - self.current_hashed_entry = self.hashed_account_cursor.seek(seek_key)?; + self.current_hashed_entry = self.hashed_cursor.seek(seek_key)?; self.walker.advance()?; } } @@ -158,91 +139,3 @@ where Ok(None) } } - -/// An iterator over existing intermediate storage branch nodes and updated leaf nodes. -#[derive(Debug)] -pub struct StorageNodeIter { - /// Underlying walker over intermediate nodes. - pub walker: TrieWalker, - /// The cursor for the hashed storage entries. - pub hashed_storage_cursor: H, - - /// Current hashed storage entry. 
- current_hashed_entry: Option<(B256, U256)>, - /// Flag indicating whether we should check the current walker key. - current_walker_key_checked: bool, -} - -impl StorageNodeIter { - /// Creates a new instance of StorageNodeIter. - pub fn new(walker: TrieWalker, hashed_storage_cursor: H) -> Self { - Self { - walker, - hashed_storage_cursor, - current_walker_key_checked: false, - current_hashed_entry: None, - } - } -} - -impl StorageNodeIter -where - C: TrieCursor, - H: HashedStorageCursor, -{ - /// Return the next storage trie node to be added to the hash builder. - /// - /// Returns the nodes using this algorithm: - /// 1. Return the current intermediate branch node if it hasn't been updated. - /// 2. Advance the trie walker to the next intermediate branch node and retrieve next - /// unprocessed key. - /// 3. Reposition the hashed storage cursor on the next unprocessed key. - /// 4. Return every hashed storage entry up to the key of the current intermediate branch node. - /// 5. Repeat. - pub fn try_next(&mut self) -> Result, DatabaseError> { - loop { - // Check if there's a key in the walker. - if let Some(key) = self.walker.key() { - // Check if the walker key hasn't been checked yet. - if !self.current_walker_key_checked { - self.current_walker_key_checked = true; - // Check if the current node can be skipped in the walker. - if self.walker.can_skip_current_node { - // Return a branch node based on the walker's properties. - return Ok(Some(StorageNode::Branch(TrieBranchNode::new( - key.clone(), - self.walker.hash().unwrap(), - self.walker.children_are_in_trie(), - )))) - } - } - } - - // Check for a current hashed storage entry. - if let Some((hashed_key, value)) = self.current_hashed_entry.take() { - // Compare keys and proceed accordingly. 
- if self.walker.key().map_or(false, |key| key < &Nibbles::unpack(hashed_key)) { - self.current_walker_key_checked = false; - continue - } - - // Move to the next hashed storage entry and return the corresponding leaf node. - self.current_hashed_entry = self.hashed_storage_cursor.next()?; - return Ok(Some(StorageNode::Leaf(hashed_key, value))) - } - - // Attempt to get the next unprocessed key from the walker. - match self.walker.next_unprocessed_key() { - Some(seek_key) => { - // Seek and update the current hashed entry based on the new seek key. - self.current_hashed_entry = self.hashed_storage_cursor.seek(seek_key)?; - self.walker.advance()?; - } - // No more keys to process, break the loop. - None => break, - }; - } - - Ok(None) // Return None if no more nodes are available. - } -} diff --git a/crates/trie/src/proof.rs b/crates/trie/src/proof.rs index 094372a851fa2..56e1acb861232 100644 --- a/crates/trie/src/proof.rs +++ b/crates/trie/src/proof.rs @@ -1,6 +1,6 @@ use crate::{ hashed_cursor::{HashedCursorFactory, HashedStorageCursor}, - node_iter::{AccountNode, AccountNodeIter, StorageNode, StorageNodeIter}, + node_iter::{TrieElement, TrieNodeIter}, prefix_set::PrefixSetMut, trie_cursor::{DatabaseAccountTrieCursor, DatabaseStorageTrieCursor}, walker::TrieWalker, @@ -64,13 +64,13 @@ where let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); let mut account_rlp = Vec::with_capacity(128); - let mut account_node_iter = AccountNodeIter::new(walker, hashed_account_cursor); + let mut account_node_iter = TrieNodeIter::new(walker, hashed_account_cursor); while let Some(account_node) = account_node_iter.try_next()? 
{ match account_node { - AccountNode::Branch(node) => { + TrieElement::Branch(node) => { hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); } - AccountNode::Leaf(hashed_address, account) => { + TrieElement::Leaf(hashed_address, account) => { let storage_root = if hashed_address == target_hashed_address { let (storage_root, storage_proofs) = self.storage_root_with_proofs(hashed_address, slots)?; @@ -129,13 +129,13 @@ where let retainer = ProofRetainer::from_iter(target_nibbles); let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); - let mut storage_node_iter = StorageNodeIter::new(walker, hashed_storage_cursor); + let mut storage_node_iter = TrieNodeIter::new(walker, hashed_storage_cursor); while let Some(node) = storage_node_iter.try_next()? { match node { - StorageNode::Branch(node) => { + TrieElement::Branch(node) => { hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); } - StorageNode::Leaf(hashed_slot, value) => { + TrieElement::Leaf(hashed_slot, value) => { let nibbles = Nibbles::unpack(hashed_slot); if let Some(proof) = proofs.iter_mut().find(|proof| proof.nibbles == nibbles) { proof.set_value(value); diff --git a/crates/trie/src/trie.rs b/crates/trie/src/trie.rs index 55ee1bebdbed6..5f24d1f05165d 100644 --- a/crates/trie/src/trie.rs +++ b/crates/trie/src/trie.rs @@ -1,6 +1,6 @@ use crate::{ hashed_cursor::{HashedCursorFactory, HashedStorageCursor}, - node_iter::{AccountNode, AccountNodeIter, StorageNode, StorageNodeIter}, + node_iter::{TrieElement, TrieNodeIter}, prefix_set::{PrefixSet, PrefixSetLoader, TriePrefixSets}, progress::{IntermediateStateRootState, StateRootProgress}, stats::TrieTracker, @@ -216,6 +216,7 @@ where let trie_cursor = self.trie_cursor_factory.account_trie_cursor()?; + let hashed_account_cursor = self.hashed_cursor_factory.hashed_account_cursor()?; let (mut hash_builder, mut account_node_iter) = match self.previous_state { Some(state) => { let hash_builder = 
state.hash_builder.with_updates(retain_updates); @@ -225,17 +226,15 @@ where self.prefix_sets.account_prefix_set, ) .with_updates(retain_updates); - let node_iter = - AccountNodeIter::from_factory(walker, self.hashed_cursor_factory.clone())? - .with_last_account_key(state.last_account_key); + let node_iter = TrieNodeIter::new(walker, hashed_account_cursor) + .with_last_hashed_key(state.last_account_key); (hash_builder, node_iter) } None => { let hash_builder = HashBuilder::default().with_updates(retain_updates); let walker = TrieWalker::new(trie_cursor, self.prefix_sets.account_prefix_set) .with_updates(retain_updates); - let node_iter = - AccountNodeIter::from_factory(walker, self.hashed_cursor_factory.clone())?; + let node_iter = TrieNodeIter::new(walker, hashed_account_cursor); (hash_builder, node_iter) } }; @@ -244,11 +243,11 @@ where let mut hashed_entries_walked = 0; while let Some(node) = account_node_iter.try_next()? { match node { - AccountNode::Branch(node) => { + TrieElement::Branch(node) => { tracker.inc_branch(); hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); } - AccountNode::Leaf(hashed_address, account) => { + TrieElement::Leaf(hashed_address, account) => { tracker.inc_leaf(); hashed_entries_walked += 1; @@ -501,14 +500,14 @@ where let mut hash_builder = HashBuilder::default().with_updates(retain_updates); - let mut storage_node_iter = StorageNodeIter::new(walker, hashed_storage_cursor); + let mut storage_node_iter = TrieNodeIter::new(walker, hashed_storage_cursor); while let Some(node) = storage_node_iter.try_next()? 
{ match node { - StorageNode::Branch(node) => { + TrieElement::Branch(node) => { tracker.inc_branch(); hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); } - StorageNode::Leaf(hashed_slot, value) => { + TrieElement::Leaf(hashed_slot, value) => { tracker.inc_leaf(); hash_builder.add_leaf( Nibbles::unpack(hashed_slot), From beaa0fe2622042392c7d309c490dbeb6f9769ffa Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 27 May 2024 15:32:16 +0200 Subject: [PATCH 639/700] chore: add internal boxed error variant (#8412) --- crates/consensus/beacon/src/engine/error.rs | 24 ++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/crates/consensus/beacon/src/engine/error.rs b/crates/consensus/beacon/src/engine/error.rs index 245199f53223c..92bd031e5666c 100644 --- a/crates/consensus/beacon/src/engine/error.rs +++ b/crates/consensus/beacon/src/engine/error.rs @@ -52,22 +52,29 @@ pub enum BeaconForkChoiceUpdateError { /// Thrown when a forkchoice update resulted in an error. #[error("forkchoice update error: {0}")] ForkchoiceUpdateError(#[from] ForkchoiceUpdateError), - /// Internal errors, for example, error while reading from the database. - #[error(transparent)] - Internal(Box), /// Thrown when the engine task is unavailable/stopped. #[error("beacon consensus engine task stopped")] EngineUnavailable, + /// An internal error occurred, not necessarily related to the update. + #[error(transparent)] + Internal(Box), +} + +impl BeaconForkChoiceUpdateError { + /// Create a new internal error. 
+ pub fn internal(e: E) -> Self { + Self::Internal(Box::new(e)) + } } impl From for BeaconForkChoiceUpdateError { fn from(e: RethError) -> Self { - Self::Internal(Box::new(e)) + Self::internal(e) } } impl From for BeaconForkChoiceUpdateError { fn from(e: reth_interfaces::db::DatabaseError) -> Self { - Self::Internal(Box::new(e.into())) + Self::internal(e) } } @@ -87,3 +94,10 @@ pub enum BeaconOnNewPayloadError { #[error(transparent)] Internal(Box), } + +impl BeaconOnNewPayloadError { + /// Create a new internal error. + pub fn internal(e: E) -> Self { + Self::Internal(Box::new(e)) + } +} From a14e54922ae3ef3bb56c0b391d0aac2d65ec28f9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 27 May 2024 15:32:33 +0200 Subject: [PATCH 640/700] chore: remove more reth-interfaces from provider (#8410) --- .../provider/src/providers/database/mod.rs | 17 +++++++---------- .../provider/src/providers/database/provider.rs | 11 ++++------- .../provider/src/traits/header_sync_gap.rs | 5 +++-- .../storage/provider/src/traits/tree_viewer.rs | 3 +-- testing/ef-tests/src/cases/blockchain_test.rs | 3 ++- 5 files changed, 17 insertions(+), 22 deletions(-) diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 60dc635eb3af2..4c7efafe8fc7a 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -51,7 +51,7 @@ impl ProviderFactory { db: DB, chain_spec: Arc, static_files_path: PathBuf, - ) -> RethResult> { + ) -> ProviderResult> { Ok(Self { db: Arc::new(db), chain_spec, @@ -169,7 +169,7 @@ impl HeaderSyncGapProvider for ProviderFactory { &self, mode: HeaderSyncMode, highest_uninterrupted_block: BlockNumber, - ) -> RethResult { + ) -> ProviderResult { self.provider()?.sync_gap(mode, highest_uninterrupted_block) } } @@ -570,7 +570,7 @@ impl Clone for ProviderFactory { } #[cfg(test)] mod tests { - use super::ProviderFactory; + use super::*; use 
crate::{ providers::StaticFileWriter, test_utils::create_test_provider_factory, BlockHashReader, BlockNumReader, BlockWriter, HeaderSyncGapProvider, HeaderSyncMode, TransactionsProvider, @@ -583,12 +583,9 @@ mod tests { tables, test_utils::{create_test_static_files_dir, ERROR_TEMPDIR}, }; - use reth_interfaces::{ - test_utils::{ - generators, - generators::{random_block, random_header}, - }, - RethError, + use reth_interfaces::test_utils::{ + generators, + generators::{random_block, random_header}, }; use reth_primitives::{ hex_literal::hex, ChainSpecBuilder, PruneMode, PruneModes, SealedBlock, StaticFileSegment, @@ -740,7 +737,7 @@ mod tests { // Empty database assert_matches!( provider.sync_gap(mode.clone(), checkpoint), - Err(RethError::Provider(ProviderError::HeaderNotFound(block_number))) + Err(ProviderError::HeaderNotFound(block_number)) if block_number.as_number().unwrap() == checkpoint ); diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 643bc23e65e4e..b3c2608bf7226 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -27,11 +27,7 @@ use reth_db::{ BlockNumberList, DatabaseError, }; use reth_evm::ConfigureEvmEnv; -use reth_interfaces::{ - p2p::headers::downloader::SyncTarget, - provider::{ProviderResult, RootMismatch}, - RethResult, -}; +use reth_interfaces::p2p::headers::downloader::SyncTarget; use reth_primitives::{ keccak256, revm::{config::revm_spec, env::fill_block_env}, @@ -43,6 +39,7 @@ use reth_primitives::{ StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, }; +use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, updates::TrieUpdates, @@ -1095,7 +1092,7 @@ impl HeaderSyncGapProvider 
for DatabaseProvider { &self, mode: HeaderSyncMode, highest_uninterrupted_block: BlockNumber, - ) -> RethResult { + ) -> ProviderResult { let static_file_provider = self.static_file_provider(); // Make sure Headers static file is at the same height. If it's further, this @@ -1119,7 +1116,7 @@ impl HeaderSyncGapProvider for DatabaseProvider { } Ordering::Less => { // There's either missing or corrupted files. - return Err(ProviderError::HeaderNotFound(next_static_file_block_num.into()).into()) + return Err(ProviderError::HeaderNotFound(next_static_file_block_num.into())) } Ordering::Equal => {} } diff --git a/crates/storage/provider/src/traits/header_sync_gap.rs b/crates/storage/provider/src/traits/header_sync_gap.rs index 576a26a9e8c74..8a60eb15a77d3 100644 --- a/crates/storage/provider/src/traits/header_sync_gap.rs +++ b/crates/storage/provider/src/traits/header_sync_gap.rs @@ -1,6 +1,7 @@ use auto_impl::auto_impl; -use reth_interfaces::{p2p::headers::downloader::SyncTarget, RethResult}; +use reth_interfaces::p2p::headers::downloader::SyncTarget; use reth_primitives::{BlockHashOrNumber, BlockNumber, SealedHeader, B256}; +use reth_storage_errors::provider::ProviderResult; use tokio::sync::watch; /// The header sync mode. 
@@ -46,5 +47,5 @@ pub trait HeaderSyncGapProvider: Send + Sync { &self, mode: HeaderSyncMode, highest_uninterrupted_block: BlockNumber, - ) -> RethResult; + ) -> ProviderResult; } diff --git a/crates/storage/provider/src/traits/tree_viewer.rs b/crates/storage/provider/src/traits/tree_viewer.rs index db3b19c4d611f..cd5eaab1835b0 100644 --- a/crates/storage/provider/src/traits/tree_viewer.rs +++ b/crates/storage/provider/src/traits/tree_viewer.rs @@ -1,6 +1,5 @@ use crate::{BlockchainTreePendingStateProvider, CanonStateSubscriptions}; - -use reth_interfaces::blockchain_tree::{BlockchainTreeEngine, BlockchainTreeViewer}; +use reth_blockchain_tree_api::{BlockchainTreeEngine, BlockchainTreeViewer}; /// Helper trait to combine all the traits we need for the BlockchainProvider /// diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index 27f62f886906c..6ea0fad702f91 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -87,7 +87,8 @@ impl Case for BlockchainTestCase { db.as_ref(), Arc::new(case.network.clone().into()), static_files_dir_path, - )? + ) + .map_err(|err| Error::RethError(err.into()))? .provider_rw() .unwrap(); From 07dfb9fdc43cf63d0312ac64f37dd1f43e6fc537 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 27 May 2024 15:36:56 +0200 Subject: [PATCH 641/700] chore(engine): tree action on downloaded block (#8409) --- crates/consensus/beacon/src/engine/mod.rs | 158 +++++++++++----------- 1 file changed, 79 insertions(+), 79 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index c1ef6228746e4..039c14e422d41 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1271,84 +1271,6 @@ where Ok(PayloadStatus::new(status, latest_valid_hash)) } - /// Invoked if we successfully downloaded a new block from the network. 
- /// - /// This will attempt to insert the block into the tree. - /// - /// There are several scenarios: - /// - /// ## [BlockStatus::Valid] - /// - /// The block is connected to the current canonical chain and is valid. - /// If the block is an ancestor of the current forkchoice head, then we can try again to make - /// the chain canonical. - /// - /// ## [BlockStatus::Disconnected] - /// - /// The block is not connected to the canonical chain, and we need to download the missing - /// parent first. - /// - /// ## Insert Error - /// - /// If the insertion into the tree failed, then the block was well-formed (valid hash), but its - /// chain is invalid, which means the FCU that triggered the download is invalid. Here we can - /// stop because there's nothing to do here and the engine needs to wait for another FCU. - fn on_downloaded_block(&mut self, block: SealedBlock) { - let downloaded_num_hash = block.num_hash(); - trace!(target: "consensus::engine", hash=?block.hash(), number=%block.number, "Downloaded full block"); - // check if the block's parent is already marked as invalid - if self.check_invalid_ancestor_with_head(block.parent_hash, block.hash()).is_some() { - // can skip this invalid block - return - } - - match self - .blockchain - .insert_block_without_senders(block, BlockValidationKind::SkipStateRootValidation) - { - Ok(status) => { - match status { - InsertPayloadOk::Inserted(BlockStatus::Valid(_)) => { - // block is connected to the canonical chain and is valid. - // if it's not connected to current canonical head, the state root - // has not been validated. 
- if let Err((hash, error)) = - self.try_make_sync_target_canonical(downloaded_num_hash) - { - if error.is_fatal() { - error!(target: "consensus::engine", %error, "Encountered fatal error while making sync target canonical: {:?}, {:?}", error, hash); - } else if !error.is_block_hash_not_found() { - debug!( - target: "consensus::engine", - "Unexpected error while making sync target canonical: {:?}, {:?}", - error, - hash - ) - } - } - } - InsertPayloadOk::Inserted(BlockStatus::Disconnected { - missing_ancestor: missing_parent, - }) => { - // block is not connected to the canonical head, we need to download its - // missing branch first - self.on_disconnected_block(downloaded_num_hash, missing_parent); - } - _ => (), - } - } - Err(err) => { - warn!(target: "consensus::engine", %err, "Failed to insert downloaded block"); - if err.kind().is_invalid_block() { - let (block, err) = err.split(); - warn!(target: "consensus::engine", invalid_number=?block.number, invalid_hash=?block.hash(), %err, "Marking block as invalid"); - - self.invalid_headers.insert(block.header); - } - } - } - } - /// This handles downloaded blocks that are shown to be disconnected from the canonical chain. 
/// /// This mainly compares the missing parent of the downloaded block with the current canonical @@ -1476,7 +1398,15 @@ where ) -> Result { let outcome = match event { EngineSyncEvent::FetchedFullBlock(block) => { - self.on_downloaded_block(block); + trace!(target: "consensus::engine", hash=?block.hash(), number=%block.number, "Downloaded full block"); + // Insert block only if the block's parent is not marked as invalid + if self.check_invalid_ancestor_with_head(block.parent_hash, block.hash()).is_none() + { + let previous_action = self + .blockchain_tree_action + .replace(BlockchainTreeAction::InsertDownloadedPayload { block }); + debug_assert!(previous_action.is_none(), "Pre-existing action found"); + } EngineEventOutcome::Processed } EngineSyncEvent::PipelineStarted(target) => { @@ -1794,6 +1724,55 @@ where trace!(target: "consensus::engine", ?status, "Returning payload status"); let _ = tx.send(Ok(status)); } + + BlockchainTreeAction::InsertDownloadedPayload { block } => { + let downloaded_num_hash = block.num_hash(); + match self.blockchain.insert_block_without_senders( + block, + BlockValidationKind::SkipStateRootValidation, + ) { + Ok(status) => { + match status { + InsertPayloadOk::Inserted(BlockStatus::Valid(_)) => { + // block is connected to the canonical chain and is valid. + // if it's not connected to current canonical head, the state root + // has not been validated. 
+ if let Err((hash, error)) = + self.try_make_sync_target_canonical(downloaded_num_hash) + { + if error.is_fatal() { + error!(target: "consensus::engine", %error, "Encountered fatal error while making sync target canonical: {:?}, {:?}", error, hash); + } else if !error.is_block_hash_not_found() { + debug!( + target: "consensus::engine", + "Unexpected error while making sync target canonical: {:?}, {:?}", + error, + hash + ) + } + } + } + InsertPayloadOk::Inserted(BlockStatus::Disconnected { + missing_ancestor: missing_parent, + }) => { + // block is not connected to the canonical head, we need to download + // its missing branch first + self.on_disconnected_block(downloaded_num_hash, missing_parent); + } + _ => (), + } + } + Err(err) => { + warn!(target: "consensus::engine", %err, "Failed to insert downloaded block"); + if err.kind().is_invalid_block() { + let (block, err) = err.split(); + warn!(target: "consensus::engine", invalid_number=?block.number, invalid_hash=?block.hash(), %err, "Marking block as invalid"); + + self.invalid_headers.insert(block.header); + } + } + } + } }; Ok(EngineEventOutcome::Processed) } @@ -1935,6 +1914,27 @@ enum BlockchainTreeAction { status: PayloadStatus, tx: oneshot::Sender>, }, + /// Action to insert a new block that we successfully downloaded from the network. + /// There are several outcomes for inserting a downloaded block into the tree: + /// + /// ## [BlockStatus::Valid] + /// + /// The block is connected to the current canonical chain and is valid. + /// If the block is an ancestor of the current forkchoice head, then we can try again to + /// make the chain canonical. + /// + /// ## [BlockStatus::Disconnected] + /// + /// The block is not connected to the canonical chain, and we need to download the + /// missing parent first. 
+ /// + /// ## Insert Error + /// + /// If the insertion into the tree failed, then the block was well-formed (valid hash), + /// but its chain is invalid, which means the FCU that triggered the + /// download is invalid. Here we can stop because there's nothing to do here + /// and the engine needs to wait for another FCU. + InsertDownloadedPayload { block: SealedBlock }, } /// Represents outcomes of processing an engine event From e54398308b7439e2297623a9117964d97d106586 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 27 May 2024 15:42:55 +0200 Subject: [PATCH 642/700] chore: rm NetworkError variant from RethError (#8413) --- Cargo.lock | 1 - .../beacon/src/engine/hooks/controller.rs | 2 +- crates/interfaces/Cargo.toml | 1 - crates/interfaces/src/error.rs | 30 ++++++++++++------- crates/prune/src/error.rs | 2 +- crates/rpc/rpc/src/eth/api/mod.rs | 4 +-- .../provider/src/providers/database/mod.rs | 10 ++++--- 7 files changed, 30 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 60f18e1d6144f..073bc5b5cbf30 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7073,7 +7073,6 @@ dependencies = [ "reth-consensus", "reth-execution-errors", "reth-fs-util", - "reth-network-api", "reth-network-p2p", "reth-storage-errors", "thiserror", diff --git a/crates/consensus/beacon/src/engine/hooks/controller.rs b/crates/consensus/beacon/src/engine/hooks/controller.rs index 7916928dbe7a6..a2845c9cce175 100644 --- a/crates/consensus/beacon/src/engine/hooks/controller.rs +++ b/crates/consensus/beacon/src/engine/hooks/controller.rs @@ -330,7 +330,7 @@ mod tests { let hook_ro_name = "read-only"; let mut hook_ro = TestHook::new_ro(hook_ro_name); hook_ro.add_result(Ok(EngineHookEvent::Started)); - hook_ro.add_result(Err(RethError::Custom("something went wrong".to_string()))); + hook_ro.add_result(Err(RethError::msg("something went wrong"))); let mut hooks = EngineHooks::new(); hooks.add(hook_rw_1); diff --git a/crates/interfaces/Cargo.toml 
b/crates/interfaces/Cargo.toml index a5c01ecb92c7e..836ddebb5d34b 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -15,7 +15,6 @@ reth-blockchain-tree-api.workspace = true reth-consensus.workspace = true reth-execution-errors.workspace = true reth-fs-util.workspace = true -reth-network-api.workspace = true reth-network-p2p.workspace = true reth-storage-errors.workspace = true diff --git a/crates/interfaces/src/error.rs b/crates/interfaces/src/error.rs index f38742ab51fbb..ddb4e151f4661 100644 --- a/crates/interfaces/src/error.rs +++ b/crates/interfaces/src/error.rs @@ -2,8 +2,8 @@ use crate::blockchain_tree::error::{BlockchainTreeError, CanonicalError}; use reth_consensus::ConsensusError; use reth_execution_errors::BlockExecutionError; use reth_fs_util::FsPathError; -use reth_network_api::NetworkError; use reth_storage_errors::{db::DatabaseError, provider::ProviderError}; +use std::fmt::Display; /// Result alias for [`RethError`]. pub type RethResult = Result; @@ -31,17 +31,28 @@ pub enum RethError { #[error(transparent)] Provider(#[from] ProviderError), - /// Errors related to networking. - #[error(transparent)] - Network(#[from] NetworkError), - /// Canonical errors encountered. #[error(transparent)] Canonical(#[from] CanonicalError), - /// Custom error message. - #[error("{0}")] - Custom(String), + /// Any other error. + #[error(transparent)] + Other(Box), +} + +impl RethError { + /// Create a new `RethError` from a given error. + pub fn other(error: E) -> Self + where + E: std::error::Error + Send + Sync + 'static, + { + RethError::Other(Box::new(error)) + } + + /// Create a new `RethError` from a given message. 
+ pub fn msg(msg: impl Display) -> Self { + RethError::Other(msg.to_string().into()) + } } impl From for RethError { @@ -52,7 +63,7 @@ impl From for RethError { impl From for RethError { fn from(err: FsPathError) -> Self { - RethError::Custom(err.to_string()) + RethError::other(err) } } @@ -72,6 +83,5 @@ mod size_asserts { static_assert_size!(ConsensusError, 48); static_assert_size!(DatabaseError, 40); static_assert_size!(ProviderError, 48); - static_assert_size!(NetworkError, 0); static_assert_size!(CanonicalError, 56); } diff --git a/crates/prune/src/error.rs b/crates/prune/src/error.rs index bdf5bacc1cdf7..49333b4db437c 100644 --- a/crates/prune/src/error.rs +++ b/crates/prune/src/error.rs @@ -26,7 +26,7 @@ impl From for RethError { fn from(err: PrunerError) -> Self { match err { PrunerError::PruneSegment(_) | PrunerError::InconsistentData(_) => { - RethError::Custom(err.to_string()) + RethError::other(err) } PrunerError::Interface(err) => err, PrunerError::Database(err) => RethError::Database(err), diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index 6c936808e9964..d7ec6a7db4573 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -14,7 +14,7 @@ use crate::eth::{ use async_trait::async_trait; use reth_evm::ConfigureEvm; -use reth_interfaces::RethResult; +use reth_interfaces::{RethError, RethResult}; use reth_network_api::NetworkInfo; use reth_primitives::{ revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, @@ -391,7 +391,7 @@ where /// /// Note: This returns an `U64`, since this should return as hex string. 
async fn protocol_version(&self) -> RethResult { - let status = self.network().network_status().await?; + let status = self.network().network_status().await.map_err(RethError::other)?; Ok(U64::from(status.protocol_version)) } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 4c7efafe8fc7a..e8987b7d44c31 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -7,7 +7,10 @@ use crate::{ ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use reth_db::{database::Database, init_db, models::StoredBlockBodyIndices, DatabaseEnv}; +use reth_db::{ + database::Database, init_db, mdbx::DatabaseArguments, models::StoredBlockBodyIndices, + DatabaseEnv, +}; use reth_evm::ConfigureEvmEnv; use reth_interfaces::{RethError, RethResult}; use reth_primitives::{ @@ -17,6 +20,7 @@ use reth_primitives::{ SealedHeader, StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, }; +use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ ops::{RangeBounds, RangeInclusive}, @@ -29,8 +33,6 @@ mod metrics; mod provider; pub use provider::{DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW}; -use reth_db::mdbx::DatabaseArguments; -use reth_storage_errors::provider::ProviderResult; /// A common provider that fetches data from a database or static file. 
/// @@ -87,7 +89,7 @@ impl ProviderFactory { static_files_path: PathBuf, ) -> RethResult { Ok(ProviderFactory:: { - db: Arc::new(init_db(path, args).map_err(|e| RethError::Custom(e.to_string()))?), + db: Arc::new(init_db(path, args).map_err(RethError::msg)?), chain_spec, static_file_provider: StaticFileProvider::new(static_files_path)?, }) From 4545b015ef3f6b51a72ca0f0a2c0a8cad13c6276 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 27 May 2024 17:47:05 +0200 Subject: [PATCH 643/700] chore(rpc): remove redundant trait bounds (#8414) --- crates/net/eth-wire-types/src/receipts.rs | 6 +- crates/rpc/rpc/src/eth/api/transactions.rs | 136 +++++++++------------ 2 files changed, 65 insertions(+), 77 deletions(-) diff --git a/crates/net/eth-wire-types/src/receipts.rs b/crates/net/eth-wire-types/src/receipts.rs index 3d653b594befb..72424b0bd0a7a 100644 --- a/crates/net/eth-wire-types/src/receipts.rs +++ b/crates/net/eth-wire-types/src/receipts.rs @@ -87,8 +87,9 @@ mod tests { ); } - #[test] // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 + #[test] + #[allow(clippy::needless_update)] fn encode_receipts() { let expected = hex!("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"); let mut data = vec![]; @@ -120,8 +121,9 @@ mod tests { assert_eq!(data, expected); } - #[test] // 
Test vector from: https://eips.ethereum.org/EIPS/eip-2481 + #[test] + #[allow(clippy::needless_update)] fn decode_receipts() { let data = hex!("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"); let request = RequestPair::::decode(&mut &data[..]).unwrap(); diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index dc01dc12c3859..17d2f85c0eb96 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -1367,11 +1367,7 @@ where impl EthApi where - Pool: TransactionPool + Clone + 'static, - Provider: - BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, - Network: NetworkInfo + Send + Sync + 'static, - EvmConfig: ConfigureEvm + 'static, + Self: Send + Sync + 'static, { /// Spawns the given closure on a new blocking tracing task async fn spawn_tracing_task_with(&self, f: F) -> EthResult @@ -1390,11 +1386,11 @@ where impl EthApi where - Pool: TransactionPool + Clone + 'static, + Pool: TransactionPool + 'static, Provider: BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, - Network: NetworkInfo + Send + Sync + 'static, - EvmConfig: ConfigureEvm + 'static, + Network: NetworkInfo + 'static, + EvmConfig: ConfigureEvm, { 
/// Returns the gas price if it is set, otherwise fetches a suggested gas price for legacy /// transactions. @@ -1449,13 +1445,67 @@ where None => self.blob_base_fee().await, } } + + pub(crate) fn sign_request( + &self, + from: &Address, + request: TypedTransactionRequest, + ) -> EthResult { + for signer in self.inner.signers.read().iter() { + if signer.is_signer_for(from) { + return match signer.sign_transaction(request, from) { + Ok(tx) => Ok(tx), + Err(e) => Err(e.into()), + } + } + } + Err(EthApiError::InvalidTransactionSignature) + } + + /// Get Transaction by [BlockId] and the index of the transaction within that Block. + /// + /// Returns `Ok(None)` if the block does not exist, or the block as fewer transactions + pub(crate) async fn transaction_by_block_and_tx_index( + &self, + block_id: impl Into, + index: Index, + ) -> EthResult> { + if let Some(block) = self.block_with_senders(block_id.into()).await? { + let block_hash = block.hash(); + let block_number = block.number; + let base_fee_per_gas = block.base_fee_per_gas; + if let Some(tx) = block.into_transactions_ecrecovered().nth(index.into()) { + return Ok(Some(from_recovered_with_block_context( + tx, + block_hash, + block_number, + base_fee_per_gas, + index.into(), + ))) + } + } + + Ok(None) + } + + pub(crate) async fn raw_transaction_by_block_and_tx_index( + &self, + block_id: impl Into, + index: Index, + ) -> EthResult> { + if let Some(block) = self.block_with_senders(block_id.into()).await? 
{ + if let Some(tx) = block.transactions().nth(index.into()) { + return Ok(Some(tx.envelope_encoded())) + } + } + + Ok(None) + } } impl EthApi where - Provider: - BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, - Network: NetworkInfo + 'static, + Provider: BlockReaderIdExt + ChainSpecProvider, { /// Helper function for `eth_getTransactionReceipt` /// @@ -1545,70 +1595,6 @@ where } } -impl EthApi -where - Pool: TransactionPool + 'static, - Provider: - BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, - Network: NetworkInfo + Send + Sync + 'static, - EvmConfig: ConfigureEvm + 'static, -{ - pub(crate) fn sign_request( - &self, - from: &Address, - request: TypedTransactionRequest, - ) -> EthResult { - for signer in self.inner.signers.read().iter() { - if signer.is_signer_for(from) { - return match signer.sign_transaction(request, from) { - Ok(tx) => Ok(tx), - Err(e) => Err(e.into()), - } - } - } - Err(EthApiError::InvalidTransactionSignature) - } - - /// Get Transaction by [BlockId] and the index of the transaction within that Block. - /// - /// Returns `Ok(None)` if the block does not exist, or the block as fewer transactions - pub(crate) async fn transaction_by_block_and_tx_index( - &self, - block_id: impl Into, - index: Index, - ) -> EthResult> { - if let Some(block) = self.block_with_senders(block_id.into()).await? { - let block_hash = block.hash(); - let block_number = block.number; - let base_fee_per_gas = block.base_fee_per_gas; - if let Some(tx) = block.into_transactions_ecrecovered().nth(index.into()) { - return Ok(Some(from_recovered_with_block_context( - tx, - block_hash, - block_number, - base_fee_per_gas, - index.into(), - ))) - } - } - - Ok(None) - } - - pub(crate) async fn raw_transaction_by_block_and_tx_index( - &self, - block_id: impl Into, - index: Index, - ) -> EthResult> { - if let Some(block) = self.block_with_senders(block_id.into()).await? 
{ - if let Some(tx) = block.transactions().nth(index.into()) { - return Ok(Some(tx.envelope_encoded())) - } - } - - Ok(None) - } -} /// Represents from where a transaction was fetched. #[derive(Debug, Clone, Eq, PartialEq)] pub enum TransactionSource { From c8a18a2f268d4417fabf06903bf0d3ba00b7942a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 27 May 2024 18:02:17 +0200 Subject: [PATCH 644/700] chore: fix lint (#8415) From 749d68b5e4f53b463722a171b19e126c31fce2ba Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 27 May 2024 19:47:09 +0200 Subject: [PATCH 645/700] chore: move EvmEnvProvider (#8420) --- Cargo.lock | 1 + crates/evm/Cargo.toml | 1 + crates/evm/src/lib.rs | 1 + .../src/traits/evm_env.rs => evm/src/provider.rs} | 9 ++++++--- crates/storage/provider/src/traits/mod.rs | 4 ++-- 5 files changed, 11 insertions(+), 5 deletions(-) rename crates/{storage/provider/src/traits/evm_env.rs => evm/src/provider.rs} (94%) diff --git a/Cargo.lock b/Cargo.lock index 073bc5b5cbf30..b9496703d735c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6989,6 +6989,7 @@ dependencies = [ name = "reth-evm" version = "0.2.0-beta.7" dependencies = [ + "auto_impl", "futures-util", "parking_lot 0.12.3", "reth-execution-errors", diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index 183d9f694c553..bc94dcd1753bc 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -19,6 +19,7 @@ reth-storage-errors.workspace = true revm.workspace = true +auto_impl.workspace = true futures-util.workspace = true parking_lot = { workspace = true, optional = true } diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 94cac8bccd4eb..93e8035258cf7 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -16,6 +16,7 @@ use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, SpecId, pub mod either; pub mod execute; +pub mod provider; #[cfg(any(test, feature = "test-utils"))] /// test helpers for mocking executor diff --git 
a/crates/storage/provider/src/traits/evm_env.rs b/crates/evm/src/provider.rs similarity index 94% rename from crates/storage/provider/src/traits/evm_env.rs rename to crates/evm/src/provider.rs index cecedad0c912d..abf04be8938c3 100644 --- a/crates/storage/provider/src/traits/evm_env.rs +++ b/crates/evm/src/provider.rs @@ -1,12 +1,15 @@ -use reth_evm::ConfigureEvmEnv; +//! Provider trait for populating the EVM environment. + +use crate::ConfigureEvmEnv; use reth_primitives::{BlockHashOrNumber, Header}; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; -/// A provider type that knows chain specific information required to configure an +/// A provider type that knows chain specific information required to configure a /// [CfgEnvWithHandlerCfg]. /// -/// This type is mainly used to provide required data to configure the EVM environment. +/// This type is mainly used to provide required data to configure the EVM environment that is +/// usually stored on disk. 
#[auto_impl::auto_impl(&, Arc)] pub trait EvmEnvProvider: Send + Sync { /// Fills the [CfgEnvWithHandlerCfg] and [BlockEnv] fields with values specific to the given diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 10984240a5d84..b63c8298dfcef 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -18,8 +18,8 @@ pub use block_hash::BlockHashReader; mod block_id; pub use block_id::{BlockIdReader, BlockNumReader}; -mod evm_env; -pub use evm_env::EvmEnvProvider; +// Re-export for convenience +pub use reth_evm::provider::EvmEnvProvider; mod chain_info; pub use chain_info::CanonChainTracker; From 21b23862fdff694604d55dc74403820ade1b17a6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 27 May 2024 19:55:11 +0200 Subject: [PATCH 646/700] chore: rm reth-interfaces from reth-trie (#8419) --- Cargo.lock | 3 ++- crates/trie/Cargo.toml | 4 +++- crates/trie/src/proof.rs | 6 +++--- crates/trie/src/state.rs | 2 +- crates/trie/src/trie.rs | 2 +- crates/trie/src/updates.rs | 3 +-- 6 files changed, 11 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b9496703d735c..697036d008c2d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8101,10 +8101,11 @@ dependencies = [ "once_cell", "proptest", "reth-db", - "reth-interfaces", + "reth-execution-errors", "reth-metrics", "reth-primitives", "reth-provider", + "reth-storage-errors", "revm", "serde_json", "similar-asserts", diff --git a/crates/trie/Cargo.toml b/crates/trie/Cargo.toml index 39662908673df..e20e6dd6fd770 100644 --- a/crates/trie/Cargo.toml +++ b/crates/trie/Cargo.toml @@ -14,8 +14,9 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true -reth-interfaces.workspace = true +reth-execution-errors.workspace = true reth-db.workspace = true + revm.workspace = true # alloy @@ -41,6 +42,7 @@ triehash = { version = "0.8", optional = true } reth-primitives = { workspace = true, features = 
["test-utils", "arbitrary"] } reth-db = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } +reth-storage-errors.workspace = true # trie triehash = "0.8" diff --git a/crates/trie/src/proof.rs b/crates/trie/src/proof.rs index 56e1acb861232..3b212bd519c17 100644 --- a/crates/trie/src/proof.rs +++ b/crates/trie/src/proof.rs @@ -7,7 +7,7 @@ use crate::{ }; use alloy_rlp::{BufMut, Encodable}; use reth_db::{tables, transaction::DbTx}; -use reth_interfaces::trie::{StateRootError, StorageRootError}; +use reth_execution_errors::{StateRootError, StorageRootError}; use reth_primitives::{ constants::EMPTY_ROOT_HASH, keccak256, @@ -168,9 +168,9 @@ mod tests { use crate::StateRoot; use once_cell::sync::Lazy; use reth_db::database::Database; - use reth_interfaces::RethResult; use reth_primitives::{Account, Bytes, Chain, ChainSpec, StorageEntry, HOLESKY, MAINNET, U256}; use reth_provider::{test_utils::create_test_provider_factory, HashingWriter, ProviderFactory}; + use reth_storage_errors::provider::ProviderResult; use std::{str::FromStr, sync::Arc}; /* @@ -201,7 +201,7 @@ mod tests { fn insert_genesis( provider_factory: &ProviderFactory, chain_spec: Arc, - ) -> RethResult { + ) -> ProviderResult { let mut provider = provider_factory.provider_rw()?; // Hash accounts and insert them into hashing table. 
diff --git a/crates/trie/src/state.rs b/crates/trie/src/state.rs index 34b948346a3df..9c8886017ae60 100644 --- a/crates/trie/src/state.rs +++ b/crates/trie/src/state.rs @@ -11,7 +11,7 @@ use reth_db::{ transaction::DbTx, DatabaseError, }; -use reth_interfaces::trie::StateRootError; +use reth_execution_errors::StateRootError; use reth_primitives::{ keccak256, revm::compat::into_reth_acc, trie::Nibbles, Account, Address, BlockNumber, B256, U256, diff --git a/crates/trie/src/trie.rs b/crates/trie/src/trie.rs index 5f24d1f05165d..0729776efad2d 100644 --- a/crates/trie/src/trie.rs +++ b/crates/trie/src/trie.rs @@ -10,7 +10,7 @@ use crate::{ }; use alloy_rlp::{BufMut, Encodable}; use reth_db::transaction::DbTx; -use reth_interfaces::trie::{StateRootError, StorageRootError}; +use reth_execution_errors::{StateRootError, StorageRootError}; use reth_primitives::{ constants::EMPTY_ROOT_HASH, keccak256, diff --git a/crates/trie/src/updates.rs b/crates/trie/src/updates.rs index fc263aefb8cde..1d31ee31fea4e 100644 --- a/crates/trie/src/updates.rs +++ b/crates/trie/src/updates.rs @@ -1,3 +1,4 @@ +use crate::walker::TrieWalker; use derive_more::Deref; use reth_db::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, @@ -13,8 +14,6 @@ use reth_primitives::{ }; use std::collections::{hash_map::IntoIter, HashMap, HashSet}; -use crate::walker::TrieWalker; - /// The key of a trie node. 
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub enum TrieKey { From 48be58cbb4b0458bb54a95482ec8ef26a9773325 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 27 May 2024 20:07:50 +0200 Subject: [PATCH 647/700] chore(engine): reduce number of canonical tip lookups from engine (#8421) --- crates/blockchain-tree-api/src/lib.rs | 11 +++++--- crates/blockchain-tree/src/blockchain_tree.rs | 23 +++++++++++----- crates/consensus/beacon/src/engine/mod.rs | 27 ++++++++++--------- 3 files changed, 40 insertions(+), 21 deletions(-) diff --git a/crates/blockchain-tree-api/src/lib.rs b/crates/blockchain-tree-api/src/lib.rs index 113e951e6a790..e17b790a42f10 100644 --- a/crates/blockchain-tree-api/src/lib.rs +++ b/crates/blockchain-tree-api/src/lib.rs @@ -162,7 +162,10 @@ impl std::fmt::Display for BlockValidationKind { pub enum CanonicalOutcome { /// The block is already canonical. AlreadyCanonical { - /// The corresponding [SealedHeader] that is already canonical. + /// Block number and hash of current head. + head: BlockNumHash, + /// The corresponding [SealedHeader] that was attempted to be made a current head and + /// is already canonical. header: SealedHeader, }, /// Committed the block to the database. @@ -176,7 +179,7 @@ impl CanonicalOutcome { /// Returns the header of the block that was made canonical. pub fn header(&self) -> &SealedHeader { match self { - CanonicalOutcome::AlreadyCanonical { header } => header, + CanonicalOutcome::AlreadyCanonical { header, .. } => header, CanonicalOutcome::Committed { head } => head, } } @@ -184,7 +187,7 @@ impl CanonicalOutcome { /// Consumes the outcome and returns the header of the block that was made canonical. pub fn into_header(self) -> SealedHeader { match self { - CanonicalOutcome::AlreadyCanonical { header } => header, + CanonicalOutcome::AlreadyCanonical { header, .. 
} => header, CanonicalOutcome::Committed { head } => head, } } @@ -209,6 +212,8 @@ pub enum BlockStatus { /// If block is valid and block forks off canonical chain. /// If blocks is not connected to canonical chain. Disconnected { + /// Current canonical head. + head: BlockNumHash, /// The lowest ancestor block that is not connected to the canonical chain. missing_ancestor: BlockNumHash, }, diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index db91023149754..c60719d4d876c 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -221,7 +221,10 @@ where // check if block is disconnected if let Some(block) = self.state.buffered_blocks.block(&block.hash) { - return Ok(Some(BlockStatus::Disconnected { missing_ancestor: block.parent_num_hash() })) + return Ok(Some(BlockStatus::Disconnected { + head: self.state.block_indices.canonical_tip(), + missing_ancestor: block.parent_num_hash(), + })) } Ok(None) @@ -388,7 +391,10 @@ where .lowest_ancestor(&block_hash) .ok_or(BlockchainTreeError::BlockBufferingFailed { block_hash })?; - Ok(BlockStatus::Disconnected { missing_ancestor: lowest_ancestor.parent_num_hash() }) + Ok(BlockStatus::Disconnected { + head: self.state.block_indices.canonical_tip(), + missing_ancestor: lowest_ancestor.parent_num_hash(), + }) } /// This tries to append the given block to the canonical chain. 
@@ -1064,7 +1070,9 @@ where hash: block_hash, })) } - return Ok(CanonicalOutcome::AlreadyCanonical { header }) + + let head = self.state.block_indices.canonical_tip(); + return Ok(CanonicalOutcome::AlreadyCanonical { header, head }) } let Some(chain_id) = self.block_indices().get_block_chain_id(&block_hash) else { @@ -2001,18 +2009,20 @@ mod tests { let mut canon_notif = tree.subscribe_canon_state(); // genesis block 10 is already canonical - tree.make_canonical(B256::ZERO).unwrap(); + let head = BlockNumHash::new(10, B256::ZERO); + tree.make_canonical(head.hash).unwrap(); // make sure is_block_hash_canonical returns true for genesis block tree.is_block_hash_canonical(&B256::ZERO).unwrap(); // make genesis block 10 as finalized - tree.finalize_block(10); + tree.finalize_block(head.number); // block 2 parent is not known, block2 is buffered. assert_eq!( tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), InsertPayloadOk::Inserted(BlockStatus::Disconnected { + head, missing_ancestor: block2.parent_num_hash() }) ); @@ -2029,7 +2039,7 @@ mod tests { assert_eq!( tree.is_block_known(block2.num_hash()).unwrap(), - Some(BlockStatus::Disconnected { missing_ancestor: block2.parent_num_hash() }) + Some(BlockStatus::Disconnected { head, missing_ancestor: block2.parent_num_hash() }) ); // check if random block is known @@ -2328,6 +2338,7 @@ mod tests { assert_eq!( tree.insert_block(block2b.clone(), BlockValidationKind::Exhaustive).unwrap(), InsertPayloadOk::Inserted(BlockStatus::Disconnected { + head: block2.header.num_hash(), missing_ancestor: block2b.parent_num_hash() }) ); diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 039c14e422d41..e29ddd62495ba 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -393,8 +393,8 @@ where match make_canonical_result { Ok(outcome) => { let should_update_head = match &outcome { - 
CanonicalOutcome::AlreadyCanonical { header } => { - self.on_head_already_canonical(header, &mut attrs) + CanonicalOutcome::AlreadyCanonical { head, header } => { + self.on_head_already_canonical(head, header, &mut attrs) } CanonicalOutcome::Committed { head } => { // new VALID update that moved the canonical chain forward @@ -448,6 +448,7 @@ where /// Returns `true` if the head needs to be updated. fn on_head_already_canonical( &self, + head: &BlockNumHash, header: &SealedHeader, attrs: &mut Option, ) -> bool { @@ -457,7 +458,7 @@ where debug!( target: "consensus::engine", fcu_head_num=?header.number, - current_head_num=?self.blockchain.canonical_tip().number, + current_head_num=?head.number, "[Optimism] Allowing beacon reorg to old head" ); return true @@ -469,14 +470,14 @@ where // and deemed `VALID`. In the case of such an event, client software MUST return // `{payloadStatus: {status: VALID, latestValidHash: forkchoiceState.headBlockHash, // validationError: null}, payloadId: null}` - if self.blockchain.canonical_tip() != header.num_hash() { + if head != &header.num_hash() { attrs.take(); } debug!( target: "consensus::engine", fcu_head_num=?header.number, - current_head_num=?self.blockchain.canonical_tip().number, + current_head_num=?head.number, "Ignoring beacon update to old head" ); false @@ -1285,12 +1286,11 @@ where &mut self, downloaded_block: BlockNumHash, missing_parent: BlockNumHash, + head: BlockNumHash, ) { // compare the missing parent with the canonical tip - let canonical_tip_num = self.blockchain.canonical_tip().number; - if let Some(target) = self.can_pipeline_sync_to_finalized( - canonical_tip_num, + head.number, missing_parent.number, Some(downloaded_block), ) { @@ -1310,9 +1310,7 @@ where // * the missing parent block num >= canonical tip num, but the number of missing blocks is // less than the pipeline threshold // * this case represents a potentially long range of blocks to download and execute - if let Some(distance) = - 
self.distance_from_local_tip(canonical_tip_num, missing_parent.number) - { + if let Some(distance) = self.distance_from_local_tip(head.number, missing_parent.number) { self.sync.download_block_range(missing_parent.hash, distance) } else { // This happens when the missing parent is on an outdated @@ -1753,11 +1751,16 @@ where } } InsertPayloadOk::Inserted(BlockStatus::Disconnected { + head, missing_ancestor: missing_parent, }) => { // block is not connected to the canonical head, we need to download // its missing branch first - self.on_disconnected_block(downloaded_num_hash, missing_parent); + self.on_disconnected_block( + downloaded_num_hash, + missing_parent, + head, + ); } _ => (), } From 66c072c92819fdf1857152865c32d51d8800b25d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 27 May 2024 20:46:30 +0200 Subject: [PATCH 648/700] fix(metrics): bug tx fetcher metrics (#8416) --- crates/net/network/src/transactions/fetcher.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 491c24f0fe15e..c5272ecc3aaae 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -430,7 +430,7 @@ impl TransactionFetcher { let budget_find_idle_fallback_peer = self .search_breadth_budget_find_idle_fallback_peer(&has_capacity_wrt_pending_pool_imports); - let acc = &mut search_durations.fill_request; + let acc = &mut search_durations.find_idle_peer; let peer_id = duration_metered_exec!( { let Some(peer_id) = self.find_any_idle_fallback_peer_for_any_pending_hash( @@ -460,7 +460,7 @@ impl TransactionFetcher { &has_capacity_wrt_pending_pool_imports, ); - let acc = &mut search_durations.find_idle_peer; + let acc = &mut search_durations.fill_request; duration_metered_exec!( { self.fill_request_from_hashes_pending_fetch( From 20aeb2be0d0931f719e37860368866eabdc6e457 Mon Sep 17 00:00:00 2001 From: Matthias Seitz 
Date: Tue, 28 May 2024 02:06:09 +0200 Subject: [PATCH 649/700] chore: use BlockId functions directly (#8417) --- crates/rpc/rpc/src/eth/api/state.rs | 2 +- crates/rpc/rpc/src/eth/api/transactions.rs | 11 +++-------- crates/rpc/rpc/src/trace.rs | 6 ++---- 3 files changed, 6 insertions(+), 13 deletions(-) diff --git a/crates/rpc/rpc/src/eth/api/state.rs b/crates/rpc/rpc/src/eth/api/state.rs index 144b1504f719b..61566a355c1af 100644 --- a/crates/rpc/rpc/src/eth/api/state.rs +++ b/crates/rpc/rpc/src/eth/api/state.rs @@ -45,7 +45,7 @@ where address: Address, block_id: Option, ) -> EthResult { - if block_id == Some(BlockId::Number(BlockNumberOrTag::Pending)) { + if block_id == Some(BlockId::pending()) { let address_txs = self.pool().get_transactions_by_sender(address); if let Some(highest_nonce) = address_txs.iter().map(|item| item.transaction.nonce()).max() diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 17d2f85c0eb96..538aa606b8168 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -787,9 +787,7 @@ where None => return Ok(None), Some(tx) => { let res = match tx { - tx @ TransactionSource::Pool(_) => { - (tx, BlockId::Number(BlockNumberOrTag::Pending)) - } + tx @ TransactionSource::Pool(_) => (tx, BlockId::pending()), TransactionSource::Block { transaction, index, @@ -873,17 +871,14 @@ where // set nonce if not already set before if request.nonce.is_none() { - let nonce = - self.get_transaction_count(from, Some(BlockId::Number(BlockNumberOrTag::Pending)))?; + let nonce = self.get_transaction_count(from, Some(BlockId::pending()))?; // note: `.to()` can't panic because the nonce is constructed from a `u64` request.nonce = Some(nonce.to::()); } let chain_id = self.chain_id(); - let estimated_gas = self - .estimate_gas_at(request.clone(), BlockId::Number(BlockNumberOrTag::Pending), None) - .await?; + let estimated_gas = self.estimate_gas_at(request.clone(), 
BlockId::pending(), None).await?; let gas_limit = estimated_gas; let TransactionRequest { diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 0a1494e0e97ef..b2104ff47a5bf 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -7,9 +7,7 @@ use crate::eth::{ use async_trait::async_trait; use jsonrpsee::core::RpcResult as Result; use reth_consensus_common::calc::{base_block_reward, block_reward}; -use reth_primitives::{ - revm::env::tx_env_with_recovered, BlockId, BlockNumberOrTag, Bytes, SealedHeader, B256, U256, -}; +use reth_primitives::{revm::env::tx_env_with_recovered, BlockId, Bytes, SealedHeader, B256, U256}; use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; @@ -133,7 +131,7 @@ where calls: Vec<(TransactionRequest, HashSet)>, block_id: Option, ) -> EthResult> { - let at = block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Pending)); + let at = block_id.unwrap_or(BlockId::pending()); let (cfg, block_env, at) = self.inner.eth_api.evm_env_at(at).await?; let gas_limit = self.inner.eth_api.call_gas_limit(); From 1e2a0c372ec58c76eca26436df9dc205a64ee312 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 28 May 2024 12:17:20 +0200 Subject: [PATCH 650/700] chore(tree): misc tree cleanup (#8423) --- crates/blockchain-tree/src/blockchain_tree.rs | 62 +++++++------------ crates/blockchain-tree/src/shareable.rs | 4 +- 2 files changed, 26 insertions(+), 40 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index c60719d4d876c..42ba451ab194f 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -65,13 +65,14 @@ pub struct BlockchainTree { externals: TreeExternals, /// Tree configuration config: BlockchainTreeConfig, + /// Prune modes. 
+ prune_modes: Option, /// Broadcast channel for canon state changes notifications. canon_state_notification_sender: CanonStateNotificationSender, - /// Metrics for the blockchain tree. - metrics: TreeMetrics, /// Metrics for sync stages. sync_metrics_tx: Option, - prune_modes: Option, + /// Metrics for the blockchain tree. + metrics: TreeMetrics, } impl BlockchainTree { @@ -148,10 +149,10 @@ where config.max_unconnected_blocks(), ), config, + prune_modes, canon_state_notification_sender, - metrics: Default::default(), sync_metrics_tx: None, - prune_modes, + metrics: Default::default(), }) } @@ -190,14 +191,14 @@ where &self, block: BlockNumHash, ) -> Result, InsertBlockErrorKind> { + // check if block is canonical + if self.is_block_hash_canonical(&block.hash)? { + return Ok(Some(BlockStatus::Valid(BlockAttachment::Canonical))) + } + let last_finalized_block = self.block_indices().last_finalized_block(); // check db if block is finalized. if block.number <= last_finalized_block { - // check if block is canonical - if self.is_block_hash_canonical(&block.hash)? { - return Ok(Some(BlockStatus::Valid(BlockAttachment::Canonical))) - } - // check if block is inside database if self.externals.provider_factory.provider()?.block_number(block.hash)?.is_some() { return Ok(Some(BlockStatus::Valid(BlockAttachment::Canonical))) @@ -209,11 +210,6 @@ where .into()) } - // check if block is part of canonical chain - if self.is_block_hash_canonical(&block.hash)? { - return Ok(Some(BlockStatus::Valid(BlockAttachment::Canonical))) - } - // is block inside chain if let Some(attachment) = self.is_block_inside_sidechain(&block) { return Ok(Some(BlockStatus::Valid(attachment))) @@ -240,7 +236,7 @@ where /// /// Caution: This will not return blocks from the canonical chain. 
#[inline] - pub fn block_by_hash(&self, block_hash: BlockHash) -> Option<&SealedBlock> { + pub fn sidechain_block_by_hash(&self, block_hash: BlockHash) -> Option<&SealedBlock> { self.state.block_by_hash(block_hash) } @@ -262,15 +258,10 @@ where self.state.receipts_by_block_hash(block_hash) } - /// Returns true if the block is included in a side-chain. - fn is_block_hash_inside_sidechain(&self, block_hash: BlockHash) -> bool { - self.block_by_hash(block_hash).is_some() - } - /// Returns the block that's considered the `Pending` block, if it exists. pub fn pending_block(&self) -> Option<&SealedBlock> { let b = self.block_indices().pending_block_num_hash()?; - self.block_by_hash(b.hash) + self.sidechain_block_by_hash(b.hash) } /// Return items needed to execute on the pending state. @@ -302,16 +293,14 @@ where // get parent hashes let mut parent_block_hashes = self.all_chain_hashes(chain_id); - let first_pending_block_number = - if let Some(key_value) = parent_block_hashes.first_key_value() { - *key_value.0 - } else { - debug!(target: "blockchain_tree", ?chain_id, "No blockhashes stored"); - return None - }; + let Some((first_pending_block_number, _)) = parent_block_hashes.first_key_value() + else { + debug!(target: "blockchain_tree", ?chain_id, "No block hashes stored"); + return None + }; let canonical_chain = canonical_chain .iter() - .filter(|&(key, _)| key < first_pending_block_number) + .filter(|&(key, _)| &key < first_pending_block_number) .collect::>(); parent_block_hashes.extend(canonical_chain); @@ -916,8 +905,8 @@ where // check unconnected block buffer for children of the chains let mut all_chain_blocks = Vec::new(); for (_, chain) in self.state.chains.iter() { - for (&number, blocks) in chain.blocks().iter() { - all_chain_blocks.push(BlockNumHash { number, hash: blocks.hash() }) + for (&number, block) in chain.blocks().iter() { + all_chain_blocks.push(BlockNumHash { number, hash: block.hash() }) } } for block in all_chain_blocks.into_iter() { @@ -940,14 
+929,11 @@ where let include_blocks = self.state.buffered_blocks.remove_block_with_children(&new_block.hash); // then try to reinsert them into the tree for block in include_blocks.into_iter() { - // dont fail on error, just ignore the block. + // don't fail on error, just ignore the block. let _ = self .try_insert_validated_block(block, BlockValidationKind::SkipStateRootValidation) .map_err(|err| { - debug!( - target: "blockchain_tree", %err, - "Failed to insert buffered block", - ); + debug!(target: "blockchain_tree", %err, "Failed to insert buffered block"); err }); } @@ -1004,7 +990,7 @@ where header = provider.header_by_number(num)?; } - if header.is_none() && self.is_block_hash_inside_sidechain(*hash) { + if header.is_none() && self.sidechain_block_by_hash(*hash).is_some() { return Ok(None) } diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 52a98e84dac3e..0c9d803d5662a 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -118,12 +118,12 @@ where fn header_by_hash(&self, hash: BlockHash) -> Option { trace!(target: "blockchain_tree", ?hash, "Returning header by hash"); - self.tree.read().block_by_hash(hash).map(|b| b.header.clone()) + self.tree.read().sidechain_block_by_hash(hash).map(|b| b.header.clone()) } fn block_by_hash(&self, block_hash: BlockHash) -> Option { trace!(target: "blockchain_tree", ?block_hash, "Returning block by hash"); - self.tree.read().block_by_hash(block_hash).cloned() + self.tree.read().sidechain_block_by_hash(block_hash).cloned() } fn block_with_senders_by_hash(&self, block_hash: BlockHash) -> Option { From d5bdb8ecdee9db22b9d401262e891c01d8ba217f Mon Sep 17 00:00:00 2001 From: 0xAtreides <103257861+JackG-eth@users.noreply.github.com> Date: Tue, 28 May 2024 11:25:51 +0100 Subject: [PATCH 651/700] feat: sidecar fetcher (#7443) Co-authored-by: Matthias Seitz --- Cargo.lock | 16 + Cargo.toml | 1 + .../beacon-api-sidecar-fetcher/Cargo.toml 
| 20 ++ .../beacon-api-sidecar-fetcher/src/main.rs | 98 ++++++ .../src/mined_sidecar.rs | 278 ++++++++++++++++++ 5 files changed, 413 insertions(+) create mode 100644 examples/beacon-api-sidecar-fetcher/Cargo.toml create mode 100644 examples/beacon-api-sidecar-fetcher/src/main.rs create mode 100644 examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs diff --git a/Cargo.lock b/Cargo.lock index 697036d008c2d..77e4205eac15e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1106,6 +1106,22 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +[[package]] +name = "beacon-api-sidecar-fetcher" +version = "0.1.0" +dependencies = [ + "alloy-rpc-types-beacon", + "clap", + "eyre", + "futures-util", + "reqwest 0.12.4", + "reth", + "reth-node-ethereum", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "beacon-api-sse" version = "0.0.0" diff --git a/Cargo.toml b/Cargo.toml index 0f924cfcf9da1..1aa1f1bc261e7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,6 +79,7 @@ members = [ "crates/transaction-pool/", "crates/trie-parallel/", "crates/trie/", + "examples/beacon-api-sidecar-fetcher/", "examples/beacon-api-sse/", "examples/bsc-p2p", "examples/custom-dev-node/", diff --git a/examples/beacon-api-sidecar-fetcher/Cargo.toml b/examples/beacon-api-sidecar-fetcher/Cargo.toml new file mode 100644 index 0000000000000..8d7fd39ef70c0 --- /dev/null +++ b/examples/beacon-api-sidecar-fetcher/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "beacon-api-sidecar-fetcher" +version = "0.1.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth.workspace = true +reth-node-ethereum.workspace = true + +alloy-rpc-types-beacon.workspace = true + +serde.workspace = true +serde_json.workspace = true +clap.workspace = true +futures-util.workspace = true +eyre.workspace = true +thiserror.workspace = true +reqwest.workspace = true diff --git 
a/examples/beacon-api-sidecar-fetcher/src/main.rs b/examples/beacon-api-sidecar-fetcher/src/main.rs new file mode 100644 index 0000000000000..f3c7a843a396b --- /dev/null +++ b/examples/beacon-api-sidecar-fetcher/src/main.rs @@ -0,0 +1,98 @@ +//! Run with +//! +//! ```not_rust +//! cargo run -p beacon-api-beacon-sidecar-fetcher --node -- full +//! ``` +//! +//! This launches a regular reth instance and subscribes to payload attributes event stream. +//! +//! **NOTE**: This expects that the CL client is running an http server on `localhost:5052` and is +//! configured to emit payload attributes events. +//! +//! See beacon Node API: + +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +use std::{ + collections::VecDeque, + net::{IpAddr, Ipv4Addr}, +}; + +use clap::Parser; +use futures_util::{stream::FuturesUnordered, StreamExt}; +use mined_sidecar::MinedSidecarStream; +use reth::{builder::NodeHandle, cli::Cli, primitives::B256, providers::CanonStateSubscriptions}; +use reth_node_ethereum::EthereumNode; + +pub mod mined_sidecar; + +fn main() { + Cli::::parse() + .run(|builder, args| async move { + // launch the node + let NodeHandle { node, node_exit_future } = + builder.node(EthereumNode::default()).launch().await?; + + let notifications: reth::providers::CanonStateNotificationStream = + node.provider.canonical_state_stream(); + + let pool = node.pool.clone(); + + let mut sidecar_stream = MinedSidecarStream { + events: notifications, + pool, + beacon_config: args, + client: reqwest::Client::new(), + pending_requests: FuturesUnordered::new(), + queued_actions: VecDeque::new(), + }; + + while let Some(result) = sidecar_stream.next().await { + match result { + Ok(blob_transaction) => { + // Handle successful transaction + println!("Processed BlobTransaction: {:?}", blob_transaction); + } + Err(e) => { + // Handle errors specifically + eprintln!("Failed to process transaction: {:?}", e); + } + } + } + node_exit_future.await + }) + .unwrap(); +} + +/// Our custom 
cli args extension that adds one flag to reth default CLI. +#[derive(Debug, Clone, clap::Parser)] +pub struct BeaconSidecarConfig { + /// Beacon Node http server address + #[arg(long = "cl.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))] + pub cl_addr: IpAddr, + /// Beacon Node http server port to listen on + #[arg(long = "cl.port", default_value_t = 5052)] + pub cl_port: u16, +} + +impl Default for BeaconSidecarConfig { + /// Default setup for lighthouse client + fn default() -> Self { + Self { + cl_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), // Equivalent to Ipv4Addr::LOCALHOST + cl_port: 5052, + } + } +} + +impl BeaconSidecarConfig { + /// Returns the http url of the beacon node + pub fn http_base_url(&self) -> String { + format!("http://{}:{}", self.cl_addr, self.cl_port) + } + + /// Returns the URL to the beacon sidecars endpoint + pub fn sidecar_url(&self, block_root: B256) -> String { + format!("{}/eth/v1/beacon/blob_sidecars/{}", self.http_base_url(), block_root) + } +} diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs new file mode 100644 index 0000000000000..5f5f4cbf1eb0a --- /dev/null +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -0,0 +1,278 @@ +use crate::BeaconSidecarConfig; +use alloy_rpc_types_beacon::sidecar::{BeaconBlobBundle, SidecarIterator}; +use eyre::Result; +use futures_util::{stream::FuturesUnordered, Future, Stream, StreamExt}; +use reqwest::{Error, StatusCode}; +use reth::{ + primitives::{BlobTransaction, SealedBlockWithSenders, B256}, + providers::CanonStateNotification, + transaction_pool::{BlobStoreError, TransactionPoolExt}, +}; +use serde::{Deserialize, Serialize}; +use std::{ + collections::VecDeque, + pin::Pin, + task::{Context, Poll}, +}; +use thiserror::Error; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BlockMetadata { + pub block_hash: B256, + pub block_number: u64, + pub gas_used: u64, +} + 
+#[derive(Debug, Clone)] +pub struct MinedBlob { + pub transaction: BlobTransaction, + pub block_metadata: BlockMetadata, +} + +#[derive(Debug, Clone)] +pub struct ReorgedBlob { + pub transaction_hash: B256, + pub block_metadata: BlockMetadata, +} + +#[derive(Debug, Clone)] +pub enum BlobTransactionEvent { + Mined(MinedBlob), + Reorged(ReorgedBlob), +} + +/// SideCarError Handles Errors from both EL and CL +#[derive(Debug, Error)] +pub enum SideCarError { + #[error("Reqwest encountered an error: {0}")] + ReqwestError(Error), + + #[error("Failed to fetch transactions from the blobstore: {0}")] + TransactionPoolError(BlobStoreError), + + #[error("400: {0}")] + InvalidBlockID(String), + + #[error("404: {0}")] + BlockNotFound(String), + + #[error("500: {0}")] + InternalError(String), + + #[error("Network error: {0}")] + NetworkError(String), + + #[error("Data parsing error: {0}")] + DeserializationError(String), + + #[error("{0} Error: {1}")] + UnknownError(u16, String), +} +/// Futures associated with retrieving blob data from the beacon client +type SidecarsFuture = + Pin, SideCarError>> + Send>>; + +/// A Stream that processes CanonStateNotifications and retrieves BlobTransactions from the beacon +/// client. 
+/// +/// First checks if the blob sidecar for a given EIP4844 is stored locally, if not attempts to +/// retrieve it from the CL Layer +#[must_use = "streams do nothing unless polled"] +pub struct MinedSidecarStream { + pub events: St, + pub pool: P, + pub beacon_config: BeaconSidecarConfig, + pub client: reqwest::Client, + pub pending_requests: FuturesUnordered, + pub queued_actions: VecDeque, +} + +impl MinedSidecarStream +where + St: Stream + Send + Unpin + 'static, + P: TransactionPoolExt + Unpin + 'static, +{ + fn process_block(&mut self, block: &SealedBlockWithSenders) { + let txs: Vec<_> = block + .transactions() + .filter(|tx| tx.is_eip4844()) + .map(|tx| (tx.clone(), tx.blob_versioned_hashes().unwrap().len())) + .collect(); + + let mut all_blobs_available = true; + let mut actions_to_queue: Vec = Vec::new(); + + if txs.is_empty() { + return; + } + + match self.pool.get_all_blobs_exact(txs.iter().map(|(tx, _)| tx.hash()).collect()) { + Ok(blobs) => { + for ((tx, _), sidecar) in txs.iter().zip(blobs.iter()) { + let transaction = BlobTransaction::try_from_signed(tx.clone(), sidecar.clone()) + .expect("should not fail to convert blob tx if it is already eip4844"); + + let block_metadata = BlockMetadata { + block_hash: block.hash(), + block_number: block.number, + gas_used: block.gas_used, + }; + actions_to_queue.push(BlobTransactionEvent::Mined(MinedBlob { + transaction, + block_metadata, + })); + } + } + Err(_err) => { + all_blobs_available = false; + } + }; + + // if any blob is missing we must instead query the consensus layer. 
+ if all_blobs_available { + self.queued_actions.extend(actions_to_queue); + } else { + let client_clone = self.client.clone(); + let block_root = block.hash(); + let block_clone = block.clone(); + let sidecar_url = self.beacon_config.sidecar_url(block_root); + let query = + Box::pin(fetch_blobs_for_block(client_clone, sidecar_url, block_clone, txs)); + self.pending_requests.push(query); + } + } +} + +impl Stream for MinedSidecarStream +where + St: Stream + Send + Unpin + 'static, + P: TransactionPoolExt + Unpin + 'static, +{ + type Item = Result; + + /// Attempt to pull the next BlobTransaction from the stream. + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.get_mut(); + + // Request locally first, otherwise request from CL + loop { + if let Some(mined_sidecar) = this.queued_actions.pop_front() { + return Poll::Ready(Some(Ok(mined_sidecar))); + } + + // Check if any pending requests are ready and append to buffer + while let Poll::Ready(Some(pending_result)) = this.pending_requests.poll_next_unpin(cx) + { + match pending_result { + Ok(mined_sidecars) => { + for sidecar in mined_sidecars { + this.queued_actions.push_back(sidecar); + } + } + Err(err) => return Poll::Ready(Some(Err(err))), + } + } + + while let Poll::Ready(Some(notification)) = this.events.poll_next_unpin(cx) { + { + match notification { + CanonStateNotification::Commit { new } => { + for (_, block) in new.blocks().iter() { + this.process_block(block); + } + } + CanonStateNotification::Reorg { old, new } => { + // handle reorged blocks + for (_, block) in old.blocks().iter() { + let txs: Vec = block + .transactions() + .filter(|tx: &&reth::primitives::TransactionSigned| { + tx.is_eip4844() + }) + .map(|tx| { + let transaction_hash = tx.hash(); + let block_metadata = BlockMetadata { + block_hash: new.tip().block.hash(), + block_number: new.tip().block.number, + gas_used: new.tip().block.gas_used, + }; + BlobTransactionEvent::Reorged(ReorgedBlob { + 
transaction_hash, + block_metadata, + }) + }) + .collect(); + this.queued_actions.extend(txs); + } + + for (_, block) in new.blocks().iter() { + this.process_block(block); + } + } + } + } + } + } + } +} + +/// Query the Beacon Layer for missing BlobTransactions +async fn fetch_blobs_for_block( + client: reqwest::Client, + url: String, + block: SealedBlockWithSenders, + txs: Vec<(reth::primitives::TransactionSigned, usize)>, +) -> Result, SideCarError> { + let response = match client.get(url).header("Accept", "application/json").send().await { + Ok(response) => response, + Err(err) => return Err(SideCarError::ReqwestError(err)), + }; + + if !response.status().is_success() { + return match response.status() { + StatusCode::BAD_REQUEST => { + Err(SideCarError::InvalidBlockID("Invalid request to server.".to_string())) + } + StatusCode::NOT_FOUND => { + Err(SideCarError::BlockNotFound("Requested block not found.".to_string())) + } + StatusCode::INTERNAL_SERVER_ERROR => { + Err(SideCarError::InternalError("Server encountered an error.".to_string())) + } + _ => Err(SideCarError::UnknownError( + response.status().as_u16(), + "Unhandled HTTP status.".to_string(), + )), + }; + } + + let bytes = match response.bytes().await { + Ok(b) => b, + Err(e) => return Err(SideCarError::NetworkError(e.to_string())), + }; + + let blobs_bundle: BeaconBlobBundle = match serde_json::from_slice(&bytes) { + Ok(b) => b, + Err(e) => return Err(SideCarError::DeserializationError(e.to_string())), + }; + + let mut sidecar_iterator = SidecarIterator::new(blobs_bundle); + + let sidecars: Vec = txs + .iter() + .filter_map(|(tx, blob_len)| { + sidecar_iterator.next_sidecar(*blob_len).map(|sidecar| { + let transaction = BlobTransaction::try_from_signed(tx.clone(), sidecar) + .expect("should not fail to convert blob tx if it is already eip4844"); + let block_metadata = BlockMetadata { + block_hash: block.hash(), + block_number: block.number, + gas_used: block.gas_used, + }; + 
BlobTransactionEvent::Mined(MinedBlob { transaction, block_metadata }) + }) + }) + .collect(); + + Ok(sidecars) +} From 14f035633000084a4c717fe4b7e597a987148468 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 May 2024 14:39:51 +0200 Subject: [PATCH 652/700] chore: extract bundle state type (#8425) --- Cargo.lock | 11 + Cargo.toml | 2 + crates/evm/execution-types/Cargo.toml | 21 ++ crates/evm/execution-types/src/bundle.rs | 305 ++++++++++++++++ crates/evm/execution-types/src/lib.rs | 12 + crates/storage/provider/Cargo.toml | 3 +- .../bundle_state_with_receipts.rs | 338 ++---------------- 7 files changed, 376 insertions(+), 316 deletions(-) create mode 100644 crates/evm/execution-types/Cargo.toml create mode 100644 crates/evm/execution-types/src/bundle.rs create mode 100644 crates/evm/execution-types/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 77e4205eac15e..21bb6b125f1f2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7054,6 +7054,16 @@ dependencies = [ "thiserror", ] +[[package]] +name = "reth-execution-types" +version = "0.2.0-beta.7" +dependencies = [ + "reth-evm", + "reth-primitives", + "reth-trie", + "revm", +] + [[package]] name = "reth-exex" version = "0.2.0-beta.7" @@ -7661,6 +7671,7 @@ dependencies = [ "reth-db", "reth-evm", "reth-execution-errors", + "reth-execution-types", "reth-fs-util", "reth-interfaces", "reth-metrics", diff --git a/Cargo.toml b/Cargo.toml index 1aa1f1bc261e7..bb9900ae1ddc3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,6 +19,7 @@ members = [ "crates/etl/", "crates/evm/", "crates/evm/execution-errors", + "crates/evm/execution-types", "crates/exex/", "crates/interfaces/", "crates/metrics/", @@ -245,6 +246,7 @@ reth-evm = { path = "crates/evm" } reth-evm-ethereum = { path = "crates/ethereum/evm" } reth-evm-optimism = { path = "crates/optimism/evm" } reth-execution-errors = { path = "crates/evm/execution-errors" } +reth-execution-types = { path = "crates/evm/execution-types" } reth-exex = { path = "crates/exex" } 
reth-fs-util = { path = "crates/fs-util" } reth-interfaces = { path = "crates/interfaces" } diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml new file mode 100644 index 0000000000000..c998f9a9aa5cd --- /dev/null +++ b/crates/evm/execution-types/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "reth-execution-types" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +reth-primitives.workspace = true +reth-trie.workspace = true +reth-evm.workspace = true + +revm.workspace = true + +[features] +optimism = [] \ No newline at end of file diff --git a/crates/evm/execution-types/src/bundle.rs b/crates/evm/execution-types/src/bundle.rs new file mode 100644 index 0000000000000..2bc7eda45ae30 --- /dev/null +++ b/crates/evm/execution-types/src/bundle.rs @@ -0,0 +1,305 @@ +use reth_evm::execute::BatchBlockExecutionOutput; +use reth_primitives::{ + logs_bloom, + revm::compat::{into_reth_acc, into_revm_acc}, + Account, Address, BlockNumber, Bloom, Bytecode, Log, Receipt, Receipts, StorageEntry, B256, + U256, +}; +use reth_trie::HashedPostState; +use revm::{ + db::{states::BundleState, BundleAccount}, + primitives::AccountInfo, +}; +use std::collections::HashMap; + +/// Bundle state of post execution changes and reverts. +/// +/// Aggregates the changes over an arbitrary number of blocks. +#[derive(Default, Debug, Clone, PartialEq, Eq)] +pub struct BundleStateWithReceipts { + /// Bundle state with reverts. + pub bundle: BundleState, + /// The collection of receipts. + /// Outer vector stores receipts for each block sequentially. + /// The inner vector stores receipts ordered by transaction number. + /// + /// If receipt is None it means it is pruned. + pub receipts: Receipts, + /// First block of bundle state. 
+ pub first_block: BlockNumber, +} + +// TODO(mattsse): unify the types, currently there's a cyclic dependency between +impl From for BundleStateWithReceipts { + fn from(value: BatchBlockExecutionOutput) -> Self { + let BatchBlockExecutionOutput { bundle, receipts, first_block } = value; + Self { bundle, receipts, first_block } + } +} + +// TODO(mattsse): unify the types, currently there's a cyclic dependency between +impl From for BatchBlockExecutionOutput { + fn from(value: BundleStateWithReceipts) -> Self { + let BundleStateWithReceipts { bundle, receipts, first_block } = value; + Self { bundle, receipts, first_block } + } +} + +/// Type used to initialize revms bundle state. +pub type BundleStateInit = + HashMap, Option, HashMap)>; + +/// Types used inside RevertsInit to initialize revms reverts. +pub type AccountRevertInit = (Option>, Vec); + +/// Type used to initialize revms reverts. +pub type RevertsInit = HashMap>; + +impl BundleStateWithReceipts { + /// Create Bundle State. + pub fn new(bundle: BundleState, receipts: Receipts, first_block: BlockNumber) -> Self { + Self { bundle, receipts, first_block } + } + + /// Create new bundle state with receipts. + pub fn new_init( + state_init: BundleStateInit, + revert_init: RevertsInit, + contracts_init: Vec<(B256, Bytecode)>, + receipts: Receipts, + first_block: BlockNumber, + ) -> Self { + // sort reverts by block number + let mut reverts = revert_init.into_iter().collect::>(); + reverts.sort_unstable_by_key(|a| a.0); + + // initialize revm bundle + let bundle = BundleState::new( + state_init.into_iter().map(|(address, (original, present, storage))| { + ( + address, + original.map(into_revm_acc), + present.map(into_revm_acc), + storage.into_iter().map(|(k, s)| (k.into(), s)).collect(), + ) + }), + reverts.into_iter().map(|(_, reverts)| { + // does not needs to be sorted, it is done when taking reverts. 
+ reverts.into_iter().map(|(address, (original, storage))| { + ( + address, + original.map(|i| i.map(into_revm_acc)), + storage.into_iter().map(|entry| (entry.key.into(), entry.value)), + ) + }) + }), + contracts_init.into_iter().map(|(code_hash, bytecode)| (code_hash, bytecode.0)), + ); + + Self { bundle, receipts, first_block } + } + + /// Return revm bundle state. + pub fn state(&self) -> &BundleState { + &self.bundle + } + + /// Returns mutable revm bundle state. + pub fn state_mut(&mut self) -> &mut BundleState { + &mut self.bundle + } + + /// Set first block. + pub fn set_first_block(&mut self, first_block: BlockNumber) { + self.first_block = first_block; + } + + /// Return iterator over all accounts + pub fn accounts_iter(&self) -> impl Iterator)> { + self.bundle.state().iter().map(|(a, acc)| (*a, acc.info.as_ref())) + } + + /// Return iterator over all [BundleAccount]s in the bundle + pub fn bundle_accounts_iter(&self) -> impl Iterator { + self.bundle.state().iter().map(|(a, acc)| (*a, acc)) + } + + /// Get account if account is known. + pub fn account(&self, address: &Address) -> Option> { + self.bundle.account(address).map(|a| a.info.clone().map(into_reth_acc)) + } + + /// Get storage if value is known. + /// + /// This means that depending on status we can potentially return U256::ZERO. + pub fn storage(&self, address: &Address, storage_key: U256) -> Option { + self.bundle.account(address).and_then(|a| a.storage_slot(storage_key)) + } + + /// Return bytecode if known. + pub fn bytecode(&self, code_hash: &B256) -> Option { + self.bundle.bytecode(code_hash).map(Bytecode) + } + + /// Returns [HashedPostState] for this bundle state. + /// See [HashedPostState::from_bundle_state] for more info. + pub fn hash_state_slow(&self) -> HashedPostState { + HashedPostState::from_bundle_state(&self.bundle.state) + } + + /// Transform block number to the index of block. 
+ fn block_number_to_index(&self, block_number: BlockNumber) -> Option { + if self.first_block > block_number { + return None + } + let index = block_number - self.first_block; + if index >= self.receipts.len() as u64 { + return None + } + Some(index as usize) + } + + /// Returns an iterator over all block logs. + pub fn logs(&self, block_number: BlockNumber) -> Option> { + let index = self.block_number_to_index(block_number)?; + Some(self.receipts[index].iter().filter_map(|r| Some(r.as_ref()?.logs.iter())).flatten()) + } + + /// Return blocks logs bloom + pub fn block_logs_bloom(&self, block_number: BlockNumber) -> Option { + Some(logs_bloom(self.logs(block_number)?)) + } + + /// Returns the receipt root for all recorded receipts. + /// Note: this function calculated Bloom filters for every receipt and created merkle trees + /// of receipt. This is a expensive operation. + pub fn receipts_root_slow(&self, _block_number: BlockNumber) -> Option { + #[cfg(feature = "optimism")] + panic!("This should not be called in optimism mode. Use `optimism_receipts_root_slow` instead."); + #[cfg(not(feature = "optimism"))] + self.receipts.root_slow(self.block_number_to_index(_block_number)?) + } + + /// Returns the receipt root for all recorded receipts. + /// Note: this function calculated Bloom filters for every receipt and created merkle trees + /// of receipt. This is a expensive operation. + #[cfg(feature = "optimism")] + pub fn optimism_receipts_root_slow( + &self, + block_number: BlockNumber, + chain_spec: &reth_primitives::ChainSpec, + timestamp: u64, + ) -> Option { + self.receipts.optimism_root_slow( + self.block_number_to_index(block_number)?, + chain_spec, + timestamp, + ) + } + + /// Returns reference to receipts. + pub fn receipts(&self) -> &Receipts { + &self.receipts + } + + /// Returns mutable reference to receipts. 
+ pub fn receipts_mut(&mut self) -> &mut Receipts { + &mut self.receipts + } + + /// Return all block receipts + pub fn receipts_by_block(&self, block_number: BlockNumber) -> &[Option] { + let Some(index) = self.block_number_to_index(block_number) else { return &[] }; + &self.receipts[index] + } + + /// Is bundle state empty of blocks. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Number of blocks in bundle state. + pub fn len(&self) -> usize { + self.receipts.len() + } + + /// Return first block of the bundle + pub fn first_block(&self) -> BlockNumber { + self.first_block + } + + /// Revert the state to the given block number. + /// + /// Returns false if the block number is not in the bundle state. + /// + /// # Note + /// + /// The provided block number will stay inside the bundle state. + pub fn revert_to(&mut self, block_number: BlockNumber) -> bool { + let Some(index) = self.block_number_to_index(block_number) else { return false }; + + // +1 is for number of blocks that we have as index is included. + let new_len = index + 1; + let rm_trx: usize = self.len() - new_len; + + // remove receipts + self.receipts.truncate(new_len); + // Revert last n reverts. + self.bundle.revert(rm_trx); + + true + } + + /// Splits the block range state at a given block number. + /// Returns two split states ([..at], [at..]). + /// The plain state of the 2nd bundle state will contain extra changes + /// that were made in state transitions belonging to the lower state. + /// + /// # Panics + /// + /// If the target block number is not included in the state block range. + pub fn split_at(self, at: BlockNumber) -> (Option, Self) { + if at == self.first_block { + return (None, self) + } + + let (mut lower_state, mut higher_state) = (self.clone(), self); + + // Revert lower state to [..at]. + lower_state.revert_to(at.checked_sub(1).unwrap()); + + // Truncate higher state to [at..]. 
+ let at_idx = higher_state.block_number_to_index(at).unwrap(); + higher_state.receipts = Receipts::from_vec(higher_state.receipts.split_off(at_idx)); + higher_state.bundle.take_n_reverts(at_idx); + higher_state.first_block = at; + + (Some(lower_state), higher_state) + } + + /// Extend one state from another + /// + /// For state this is very sensitive operation and should be used only when + /// we know that other state was build on top of this one. + /// In most cases this would be true. + pub fn extend(&mut self, other: Self) { + self.bundle.extend(other.bundle); + self.receipts.extend(other.receipts.receipt_vec); + } + + /// Prepends present the state with the given BundleState. + /// It adds changes from the given state but does not override any existing changes. + /// + /// Reverts and receipts are not updated. + pub fn prepend_state(&mut self, mut other: BundleState) { + let other_len = other.reverts.len(); + // take this bundle + let this_bundle = std::mem::take(&mut self.bundle); + // extend other bundle with this + other.extend(this_bundle); + // discard other reverts + other.take_n_reverts(other_len); + // swap bundles + std::mem::swap(&mut self.bundle, &mut other) + } +} diff --git a/crates/evm/execution-types/src/lib.rs b/crates/evm/execution-types/src/lib.rs new file mode 100644 index 0000000000000..576913997fc2f --- /dev/null +++ b/crates/evm/execution-types/src/lib.rs @@ -0,0 +1,12 @@ +//! Commonly used types for (EVM) block execution. 
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +mod bundle; +pub use bundle::*; diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index c9eb5f7e378d4..d7cc0280fdf63 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-blockchain-tree-api.workspace = true reth-execution-errors.workspace = true +reth-execution-types.workspace = true reth-primitives.workspace = true reth-fs-util.workspace = true reth-storage-errors.workspace = true @@ -68,4 +69,4 @@ rand.workspace = true [features] test-utils = ["alloy-rlp", "reth-db/test-utils", "reth-nippy-jar/test-utils"] -optimism = ["reth-primitives/optimism"] +optimism = ["reth-primitives/optimism", "reth-execution-types/optimism"] diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index 52c9366fde89a..931194d8975a0 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -7,311 +7,11 @@ use reth_db::{ tables, transaction::{DbTx, DbTxMut}, }; -use reth_evm::execute::BatchBlockExecutionOutput; -use reth_primitives::{ - logs_bloom, - revm::compat::{into_reth_acc, into_revm_acc}, - Account, Address, BlockHash, BlockNumber, Bloom, Bytecode, Log, Receipt, Receipts, - StaticFileSegment, StorageEntry, B256, U256, -}; +use reth_primitives::{BlockHash, BlockNumber, StaticFileSegment}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use reth_trie::HashedPostState; 
-pub use revm::db::states::OriginalValuesKnown; -use revm::{ - db::{states::BundleState, BundleAccount}, - primitives::AccountInfo, -}; -use std::collections::HashMap; - -/// Bundle state of post execution changes and reverts -#[derive(Default, Debug, Clone, PartialEq, Eq)] -pub struct BundleStateWithReceipts { - /// Bundle state with reverts. - bundle: BundleState, - /// The collection of receipts. - /// Outer vector stores receipts for each block sequentially. - /// The inner vector stores receipts ordered by transaction number. - /// - /// If receipt is None it means it is pruned. - receipts: Receipts, - /// First block of bundle state. - first_block: BlockNumber, -} - -// TODO(mattsse): unify the types, currently there's a cyclic dependency between -impl From for BundleStateWithReceipts { - fn from(value: BatchBlockExecutionOutput) -> Self { - let BatchBlockExecutionOutput { bundle, receipts, first_block } = value; - Self { bundle, receipts, first_block } - } -} - -// TODO(mattsse): unify the types, currently there's a cyclic dependency between -impl From for BatchBlockExecutionOutput { - fn from(value: BundleStateWithReceipts) -> Self { - let BundleStateWithReceipts { bundle, receipts, first_block } = value; - Self { bundle, receipts, first_block } - } -} - -/// Type used to initialize revms bundle state. -pub type BundleStateInit = - HashMap, Option, HashMap)>; - -/// Types used inside RevertsInit to initialize revms reverts. -pub type AccountRevertInit = (Option>, Vec); - -/// Type used to initialize revms reverts. -pub type RevertsInit = HashMap>; - -impl BundleStateWithReceipts { - /// Create Bundle State. - pub fn new(bundle: BundleState, receipts: Receipts, first_block: BlockNumber) -> Self { - Self { bundle, receipts, first_block } - } - - /// Create new bundle state with receipts. 
- pub fn new_init( - state_init: BundleStateInit, - revert_init: RevertsInit, - contracts_init: Vec<(B256, Bytecode)>, - receipts: Receipts, - first_block: BlockNumber, - ) -> Self { - // sort reverts by block number - let mut reverts = revert_init.into_iter().collect::>(); - reverts.sort_unstable_by_key(|a| a.0); - - // initialize revm bundle - let bundle = BundleState::new( - state_init.into_iter().map(|(address, (original, present, storage))| { - ( - address, - original.map(into_revm_acc), - present.map(into_revm_acc), - storage.into_iter().map(|(k, s)| (k.into(), s)).collect(), - ) - }), - reverts.into_iter().map(|(_, reverts)| { - // does not needs to be sorted, it is done when taking reverts. - reverts.into_iter().map(|(address, (original, storage))| { - ( - address, - original.map(|i| i.map(into_revm_acc)), - storage.into_iter().map(|entry| (entry.key.into(), entry.value)), - ) - }) - }), - contracts_init.into_iter().map(|(code_hash, bytecode)| (code_hash, bytecode.0)), - ); - - Self { bundle, receipts, first_block } - } - - /// Return revm bundle state. - pub fn state(&self) -> &BundleState { - &self.bundle - } - - /// Returns mutable revm bundle state. - pub fn state_mut(&mut self) -> &mut BundleState { - &mut self.bundle - } - /// Set first block. - pub fn set_first_block(&mut self, first_block: BlockNumber) { - self.first_block = first_block; - } - - /// Return iterator over all accounts - pub fn accounts_iter(&self) -> impl Iterator)> { - self.bundle.state().iter().map(|(a, acc)| (*a, acc.info.as_ref())) - } - - /// Return iterator over all [BundleAccount]s in the bundle - pub fn bundle_accounts_iter(&self) -> impl Iterator { - self.bundle.state().iter().map(|(a, acc)| (*a, acc)) - } - - /// Get account if account is known. - pub fn account(&self, address: &Address) -> Option> { - self.bundle.account(address).map(|a| a.info.clone().map(into_reth_acc)) - } - - /// Get storage if value is known. 
- /// - /// This means that depending on status we can potentially return U256::ZERO. - pub fn storage(&self, address: &Address, storage_key: U256) -> Option { - self.bundle.account(address).and_then(|a| a.storage_slot(storage_key)) - } - - /// Return bytecode if known. - pub fn bytecode(&self, code_hash: &B256) -> Option { - self.bundle.bytecode(code_hash).map(Bytecode) - } - - /// Returns [HashedPostState] for this bundle state. - /// See [HashedPostState::from_bundle_state] for more info. - pub fn hash_state_slow(&self) -> HashedPostState { - HashedPostState::from_bundle_state(&self.bundle.state) - } - - /// Transform block number to the index of block. - fn block_number_to_index(&self, block_number: BlockNumber) -> Option { - if self.first_block > block_number { - return None - } - let index = block_number - self.first_block; - if index >= self.receipts.len() as u64 { - return None - } - Some(index as usize) - } - - /// Returns an iterator over all block logs. - pub fn logs(&self, block_number: BlockNumber) -> Option> { - let index = self.block_number_to_index(block_number)?; - Some(self.receipts[index].iter().filter_map(|r| Some(r.as_ref()?.logs.iter())).flatten()) - } - - /// Return blocks logs bloom - pub fn block_logs_bloom(&self, block_number: BlockNumber) -> Option { - Some(logs_bloom(self.logs(block_number)?)) - } - - /// Returns the receipt root for all recorded receipts. - /// Note: this function calculated Bloom filters for every receipt and created merkle trees - /// of receipt. This is a expensive operation. - pub fn receipts_root_slow(&self, _block_number: BlockNumber) -> Option { - #[cfg(feature = "optimism")] - panic!("This should not be called in optimism mode. Use `optimism_receipts_root_slow` instead."); - #[cfg(not(feature = "optimism"))] - self.receipts.root_slow(self.block_number_to_index(_block_number)?) - } - - /// Returns the receipt root for all recorded receipts. 
- /// Note: this function calculated Bloom filters for every receipt and created merkle trees - /// of receipt. This is a expensive operation. - #[cfg(feature = "optimism")] - pub fn optimism_receipts_root_slow( - &self, - block_number: BlockNumber, - chain_spec: &reth_primitives::ChainSpec, - timestamp: u64, - ) -> Option { - self.receipts.optimism_root_slow( - self.block_number_to_index(block_number)?, - chain_spec, - timestamp, - ) - } - - /// Returns reference to receipts. - pub fn receipts(&self) -> &Receipts { - &self.receipts - } - - /// Returns mutable reference to receipts. - pub fn receipts_mut(&mut self) -> &mut Receipts { - &mut self.receipts - } - - /// Return all block receipts - pub fn receipts_by_block(&self, block_number: BlockNumber) -> &[Option] { - let Some(index) = self.block_number_to_index(block_number) else { return &[] }; - &self.receipts[index] - } - - /// Is bundle state empty of blocks. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Number of blocks in bundle state. - pub fn len(&self) -> usize { - self.receipts.len() - } - - /// Return first block of the bundle - pub fn first_block(&self) -> BlockNumber { - self.first_block - } - - /// Revert the state to the given block number. - /// - /// Returns false if the block number is not in the bundle state. - /// - /// # Note - /// - /// The provided block number will stay inside the bundle state. - pub fn revert_to(&mut self, block_number: BlockNumber) -> bool { - let Some(index) = self.block_number_to_index(block_number) else { return false }; - - // +1 is for number of blocks that we have as index is included. - let new_len = index + 1; - let rm_trx: usize = self.len() - new_len; - - // remove receipts - self.receipts.truncate(new_len); - // Revert last n reverts. - self.bundle.revert(rm_trx); - - true - } - - /// Splits the block range state at a given block number. - /// Returns two split states ([..at], [at..]). 
- /// The plain state of the 2nd bundle state will contain extra changes - /// that were made in state transitions belonging to the lower state. - /// - /// # Panics - /// - /// If the target block number is not included in the state block range. - pub fn split_at(self, at: BlockNumber) -> (Option, Self) { - if at == self.first_block { - return (None, self) - } - - let (mut lower_state, mut higher_state) = (self.clone(), self); - - // Revert lower state to [..at]. - lower_state.revert_to(at.checked_sub(1).unwrap()); - - // Truncate higher state to [at..]. - let at_idx = higher_state.block_number_to_index(at).unwrap(); - higher_state.receipts = Receipts::from_vec(higher_state.receipts.split_off(at_idx)); - higher_state.bundle.take_n_reverts(at_idx); - higher_state.first_block = at; - - (Some(lower_state), higher_state) - } - - /// Extend one state from another - /// - /// For state this is very sensitive operation and should be used only when - /// we know that other state was build on top of this one. - /// In most cases this would be true. - pub fn extend(&mut self, other: Self) { - self.bundle.extend(other.bundle); - self.receipts.extend(other.receipts.receipt_vec); - } - - /// Prepends present the state with the given BundleState. - /// It adds changes from the given state but does not override any existing changes. - /// - /// Reverts and receipts are not updated. 
- pub fn prepend_state(&mut self, mut other: BundleState) { - let other_len = other.reverts.len(); - // take this bundle - let this_bundle = std::mem::take(&mut self.bundle); - // extend other bundle with this - other.extend(this_bundle); - // discard other reverts - other.take_n_reverts(other_len); - // swap bundles - std::mem::swap(&mut self.bundle, &mut other) - } -} +pub use reth_execution_types::*; +pub use revm::db::states::OriginalValuesKnown; impl StateWriter for BundleStateWithReceipts { fn write_to_storage( @@ -380,29 +80,37 @@ impl BundleStateDataProvider for BundleStateWithReceipts { #[cfg(test)] mod tests { - use super::*; - use crate::{test_utils::create_test_provider_factory, AccountReader}; - use reth_db::{ - cursor::DbDupCursorRO, - database::Database, - models::{AccountBeforeTx, BlockNumberAddress}, - test_utils::create_test_rw_db, - }; - use reth_primitives::keccak256; - use reth_trie::{test_utils::state_root, StateRoot}; + use std::collections::{BTreeMap, HashMap}; + use revm::{ db::{ states::{ bundle_state::BundleRetention, changes::PlainStorageRevert, PlainStorageChangeset, }, - EmptyDB, + BundleState, EmptyDB, }, primitives::{ Account as RevmAccount, AccountInfo as RevmAccountInfo, AccountStatus, StorageSlot, }, DatabaseCommit, State, }; - use std::collections::BTreeMap; + + use reth_db::{ + cursor::DbDupCursorRO, + database::Database, + models::{AccountBeforeTx, BlockNumberAddress}, + test_utils::create_test_rw_db, + }; + use reth_primitives::{ + keccak256, + revm::compat::{into_reth_acc, into_revm_acc}, + Account, Address, Receipt, Receipts, StorageEntry, B256, U256, + }; + use reth_trie::{test_utils::state_root, StateRoot}; + + use crate::{test_utils::create_test_provider_factory, AccountReader}; + + use super::*; #[test] fn write_to_db_account_info() { From c16fb4e49a6992c8750072ec841d6c6cf473517a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 May 2024 15:25:31 +0200 Subject: [PATCH 653/700] feat: extract storage traits to 
storage-api (#8427) --- Cargo.lock | 14 + Cargo.toml | 2 + crates/storage/provider/Cargo.toml | 1 + .../bundle_state_with_receipts.rs | 21 +- crates/storage/provider/src/traits/block.rs | 266 +----------------- crates/storage/provider/src/traits/mod.rs | 43 +-- crates/storage/provider/src/traits/state.rs | 254 +---------------- crates/storage/storage-api/Cargo.toml | 24 ++ .../src/traits => storage-api/src}/account.rs | 0 crates/storage/storage-api/src/block.rs | 261 +++++++++++++++++ .../traits => storage-api/src}/block_hash.rs | 3 +- .../traits => storage-api/src}/block_id.rs | 2 +- .../src/traits => storage-api/src}/header.rs | 3 +- crates/storage/storage-api/src/lib.rs | 42 +++ .../traits => storage-api/src}/receipts.rs | 6 +- crates/storage/storage-api/src/state.rs | 257 +++++++++++++++++ .../src/traits => storage-api/src}/storage.rs | 8 +- .../src}/transactions.rs | 3 +- .../src/traits => storage-api/src}/trie.rs | 3 +- .../traits => storage-api/src}/withdrawals.rs | 0 20 files changed, 630 insertions(+), 583 deletions(-) create mode 100644 crates/storage/storage-api/Cargo.toml rename crates/storage/{provider/src/traits => storage-api/src}/account.rs (100%) create mode 100644 crates/storage/storage-api/src/block.rs rename crates/storage/{provider/src/traits => storage-api/src}/block_hash.rs (95%) rename crates/storage/{provider/src/traits => storage-api/src}/block_id.rs (99%) rename crates/storage/{provider/src/traits => storage-api/src}/header.rs (97%) create mode 100644 crates/storage/storage-api/src/lib.rs rename crates/storage/{provider/src/traits => storage-api/src}/receipts.rs (99%) create mode 100644 crates/storage/storage-api/src/state.rs rename crates/storage/{provider/src/traits => storage-api/src}/storage.rs (95%) rename crates/storage/{provider/src/traits => storage-api/src}/transactions.rs (99%) rename crates/storage/{provider/src/traits => storage-api/src}/trie.rs (94%) rename crates/storage/{provider/src/traits => 
storage-api/src}/withdrawals.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 21bb6b125f1f2..81570563db442 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7677,6 +7677,7 @@ dependencies = [ "reth-metrics", "reth-nippy-jar", "reth-primitives", + "reth-storage-api", "reth-storage-errors", "reth-trie", "revm", @@ -8018,6 +8019,19 @@ dependencies = [ "strum", ] +[[package]] +name = "reth-storage-api" +version = "0.2.0-beta.7" +dependencies = [ + "auto_impl", + "reth-db", + "reth-execution-types", + "reth-primitives", + "reth-storage-errors", + "reth-trie", + "revm", +] + [[package]] name = "reth-storage-errors" version = "0.2.0-beta.7" diff --git a/Cargo.toml b/Cargo.toml index bb9900ae1ddc3..4b34cbb5cb8a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -74,6 +74,7 @@ members = [ "crates/storage/libmdbx-rs/mdbx-sys/", "crates/storage/nippy-jar/", "crates/storage/provider/", + "crates/storage/storage-api/", "crates/tasks/", "crates/tokio-util/", "crates/tracing/", @@ -288,6 +289,7 @@ reth-stages = { path = "crates/stages" } reth-stages-api = { path = "crates/stages-api" } reth-static-file = { path = "crates/static-file" } reth-static-file-types = { path = "crates/static-file-types" } +reth-storage-api = { path = "crates/storage/storage-api" } reth-storage-errors = { path = "crates/storage/errors" } reth-tasks = { path = "crates/tasks" } reth-testing-utils = { path = "testing/testing-utils" } diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index d7cc0280fdf63..5fde8879769e5 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -20,6 +20,7 @@ reth-primitives.workspace = true reth-fs-util.workspace = true reth-storage-errors.workspace = true reth-interfaces.workspace = true +reth-storage-api.workspace = true reth-db.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs 
b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index 931194d8975a0..90f7e5f8afc4d 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -1,16 +1,12 @@ -use crate::{ - providers::StaticFileProviderRWRefMut, BundleStateDataProvider, StateChanges, StateReverts, - StateWriter, -}; +use crate::{providers::StaticFileProviderRWRefMut, StateChanges, StateReverts, StateWriter}; use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, tables, transaction::{DbTx, DbTxMut}, }; -use reth_primitives::{BlockHash, BlockNumber, StaticFileSegment}; -use reth_storage_errors::provider::{ProviderError, ProviderResult}; - pub use reth_execution_types::*; +use reth_primitives::StaticFileSegment; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; pub use revm::db::states::OriginalValuesKnown; impl StateWriter for BundleStateWithReceipts { @@ -67,17 +63,6 @@ impl StateWriter for BundleStateWithReceipts { } } -impl BundleStateDataProvider for BundleStateWithReceipts { - fn state(&self) -> &BundleStateWithReceipts { - self - } - - /// Always returns [None] because we don't have any information about the block header. 
- fn block_hash(&self, _block_number: BlockNumber) -> Option { - None - } -} - #[cfg(test)] mod tests { use std::collections::{BTreeMap, HashMap}; diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 99984d346b8fc..a10773a068d1b 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -1,269 +1,13 @@ -use crate::{ - BlockIdReader, BlockNumReader, BundleStateWithReceipts, Chain, HeaderProvider, ReceiptProvider, - ReceiptProviderIdExt, TransactionsProvider, WithdrawalsProvider, -}; -use auto_impl::auto_impl; +use crate::{BundleStateWithReceipts, Chain}; use reth_db::models::StoredBlockBodyIndices; -use reth_primitives::{ - Block, BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, BlockWithSenders, Header, - PruneModes, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, B256, -}; +use reth_primitives::{BlockNumber, PruneModes, SealedBlockWithSenders}; +use reth_storage_api::BlockReader; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostState}; use std::ops::RangeInclusive; -/// Enum to control transaction hash inclusion. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)] -pub enum TransactionVariant { - /// Indicates that transactions should be processed without including their hashes. - NoHash, - /// Indicates that transactions should be processed along with their hashes. - #[default] - WithHash, -} - -/// A helper enum that represents the origin of the requested block. -/// -/// This helper type's sole purpose is to give the caller more control over from where blocks can be -/// fetched. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)] -pub enum BlockSource { - /// Check all available sources. - /// - /// Note: it's expected that looking up pending blocks is faster than looking up blocks in the - /// database so this prioritizes Pending > Database. 
- #[default] - Any, - /// The block was fetched from the pending block source, the blockchain tree that buffers - /// blocks that are not yet finalized. - Pending, - /// The block was fetched from the database. - Database, -} - -impl BlockSource { - /// Returns `true` if the block source is `Pending` or `Any`. - pub fn is_pending(&self) -> bool { - matches!(self, BlockSource::Pending | BlockSource::Any) - } - - /// Returns `true` if the block source is `Database` or `Any`. - pub fn is_database(&self) -> bool { - matches!(self, BlockSource::Database | BlockSource::Any) - } -} - -/// Api trait for fetching `Block` related data. -/// -/// If not requested otherwise, implementers of this trait should prioritize fetching blocks from -/// the database. -#[auto_impl::auto_impl(&, Arc)] -pub trait BlockReader: - BlockNumReader - + HeaderProvider - + TransactionsProvider - + ReceiptProvider - + WithdrawalsProvider - + Send - + Sync -{ - /// Tries to find in the given block source. - /// - /// Note: this only operates on the hash because the number might be ambiguous. - /// - /// Returns `None` if block is not found. - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult>; - - /// Returns the block with given id from the database. - /// - /// Returns `None` if block is not found. - fn block(&self, id: BlockHashOrNumber) -> ProviderResult>; - - /// Returns the pending block if available - /// - /// Note: This returns a [SealedBlock] because it's expected that this is sealed by the provider - /// and the caller does not know the hash. - fn pending_block(&self) -> ProviderResult>; - - /// Returns the pending block if available - /// - /// Note: This returns a [SealedBlockWithSenders] because it's expected that this is sealed by - /// the provider and the caller does not know the hash. - fn pending_block_with_senders(&self) -> ProviderResult>; - - /// Returns the pending block and receipts if available. 
- fn pending_block_and_receipts(&self) -> ProviderResult)>>; - - /// Returns the ommers/uncle headers of the given block from the database. - /// - /// Returns `None` if block is not found. - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>>; - - /// Returns the block with matching hash from the database. - /// - /// Returns `None` if block is not found. - fn block_by_hash(&self, hash: B256) -> ProviderResult> { - self.block(hash.into()) - } - - /// Returns the block with matching number from database. - /// - /// Returns `None` if block is not found. - fn block_by_number(&self, num: u64) -> ProviderResult> { - self.block(num.into()) - } - - /// Returns the block body indices with matching number from database. - /// - /// Returns `None` if block is not found. - fn block_body_indices(&self, num: u64) -> ProviderResult>; - - /// Returns the block with senders with matching number or hash from database. - /// - /// Returns the block's transactions in the requested variant. - /// - /// Returns `None` if block is not found. - fn block_with_senders( - &self, - id: BlockHashOrNumber, - transaction_kind: TransactionVariant, - ) -> ProviderResult>; - - /// Returns all blocks in the given inclusive range. - /// - /// Note: returns only available blocks - fn block_range(&self, range: RangeInclusive) -> ProviderResult>; - - /// retrieves a range of blocks from the database, along with the senders of each - /// transaction in the blocks. - /// - /// The `transaction_kind` parameter determines whether to return its hash - fn block_with_senders_range( - &self, - range: RangeInclusive, - ) -> ProviderResult>; -} - -/// Trait extension for `BlockReader`, for types that implement `BlockId` conversion. -/// -/// The `BlockReader` trait should be implemented on types that can retrieve a block from either -/// a block number or hash. 
However, it might be desirable to fetch a block from a `BlockId` type, -/// which can be a number, hash, or tag such as `BlockNumberOrTag::Safe`. -/// -/// Resolving tags requires keeping track of block hashes or block numbers associated with the tag, -/// so this trait can only be implemented for types that implement `BlockIdReader`. The -/// `BlockIdReader` methods should be used to resolve `BlockId`s to block numbers or hashes, and -/// retrieving the block should be done using the type's `BlockReader` methods. -#[auto_impl::auto_impl(&, Arc)] -pub trait BlockReaderIdExt: BlockReader + BlockIdReader + ReceiptProviderIdExt { - /// Returns the block with matching tag from the database - /// - /// Returns `None` if block is not found. - fn block_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { - self.convert_block_number(id)?.map_or_else(|| Ok(None), |num| self.block(num.into())) - } - - /// Returns the pending block header if available - /// - /// Note: This returns a [SealedHeader] because it's expected that this is sealed by the - /// provider and the caller does not know the hash. - fn pending_header(&self) -> ProviderResult> { - self.sealed_header_by_id(BlockNumberOrTag::Pending.into()) - } - - /// Returns the latest block header if available - /// - /// Note: This returns a [SealedHeader] because it's expected that this is sealed by the - /// provider and the caller does not know the hash. - fn latest_header(&self) -> ProviderResult> { - self.sealed_header_by_id(BlockNumberOrTag::Latest.into()) - } - - /// Returns the safe block header if available - /// - /// Note: This returns a [SealedHeader] because it's expected that this is sealed by the - /// provider and the caller does not know the hash. 
- fn safe_header(&self) -> ProviderResult> { - self.sealed_header_by_id(BlockNumberOrTag::Safe.into()) - } - - /// Returns the finalized block header if available - /// - /// Note: This returns a [SealedHeader] because it's expected that this is sealed by the - /// provider and the caller does not know the hash. - fn finalized_header(&self) -> ProviderResult> { - self.sealed_header_by_id(BlockNumberOrTag::Finalized.into()) - } - - /// Returns the block with the matching [BlockId] from the database. - /// - /// Returns `None` if block is not found. - fn block_by_id(&self, id: BlockId) -> ProviderResult>; - - /// Returns the block with senders with matching [BlockId]. - /// - /// Returns the block's transactions in the requested variant. - /// - /// Returns `None` if block is not found. - fn block_with_senders_by_id( - &self, - id: BlockId, - transaction_kind: TransactionVariant, - ) -> ProviderResult> { - match id { - BlockId::Hash(hash) => { - self.block_with_senders(hash.block_hash.into(), transaction_kind) - } - BlockId::Number(num) => self.convert_block_number(num)?.map_or_else( - || Ok(None), - |num| self.block_with_senders(num.into(), transaction_kind), - ), - } - } - - /// Returns the header with matching tag from the database - /// - /// Returns `None` if header is not found. - fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { - self.convert_block_number(id)? - .map_or_else(|| Ok(None), |num| self.header_by_hash_or_number(num.into())) - } - - /// Returns the header with matching tag from the database - /// - /// Returns `None` if header is not found. - fn sealed_header_by_number_or_tag( - &self, - id: BlockNumberOrTag, - ) -> ProviderResult> { - self.convert_block_number(id)? - .map_or_else(|| Ok(None), |num| self.header_by_hash_or_number(num.into()))? - .map_or_else(|| Ok(None), |h| Ok(Some(h.seal_slow()))) - } - - /// Returns the sealed header with the matching `BlockId` from the database. 
- /// - /// Returns `None` if header is not found. - fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult>; - - /// Returns the header with the matching `BlockId` from the database. - /// - /// Returns `None` if header is not found. - fn header_by_id(&self, id: BlockId) -> ProviderResult>; - - /// Returns the ommers with the matching tag from the database. - fn ommers_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult>> { - self.convert_block_number(id)?.map_or_else(|| Ok(None), |num| self.ommers(num.into())) - } - - /// Returns the ommers with the matching `BlockId` from the database. - /// - /// Returns `None` if block is not found. - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>>; -} - /// BlockExecution Writer -#[auto_impl(&, Arc, Box)] +#[auto_impl::auto_impl(&, Arc, Box)] pub trait BlockExecutionWriter: BlockWriter + BlockReader + Send + Sync { /// Get range of blocks and its execution result fn get_block_and_execution_range( @@ -289,7 +33,7 @@ pub trait BlockExecutionWriter: BlockWriter + BlockReader + Send + Sync { } /// Block Writer -#[auto_impl(&, Arc, Box)] +#[auto_impl::auto_impl(&, Arc, Box)] pub trait BlockWriter: Send + Sync { /// Insert full block and make it canonical. Parent tx num and transition id is taken from /// parent block in database. diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index b63c8298dfcef..a4233cd8135a0 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -1,53 +1,22 @@ //! Collection of common provider traits. 
-mod account; -pub use account::{AccountExtReader, AccountReader, ChangeSetReader}; - -mod storage; -pub use storage::StorageReader; - -mod block; -pub use block::{ - BlockExecutionWriter, BlockReader, BlockReaderIdExt, BlockSource, BlockWriter, - TransactionVariant, -}; - -mod block_hash; -pub use block_hash::BlockHashReader; - -mod block_id; -pub use block_id::{BlockIdReader, BlockNumReader}; +// Re-export all the traits +pub use reth_storage_api::*; // Re-export for convenience pub use reth_evm::provider::EvmEnvProvider; +mod block; +pub use block::*; + mod chain_info; pub use chain_info::CanonChainTracker; -mod header; -pub use header::HeaderProvider; - mod header_sync_gap; pub use header_sync_gap::{HeaderSyncGap, HeaderSyncGapProvider, HeaderSyncMode}; -mod receipts; -pub use receipts::{ReceiptProvider, ReceiptProviderIdExt}; - mod state; -pub use state::{ - BlockchainTreePendingStateProvider, BundleStateDataProvider, BundleStateForkProvider, - FullBundleStateDataProvider, StateProvider, StateProviderBox, StateProviderFactory, - StateWriter, -}; - -mod trie; -pub use trie::StateRootProvider; - -mod transactions; -pub use transactions::{TransactionsProvider, TransactionsProviderExt}; - -mod withdrawals; -pub use withdrawals::WithdrawalsProvider; +pub use state::StateWriter; mod chain; pub use chain::{ diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index f31469a3def89..b7450bd4f028c 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -1,256 +1,10 @@ -use super::AccountReader; -use crate::{ - providers::StaticFileProviderRWRefMut, BlockHashReader, BlockIdReader, BundleStateWithReceipts, - StateRootProvider, -}; -use auto_impl::auto_impl; +use crate::providers::StaticFileProviderRWRefMut; use reth_db::transaction::{DbTx, DbTxMut}; -use reth_primitives::{ - trie::AccountProof, Address, BlockHash, BlockId, BlockNumHash, BlockNumber, BlockNumberOrTag, - 
Bytecode, StorageKey, StorageValue, B256, KECCAK_EMPTY, U256, -}; -use reth_storage_errors::provider::{ProviderError, ProviderResult}; +use reth_storage_errors::provider::ProviderResult; use revm::db::OriginalValuesKnown; -/// Type alias of boxed [StateProvider]. -pub type StateProviderBox = Box; - -/// An abstraction for a type that provides state data. -#[auto_impl(&, Arc, Box)] -pub trait StateProvider: BlockHashReader + AccountReader + StateRootProvider + Send + Sync { - /// Get storage of given account. - fn storage( - &self, - account: Address, - storage_key: StorageKey, - ) -> ProviderResult>; - - /// Get account code by its hash - fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult>; - - /// Get account and storage proofs. - fn proof(&self, address: Address, keys: &[B256]) -> ProviderResult; - - /// Get account code by its address. - /// - /// Returns `None` if the account doesn't exist or account is not a contract - fn account_code(&self, addr: Address) -> ProviderResult> { - // Get basic account information - // Returns None if acc doesn't exist - let acc = match self.basic_account(addr)? { - Some(acc) => acc, - None => return Ok(None), - }; - - if let Some(code_hash) = acc.bytecode_hash { - if code_hash == KECCAK_EMPTY { - return Ok(None) - } - // Get the code from the code hash - return self.bytecode_by_hash(code_hash) - } - - // Return `None` if no code hash is set - Ok(None) - } - - /// Get account balance by its address. - /// - /// Returns `None` if the account doesn't exist - fn account_balance(&self, addr: Address) -> ProviderResult> { - // Get basic account information - // Returns None if acc doesn't exist - match self.basic_account(addr)? { - Some(acc) => Ok(Some(acc.balance)), - None => Ok(None), - } - } - - /// Get account nonce by its address. 
- /// - /// Returns `None` if the account doesn't exist - fn account_nonce(&self, addr: Address) -> ProviderResult> { - // Get basic account information - // Returns None if acc doesn't exist - match self.basic_account(addr)? { - Some(acc) => Ok(Some(acc.nonce)), - None => Ok(None), - } - } -} - -/// Light wrapper that returns `StateProvider` implementations that correspond to the given -/// `BlockNumber`, the latest state, or the pending state. -/// -/// This type differentiates states into `historical`, `latest` and `pending`, where the `latest` -/// block determines what is historical or pending: `[historical..latest..pending]`. -/// -/// The `latest` state represents the state after the most recent block has been committed to the -/// database, `historical` states are states that have been committed to the database before the -/// `latest` state, and `pending` states are states that have not yet been committed to the -/// database which may or may not become the `latest` state, depending on consensus. -/// -/// Note: the `pending` block is considered the block that extends the canonical chain but one and -/// has the `latest` block as its parent. -/// -/// All states are _inclusive_, meaning they include _all_ all changes made (executed transactions) -/// in their respective blocks. For example [StateProviderFactory::history_by_block_number] for -/// block number `n` will return the state after block `n` was executed (transactions, withdrawals). -/// In other words, all states point to the end of the state's respective block, which is equivalent -/// to state at the beginning of the child block. -/// -/// This affects tracing, or replaying blocks, which will need to be executed on top of the state of -/// the parent block. For example, in order to trace block `n`, the state after block `n - 1` needs -/// to be used, since block `n` was executed on its parent block's state. 
-#[auto_impl(&, Arc, Box)] -pub trait StateProviderFactory: BlockIdReader + Send + Sync { - /// Storage provider for latest block. - fn latest(&self) -> ProviderResult; - - /// Returns a [StateProvider] indexed by the given [BlockId]. - /// - /// Note: if a number or hash is provided this will __only__ look at historical(canonical) - /// state. - fn state_by_block_id(&self, block_id: BlockId) -> ProviderResult { - match block_id { - BlockId::Number(block_number) => self.state_by_block_number_or_tag(block_number), - BlockId::Hash(block_hash) => self.history_by_block_hash(block_hash.into()), - } - } - - /// Returns a [StateProvider] indexed by the given block number or tag. - /// - /// Note: if a number is provided this will only look at historical(canonical) state. - fn state_by_block_number_or_tag( - &self, - number_or_tag: BlockNumberOrTag, - ) -> ProviderResult { - match number_or_tag { - BlockNumberOrTag::Latest => self.latest(), - BlockNumberOrTag::Finalized => { - // we can only get the finalized state by hash, not by num - let hash = match self.finalized_block_hash()? { - Some(hash) => hash, - None => return Err(ProviderError::FinalizedBlockNotFound), - }; - // only look at historical state - self.history_by_block_hash(hash) - } - BlockNumberOrTag::Safe => { - // we can only get the safe state by hash, not by num - let hash = match self.safe_block_hash()? { - Some(hash) => hash, - None => return Err(ProviderError::SafeBlockNotFound), - }; - - self.history_by_block_hash(hash) - } - BlockNumberOrTag::Earliest => self.history_by_block_number(0), - BlockNumberOrTag::Pending => self.pending(), - BlockNumberOrTag::Number(num) => { - // Note: The `BlockchainProvider` could also lookup the tree for the given block number, if for example the block number is `latest + 1`, however this should only support canonical state: - self.history_by_block_number(num) - } - } - } - - /// Returns a historical [StateProvider] indexed by the given historic block number. 
- /// - /// - /// Note: this only looks at historical blocks, not pending blocks. - fn history_by_block_number(&self, block: BlockNumber) -> ProviderResult; - - /// Returns a historical [StateProvider] indexed by the given block hash. - /// - /// Note: this only looks at historical blocks, not pending blocks. - fn history_by_block_hash(&self, block: BlockHash) -> ProviderResult; - - /// Returns _any_[StateProvider] with matching block hash. - /// - /// This will return a [StateProvider] for either a historical or pending block. - fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult; - - /// Storage provider for pending state. - /// - /// Represents the state at the block that extends the canonical chain by one. - /// If there's no `pending` block, then this is equal to [StateProviderFactory::latest] - fn pending(&self) -> ProviderResult; - - /// Storage provider for pending state for the given block hash. - /// - /// Represents the state at the block that extends the canonical chain. - /// - /// If the block couldn't be found, returns `None`. - fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult>; - - /// Return a [StateProvider] that contains bundle state data provider. - /// Used to inspect or execute transaction on the pending state. - fn pending_with_provider( - &self, - bundle_state_data: Box, - ) -> ProviderResult; -} - -/// Blockchain trait provider that gives access to the blockchain state that is not yet committed -/// (pending). -pub trait BlockchainTreePendingStateProvider: Send + Sync { - /// Returns a state provider that includes all state changes of the given (pending) block hash. - /// - /// In other words, the state provider will return the state after all transactions of the given - /// hash have been executed. 
- fn pending_state_provider( - &self, - block_hash: BlockHash, - ) -> ProviderResult> { - self.find_pending_state_provider(block_hash) - .ok_or(ProviderError::StateForHashNotFound(block_hash)) - } - - /// Returns state provider if a matching block exists. - fn find_pending_state_provider( - &self, - block_hash: BlockHash, - ) -> Option>; -} - -/// Post state data needed for execution on it. -/// -/// State contains: -/// * [`BundleStateWithReceipts`] contains all changed of accounts and storage of pending chain -/// * block hashes of pending chain and canonical blocks. -#[auto_impl(&, Box)] -pub trait BundleStateDataProvider: Send + Sync { - /// Return post state - fn state(&self) -> &BundleStateWithReceipts; - /// Return block hash by block number of pending or canonical chain. - fn block_hash(&self, block_number: BlockNumber) -> Option; -} - -/// Fork data needed for execution on it. -/// -/// It contains a canonical fork, the block on what pending chain was forked from. -#[auto_impl(&, Box)] -pub trait BundleStateForkProvider { - /// Return canonical fork, the block on what post state was forked from. - /// - /// Needed to create state provider. - fn canonical_fork(&self) -> BlockNumHash; -} - -/// Full post state data needed for execution on it. -/// This trait is used to create a state provider over pending state. -/// -/// This trait is a combination of [`BundleStateDataProvider`] and [`BundleStateForkProvider`]. -/// -/// Pending state contains: -/// * [`BundleStateWithReceipts`] contains all changed of accounts and storage of pending chain -/// * block hashes of pending chain and canonical blocks. -/// * canonical fork, the block on what pending chain was forked from. -pub trait FullBundleStateDataProvider: BundleStateDataProvider + BundleStateForkProvider {} - -impl FullBundleStateDataProvider for T where T: BundleStateDataProvider + BundleStateForkProvider {} - -/// A helper trait for [BundleStateWithReceipts] to write state and receipts to storage. 
+/// A helper trait for [BundleStateWithReceipts](reth_execution_types::BundleStateWithReceipts) to +/// write state and receipts to storage. pub trait StateWriter { /// Write the data and receipts to the database or static files if `static_file_producer` is /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml new file mode 100644 index 0000000000000..46fd824cd76b1 --- /dev/null +++ b/crates/storage/storage-api/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "reth-storage-api" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Reth storage provider traits and types" + +[lints] +workspace = true + +[dependencies] +# reth +reth-execution-types.workspace = true +reth-db.workspace = true +reth-primitives.workspace = true +reth-storage-errors.workspace = true +reth-trie.workspace = true + +revm.workspace = true + +auto_impl.workspace = true \ No newline at end of file diff --git a/crates/storage/provider/src/traits/account.rs b/crates/storage/storage-api/src/account.rs similarity index 100% rename from crates/storage/provider/src/traits/account.rs rename to crates/storage/storage-api/src/account.rs diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs new file mode 100644 index 0000000000000..070e31ed99329 --- /dev/null +++ b/crates/storage/storage-api/src/block.rs @@ -0,0 +1,261 @@ +use crate::{ + BlockIdReader, BlockNumReader, HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, + TransactionsProvider, WithdrawalsProvider, +}; +use reth_db::models::StoredBlockBodyIndices; +use reth_primitives::{ + Block, BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, BlockWithSenders, Header, + Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, B256, +}; +use 
reth_storage_errors::provider::ProviderResult; +use std::ops::RangeInclusive; + +/// Enum to control transaction hash inclusion. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)] +pub enum TransactionVariant { + /// Indicates that transactions should be processed without including their hashes. + NoHash, + /// Indicates that transactions should be processed along with their hashes. + #[default] + WithHash, +} + +/// A helper enum that represents the origin of the requested block. +/// +/// This helper type's sole purpose is to give the caller more control over from where blocks can be +/// fetched. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)] +pub enum BlockSource { + /// Check all available sources. + /// + /// Note: it's expected that looking up pending blocks is faster than looking up blocks in the + /// database so this prioritizes Pending > Database. + #[default] + Any, + /// The block was fetched from the pending block source, the blockchain tree that buffers + /// blocks that are not yet finalized. + Pending, + /// The block was fetched from the database. + Database, +} + +impl BlockSource { + /// Returns `true` if the block source is `Pending` or `Any`. + pub fn is_pending(&self) -> bool { + matches!(self, BlockSource::Pending | BlockSource::Any) + } + + /// Returns `true` if the block source is `Database` or `Any`. + pub fn is_database(&self) -> bool { + matches!(self, BlockSource::Database | BlockSource::Any) + } +} + +/// Api trait for fetching `Block` related data. +/// +/// If not requested otherwise, implementers of this trait should prioritize fetching blocks from +/// the database. +#[auto_impl::auto_impl(&, Arc)] +pub trait BlockReader: + BlockNumReader + + HeaderProvider + + TransactionsProvider + + ReceiptProvider + + WithdrawalsProvider + + Send + + Sync +{ + /// Tries to find in the given block source. + /// + /// Note: this only operates on the hash because the number might be ambiguous. 
+ /// + /// Returns `None` if block is not found. + fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult>; + + /// Returns the block with given id from the database. + /// + /// Returns `None` if block is not found. + fn block(&self, id: BlockHashOrNumber) -> ProviderResult>; + + /// Returns the pending block if available + /// + /// Note: This returns a [SealedBlock] because it's expected that this is sealed by the provider + /// and the caller does not know the hash. + fn pending_block(&self) -> ProviderResult>; + + /// Returns the pending block if available + /// + /// Note: This returns a [SealedBlockWithSenders] because it's expected that this is sealed by + /// the provider and the caller does not know the hash. + fn pending_block_with_senders(&self) -> ProviderResult>; + + /// Returns the pending block and receipts if available. + fn pending_block_and_receipts(&self) -> ProviderResult)>>; + + /// Returns the ommers/uncle headers of the given block from the database. + /// + /// Returns `None` if block is not found. + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>>; + + /// Returns the block with matching hash from the database. + /// + /// Returns `None` if block is not found. + fn block_by_hash(&self, hash: B256) -> ProviderResult> { + self.block(hash.into()) + } + + /// Returns the block with matching number from database. + /// + /// Returns `None` if block is not found. + fn block_by_number(&self, num: u64) -> ProviderResult> { + self.block(num.into()) + } + + /// Returns the block body indices with matching number from database. + /// + /// Returns `None` if block is not found. + fn block_body_indices(&self, num: u64) -> ProviderResult>; + + /// Returns the block with senders with matching number or hash from database. + /// + /// Returns the block's transactions in the requested variant. + /// + /// Returns `None` if block is not found. 
+ fn block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>; + + /// Returns all blocks in the given inclusive range. + /// + /// Note: returns only available blocks + fn block_range(&self, range: RangeInclusive) -> ProviderResult>; + + /// retrieves a range of blocks from the database, along with the senders of each + /// transaction in the blocks. + /// + /// The `transaction_kind` parameter determines whether to return its hash + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>; +} + +/// Trait extension for `BlockReader`, for types that implement `BlockId` conversion. +/// +/// The `BlockReader` trait should be implemented on types that can retrieve a block from either +/// a block number or hash. However, it might be desirable to fetch a block from a `BlockId` type, +/// which can be a number, hash, or tag such as `BlockNumberOrTag::Safe`. +/// +/// Resolving tags requires keeping track of block hashes or block numbers associated with the tag, +/// so this trait can only be implemented for types that implement `BlockIdReader`. The +/// `BlockIdReader` methods should be used to resolve `BlockId`s to block numbers or hashes, and +/// retrieving the block should be done using the type's `BlockReader` methods. +#[auto_impl::auto_impl(&, Arc)] +pub trait BlockReaderIdExt: BlockReader + BlockIdReader + ReceiptProviderIdExt { + /// Returns the block with matching tag from the database + /// + /// Returns `None` if block is not found. + fn block_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + self.convert_block_number(id)?.map_or_else(|| Ok(None), |num| self.block(num.into())) + } + + /// Returns the pending block header if available + /// + /// Note: This returns a [SealedHeader] because it's expected that this is sealed by the + /// provider and the caller does not know the hash. 
+ fn pending_header(&self) -> ProviderResult> { + self.sealed_header_by_id(BlockNumberOrTag::Pending.into()) + } + + /// Returns the latest block header if available + /// + /// Note: This returns a [SealedHeader] because it's expected that this is sealed by the + /// provider and the caller does not know the hash. + fn latest_header(&self) -> ProviderResult> { + self.sealed_header_by_id(BlockNumberOrTag::Latest.into()) + } + + /// Returns the safe block header if available + /// + /// Note: This returns a [SealedHeader] because it's expected that this is sealed by the + /// provider and the caller does not know the hash. + fn safe_header(&self) -> ProviderResult> { + self.sealed_header_by_id(BlockNumberOrTag::Safe.into()) + } + + /// Returns the finalized block header if available + /// + /// Note: This returns a [SealedHeader] because it's expected that this is sealed by the + /// provider and the caller does not know the hash. + fn finalized_header(&self) -> ProviderResult> { + self.sealed_header_by_id(BlockNumberOrTag::Finalized.into()) + } + + /// Returns the block with the matching [BlockId] from the database. + /// + /// Returns `None` if block is not found. + fn block_by_id(&self, id: BlockId) -> ProviderResult>; + + /// Returns the block with senders with matching [BlockId]. + /// + /// Returns the block's transactions in the requested variant. + /// + /// Returns `None` if block is not found. + fn block_with_senders_by_id( + &self, + id: BlockId, + transaction_kind: TransactionVariant, + ) -> ProviderResult> { + match id { + BlockId::Hash(hash) => { + self.block_with_senders(hash.block_hash.into(), transaction_kind) + } + BlockId::Number(num) => self.convert_block_number(num)?.map_or_else( + || Ok(None), + |num| self.block_with_senders(num.into(), transaction_kind), + ), + } + } + + /// Returns the header with matching tag from the database + /// + /// Returns `None` if header is not found. 
+ fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + self.convert_block_number(id)? + .map_or_else(|| Ok(None), |num| self.header_by_hash_or_number(num.into())) + } + + /// Returns the header with matching tag from the database + /// + /// Returns `None` if header is not found. + fn sealed_header_by_number_or_tag( + &self, + id: BlockNumberOrTag, + ) -> ProviderResult> { + self.convert_block_number(id)? + .map_or_else(|| Ok(None), |num| self.header_by_hash_or_number(num.into()))? + .map_or_else(|| Ok(None), |h| Ok(Some(h.seal_slow()))) + } + + /// Returns the sealed header with the matching `BlockId` from the database. + /// + /// Returns `None` if header is not found. + fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult>; + + /// Returns the header with the matching `BlockId` from the database. + /// + /// Returns `None` if header is not found. + fn header_by_id(&self, id: BlockId) -> ProviderResult>; + + /// Returns the ommers with the matching tag from the database. + fn ommers_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult>> { + self.convert_block_number(id)?.map_or_else(|| Ok(None), |num| self.ommers(num.into())) + } + + /// Returns the ommers with the matching `BlockId` from the database. + /// + /// Returns `None` if block is not found. + fn ommers_by_id(&self, id: BlockId) -> ProviderResult>>; +} diff --git a/crates/storage/provider/src/traits/block_hash.rs b/crates/storage/storage-api/src/block_hash.rs similarity index 95% rename from crates/storage/provider/src/traits/block_hash.rs rename to crates/storage/storage-api/src/block_hash.rs index 7413bb09c2191..bb6ff5a3a0c9e 100644 --- a/crates/storage/provider/src/traits/block_hash.rs +++ b/crates/storage/storage-api/src/block_hash.rs @@ -1,9 +1,8 @@ -use auto_impl::auto_impl; use reth_primitives::{BlockHashOrNumber, BlockNumber, B256}; use reth_storage_errors::provider::ProviderResult; /// Client trait for fetching block hashes by number. 
-#[auto_impl(&, Arc, Box)] +#[auto_impl::auto_impl(&, Arc, Box)] pub trait BlockHashReader: Send + Sync { /// Get the hash of the block with the given number. Returns `None` if no block with this number /// exists. diff --git a/crates/storage/provider/src/traits/block_id.rs b/crates/storage/storage-api/src/block_id.rs similarity index 99% rename from crates/storage/provider/src/traits/block_id.rs rename to crates/storage/storage-api/src/block_id.rs index 8ca2c98f8b06a..7aac3d5c14ff7 100644 --- a/crates/storage/provider/src/traits/block_id.rs +++ b/crates/storage/storage-api/src/block_id.rs @@ -1,4 +1,4 @@ -use super::BlockHashReader; +use crate::BlockHashReader; use reth_primitives::{BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, ChainInfo, B256}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; diff --git a/crates/storage/provider/src/traits/header.rs b/crates/storage/storage-api/src/header.rs similarity index 97% rename from crates/storage/provider/src/traits/header.rs rename to crates/storage/storage-api/src/header.rs index 4719470a7767c..433c907710444 100644 --- a/crates/storage/provider/src/traits/header.rs +++ b/crates/storage/storage-api/src/header.rs @@ -1,10 +1,9 @@ -use auto_impl::auto_impl; use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, Header, SealedHeader, U256}; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeBounds; /// Client trait for fetching `Header` related data. -#[auto_impl(&, Arc)] +#[auto_impl::auto_impl(&, Arc)] pub trait HeaderProvider: Send + Sync { /// Check if block is known fn is_known(&self, block_hash: &BlockHash) -> ProviderResult { diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs new file mode 100644 index 0000000000000..fdbb5e9a0ae55 --- /dev/null +++ b/crates/storage/storage-api/src/lib.rs @@ -0,0 +1,42 @@ +//! Collection of traits and types for common storage access. 
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +mod account; +pub use account::*; + +mod block; +pub use block::*; + +mod block_id; +pub use block_id::*; + +mod block_hash; +pub use block_hash::*; + +mod header; +pub use header::*; + +mod receipts; +pub use receipts::*; + +mod state; +pub use state::*; + +mod storage; +pub use storage::*; + +mod transactions; +pub use transactions::*; + +mod trie; +pub use trie::*; + +mod withdrawals; +pub use withdrawals::*; diff --git a/crates/storage/provider/src/traits/receipts.rs b/crates/storage/storage-api/src/receipts.rs similarity index 99% rename from crates/storage/provider/src/traits/receipts.rs rename to crates/storage/storage-api/src/receipts.rs index 138adcfa779cf..b050ca3e248c2 100644 --- a/crates/storage/provider/src/traits/receipts.rs +++ b/crates/storage/storage-api/src/receipts.rs @@ -1,9 +1,7 @@ -use std::ops::RangeBounds; - +use crate::BlockIdReader; use reth_primitives::{BlockHashOrNumber, BlockId, BlockNumberOrTag, Receipt, TxHash, TxNumber}; use reth_storage_errors::provider::ProviderResult; - -use crate::BlockIdReader; +use std::ops::RangeBounds; /// Client trait for fetching [Receipt] data . 
#[auto_impl::auto_impl(&, Arc)] diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs new file mode 100644 index 0000000000000..24209b7ab8d21 --- /dev/null +++ b/crates/storage/storage-api/src/state.rs @@ -0,0 +1,257 @@ +use super::{AccountReader, BlockHashReader, BlockIdReader, StateRootProvider}; +use auto_impl::auto_impl; +use reth_execution_types::BundleStateWithReceipts; +use reth_primitives::{ + trie::AccountProof, Address, BlockHash, BlockId, BlockNumHash, BlockNumber, BlockNumberOrTag, + Bytecode, StorageKey, StorageValue, B256, KECCAK_EMPTY, U256, +}; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; + +/// Type alias of boxed [StateProvider]. +pub type StateProviderBox = Box; + +/// An abstraction for a type that provides state data. +#[auto_impl(&, Arc, Box)] +pub trait StateProvider: BlockHashReader + AccountReader + StateRootProvider + Send + Sync { + /// Get storage of given account. + fn storage( + &self, + account: Address, + storage_key: StorageKey, + ) -> ProviderResult>; + + /// Get account code by its hash + fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult>; + + /// Get account and storage proofs. + fn proof(&self, address: Address, keys: &[B256]) -> ProviderResult; + + /// Get account code by its address. + /// + /// Returns `None` if the account doesn't exist or account is not a contract + fn account_code(&self, addr: Address) -> ProviderResult> { + // Get basic account information + // Returns None if acc doesn't exist + let acc = match self.basic_account(addr)? { + Some(acc) => acc, + None => return Ok(None), + }; + + if let Some(code_hash) = acc.bytecode_hash { + if code_hash == KECCAK_EMPTY { + return Ok(None) + } + // Get the code from the code hash + return self.bytecode_by_hash(code_hash) + } + + // Return `None` if no code hash is set + Ok(None) + } + + /// Get account balance by its address. 
+    ///
+    /// Returns `None` if the account doesn't exist
+    fn account_balance(&self, addr: Address) -> ProviderResult> {
+        // Get basic account information
+        // Returns None if acc doesn't exist
+        match self.basic_account(addr)? {
+            Some(acc) => Ok(Some(acc.balance)),
+            None => Ok(None),
+        }
+    }
+
+    /// Get account nonce by its address.
+    ///
+    /// Returns `None` if the account doesn't exist
+    fn account_nonce(&self, addr: Address) -> ProviderResult> {
+        // Get basic account information
+        // Returns None if acc doesn't exist
+        match self.basic_account(addr)? {
+            Some(acc) => Ok(Some(acc.nonce)),
+            None => Ok(None),
+        }
+    }
+}
+
+/// Light wrapper that returns `StateProvider` implementations that correspond to the given
+/// `BlockNumber`, the latest state, or the pending state.
+///
+/// This type differentiates states into `historical`, `latest` and `pending`, where the `latest`
+/// block determines what is historical or pending: `[historical..latest..pending]`.
+///
+/// The `latest` state represents the state after the most recent block has been committed to the
+/// database, `historical` states are states that have been committed to the database before the
+/// `latest` state, and `pending` states are states that have not yet been committed to the
+/// database which may or may not become the `latest` state, depending on consensus.
+///
+/// Note: the `pending` block is considered the block that extends the canonical chain but one and
+/// has the `latest` block as its parent.
+///
+/// All states are _inclusive_, meaning they include _all_ changes made (executed transactions)
+/// in their respective blocks. For example [StateProviderFactory::history_by_block_number] for
+/// block number `n` will return the state after block `n` was executed (transactions, withdrawals).
+/// In other words, all states point to the end of the state's respective block, which is equivalent
+/// to state at the beginning of the child block.
+/// +/// This affects tracing, or replaying blocks, which will need to be executed on top of the state of +/// the parent block. For example, in order to trace block `n`, the state after block `n - 1` needs +/// to be used, since block `n` was executed on its parent block's state. +#[auto_impl(&, Arc, Box)] +pub trait StateProviderFactory: BlockIdReader + Send + Sync { + /// Storage provider for latest block. + fn latest(&self) -> ProviderResult; + + /// Returns a [StateProvider] indexed by the given [BlockId]. + /// + /// Note: if a number or hash is provided this will __only__ look at historical(canonical) + /// state. + fn state_by_block_id(&self, block_id: BlockId) -> ProviderResult { + match block_id { + BlockId::Number(block_number) => self.state_by_block_number_or_tag(block_number), + BlockId::Hash(block_hash) => self.history_by_block_hash(block_hash.into()), + } + } + + /// Returns a [StateProvider] indexed by the given block number or tag. + /// + /// Note: if a number is provided this will only look at historical(canonical) state. + fn state_by_block_number_or_tag( + &self, + number_or_tag: BlockNumberOrTag, + ) -> ProviderResult { + match number_or_tag { + BlockNumberOrTag::Latest => self.latest(), + BlockNumberOrTag::Finalized => { + // we can only get the finalized state by hash, not by num + let hash = match self.finalized_block_hash()? { + Some(hash) => hash, + None => return Err(ProviderError::FinalizedBlockNotFound), + }; + // only look at historical state + self.history_by_block_hash(hash) + } + BlockNumberOrTag::Safe => { + // we can only get the safe state by hash, not by num + let hash = match self.safe_block_hash()? 
{ + Some(hash) => hash, + None => return Err(ProviderError::SafeBlockNotFound), + }; + + self.history_by_block_hash(hash) + } + BlockNumberOrTag::Earliest => self.history_by_block_number(0), + BlockNumberOrTag::Pending => self.pending(), + BlockNumberOrTag::Number(num) => { + // Note: The `BlockchainProvider` could also lookup the tree for the given block number, if for example the block number is `latest + 1`, however this should only support canonical state: + self.history_by_block_number(num) + } + } + } + + /// Returns a historical [StateProvider] indexed by the given historic block number. + /// + /// + /// Note: this only looks at historical blocks, not pending blocks. + fn history_by_block_number(&self, block: BlockNumber) -> ProviderResult; + + /// Returns a historical [StateProvider] indexed by the given block hash. + /// + /// Note: this only looks at historical blocks, not pending blocks. + fn history_by_block_hash(&self, block: BlockHash) -> ProviderResult; + + /// Returns _any_[StateProvider] with matching block hash. + /// + /// This will return a [StateProvider] for either a historical or pending block. + fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult; + + /// Storage provider for pending state. + /// + /// Represents the state at the block that extends the canonical chain by one. + /// If there's no `pending` block, then this is equal to [StateProviderFactory::latest] + fn pending(&self) -> ProviderResult; + + /// Storage provider for pending state for the given block hash. + /// + /// Represents the state at the block that extends the canonical chain. + /// + /// If the block couldn't be found, returns `None`. + fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult>; + + /// Return a [StateProvider] that contains bundle state data provider. + /// Used to inspect or execute transaction on the pending state. 
+ fn pending_with_provider( + &self, + bundle_state_data: Box, + ) -> ProviderResult; +} + +/// Blockchain trait provider that gives access to the blockchain state that is not yet committed +/// (pending). +pub trait BlockchainTreePendingStateProvider: Send + Sync { + /// Returns a state provider that includes all state changes of the given (pending) block hash. + /// + /// In other words, the state provider will return the state after all transactions of the given + /// hash have been executed. + fn pending_state_provider( + &self, + block_hash: BlockHash, + ) -> ProviderResult> { + self.find_pending_state_provider(block_hash) + .ok_or(ProviderError::StateForHashNotFound(block_hash)) + } + + /// Returns state provider if a matching block exists. + fn find_pending_state_provider( + &self, + block_hash: BlockHash, + ) -> Option>; +} + +/// Post state data needed for execution on it. +/// +/// State contains: +/// * [`BundleStateWithReceipts`] contains all changed of accounts and storage of pending chain +/// * block hashes of pending chain and canonical blocks. +#[auto_impl(&, Box)] +pub trait BundleStateDataProvider: Send + Sync { + /// Return post state + fn state(&self) -> &BundleStateWithReceipts; + /// Return block hash by block number of pending or canonical chain. + fn block_hash(&self, block_number: BlockNumber) -> Option; +} + +impl BundleStateDataProvider for BundleStateWithReceipts { + fn state(&self) -> &BundleStateWithReceipts { + self + } + + /// Always returns [None] because we don't have any information about the block header. + fn block_hash(&self, _block_number: BlockNumber) -> Option { + None + } +} + +/// Fork data needed for execution on it. +/// +/// It contains a canonical fork, the block on what pending chain was forked from. +#[auto_impl(&, Box)] +pub trait BundleStateForkProvider { + /// Return canonical fork, the block on what post state was forked from. + /// + /// Needed to create state provider. 
+    fn canonical_fork(&self) -> BlockNumHash; +} + +/// Full post state data needed for execution on it. +/// This trait is used to create a state provider over pending state. +/// +/// This trait is a combination of [`BundleStateDataProvider`] and [`BundleStateForkProvider`]. +/// +/// Pending state contains: +/// * [`BundleStateWithReceipts`] contains all changes of accounts and storage of pending chain +/// * block hashes of pending chain and canonical blocks. +/// * canonical fork, the block from which the pending chain was forked. +pub trait FullBundleStateDataProvider: BundleStateDataProvider + BundleStateForkProvider {} + +impl FullBundleStateDataProvider for T where T: BundleStateDataProvider + BundleStateForkProvider {} diff --git a/crates/storage/provider/src/traits/storage.rs b/crates/storage/storage-api/src/storage.rs similarity index 95% rename from crates/storage/provider/src/traits/storage.rs rename to crates/storage/storage-api/src/storage.rs index 04cb3a0d2ddf3..a45df9d72f6c7 100644 --- a/crates/storage/provider/src/traits/storage.rs +++ b/crates/storage/storage-api/src/storage.rs @@ -1,14 +1,12 @@ +use reth_primitives::{Address, BlockNumber, StorageEntry, B256}; +use reth_storage_errors::provider::ProviderResult; use std::{ collections::{BTreeMap, BTreeSet}, ops::RangeInclusive, }; -use auto_impl::auto_impl; -use reth_primitives::{Address, BlockNumber, StorageEntry, B256}; -use reth_storage_errors::provider::ProviderResult; - /// Storage reader -#[auto_impl(&, Arc, Box)] +#[auto_impl::auto_impl(&, Arc, Box)] pub trait StorageReader: Send + Sync { /// Get plainstate storages for addresses and storage keys.
fn plain_state_storages( diff --git a/crates/storage/provider/src/traits/transactions.rs b/crates/storage/storage-api/src/transactions.rs similarity index 99% rename from crates/storage/provider/src/traits/transactions.rs rename to crates/storage/storage-api/src/transactions.rs index d693c52f80ed5..763632a38331d 100644 --- a/crates/storage/provider/src/traits/transactions.rs +++ b/crates/storage/storage-api/src/transactions.rs @@ -1,10 +1,11 @@ +use std::ops::{Range, RangeBounds, RangeInclusive}; + use crate::{BlockNumReader, BlockReader}; use reth_primitives::{ Address, BlockHashOrNumber, BlockNumber, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use std::ops::{Range, RangeBounds, RangeInclusive}; /// Client trait for fetching [TransactionSigned] related data. #[auto_impl::auto_impl(&, Arc)] diff --git a/crates/storage/provider/src/traits/trie.rs b/crates/storage/storage-api/src/trie.rs similarity index 94% rename from crates/storage/provider/src/traits/trie.rs rename to crates/storage/storage-api/src/trie.rs index 52f3317a3f305..083f565492e41 100644 --- a/crates/storage/provider/src/traits/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -1,11 +1,10 @@ -use auto_impl::auto_impl; use reth_primitives::B256; use reth_storage_errors::provider::ProviderResult; use reth_trie::updates::TrieUpdates; use revm::db::BundleState; /// A type that can compute the state root of a given post state. -#[auto_impl(&, Box, Arc)] +#[auto_impl::auto_impl(&, Box, Arc)] pub trait StateRootProvider: Send + Sync { /// Returns the state root of the `BundleState` on top of the current state. 
/// diff --git a/crates/storage/provider/src/traits/withdrawals.rs b/crates/storage/storage-api/src/withdrawals.rs similarity index 100% rename from crates/storage/provider/src/traits/withdrawals.rs rename to crates/storage/storage-api/src/withdrawals.rs From f6e1c7f76ed83219f0ef94be78856d1bdb1cf14b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 May 2024 15:43:37 +0200 Subject: [PATCH 654/700] chore: remove reth-interfaces from static file (#8428) --- Cargo.lock | 1 + .../consensus/beacon/src/engine/hooks/static_file.rs | 2 +- crates/static-file/Cargo.toml | 3 ++- crates/static-file/src/segments/headers.rs | 2 +- crates/static-file/src/segments/mod.rs | 2 +- crates/static-file/src/segments/receipts.rs | 2 +- crates/static-file/src/segments/transactions.rs | 2 +- crates/static-file/src/static_file_producer.rs | 11 +++++------ 8 files changed, 13 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 81570563db442..813e595dde95c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8001,6 +8001,7 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-stages", + "reth-storage-errors", "reth-tokio-util", "tempfile", "tokio", diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index 3786e29f87f33..3d78d51d93cc4 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -55,7 +55,7 @@ impl StaticFileHook { match result { Ok(_) => EngineHookEvent::Finished(Ok(())), - Err(err) => EngineHookEvent::Finished(Err(err.into())), + Err(err) => EngineHookEvent::Finished(Err(EngineHookError::Common(err.into()))), } } Err(_) => { diff --git a/crates/static-file/Cargo.toml b/crates/static-file/Cargo.toml index 0f6608c8084df..f73300841609e 100644 --- a/crates/static-file/Cargo.toml +++ b/crates/static-file/Cargo.toml @@ -16,7 +16,7 @@ workspace = true reth-primitives.workspace = true reth-db.workspace = true 
reth-provider.workspace = true -reth-interfaces.workspace = true +reth-storage-errors.workspace = true reth-nippy-jar.workspace = true reth-tokio-util.workspace = true @@ -30,6 +30,7 @@ rayon.workspace = true parking_lot = { workspace = true, features = ["send_guard", "arc_lock"] } [dev-dependencies] +reth-interfaces.workspace = true reth-db = { workspace = true, features = ["test-utils"] } reth-stages = { workspace = true, features = ["test-utils"] } diff --git a/crates/static-file/src/segments/headers.rs b/crates/static-file/src/segments/headers.rs index 4390ff6ed6a5e..2c7e62e5015c6 100644 --- a/crates/static-file/src/segments/headers.rs +++ b/crates/static-file/src/segments/headers.rs @@ -3,12 +3,12 @@ use reth_db::{ cursor::DbCursorRO, database::Database, static_file::create_static_file_T1_T2_T3, tables, transaction::DbTx, RawKey, RawTable, }; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{static_file::SegmentConfig, BlockNumber, StaticFileSegment}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, DatabaseProviderRO, }; +use reth_storage_errors::provider::ProviderResult; use std::{ops::RangeInclusive, path::Path}; /// Static File segment responsible for [StaticFileSegment::Headers] part of data. 
diff --git a/crates/static-file/src/segments/mod.rs b/crates/static-file/src/segments/mod.rs index be3ca0e716a0d..581a2cba73dba 100644 --- a/crates/static-file/src/segments/mod.rs +++ b/crates/static-file/src/segments/mod.rs @@ -12,7 +12,6 @@ pub use receipts::Receipts; use reth_db::{ cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx, RawKey, RawTable, }; -use reth_interfaces::provider::ProviderResult; use reth_nippy_jar::NippyJar; use reth_primitives::{ static_file::{ @@ -24,6 +23,7 @@ use reth_primitives::{ use reth_provider::{ providers::StaticFileProvider, DatabaseProviderRO, ProviderError, TransactionsProviderExt, }; +use reth_storage_errors::provider::ProviderResult; use std::{ops::RangeInclusive, path::Path}; pub(crate) type Rows = [Vec>; COLUMNS]; diff --git a/crates/static-file/src/segments/receipts.rs b/crates/static-file/src/segments/receipts.rs index 61b191c18c223..85b4863d8addd 100644 --- a/crates/static-file/src/segments/receipts.rs +++ b/crates/static-file/src/segments/receipts.rs @@ -3,7 +3,6 @@ use reth_db::{ cursor::DbCursorRO, database::Database, static_file::create_static_file_T1, tables, transaction::DbTx, }; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ static_file::{SegmentConfig, SegmentHeader}, BlockNumber, StaticFileSegment, TxNumber, @@ -12,6 +11,7 @@ use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, BlockReader, DatabaseProviderRO, TransactionsProviderExt, }; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ops::RangeInclusive, path::Path}; /// Static File segment responsible for [StaticFileSegment::Receipts] part of data. 
diff --git a/crates/static-file/src/segments/transactions.rs b/crates/static-file/src/segments/transactions.rs index 36110243c1038..ba174507743a6 100644 --- a/crates/static-file/src/segments/transactions.rs +++ b/crates/static-file/src/segments/transactions.rs @@ -3,7 +3,6 @@ use reth_db::{ cursor::DbCursorRO, database::Database, static_file::create_static_file_T1, tables, transaction::DbTx, }; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ static_file::{SegmentConfig, SegmentHeader}, BlockNumber, StaticFileSegment, TxNumber, @@ -12,6 +11,7 @@ use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, BlockReader, DatabaseProviderRO, TransactionsProviderExt, }; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ops::RangeInclusive, path::Path}; /// Static File segment responsible for [StaticFileSegment::Transactions] part of data. diff --git a/crates/static-file/src/static_file_producer.rs b/crates/static-file/src/static_file_producer.rs index 4eb0825611423..ef66a12ed1784 100644 --- a/crates/static-file/src/static_file_producer.rs +++ b/crates/static-file/src/static_file_producer.rs @@ -4,12 +4,12 @@ use crate::{segments, segments::Segment, StaticFileProducerEvent}; use parking_lot::Mutex; use rayon::prelude::*; use reth_db::database::Database; -use reth_interfaces::RethResult; use reth_primitives::{static_file::HighestStaticFiles, BlockNumber, PruneModes}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, ProviderFactory, }; +use reth_storage_errors::provider::ProviderResult; use reth_tokio_util::{EventSender, EventStream}; use std::{ ops::{Deref, RangeInclusive}, @@ -19,7 +19,7 @@ use std::{ use tracing::{debug, trace}; /// Result of [StaticFileProducerInner::run] execution. 
-pub type StaticFileProducerResult = RethResult; +pub type StaticFileProducerResult = ProviderResult; /// The [StaticFileProducer] instance itself with the result of [StaticFileProducerInner::run] pub type StaticFileProducerWithResult = (StaticFileProducer, StaticFileProducerResult); @@ -154,7 +154,7 @@ impl StaticFileProducerInner { segments.push((Box::new(segments::Receipts), block_range)); } - segments.par_iter().try_for_each(|(segment, block_range)| -> RethResult<()> { + segments.par_iter().try_for_each(|(segment, block_range)| -> ProviderResult<()> { debug!(target: "static_file", segment = %segment.segment(), ?block_range, "StaticFileProducer segment"); let start = Instant::now(); @@ -189,7 +189,7 @@ impl StaticFileProducerInner { pub fn get_static_file_targets( &self, finalized_block_numbers: HighestStaticFiles, - ) -> RethResult { + ) -> ProviderResult { let highest_static_files = self.static_file_provider.get_highest_static_files(); let targets = StaticFileTargets { @@ -252,7 +252,6 @@ mod tests { generators, generators::{random_block_range, random_receipt}, }, - RethError, }; use reth_primitives::{ static_file::HighestStaticFiles, PruneModes, StaticFileSegment, B256, U256, @@ -373,7 +372,7 @@ mod tests { ); assert_matches!( static_file_producer.run(targets), - Err(RethError::Provider(ProviderError::BlockBodyIndicesNotFound(4))) + Err(ProviderError::BlockBodyIndicesNotFound(4)) ); assert_eq!( static_file_provider.get_highest_static_files(), From b4a1b733c93f7e262f1b774722670e08cdcb6276 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 28 May 2024 15:06:28 +0100 Subject: [PATCH 655/700] feat: implement EIP-7685 (#8424) Co-authored-by: Oliver Nordbjerg Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> Co-authored-by: Matthias Seitz --- Cargo.lock | 32 ++++--- bin/reth/src/commands/db/diff.rs | 5 +- bin/reth/src/commands/db/stats.rs | 13 +-- bin/reth/src/commands/debug_cmd/execution.rs | 4 +- 
bin/reth/src/commands/debug_cmd/merkle.rs | 3 +- bin/reth/src/commands/stage/drop.rs | 1 + crates/blockchain-tree/src/blockchain_tree.rs | 1 + crates/blockchain-tree/src/chain.rs | 8 +- crates/consensus/auto-seal/src/lib.rs | 26 +++-- crates/consensus/auto-seal/src/task.rs | 6 +- crates/consensus/common/src/validation.rs | 34 ++++++- crates/consensus/consensus/Cargo.toml | 1 + crates/consensus/consensus/src/lib.rs | 38 +++++++- crates/consensus/consensus/src/test_utils.rs | 6 +- crates/engine-primitives/src/lib.rs | 27 +++++- crates/ethereum/consensus/src/lib.rs | 8 +- crates/ethereum/consensus/src/validation.rs | 17 +++- crates/ethereum/engine-primitives/src/lib.rs | 3 +- .../ethereum/engine-primitives/src/payload.rs | 28 +++++- crates/ethereum/evm/src/execute.rs | 67 +++++++++---- crates/ethereum/evm/src/lib.rs | 1 + crates/evm/Cargo.toml | 2 +- crates/evm/execution-types/src/bundle.rs | 5 +- crates/evm/src/execute.rs | 31 +++++- crates/evm/src/test_utils.rs | 3 +- crates/net/downloaders/src/bodies/bodies.rs | 7 +- .../net/downloaders/src/bodies/test_utils.rs | 1 + crates/net/downloaders/src/file_client.rs | 20 +--- crates/net/downloaders/src/test_utils/mod.rs | 1 + crates/net/eth-wire-types/src/blocks.rs | 6 ++ crates/net/network/src/eth_requests.rs | 1 + crates/net/network/tests/it/requests.rs | 8 +- crates/net/p2p/src/full_block.rs | 40 +++++--- crates/net/p2p/src/test_utils/generators.rs | 1 + crates/node-core/src/utils.rs | 1 + crates/optimism/consensus/src/lib.rs | 9 +- crates/optimism/evm/src/execute.rs | 10 +- crates/optimism/evm/src/l1.rs | 2 + crates/optimism/node/src/engine.rs | 4 +- crates/optimism/payload/src/builder.rs | 10 +- crates/optimism/payload/src/payload.rs | 32 ++++++- crates/payload/ethereum/Cargo.toml | 4 +- crates/payload/ethereum/src/lib.rs | 75 +++++++++++---- crates/primitives/Cargo.toml | 2 + crates/primitives/src/alloy_compat.rs | 5 + crates/primitives/src/block.rs | 47 ++++++++- crates/primitives/src/chain/spec.rs | 13 ++- 
crates/primitives/src/header.rs | 41 ++++++-- crates/primitives/src/lib.rs | 4 +- crates/primitives/src/proofs.rs | 12 ++- crates/primitives/src/request.rs | 55 +++++++++++ crates/primitives/src/revm/mod.rs | 2 + crates/primitives/src/transaction/mod.rs | 2 + crates/revm/src/batch.rs | 20 +++- crates/rpc/rpc-api/src/engine.rs | 25 ++++- crates/rpc/rpc-engine-api/src/engine_api.rs | 46 ++++++++- crates/rpc/rpc-engine-api/src/metrics.rs | 4 + crates/rpc/rpc-engine-api/tests/it/payload.rs | 1 + crates/rpc/rpc-types-compat/src/block.rs | 9 +- .../rpc-types-compat/src/engine/payload.rs | 66 ++++++++++--- crates/rpc/rpc/src/eth/api/pending_block.rs | 15 ++- crates/stages/src/stages/bodies.rs | 16 ++++ crates/stages/src/stages/execution.rs | 3 +- crates/stages/src/stages/merkle.rs | 5 +- crates/storage/codecs/Cargo.toml | 15 ++- crates/storage/codecs/src/alloy/mod.rs | 1 + crates/storage/codecs/src/alloy/request.rs | 39 ++++++++ .../storage/db/src/tables/codecs/compact.rs | 1 + crates/storage/db/src/tables/mod.rs | 5 +- .../src/providers/database/metrics.rs | 2 + .../provider/src/providers/database/mod.rs | 18 +++- .../src/providers/database/provider.rs | 95 ++++++++++++++++--- crates/storage/provider/src/providers/mod.rs | 19 +++- .../src/providers/static_file/manager.rs | 13 ++- .../storage/provider/src/test_utils/blocks.rs | 6 +- .../storage/provider/src/test_utils/mock.rs | 15 ++- .../storage/provider/src/test_utils/noop.rs | 12 ++- crates/storage/storage-api/src/block.rs | 3 +- crates/storage/storage-api/src/lib.rs | 3 + crates/storage/storage-api/src/requests.rs | 13 +++ .../src/mined_sidecar.rs | 6 +- examples/custom-engine-types/src/main.rs | 3 +- testing/ef-tests/src/models.rs | 3 + 83 files changed, 1053 insertions(+), 214 deletions(-) create mode 100644 crates/primitives/src/request.rs create mode 100644 crates/storage/codecs/src/alloy/request.rs create mode 100644 crates/storage/storage-api/src/requests.rs diff --git a/Cargo.lock b/Cargo.lock index 
813e595dde95c..031ff14752e3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -139,14 +139,17 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "arbitrary", "c-kzg", + "proptest", + "proptest-derive", "serde", ] [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0f9711692743d0444a887a589ca6786df77568be" +source = "git+https://github.com/alloy-rs/alloy#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-primitives", @@ -195,7 +198,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0f9711692743d0444a887a589ca6786df77568be" +source = "git+https://github.com/alloy-rs/alloy#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -220,7 +223,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0f9711692743d0444a887a589ca6786df77568be" +source = "git+https://github.com/alloy-rs/alloy#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ "alloy-primitives", "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -342,9 +345,9 @@ dependencies = [ [[package]] name = "alloy-rlp" -version = "0.3.5" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b155716bab55763c95ba212806cf43d05bcc70e5f35b02bad20cf5ec7fe11fed" +checksum = "8d58d9f5da7b40e9bfff0b7e7816700be4019db97d4b6359fe7f94a9e22e42ac" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -353,9 +356,9 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.5" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8037e03c7f462a063f28daec9fda285a9a89da003c552f8637a80b9c8fd96241" +checksum = 
"1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" dependencies = [ "proc-macro2", "quote", @@ -407,7 +410,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0f9711692743d0444a887a589ca6786df77568be" +source = "git+https://github.com/alloy-rs/alloy#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -488,7 +491,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0f9711692743d0444a887a589ca6786df77568be" +source = "git+https://github.com/alloy-rs/alloy#dd7a999d9efe259c47a34dde046952de795a8f6a" dependencies = [ "alloy-primitives", "serde", @@ -909,11 +912,12 @@ dependencies = [ [[package]] name = "async-channel" -version = "2.3.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +checksum = "9f2776ead772134d55b62dd45e59a79e21612d85d0af729b8b7d3967d601a62a" dependencies = [ "concurrent-queue", + "event-listener 5.3.0", "event-listener-strategy 0.5.2", "futures-core", "pin-project-lite", @@ -1275,7 +1279,7 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "495f7104e962b7356f0aeb34247aca1fe7d2e783b346582db7f2904cb5717e88" dependencies = [ - "async-channel 2.3.1", + "async-channel 2.3.0", "async-lock", "async-task", "futures-io", @@ -6585,6 +6589,7 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.7" dependencies = [ + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-primitives", @@ -6982,6 +6987,8 @@ name = 
"reth-ethereum-payload-builder" version = "0.2.0-beta.7" dependencies = [ "reth-basic-payload-builder", + "reth-evm", + "reth-evm-ethereum", "reth-payload-builder", "reth-primitives", "reth-provider", @@ -7608,6 +7615,7 @@ name = "reth-primitives" version = "0.2.0-beta.7" dependencies = [ "alloy-chains", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "alloy-primitives", diff --git a/bin/reth/src/commands/db/diff.rs b/bin/reth/src/commands/db/diff.rs index 9c098a50b574c..3c7bfb8c07eae 100644 --- a/bin/reth/src/commands/db/diff.rs +++ b/bin/reth/src/commands/db/diff.rs @@ -6,7 +6,7 @@ use crate::{ use clap::Parser; use reth_db::{ cursor::DbCursorRO, database::Database, open_db_read_only, table::Table, transaction::DbTx, - AccountChangeSets, AccountsHistory, AccountsTrie, BlockBodyIndices, BlockOmmers, + AccountChangeSets, AccountsHistory, AccountsTrie, BlockBodyIndices, BlockOmmers, BlockRequests, BlockWithdrawals, Bytecodes, CanonicalHeaders, DatabaseEnv, HashedAccounts, HashedStorages, HeaderNumbers, HeaderTerminalDifficulties, Headers, PlainAccountState, PlainStorageState, PruneCheckpoints, Receipts, StageCheckpointProgresses, StageCheckpoints, StorageChangeSets, @@ -98,6 +98,9 @@ impl Command { Tables::BlockWithdrawals => { find_diffs::(primary_tx, secondary_tx, output_dir)? } + Tables::BlockRequests => { + find_diffs::(primary_tx, secondary_tx, output_dir)? + } Tables::TransactionBlocks => { find_diffs::(primary_tx, secondary_tx, output_dir)? 
} diff --git a/bin/reth/src/commands/db/stats.rs b/bin/reth/src/commands/db/stats.rs index 03c384b2ffc4d..d38c0e21af55f 100644 --- a/bin/reth/src/commands/db/stats.rs +++ b/bin/reth/src/commands/db/stats.rs @@ -8,12 +8,12 @@ use human_bytes::human_bytes; use itertools::Itertools; use reth_db::{ database::Database, mdbx, static_file::iter_static_files, AccountChangeSets, AccountsHistory, - AccountsTrie, BlockBodyIndices, BlockOmmers, BlockWithdrawals, Bytecodes, CanonicalHeaders, - DatabaseEnv, HashedAccounts, HashedStorages, HeaderNumbers, HeaderTerminalDifficulties, - Headers, PlainAccountState, PlainStorageState, PruneCheckpoints, Receipts, - StageCheckpointProgresses, StageCheckpoints, StorageChangeSets, StoragesHistory, StoragesTrie, - Tables, TransactionBlocks, TransactionHashNumbers, TransactionSenders, Transactions, - VersionHistory, + AccountsTrie, BlockBodyIndices, BlockOmmers, BlockRequests, BlockWithdrawals, Bytecodes, + CanonicalHeaders, DatabaseEnv, HashedAccounts, HashedStorages, HeaderNumbers, + HeaderTerminalDifficulties, Headers, PlainAccountState, PlainStorageState, PruneCheckpoints, + Receipts, StageCheckpointProgresses, StageCheckpoints, StorageChangeSets, StoragesHistory, + StoragesTrie, Tables, TransactionBlocks, TransactionHashNumbers, TransactionSenders, + Transactions, VersionHistory, }; use reth_fs_util as fs; use reth_node_core::dirs::{ChainPath, DataDirPath}; @@ -333,6 +333,7 @@ impl Command { Tables::BlockBodyIndices => viewer.get_checksum::().unwrap(), Tables::BlockOmmers => viewer.get_checksum::().unwrap(), Tables::BlockWithdrawals => viewer.get_checksum::().unwrap(), + Tables::BlockRequests => viewer.get_checksum::().unwrap(), Tables::Bytecodes => viewer.get_checksum::().unwrap(), Tables::CanonicalHeaders => viewer.get_checksum::().unwrap(), Tables::HashedAccounts => viewer.get_checksum::().unwrap(), diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 
c07efab2b9fe2..628a6cd2621c1 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -187,7 +187,7 @@ impl Command { match get_single_header(&client, BlockHashOrNumber::Number(block)).await { Ok(tip_header) => { info!(target: "reth::cli", ?block, "Successfully fetched block"); - return Ok(tip_header.hash()); + return Ok(tip_header.hash()) } Err(error) => { error!(target: "reth::cli", ?block, %error, "Failed to fetch the block. Retrying..."); @@ -255,7 +255,7 @@ impl Command { provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); if latest_block_number.unwrap_or_default() >= self.to { info!(target: "reth::cli", latest = latest_block_number, "Nothing to run"); - return Ok(()); + return Ok(()) } let pipeline_events = pipeline.events(); diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 291788bad7579..bfbca46f4ff4a 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -198,7 +198,8 @@ impl Command { PruneModes::none(), ); executor.execute_and_verify_one((&sealed_block.clone().unseal(), td).into())?; - let BatchBlockExecutionOutput { bundle, receipts, first_block } = executor.finalize(); + let BatchBlockExecutionOutput { bundle, receipts, requests: _, first_block } = + executor.finalize(); BundleStateWithReceipts::new(bundle, receipts, first_block).write_to_storage( provider_rw.tx_ref(), None, diff --git a/bin/reth/src/commands/stage/drop.rs b/bin/reth/src/commands/stage/drop.rs index fc3ef5768da0c..47af150ce5263 100644 --- a/bin/reth/src/commands/stage/drop.rs +++ b/bin/reth/src/commands/stage/drop.rs @@ -110,6 +110,7 @@ impl Command { tx.clear::()?; tx.clear::()?; tx.clear::()?; + tx.clear::()?; tx.put::( StageId::Bodies.to_string(), Default::default(), diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 
42ba451ab194f..0f5a249a4591d 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1631,6 +1631,7 @@ mod tests { body: body.clone().into_iter().map(|tx| tx.into_signed()).collect(), ommers: Vec::new(), withdrawals: Some(Withdrawals::default()), + requests: None, }, body.iter().map(|tx| tx.signer()).collect(), ) diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index e8e40cb41ef9b..38806b9da239d 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -9,7 +9,7 @@ use reth_blockchain_tree_api::{ error::{BlockchainTreeError, InsertBlockErrorKind}, BlockAttachment, BlockValidationKind, }; -use reth_consensus::{Consensus, ConsensusError}; +use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_db::database::Database; use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; use reth_execution_errors::BlockExecutionError; @@ -210,8 +210,10 @@ impl AppendableChain { let block = block.unseal(); let state = executor.execute((&block, U256::MAX).into())?; - let BlockExecutionOutput { state, receipts, .. } = state; - externals.consensus.validate_block_post_execution(&block, &receipts)?; + let BlockExecutionOutput { state, receipts, requests, .. 
} = state; + externals + .consensus + .validate_block_post_execution(&block, PostExecutionInput::new(&receipts, &requests))?; let bundle_state = BundleStateWithReceipts::new( state, diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index f318b7adea46e..69ca048e155e7 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -16,14 +16,14 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use reth_beacon_consensus::BeaconEngineMessage; -use reth_consensus::{Consensus, ConsensusError}; +use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_engine_primitives::EngineTypes; use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; use reth_primitives::{ constants::{EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT}, eip4844::calculate_excess_blob_gas, proofs, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, - ChainSpec, Header, Receipt, Receipts, SealedBlock, SealedHeader, TransactionSigned, + ChainSpec, Header, Receipts, Requests, SealedBlock, SealedHeader, TransactionSigned, Withdrawals, B256, U256, }; use reth_provider::{ @@ -92,7 +92,7 @@ impl Consensus for AutoSealConsensus { fn validate_block_post_execution( &self, _block: &BlockWithSenders, - _receipts: &[Receipt], + _input: PostExecutionInput<'_>, ) -> Result<(), ConsensusError> { Ok(()) } @@ -277,6 +277,7 @@ impl StorageInner { transactions: &[TransactionSigned], ommers: &[Header], withdrawals: Option<&Withdrawals>, + requests: Option<&Requests>, chain_spec: Arc, ) -> Header { let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); @@ -319,6 +320,7 @@ impl StorageInner { excess_blob_gas: None, extra_data: Default::default(), parent_beacon_block_root: None, + requests_root: requests.map(|r| proofs::calculate_requests_root(&r.0)), }; if chain_spec.is_cancun_active_at_timestamp(timestamp) { @@ -354,11 +356,13 @@ impl StorageInner 
{ /// Builds and executes a new block with the given transactions, on the provided executor. /// /// This returns the header of the executed block, as well as the poststate from execution. + #[allow(clippy::too_many_arguments)] pub(crate) fn build_and_execute( &mut self, transactions: Vec, ommers: Vec
, withdrawals: Option, + requests: Option, provider: &Provider, chain_spec: Arc, executor: &Executor, @@ -367,14 +371,20 @@ impl StorageInner { Executor: BlockExecutorProvider, Provider: StateProviderFactory, { - let header = - self.build_header_template(&transactions, &ommers, withdrawals.as_ref(), chain_spec); + let header = self.build_header_template( + &transactions, + &ommers, + withdrawals.as_ref(), + requests.as_ref(), + chain_spec, + ); let block = Block { header, body: transactions, ommers: ommers.clone(), withdrawals: withdrawals.clone(), + requests: requests.clone(), } .with_recovered_senders() .ok_or(BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError))?; @@ -394,8 +404,12 @@ impl StorageInner { block.number, ); + // todo(onbjerg): we should not pass requests around as this is building a block, which + // means we need to extract the requests from the execution output and compute the requests + // root here + let Block { mut header, body, .. } = block.block; - let body = BlockBody { transactions: body, ommers, withdrawals }; + let body = BlockBody { transactions: body, ommers, withdrawals, requests }; trace!(target: "consensus::auto", ?bundle_state, ?header, ?body, "executed block, calculating state root and completing header"); diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index 2a5ec4433e494..f047f0a5b758d 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -4,7 +4,7 @@ use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; use reth_engine_primitives::EngineTypes; use reth_evm::execute::BlockExecutorProvider; use reth_primitives::{ - Block, ChainSpec, IntoRecoveredTransaction, SealedBlockWithSenders, Withdrawals, + Block, ChainSpec, IntoRecoveredTransaction, Requests, SealedBlockWithSenders, Withdrawals, }; use reth_provider::{CanonChainTracker, CanonStateNotificationSender, Chain, StateProviderFactory}; use 
reth_rpc_types::engine::ForkchoiceState; @@ -137,12 +137,15 @@ where }) .unzip(); let ommers = vec![]; + // todo(onbjerg): these two dont respect chainspec let withdrawals = Some(Withdrawals::default()); + let requests = Some(Requests::default()); match storage.build_and_execute( transactions.clone(), ommers.clone(), withdrawals.clone(), + requests.clone(), &client, chain_spec, &executor, @@ -201,6 +204,7 @@ where body: transactions, ommers, withdrawals, + requests, }; let sealed_block = block.seal_slow(); diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index ffa48e7715236..ad1e11643d8a0 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -52,6 +52,14 @@ pub fn validate_header_standalone( return Err(ConsensusError::ParentBeaconBlockRootUnexpected) } + if chain_spec.is_prague_active_at_timestamp(header.timestamp) { + if header.requests_root.is_none() { + return Err(ConsensusError::RequestsRootMissing) + } + } else if header.requests_root.is_some() { + return Err(ConsensusError::RequestsRootUnexpected) + } + Ok(()) } @@ -108,6 +116,19 @@ pub fn validate_block_pre_execution( } } + // EIP-7685: General purpose execution layer requests + if chain_spec.is_prague_active_at_timestamp(block.timestamp) { + let requests = block.requests.as_ref().ok_or(ConsensusError::BodyRequestsMissing)?; + let requests_root = reth_primitives::proofs::calculate_requests_root(&requests.0); + let header_requests_root = + block.requests_root.as_ref().ok_or(ConsensusError::RequestsRootMissing)?; + if requests_root != *header_requests_root { + return Err(ConsensusError::BodyRequestsRootDiff( + GotExpected { got: requests_root, expected: *header_requests_root }.into(), + )) + } + } + Ok(()) } @@ -326,6 +347,7 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None }; // size: 0x9b5 @@ -339,7 +361,16 @@ mod tests { let ommers = Vec::new(); 
let body = Vec::new(); - (SealedBlock { header: header.seal_slow(), body, ommers, withdrawals: None }, parent) + ( + SealedBlock { + header: header.seal_slow(), + body, + ommers, + withdrawals: None, + requests: None, + }, + parent, + ) } #[test] @@ -419,6 +450,7 @@ mod tests { transactions: vec![transaction], ommers: vec![], withdrawals: Some(Withdrawals::default()), + requests: None, }; let block = SealedBlock::new(header, body); diff --git a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml index 43264872e1f38..8ea4236bfa129 100644 --- a/crates/consensus/consensus/Cargo.toml +++ b/crates/consensus/consensus/Cargo.toml @@ -11,6 +11,7 @@ repository.workspace = true workspace = true [dependencies] +# reth reth-primitives.workspace = true # misc diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index 46fce6d02ee00..d117b2ea2eb14 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -10,7 +10,8 @@ use reth_primitives::{ BlockHash, BlockNumber, BlockWithSenders, Bloom, GotExpected, GotExpectedBoxed, Header, - HeaderValidationError, InvalidTransactionError, Receipt, SealedBlock, SealedHeader, B256, U256, + HeaderValidationError, InvalidTransactionError, Receipt, Request, SealedBlock, SealedHeader, + B256, U256, }; use std::fmt::Debug; @@ -18,6 +19,22 @@ use std::fmt::Debug; /// test helpers for mocking consensus pub mod test_utils; +/// Post execution input passed to [Consensus::validate_block_post_execution]. +#[derive(Debug)] +pub struct PostExecutionInput<'a> { + /// Receipts of the block. + pub receipts: &'a [Receipt], + /// EIP-7685 requests of the block. + pub requests: &'a [Request], +} + +impl<'a> PostExecutionInput<'a> { + /// Creates a new instance of `PostExecutionInput`. + pub fn new(receipts: &'a [Receipt], requests: &'a [Request]) -> Self { + Self { receipts, requests } + } +} + /// Consensus is a protocol that chooses canonical chain. 
#[auto_impl::auto_impl(&, Arc)] pub trait Consensus: Debug + Send + Sync { @@ -94,7 +111,7 @@ pub trait Consensus: Debug + Send + Sync { fn validate_block_post_execution( &self, block: &BlockWithSenders, - receipts: &[Receipt], + input: PostExecutionInput<'_>, ) -> Result<(), ConsensusError>; } @@ -145,6 +162,11 @@ pub enum ConsensusError { #[error("mismatched block withdrawals root: {0}")] BodyWithdrawalsRootDiff(GotExpectedBoxed), + /// Error when the requests root in the block is different from the expected requests + /// root. + #[error("mismatched block requests root: {0}")] + BodyRequestsRootDiff(GotExpectedBoxed), + /// Error when a block with a specific hash and number is already known. #[error("block with [hash={hash}, number={number}] is already known")] BlockKnown { @@ -212,14 +234,26 @@ pub enum ConsensusError { #[error("missing withdrawals root")] WithdrawalsRootMissing, + /// Error when the requests root is missing. + #[error("missing requests root")] + RequestsRootMissing, + /// Error when an unexpected withdrawals root is encountered. #[error("unexpected withdrawals root")] WithdrawalsRootUnexpected, + /// Error when an unexpected requests root is encountered. + #[error("unexpected requests root")] + RequestsRootUnexpected, + /// Error when withdrawals are missing. #[error("missing withdrawals")] BodyWithdrawalsMissing, + /// Error when requests are missing. + #[error("missing requests")] + BodyRequestsMissing, + /// Error when blob gas used is missing. 
#[error("missing blob gas used")] BlobGasUsedMissing, diff --git a/crates/consensus/consensus/src/test_utils.rs b/crates/consensus/consensus/src/test_utils.rs index a616d4f43b897..546dffab1fd50 100644 --- a/crates/consensus/consensus/src/test_utils.rs +++ b/crates/consensus/consensus/src/test_utils.rs @@ -1,5 +1,5 @@ -use crate::{Consensus, ConsensusError}; -use reth_primitives::{BlockWithSenders, Header, Receipt, SealedBlock, SealedHeader, U256}; +use crate::{Consensus, ConsensusError, PostExecutionInput}; +use reth_primitives::{BlockWithSenders, Header, SealedBlock, SealedHeader, U256}; use std::sync::atomic::{AtomicBool, Ordering}; /// Consensus engine implementation for testing @@ -71,7 +71,7 @@ impl Consensus for TestConsensus { fn validate_block_post_execution( &self, _block: &BlockWithSenders, - _receipts: &[Receipt], + _input: PostExecutionInput<'_>, ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) diff --git a/crates/engine-primitives/src/lib.rs b/crates/engine-primitives/src/lib.rs index aa2c4468158ca..d9cd340a8ec55 100644 --- a/crates/engine-primitives/src/lib.rs +++ b/crates/engine-primitives/src/lib.rs @@ -27,7 +27,7 @@ pub use payload::PayloadOrAttributes; /// The types that are used by the engine API. pub trait EngineTypes: - serde::de::DeserializeOwned + Serialize + fmt::Debug + Unpin + Send + Sync + Clone + DeserializeOwned + Serialize + fmt::Debug + Unpin + Send + Sync + Clone { /// The RPC payload attributes type the CL node emits via the engine API. type PayloadAttributes: PayloadAttributes + Unpin; @@ -43,7 +43,8 @@ pub trait EngineTypes: + Unpin + TryInto + TryInto - + TryInto; + + TryInto + + TryInto; /// Execution Payload V1 type. type ExecutionPayloadV1: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static; @@ -51,6 +52,8 @@ pub trait EngineTypes: type ExecutionPayloadV2: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static; /// Execution Payload V3 type. 
type ExecutionPayloadV3: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static; + /// Execution Payload V4 type. + type ExecutionPayloadV4: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static; /// Validates the presence or exclusion of fork-specific fields based on the payload attributes /// and the message version. @@ -63,8 +66,9 @@ pub trait EngineTypes: /// Validates the timestamp depending on the version called: /// -/// * If V2, this ensure that the payload timestamp is pre-Cancun. +/// * If V2, this ensures that the payload timestamp is pre-Cancun. /// * If V3, this ensures that the payload timestamp is within the Cancun timestamp. +/// * If V4, this ensures that the payload timestamp is within the Prague timestamp. /// /// Otherwise, this will return [EngineObjectValidationError::UnsupportedFork]. pub fn validate_payload_timestamp( @@ -219,6 +223,11 @@ pub fn validate_withdrawals_presence( /// 2. Client software **MUST** return `-38005: Unsupported fork` error if the `timestamp` of the /// payload does not fall within the time frame of the Cancun fork. /// +/// For `engine_newPayloadV4`: +/// +/// 2. Client software **MUST** return `-38005: Unsupported fork` error if the `timestamp` of the +/// payload does not fall within the time frame of the Prague fork. +/// /// Returning the right error code (ie, if the client should return `-38003: Invalid payload /// attributes` is handled by the `message_validation_kind` parameter. If the parameter is /// `MessageValidationKind::Payload`, then the error code will be `-32602: Invalid params`. If the @@ -334,7 +343,7 @@ where } /// The version of Engine API message. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub enum EngineApiMessageVersion { /// Version 1 V1, @@ -351,3 +360,13 @@ pub enum EngineApiMessageVersion { /// Added in the Prague hardfork. 
V4, } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn version_ord() { + assert!(EngineApiMessageVersion::V4 > EngineApiMessageVersion::V3); + } +} diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 0264089475ab2..3783063b47f2d 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -8,12 +8,12 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use reth_consensus::{Consensus, ConsensusError}; +use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_consensus_common::validation::{ validate_block_pre_execution, validate_header_extradata, validate_header_standalone, }; use reth_primitives::{ - BlockWithSenders, Chain, ChainSpec, Hardfork, Header, Receipt, SealedBlock, SealedHeader, + BlockWithSenders, Chain, ChainSpec, Hardfork, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256, }; use std::{sync::Arc, time::SystemTime}; @@ -131,8 +131,8 @@ impl Consensus for EthBeaconConsensus { fn validate_block_post_execution( &self, block: &BlockWithSenders, - receipts: &[Receipt], + input: PostExecutionInput<'_>, ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec, receipts) + validate_block_post_execution(block, &self.chain_spec, input.receipts, input.requests) } } diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs index 11fe54406af86..50d01a0d1a9a0 100644 --- a/crates/ethereum/consensus/src/validation.rs +++ b/crates/ethereum/consensus/src/validation.rs @@ -1,6 +1,7 @@ use reth_consensus::ConsensusError; use reth_primitives::{ - gas_spent_by_transactions, BlockWithSenders, Bloom, ChainSpec, GotExpected, Receipt, B256, + gas_spent_by_transactions, BlockWithSenders, Bloom, ChainSpec, GotExpected, Receipt, Request, + B256, }; /// Validate a block with regard to execution results: @@ -11,6 +12,7 @@ pub 
fn validate_block_post_execution( block: &BlockWithSenders, chain_spec: &ChainSpec, receipts: &[Receipt], + requests: &[Request], ) -> Result<(), ConsensusError> { // Before Byzantium, receipts contained state root that would mean that expensive // operation as hashing that is required for state root got calculated in every @@ -30,6 +32,19 @@ pub fn validate_block_post_execution( }) } + // Validate that the header requests root matches the calculated requests root + if chain_spec.is_prague_active_at_timestamp(block.timestamp) { + let Some(header_requests_root) = block.header.requests_root else { + return Err(ConsensusError::RequestsRootMissing) + }; + let requests_root = reth_primitives::proofs::calculate_requests_root(requests); + if requests_root != header_requests_root { + return Err(ConsensusError::BodyRequestsRootDiff( + GotExpected::new(requests_root, header_requests_root).into(), + )) + } + } + Ok(()) } diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index cb6d0231eed42..4cfdf70a36789 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -18,7 +18,7 @@ use reth_engine_primitives::{ use reth_primitives::ChainSpec; use reth_rpc_types::{ engine::{ - ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, + ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, PayloadAttributes as EthPayloadAttributes, }, ExecutionPayloadV1, @@ -36,6 +36,7 @@ impl EngineTypes for EthEngineTypes { type ExecutionPayloadV1 = ExecutionPayloadV1; type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; type ExecutionPayloadV3 = ExecutionPayloadEnvelopeV3; + type ExecutionPayloadV4 = ExecutionPayloadEnvelopeV4; fn validate_version_specific_fields( chain_spec: &ChainSpec, diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 6e753dac9626a..37261d9954784 100644 --- 
a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -7,11 +7,12 @@ use reth_primitives::{ BlobTransactionSidecar, ChainSpec, Hardfork, Header, SealedBlock, Withdrawals, B256, U256, }; use reth_rpc_types::engine::{ - ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadV1, PayloadAttributes, - PayloadId, + ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, + ExecutionPayloadV1, PayloadAttributes, PayloadId, }; use reth_rpc_types_compat::engine::payload::{ - block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, + block_to_payload_v1, block_to_payload_v3, block_to_payload_v4, + convert_block_to_payload_field_v2, }; use revm_primitives::{BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; use std::convert::Infallible; @@ -128,6 +129,27 @@ impl From for ExecutionPayloadEnvelopeV3 { } } +impl From for ExecutionPayloadEnvelopeV4 { + fn from(value: EthBuiltPayload) -> Self { + let EthBuiltPayload { block, fees, sidecars, .. } = value; + + ExecutionPayloadEnvelopeV4 { + execution_payload: block_to_payload_v4(block), + block_value: fees, + // From the engine API spec: + // + // > Client software **MAY** use any heuristics to decide whether to set + // `shouldOverrideBuilder` flag or not. If client software does not implement any + // heuristic this flag **SHOULD** be set to `false`. + // + // Spec: + // + should_override_builder: false, + blobs_bundle: sidecars.into_iter().map(Into::into).collect::>().into(), + } + } +} + /// Container type for all components required to build a payload. 
#[derive(Debug, Clone, PartialEq, Eq)] pub struct EthPayloadBuilderAttributes { diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 9998927061bcb..8ae5d3c5035e6 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -13,8 +13,8 @@ use reth_evm::{ ConfigureEvm, }; use reth_primitives::{ - BlockNumber, BlockWithSenders, ChainSpec, Hardfork, Header, PruneModes, Receipt, Withdrawals, - MAINNET, U256, + BlockNumber, BlockWithSenders, ChainSpec, Hardfork, Header, PruneModes, Receipt, Request, + Withdrawals, MAINNET, U256, }; use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, @@ -98,6 +98,14 @@ where } } +/// Helper type for the output of executing a block. +#[derive(Debug, Clone)] +struct EthExecuteOutput { + receipts: Vec, + requests: Vec, + gas_used: u64, +} + /// Helper container type for EVM with chain spec. #[derive(Debug, Clone)] struct EthEvmExecutor { @@ -118,11 +126,11 @@ where /// # Note /// /// It does __not__ apply post-execution changes. - fn execute_pre_and_transactions( + fn execute_state_transitions( &self, block: &BlockWithSenders, mut evm: Evm<'_, Ext, &mut State>, - ) -> Result<(Vec, u64), BlockExecutionError> + ) -> Result where DB: Database, { @@ -182,7 +190,7 @@ where } drop(evm); - Ok((receipts, cumulative_gas_used)) + Ok(EthExecuteOutput { receipts, requests: vec![], gas_used: cumulative_gas_used }) } } @@ -250,22 +258,22 @@ where &mut self, block: &BlockWithSenders, total_difficulty: U256, - ) -> Result<(Vec, u64), BlockExecutionError> { + ) -> Result { // 1. prepare state on new block self.on_new_block(&block.header); // 2. configure the evm and execute let env = self.evm_env_for_block(&block.header, total_difficulty); - let (receipts, gas_used) = { + let output = { let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - self.executor.execute_pre_and_transactions(block, evm) + self.executor.execute_state_transitions(block, evm) }?; // 3. 
apply post execution changes self.post_execution(block, total_difficulty)?; - Ok((receipts, gas_used)) + Ok(output) } /// Apply settings before a new block is executed. @@ -333,12 +341,13 @@ where /// State changes are committed to the database. fn execute(mut self, input: Self::Input<'_>) -> Result { let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, gas_used) = self.execute_without_verification(block, total_difficulty)?; + let EthExecuteOutput { receipts, requests, gas_used } = + self.execute_without_verification(block, total_difficulty)?; // NOTE: we need to merge keep the reverts for the bundle retention self.state.merge_transitions(BundleRetention::Reverts); - Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, gas_used }) + Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, requests, gas_used }) } } @@ -375,10 +384,10 @@ where fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, _gas_used) = + let EthExecuteOutput { receipts, requests, gas_used: _ } = self.executor.execute_without_verification(block, total_difficulty)?; - validate_block_post_execution(block, self.executor.chain_spec(), &receipts)?; + validate_block_post_execution(block, self.executor.chain_spec(), &receipts, &[])?; // prepare the state according to the prune mode let retention = self.batch_record.bundle_retention(block.number); @@ -387,6 +396,9 @@ where // store receipts in the set self.batch_record.save_receipts(receipts)?; + // store requests in the set + self.batch_record.save_requests(requests); + if self.batch_record.first_block().is_none() { self.batch_record.set_first_block(block.number); } @@ -400,6 +412,7 @@ where BatchBlockExecutionOutput::new( self.executor.state.take_bundle(), self.batch_record.take_receipts(), + self.batch_record.take_requests(), self.batch_record.first_block().unwrap_or_default(), ) } @@ 
-473,6 +486,7 @@ mod tests { body: vec![], ommers: vec![], withdrawals: None, + requests: None, }, senders: vec![], }, @@ -503,6 +517,7 @@ mod tests { body: vec![], ommers: vec![], withdrawals: None, + requests: None, }, senders: vec![], }, @@ -563,7 +578,13 @@ mod tests { .execute( ( &BlockWithSenders { - block: Block { header, body: vec![], ommers: vec![], withdrawals: None }, + block: Block { + header, + body: vec![], + ommers: vec![], + withdrawals: None, + requests: None, + }, senders: vec![], }, U256::ZERO, @@ -609,7 +630,13 @@ mod tests { executor .execute_without_verification( &BlockWithSenders { - block: Block { header, body: vec![], ommers: vec![], withdrawals: None }, + block: Block { + header, + body: vec![], + ommers: vec![], + withdrawals: None, + requests: None, + }, senders: vec![], }, U256::ZERO, @@ -653,6 +680,7 @@ mod tests { body: vec![], ommers: vec![], withdrawals: None, + requests: None, }, senders: vec![], }, @@ -674,7 +702,13 @@ mod tests { .execute_and_verify_one( ( &BlockWithSenders { - block: Block { header, body: vec![], ommers: vec![], withdrawals: None }, + block: Block { + header, + body: vec![], + ommers: vec![], + withdrawals: None, + requests: None, + }, senders: vec![], }, U256::ZERO, @@ -733,6 +767,7 @@ mod tests { body: vec![], ommers: vec![], withdrawals: None, + requests: None, }, senders: vec![], }, diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 9e5db6bc25de9..d94ebc968e16b 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -15,6 +15,7 @@ use reth_primitives::{ Address, ChainSpec, Head, Header, TransactionSigned, U256, }; use reth_revm::{Database, EvmBuilder}; + pub mod execute; /// Ethereum DAO hardfork state change data. 
diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index bc94dcd1753bc..6ac91ca8da4f2 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -27,4 +27,4 @@ parking_lot = { workspace = true, optional = true } parking_lot.workspace = true [features] -test-utils = ["dep:parking_lot"] \ No newline at end of file +test-utils = ["dep:parking_lot"] diff --git a/crates/evm/execution-types/src/bundle.rs b/crates/evm/execution-types/src/bundle.rs index 2bc7eda45ae30..2b823e731712a 100644 --- a/crates/evm/execution-types/src/bundle.rs +++ b/crates/evm/execution-types/src/bundle.rs @@ -32,7 +32,7 @@ pub struct BundleStateWithReceipts { // TODO(mattsse): unify the types, currently there's a cyclic dependency between impl From for BundleStateWithReceipts { fn from(value: BatchBlockExecutionOutput) -> Self { - let BatchBlockExecutionOutput { bundle, receipts, first_block } = value; + let BatchBlockExecutionOutput { bundle, receipts, requests: _, first_block } = value; Self { bundle, receipts, first_block } } } @@ -41,7 +41,8 @@ impl From for BundleStateWithReceipts { impl From for BatchBlockExecutionOutput { fn from(value: BundleStateWithReceipts) -> Self { let BundleStateWithReceipts { bundle, receipts, first_block } = value; - Self { bundle, receipts, first_block } + // TODO(alexey): add requests + Self { bundle, receipts, requests: Vec::default(), first_block } } } diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 6fdd6ebfd0cf6..f459eceb15e21 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -1,6 +1,8 @@ //! Traits for execution. 
-use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt, Receipts, U256}; +use reth_primitives::{ + BlockNumber, BlockWithSenders, PruneModes, Receipt, Receipts, Request, Requests, U256, +}; use revm::db::BundleState; use revm_primitives::db::Database; @@ -96,6 +98,8 @@ pub struct BlockExecutionOutput { pub state: BundleState, /// All the receipts of the transactions in the block. pub receipts: Vec, + /// All the EIP-7685 requests of the transactions in the block. + pub requests: Vec, /// The total gas used by the block. pub gas_used: u64, } @@ -111,14 +115,26 @@ pub struct BatchBlockExecutionOutput { /// /// If receipt is None it means it is pruned. pub receipts: Receipts, + /// The collection of EIP-7685 requests. + /// Outer vector stores requests for each block sequentially. + /// The inner vector stores requests ordered by transaction number. + /// + /// A transaction may have zero or more requests, so the length of the inner vector is not + /// guaranteed to be the same as the number of transactions. + pub requests: Vec, /// First block of bundle state. pub first_block: BlockNumber, } impl BatchBlockExecutionOutput { /// Create Bundle State. 
- pub fn new(bundle: BundleState, receipts: Receipts, first_block: BlockNumber) -> Self { - Self { bundle, receipts, first_block } + pub fn new( + bundle: BundleState, + receipts: Receipts, + requests: Vec, + first_block: BlockNumber, + ) -> Self { + Self { bundle, receipts, requests, first_block } } } @@ -260,8 +276,13 @@ mod tests { let provider = TestExecutorProvider; let db = CacheDB::>::default(); let executor = provider.executor(db); - let block = - Block { header: Default::default(), body: vec![], ommers: vec![], withdrawals: None }; + let block = Block { + header: Default::default(), + body: vec![], + ommers: vec![], + withdrawals: None, + requests: None, + }; let block = BlockWithSenders::new(block, Default::default()).unwrap(); let _ = executor.execute(BlockExecutionInput::new(&block, U256::ZERO)); } diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index 910f9d08b2e82..d898165a59e6f 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -50,11 +50,12 @@ impl Executor for MockExecutorProvider { type Error = BlockExecutionError; fn execute(self, _: Self::Input<'_>) -> Result { - let BatchBlockExecutionOutput { bundle, receipts, .. 
} = + let BatchBlockExecutionOutput { bundle, receipts, requests, first_block: _ } = self.exec_results.lock().pop().unwrap(); Ok(BlockExecutionOutput { state: bundle, receipts: receipts.into_iter().flatten().flatten().collect(), + requests: requests.into_iter().flatten().collect(), gas_used: 0, }) } diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index a806f2fa62e40..33139ab501d55 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -655,7 +655,12 @@ mod tests { .map(|block| { ( block.hash(), - BlockBody { transactions: block.body, ommers: block.ommers, withdrawals: None }, + BlockBody { + transactions: block.body, + ommers: block.ommers, + withdrawals: None, + requests: None, + }, ) }) .collect::>(); diff --git a/crates/net/downloaders/src/bodies/test_utils.rs b/crates/net/downloaders/src/bodies/test_utils.rs index 40e5301293d80..dadd4b3bdd7bf 100644 --- a/crates/net/downloaders/src/bodies/test_utils.rs +++ b/crates/net/downloaders/src/bodies/test_utils.rs @@ -23,6 +23,7 @@ pub(crate) fn zip_blocks<'a>( body: body.transactions, ommers: body.ommers, withdrawals: body.withdrawals, + requests: body.requests, }) } }) diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index f79b8744fe5b9..6dc07c0da91a3 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -234,6 +234,7 @@ impl FromReader for FileClient { transactions: block.body, ommers: block.ommers, withdrawals: block.withdrawals, + requests: block.requests, }, ); @@ -487,7 +488,7 @@ mod tests { headers::downloader::{HeaderDownloader, SyncTarget}, }; use reth_provider::test_utils::create_test_provider_factory; - use std::{mem, sync::Arc}; + use std::sync::Arc; #[tokio::test] async fn streams_bodies_from_buffer() { @@ -601,23 +602,11 @@ mod tests { async fn test_chunk_download_headers_from_file() { 
reth_tracing::init_test_tracing(); - // rig - - const MAX_BYTE_SIZE_HEADER: usize = 720; - // Generate some random blocks - let (file, headers, bodies) = generate_bodies_file(0..=14).await; - // now try to read them back in chunks. - for header in &headers { - assert_eq!(720, mem::size_of_val(header)) - } + let (file, headers, _) = generate_bodies_file(0..=14).await; // calculate min for chunk byte length range - let mut bodies_sizes = bodies.values().map(|body| body.size()).collect::>(); - bodies_sizes.sort(); - let max_block_size = MAX_BYTE_SIZE_HEADER + bodies_sizes.last().unwrap(); - let chunk_byte_len = rand::thread_rng().gen_range(max_block_size..=max_block_size + 10_000); - + let chunk_byte_len = rand::thread_rng().gen_range(1..=10_000); trace!(target: "downloaders::file::test", chunk_byte_len); // init reader @@ -628,7 +617,6 @@ mod tests { let mut local_header = headers.first().unwrap().clone(); // test - while let Some(client) = reader.next_chunk::().await.unwrap() { let sync_target = client.tip_header().unwrap(); let sync_target_hash = sync_target.hash(); diff --git a/crates/net/downloaders/src/test_utils/mod.rs b/crates/net/downloaders/src/test_utils/mod.rs index b84c5282baca4..97e30a02dd8b9 100644 --- a/crates/net/downloaders/src/test_utils/mod.rs +++ b/crates/net/downloaders/src/test_utils/mod.rs @@ -33,6 +33,7 @@ pub(crate) fn generate_bodies( transactions: block.body, ommers: block.ommers, withdrawals: block.withdrawals, + requests: block.requests, }, ) }) diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index d8c13062d2693..ae5cfee9e1e7e 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -296,6 +296,7 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None }, ]), }.encode(&mut data); @@ -330,6 +331,7 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + 
requests_root: None }, ]), }; @@ -430,9 +432,11 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None }, ], withdrawals: None, + requests: None } ]), }; @@ -504,9 +508,11 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None }, ], withdrawals: None, + requests: None } ]), }; diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index 7cca3799570ae..10821dd804cfe 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -166,6 +166,7 @@ where transactions: block.body, ommers: block.ommers, withdrawals: block.withdrawals, + requests: block.requests, }; total_bytes += body.length(); diff --git a/crates/net/network/tests/it/requests.rs b/crates/net/network/tests/it/requests.rs index 4e36f191c81ed..45c86cb647e8e 100644 --- a/crates/net/network/tests/it/requests.rs +++ b/crates/net/network/tests/it/requests.rs @@ -73,8 +73,12 @@ async fn test_get_body() { let blocks = res.unwrap().1; assert_eq!(blocks.len(), 1); - let expected = - BlockBody { transactions: block.body, ommers: block.ommers, withdrawals: None }; + let expected = BlockBody { + transactions: block.body, + ommers: block.ommers, + withdrawals: None, + requests: None, + }; assert_eq!(blocks[0], expected); } } diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index 997ab74bb8961..54c4731fff919 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -313,22 +313,36 @@ fn ensure_valid_body_response( )) } - let withdrawals = match &block.withdrawals { - Some(withdrawals) => withdrawals.as_slice(), - None => &[][..], - }; - if let Some(header_withdrawals_root) = header.withdrawals_root { - let withdrawals_root = reth_primitives::proofs::calculate_withdrawals_root(withdrawals); - if withdrawals_root != header_withdrawals_root { - return 
Err(ConsensusError::BodyWithdrawalsRootDiff( - GotExpected { got: withdrawals_root, expected: header_withdrawals_root }.into(), - )) + match (header.withdrawals_root, &block.withdrawals) { + (Some(header_withdrawals_root), Some(withdrawals)) => { + let withdrawals = withdrawals.as_slice(); + let withdrawals_root = reth_primitives::proofs::calculate_withdrawals_root(withdrawals); + if withdrawals_root != header_withdrawals_root { + return Err(ConsensusError::BodyWithdrawalsRootDiff( + GotExpected { got: withdrawals_root, expected: header_withdrawals_root }.into(), + )) + } + } + (None, None) => { + // this is ok because we assume the fork is not active in this case } - return Ok(()) + _ => return Err(ConsensusError::WithdrawalsRootUnexpected), } - if !withdrawals.is_empty() { - return Err(ConsensusError::WithdrawalsRootUnexpected) + match (header.requests_root, &block.requests) { + (Some(header_requests_root), Some(requests)) => { + let requests = requests.0.as_slice(); + let requests_root = reth_primitives::proofs::calculate_requests_root(requests); + if requests_root != header_requests_root { + return Err(ConsensusError::BodyRequestsRootDiff( + GotExpected { got: requests_root, expected: header_requests_root }.into(), + )) + } + } + (None, None) => { + // this is ok because we assume the fork is not active in this case + } + _ => return Err(ConsensusError::RequestsRootUnexpected), } Ok(()) diff --git a/crates/net/p2p/src/test_utils/generators.rs b/crates/net/p2p/src/test_utils/generators.rs index 9da1429ea5193..8056e90ff260d 100644 --- a/crates/net/p2p/src/test_utils/generators.rs +++ b/crates/net/p2p/src/test_utils/generators.rs @@ -157,6 +157,7 @@ pub fn random_block( body: transactions, ommers, withdrawals: None, + requests: None, } } diff --git a/crates/node-core/src/utils.rs b/crates/node-core/src/utils.rs index 963f863c5aae6..13ed6a2471ef4 100644 --- a/crates/node-core/src/utils.rs +++ b/crates/node-core/src/utils.rs @@ -119,6 +119,7 @@ where body: 
block.transactions, ommers: block.ommers, withdrawals: block.withdrawals, + requests: block.requests, }; validate_block_pre_execution(&block, &chain_spec)?; diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 09f9c1f38d163..a9f07f23cf0c1 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -9,13 +9,12 @@ // The `optimism` feature must be enabled to use this crate. #![cfg(feature = "optimism")] -use reth_consensus::{Consensus, ConsensusError}; +use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_consensus_common::validation::{ validate_block_pre_execution, validate_header_extradata, validate_header_standalone, }; use reth_primitives::{ - BlockWithSenders, ChainSpec, Header, Receipt, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, - U256, + BlockWithSenders, ChainSpec, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256, }; use std::{sync::Arc, time::SystemTime}; @@ -111,8 +110,8 @@ impl Consensus for OptimismBeaconConsensus { fn validate_block_post_execution( &self, block: &BlockWithSenders, - receipts: &[Receipt], + input: PostExecutionInput<'_>, ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec, receipts) + validate_block_post_execution(block, &self.chain_spec, input.receipts) } } diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 7df033dc55fe9..aa99b018c219e 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -355,7 +355,12 @@ where // NOTE: we need to merge keep the reverts for the bundle retention self.state.merge_transitions(BundleRetention::Reverts); - Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, gas_used }) + Ok(BlockExecutionOutput { + state: self.state.take_bundle(), + receipts, + requests: vec![], + gas_used, + }) } } @@ -420,6 +425,7 @@ where BatchBlockExecutionOutput::new( 
self.executor.state.take_bundle(), self.batch_record.take_receipts(), + self.batch_record.take_requests(), self.batch_record.first_block().unwrap_or_default(), ) } @@ -535,6 +541,7 @@ mod tests { body: vec![tx, tx_deposit], ommers: vec![], withdrawals: None, + requests: None, }, senders: vec![addr, addr], }, @@ -616,6 +623,7 @@ mod tests { body: vec![tx, tx_deposit], ommers: vec![], withdrawals: None, + requests: None, }, senders: vec![addr, addr], }, diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 82fbb06e921d4..bd0313087b089 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -280,6 +280,7 @@ mod tests { body: vec![l1_info_tx], ommers: Vec::default(), withdrawals: None, + requests: None, }; let l1_info: L1BlockInfo = extract_l1_info(&mock_block).unwrap(); @@ -301,6 +302,7 @@ mod tests { body: vec![l1_info_tx], ommers: Vec::default(), withdrawals: None, + requests: None, }; let l1_info: L1BlockInfo = extract_l1_info(&mock_block).unwrap(); diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 7382d2184dedb..a01a84d3c7633 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -7,7 +7,8 @@ use reth_optimism_payload_builder::{OptimismBuiltPayload, OptimismPayloadBuilder use reth_primitives::{ChainSpec, Hardfork}; use reth_rpc_types::{ engine::{ - ExecutionPayloadEnvelopeV2, OptimismExecutionPayloadEnvelopeV3, OptimismPayloadAttributes, + ExecutionPayloadEnvelopeV2, OptimismExecutionPayloadEnvelopeV3, + OptimismExecutionPayloadEnvelopeV4, OptimismPayloadAttributes, }, ExecutionPayloadV1, }; @@ -24,6 +25,7 @@ impl EngineTypes for OptimismEngineTypes { type ExecutionPayloadV1 = ExecutionPayloadV1; type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; type ExecutionPayloadV3 = OptimismExecutionPayloadEnvelopeV3; + type ExecutionPayloadV4 = OptimismExecutionPayloadEnvelopeV4; fn validate_version_specific_fields( chain_spec: &ChainSpec, 
diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 2794ad96892a5..a6f3d83c4601f 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -34,13 +34,13 @@ pub struct OptimismPayloadBuilder { compute_pending_block: bool, /// The rollup's chain spec. chain_spec: Arc, - + /// The type responsible for creating the evm. evm_config: EvmConfig, } impl OptimismPayloadBuilder { /// OptimismPayloadBuilder constructor. - pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { + pub const fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { Self { compute_pending_block: true, chain_spec, evm_config } } @@ -217,9 +217,10 @@ where blob_gas_used, excess_blob_gas, parent_beacon_block_root: attributes.payload_attributes.parent_beacon_block_root, + requests_root: None, }; - let block = Block { header, body: vec![], ommers: vec![], withdrawals }; + let block = Block { header, body: vec![], ommers: vec![], withdrawals, requests: None }; let sealed_block = block.seal_slow(); Ok(OptimismBuiltPayload::new( @@ -577,10 +578,11 @@ where parent_beacon_block_root: attributes.payload_attributes.parent_beacon_block_root, blob_gas_used, excess_blob_gas, + requests_root: None, }; // seal the block - let block = Block { header, body: executed_txs, ommers: vec![], withdrawals }; + let block = Block { header, body: executed_txs, ommers: vec![], withdrawals, requests: None }; let sealed_block = block.seal_slow(); debug!(target: "payload_builder", ?sealed_block, "sealed built block"); diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 41a3eec9be102..738246bf8c4ce 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -13,10 +13,11 @@ use reth_primitives::{ }; use reth_rpc_types::engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadV1, OptimismExecutionPayloadEnvelopeV3, - OptimismPayloadAttributes, PayloadId, + 
OptimismExecutionPayloadEnvelopeV4, OptimismPayloadAttributes, PayloadId, }; use reth_rpc_types_compat::engine::payload::{ - block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, + block_to_payload_v1, block_to_payload_v3, block_to_payload_v4, + convert_block_to_payload_field_v2, }; use revm::primitives::HandlerCfg; use std::sync::Arc; @@ -272,6 +273,33 @@ impl From for OptimismExecutionPayloadEnvelopeV3 { } } } +impl From for OptimismExecutionPayloadEnvelopeV4 { + fn from(value: OptimismBuiltPayload) -> Self { + let OptimismBuiltPayload { block, fees, sidecars, chain_spec, attributes, .. } = value; + + let parent_beacon_block_root = + if chain_spec.is_cancun_active_at_timestamp(attributes.timestamp()) { + attributes.parent_beacon_block_root().unwrap_or(B256::ZERO) + } else { + B256::ZERO + }; + OptimismExecutionPayloadEnvelopeV4 { + execution_payload: block_to_payload_v4(block), + block_value: fees, + // From the engine API spec: + // + // > Client software **MAY** use any heuristics to decide whether to set + // `shouldOverrideBuilder` flag or not. If client software does not implement any + // heuristic this flag **SHOULD** be set to `false`. + // + // Spec: + // + should_override_builder: false, + blobs_bundle: sidecars.into_iter().map(Into::into).collect::>().into(), + parent_beacon_block_root, + } + } +} /// Generates the payload id for the configured payload from the [OptimismPayloadAttributes]. 
/// diff --git a/crates/payload/ethereum/Cargo.toml b/crates/payload/ethereum/Cargo.toml index 6598023d7d391..245bc8ebf1aec 100644 --- a/crates/payload/ethereum/Cargo.toml +++ b/crates/payload/ethereum/Cargo.toml @@ -19,9 +19,11 @@ reth-transaction-pool.workspace = true reth-provider.workspace = true reth-payload-builder.workspace = true reth-basic-payload-builder.workspace = true +reth-evm.workspace = true +reth-evm-ethereum.workspace = true # ethereum revm.workspace = true # misc -tracing.workspace = true \ No newline at end of file +tracing.workspace = true diff --git a/crates/payload/ethereum/src/lib.rs b/crates/payload/ethereum/src/lib.rs index e34287f76510a..5822830566005 100644 --- a/crates/payload/ethereum/src/lib.rs +++ b/crates/payload/ethereum/src/lib.rs @@ -13,17 +13,21 @@ use reth_basic_payload_builder::{ commit_withdrawals, is_better_payload, pre_block_beacon_root_contract_call, BuildArguments, BuildOutcome, PayloadBuilder, PayloadConfig, WithdrawalsOutcome, }; +use reth_evm::ConfigureEvm; +use reth_evm_ethereum::EthEvmConfig; use reth_payload_builder::{ error::PayloadBuilderError, EthBuiltPayload, EthPayloadBuilderAttributes, }; use reth_primitives::{ constants::{ - eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE, EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, + eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE, EMPTY_RECEIPTS, EMPTY_ROOT_HASH, + EMPTY_TRANSACTIONS, }, eip4844::calculate_excess_blob_gas, proofs, revm::env::tx_env_with_recovered, - Block, Header, IntoRecoveredTransaction, Receipt, Receipts, EMPTY_OMMER_ROOT_HASH, U256, + Block, Header, IntoRecoveredTransaction, Receipt, Receipts, Requests, EMPTY_OMMER_ROOT_HASH, + U256, }; use reth_provider::{BundleStateWithReceipts, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; @@ -36,13 +40,29 @@ use revm::{ use tracing::{debug, trace, warn}; /// Ethereum payload builder -#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] -#[non_exhaustive] -pub struct EthereumPayloadBuilder; 
+#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct EthereumPayloadBuilder { + /// The type responsible for creating the evm. + evm_config: EvmConfig, +} + +impl EthereumPayloadBuilder { + /// EthereumPayloadBuilder constructor. + pub const fn new(evm_config: EvmConfig) -> Self { + Self { evm_config } + } +} + +impl Default for EthereumPayloadBuilder { + fn default() -> Self { + Self::new(EthEvmConfig::default()) + } +} // Default implementation of [PayloadBuilder] for unit type -impl PayloadBuilder for EthereumPayloadBuilder +impl PayloadBuilder for EthereumPayloadBuilder where + EvmConfig: ConfigureEvm, Client: StateProviderFactory, Pool: TransactionPool, { @@ -53,7 +73,7 @@ where &self, args: BuildArguments, ) -> Result, PayloadBuilderError> { - default_ethereum_payload_builder(args) + default_ethereum_payload_builder(self.evm_config.clone(), args) } fn build_empty_payload( @@ -122,6 +142,14 @@ where err })?; + // Calculate the requests and the requests root. + let (requests, requests_root) = + if chain_spec.is_prague_active_at_timestamp(attributes.timestamp) { + (Some(Requests::default()), Some(EMPTY_ROOT_HASH)) + } else { + (None, None) + }; + // merge all transitions into bundle state, this would apply the withdrawal balance // changes and 4788 contract call db.merge_transitions(BundleRetention::PlainState); @@ -175,9 +203,10 @@ where blob_gas_used, excess_blob_gas, parent_beacon_block_root: attributes.parent_beacon_block_root, + requests_root, }; - let block = Block { header, body: vec![], ommers: vec![], withdrawals }; + let block = Block { header, body: vec![], ommers: vec![], withdrawals, requests }; let sealed_block = block.seal_slow(); Ok(EthBuiltPayload::new(attributes.payload_id(), sealed_block, U256::ZERO)) @@ -190,10 +219,12 @@ where /// and configuration, this function creates a transaction payload. Returns /// a result indicating success with the payload or an error in case of failure. 
#[inline] -pub fn default_ethereum_payload_builder( +pub fn default_ethereum_payload_builder( + evm_config: EvmConfig, args: BuildArguments, ) -> Result, PayloadBuilderError> where + EvmConfig: ConfigureEvm, Client: StateProviderFactory, Pool: TransactionPool, { @@ -274,15 +305,14 @@ where } } + let env = EnvWithHandlerCfg::new_with_cfg_env( + initialized_cfg.clone(), + initialized_block_env.clone(), + tx_env_with_recovered(&tx), + ); + // Configure the environment for the block. - let mut evm = revm::Evm::builder() - .with_db(&mut db) - .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - tx_env_with_recovered(&tx), - )) - .build(); + let mut evm = evm_config.evm_with_env(&mut db, env); let ResultAndState { result, state } = match evm.transact() { Ok(res) => res, @@ -404,6 +434,14 @@ where blob_gas_used = Some(sum_blob_gas_used); } + // todo: compute requests and requests root + let (requests, requests_root) = + if chain_spec.is_prague_active_at_timestamp(attributes.timestamp) { + (Some(Requests::default()), Some(EMPTY_ROOT_HASH)) + } else { + (None, None) + }; + let header = Header { parent_hash: parent_block.hash(), ommers_hash: EMPTY_OMMER_ROOT_HASH, @@ -425,10 +463,11 @@ where parent_beacon_block_root: attributes.parent_beacon_block_root, blob_gas_used, excess_blob_gas, + requests_root, }; // seal the block - let block = Block { header, body: executed_txs, ommers: vec![], withdrawals }; + let block = Block { header, body: executed_txs, ommers: vec![], withdrawals, requests }; let sealed_block = block.seal_slow(); debug!(target: "payload_builder", ?sealed_block, "sealed built block"); diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index f44db3cae1e2e..8660eb5ac5d58 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -22,6 +22,7 @@ revm-primitives = { workspace = true, features = ["serde"] } # ethereum alloy-chains = { workspace = true, 
features = ["serde", "rlp"] } +alloy-consensus = { workspace = true, features = ["arbitrary", "serde"] } alloy-primitives = { workspace = true, features = ["rand", "rlp"] } alloy-rlp = { workspace = true, features = ["arrayvec"] } alloy-trie = { workspace = true, features = ["serde"] } @@ -91,6 +92,7 @@ pprof = { workspace = true, features = [ "frame-pointer", "criterion", ] } +secp256k1.workspace = true [features] default = ["c-kzg", "zstd-codec"] diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index 2cdaee72db4de..bd68860d26e5e 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -47,6 +47,9 @@ impl TryFrom for Block { body, ommers: Default::default(), withdrawals: block.withdrawals.map(Into::into), + // todo(onbjerg): we don't know if this is added to rpc yet, so for now we leave it as + // empty. + requests: None, }) } } @@ -93,6 +96,8 @@ impl TryFrom for Header { timestamp: header.timestamp, transactions_root: header.transactions_root, withdrawals_root: header.withdrawals_root, + // TODO: requests_root: header.requests_root, + requests_root: None, }) } } diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 467bca8166ec2..b37d5146fae4c 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,5 +1,5 @@ use crate::{ - Address, Bytes, GotExpected, Header, SealedHeader, TransactionSigned, + Address, Bytes, GotExpected, Header, Requests, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, Withdrawals, B256, }; use alloy_rlp::{RlpDecodable, RlpEncodable}; @@ -13,6 +13,16 @@ pub use alloy_eips::eip1898::{ BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, ForkBlock, RpcBlockHash, }; +// HACK(onbjerg): we need this to always set `requests` to `None` since we might otherwise generate +// a block with `None` withdrawals and `Some` requests, in which case we end up trying to decode the +// requests as 
withdrawals +#[cfg(any(feature = "arbitrary", test))] +prop_compose! { + pub fn empty_requests_strategy()(_ in 0..1) -> Option { + None + } +} + /// Ethereum full block. /// /// Withdrawals can be optionally included at the end of the RLP encoded message. @@ -45,6 +55,9 @@ pub struct Block { proptest(strategy = "proptest::option::of(proptest::arbitrary::any::())") )] pub withdrawals: Option, + /// Block requests. + #[cfg_attr(any(test, feature = "arbitrary"), proptest(strategy = "empty_requests_strategy()"))] + pub requests: Option, } impl Block { @@ -55,6 +68,7 @@ impl Block { body: self.body, ommers: self.ommers, withdrawals: self.withdrawals, + requests: self.requests, } } @@ -67,6 +81,7 @@ impl Block { body: self.body, ommers: self.ommers, withdrawals: self.withdrawals, + requests: self.requests, } } @@ -257,14 +272,17 @@ pub struct SealedBlock { proptest(strategy = "proptest::option::of(proptest::arbitrary::any::())") )] pub withdrawals: Option, + /// Block requests. + #[cfg_attr(any(test, feature = "arbitrary"), proptest(strategy = "empty_requests_strategy()"))] + pub requests: Option, } impl SealedBlock { /// Create a new sealed block instance using the sealed header and block body. #[inline] pub fn new(header: SealedHeader, body: BlockBody) -> Self { - let BlockBody { transactions, ommers, withdrawals } = body; - Self { header, body: transactions, ommers, withdrawals } + let BlockBody { transactions, ommers, withdrawals, requests } = body; + Self { header, body: transactions, ommers, withdrawals, requests } } /// Header hash. @@ -288,6 +306,7 @@ impl SealedBlock { transactions: self.body, ommers: self.ommers, withdrawals: self.withdrawals, + requests: self.requests, }, ) } @@ -343,6 +362,7 @@ impl SealedBlock { body: self.body, ommers: self.ommers, withdrawals: self.withdrawals, + requests: self.requests, } } @@ -514,16 +534,21 @@ pub struct BlockBody { pub ommers: Vec
, /// Withdrawals in the block. pub withdrawals: Option, + /// Requests in the block. + #[cfg_attr(any(test, feature = "arbitrary"), proptest(strategy = "empty_requests_strategy()"))] + pub requests: Option, } impl BlockBody { /// Create a [`Block`] from the body and its header. + // todo(onbjerg): should this not just take `self`? its used in one place pub fn create_block(&self, header: Header) -> Block { Block { header, body: self.transactions.clone(), ommers: self.ommers.clone(), withdrawals: self.withdrawals.clone(), + requests: self.requests.clone(), } } @@ -543,6 +568,12 @@ impl BlockBody { self.withdrawals.as_ref().map(|w| crate::proofs::calculate_withdrawals_root(w)) } + /// Calculate the requests root for the block body, if requests exist. If there are no + /// requests, this will return `None`. + pub fn calculate_requests_root(&self) -> Option { + self.requests.as_ref().map(|r| crate::proofs::calculate_requests_root(&r.0)) + } + /// Calculates a heuristic for the in-memory size of the [BlockBody]. 
#[inline] pub fn size(&self) -> usize { @@ -558,7 +589,12 @@ impl BlockBody { impl From for BlockBody { fn from(block: Block) -> Self { - Self { transactions: block.body, ommers: block.ommers, withdrawals: block.withdrawals } + Self { + transactions: block.body, + ommers: block.ommers, + withdrawals: block.withdrawals, + requests: block.requests, + } } } @@ -602,6 +638,9 @@ pub fn generate_valid_header( header.parent_beacon_block_root = None; } + // todo(onbjerg): adjust this for eip-7589 + header.requests_root = None; + header } diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 98ec3d972dcc0..3c166b2c7206e 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -1,5 +1,8 @@ use crate::{ - constants::{EIP1559_INITIAL_BASE_FEE, EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, EMPTY_WITHDRAWALS}, + constants::{ + EIP1559_INITIAL_BASE_FEE, EMPTY_RECEIPTS, EMPTY_ROOT_HASH, EMPTY_TRANSACTIONS, + EMPTY_WITHDRAWALS, + }, holesky_nodes, net::{goerli_nodes, mainnet_nodes, sepolia_nodes}, proofs::state_root_ref_unhashed, @@ -612,6 +615,13 @@ impl ChainSpec { (None, None, None) }; + // If Prague is activated at genesis we set requests root to an empty trie root. + let requests_root = if self.is_prague_active_at_timestamp(self.genesis.timestamp) { + Some(EMPTY_ROOT_HASH) + } else { + None + }; + Header { parent_hash: B256::ZERO, number: 0, @@ -633,6 +643,7 @@ impl ChainSpec { parent_beacon_block_root, blob_gas_used, excess_blob_gas, + requests_root, } } diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index db75e1b6d4c0c..b9574785af3df 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -50,7 +50,8 @@ pub struct Header { /// of each transaction in the transactions list portion of the block; formally He. pub receipts_root: B256, /// The Keccak 256-bit hash of the withdrawals list portion of this block. 
- /// + /// + /// See [EIP-4895](https://eips.ethereum.org/EIPS/eip-4895). pub withdrawals_root: Option, /// The Bloom filter composed from indexable information (logger address and log topics) /// contained in each log entry from the receipt of each transaction in the transactions list; @@ -98,6 +99,11 @@ pub struct Header { /// /// The beacon roots contract handles root storage, enhancing Ethereum's functionalities. pub parent_beacon_block_root: Option, + /// The Keccak 256-bit hash of the root node of the trie structure populated with each + /// [EIP-7685] request in the block body. + /// + /// [EIP-7685]: https://eips.ethereum.org/EIPS/eip-7685 + pub requests_root: Option, /// An arbitrary byte array containing data relevant to this block. This must be 32 bytes or /// fewer; formally Hx. pub extra_data: Bytes, @@ -126,6 +132,7 @@ impl Default for Header { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None, } } } @@ -343,6 +350,10 @@ impl Header { length += parent_beacon_block_root.length(); } + if let Some(requests_root) = self.requests_root { + length += requests_root.length(); + } + length } } @@ -396,15 +407,22 @@ impl Encodable for Header { U256::from(*excess_blob_gas).encode(out); } - // Encode parent beacon block root. If new fields are added, the above pattern will need to + // Encode parent beacon block root. + if let Some(ref parent_beacon_block_root) = self.parent_beacon_block_root { + parent_beacon_block_root.encode(out); + } + + // Encode EIP-7685 requests root + // + // If new fields are added, the above pattern will need to // be repeated and placeholders added. Otherwise, it's impossible to tell _which_ // fields are missing. This is mainly relevant for contrived cases where a header is // created at random, for example: // * A header is created with a withdrawals root, but no base fee. Shanghai blocks are // post-London, so this is technically not valid. 
However, a tool like proptest would // generate a block like this. - if let Some(ref parent_beacon_block_root) = self.parent_beacon_block_root { - parent_beacon_block_root.encode(out); + if let Some(ref requests_root) = self.requests_root { + requests_root.encode(out); } } @@ -444,6 +462,7 @@ impl Decodable for Header { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None, }; if started_len - buf.len() < rlp_head.payload_length { this.base_fee_per_gas = Some(u64::decode(buf)?); @@ -463,7 +482,14 @@ impl Decodable for Header { this.excess_blob_gas = Some(u64::decode(buf)?); } - // Decode parent beacon block root. If new fields are added, the above pattern will need to + // Decode parent beacon block root. + if started_len - buf.len() < rlp_head.payload_length { + this.parent_beacon_block_root = Some(B256::decode(buf)?); + } + + // Decode requests root. + // + // If new fields are added, the above pattern will need to // be repeated and placeholders decoded. Otherwise, it's impossible to tell _which_ // fields are missing. This is mainly relevant for contrived cases where a header is // created at random, for example: @@ -471,7 +497,7 @@ impl Decodable for Header { // post-London, so this is technically not valid. However, a tool like proptest would // generate a block like this. 
if started_len - buf.len() < rlp_head.payload_length { - this.parent_beacon_block_root = Some(B256::decode(buf)?); + this.requests_root = Some(B256::decode(buf)?); } let consumed = started_len - buf.len(); @@ -1059,6 +1085,7 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None }; assert_eq!(header.hash_slow(), expected_hash); } @@ -1183,6 +1210,7 @@ mod tests { blob_gas_used: Some(0x020000), excess_blob_gas: Some(0), parent_beacon_block_root: None, + requests_root: None, }; let header = Header::decode(&mut data.as_slice()).unwrap(); @@ -1228,6 +1256,7 @@ mod tests { parent_beacon_block_root: None, blob_gas_used: Some(0), excess_blob_gas: Some(0x1600000), + requests_root: None, }; let header = Header::decode(&mut data.as_slice()).unwrap(); diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index b10582cf9e522..7681ddb9b2874 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -39,12 +39,12 @@ pub mod op_mainnet; pub mod proofs; mod prune; mod receipt; +mod request; /// Helpers for working with revm pub mod revm; pub mod stage; pub use reth_static_file_types as static_file; mod storage; -/// Helpers for working with transactions pub mod transaction; pub mod trie; mod withdrawal; @@ -85,6 +85,7 @@ pub use prune::{ pub use receipt::{ gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, }; +pub use request::Requests; pub use static_file::StaticFileSegment; pub use storage::StorageEntry; @@ -109,6 +110,7 @@ pub use withdrawal::{Withdrawal, Withdrawals}; // Re-exports pub use self::ruint::UintTryTo; +pub use alloy_consensus::Request; pub use alloy_primitives::{ self, address, b256, bloom, bytes, bytes::{Buf, BufMut, BytesMut}, diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index b16fa68793820..e8015a2766e2e 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -4,9 +4,10 
@@ use crate::{ constants::EMPTY_OMMER_ROOT_HASH, keccak256, trie::{HashBuilder, Nibbles, TrieAccount}, - Address, Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, TransactionSigned, Withdrawal, - B256, U256, + Address, Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Request, TransactionSigned, + Withdrawal, B256, U256, }; +use alloy_eips::eip7685::Encodable7685; use alloy_rlp::Encodable; use itertools::Itertools; @@ -69,6 +70,13 @@ pub fn calculate_receipt_root(receipts: &[ReceiptWithBloom]) -> B256 { ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) } +/// Calculate [EIP-7685](https://eips.ethereum.org/EIPS/eip-7685) requests root. +/// +/// NOTE: The requests are encoded as `id + request` +pub fn calculate_requests_root(requests: &[Request]) -> B256 { + ordered_trie_root_with_encoder(requests, |item, buf| item.encode_7685(buf)) +} + /// Calculates the receipt root for a header. #[cfg(feature = "optimism")] pub fn calculate_receipt_root_optimism( diff --git a/crates/primitives/src/request.rs b/crates/primitives/src/request.rs new file mode 100644 index 0000000000000..ef0f75d544a35 --- /dev/null +++ b/crates/primitives/src/request.rs @@ -0,0 +1,55 @@ +//! EIP-7685 requests. + +use crate::Request; +use alloy_eips::eip7685::{Decodable7685, Encodable7685}; +use alloy_rlp::{Decodable, Encodable}; +use reth_codecs::{main_codec, Compact}; +use revm_primitives::Bytes; + +/// A list of EIP-7685 requests. 
+#[main_codec] +#[derive(Debug, Clone, PartialEq, Eq, Default, Hash)] +pub struct Requests(pub Vec); + +impl From> for Requests { + fn from(requests: Vec) -> Self { + Self(requests) + } +} + +impl IntoIterator for Requests { + type Item = Request; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl Encodable for Requests { + fn encode(&self, out: &mut dyn bytes::BufMut) { + let mut h = alloy_rlp::Header { list: true, payload_length: 0 }; + + let mut encoded = Vec::new(); + for req in self.0.iter() { + let encoded_req = req.encoded_7685(); + h.payload_length += encoded_req.len(); + encoded.push(Bytes::from(encoded_req)); + } + + h.encode(out); + for req in encoded { + req.encode(out); + } + } +} + +impl Decodable for Requests { + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + Ok( as Decodable>::decode(buf)? + .into_iter() + .map(|bytes| Request::decode_7685(&mut bytes.as_ref())) + .collect::, alloy_eips::eip7685::Eip7685Error>>() + .map(Self)?) + } +} diff --git a/crates/primitives/src/revm/mod.rs b/crates/primitives/src/revm/mod.rs index 311c5e2b50071..1d527d28ca55d 100644 --- a/crates/primitives/src/revm/mod.rs +++ b/crates/primitives/src/revm/mod.rs @@ -1,3 +1,5 @@ +//! Helpers for working with revm. + /// The `compat` module contains utility functions that perform conversions between reth and revm, /// compare analogous types from the two implementations, and calculate intrinsic gas usage. /// diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index d481bed166f2a..f53405d08a45a 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,3 +1,5 @@ +//! Transaction types. 
+ #[cfg(any(feature = "arbitrary", feature = "zstd-codec"))] use crate::compression::{TRANSACTION_COMPRESSOR, TRANSACTION_DECOMPRESSOR}; use crate::{keccak256, Address, BlockHashOrNumber, Bytes, TxHash, TxKind, B256, U256}; diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index 77f747cd9a85c..4eb29eee74b50 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -3,7 +3,8 @@ use crate::{precompile::Address, primitives::alloy_primitives::BlockNumber}; use reth_execution_errors::BlockExecutionError; use reth_primitives::{ - PruneMode, PruneModes, PruneSegmentError, Receipt, Receipts, MINIMUM_PRUNING_DISTANCE, + PruneMode, PruneModes, PruneSegmentError, Receipt, Receipts, Request, Requests, + MINIMUM_PRUNING_DISTANCE, }; use revm::db::states::bundle_state::BundleRetention; use std::time::Duration; @@ -23,6 +24,13 @@ pub struct BlockBatchRecord { /// /// If receipt is None it means it is pruned. receipts: Receipts, + /// The collection of EIP-7685 requests. + /// Outer vector stores requests for each block sequentially. + /// The inner vector stores requests ordered by transaction number. + /// + /// A transaction may have zero or more requests, so the length of the inner vector is not + /// guaranteed to be the same as the number of transactions. + requests: Vec, /// Memoized address pruning filter. /// Empty implies that there is going to be addresses to include in the filter in a future /// block. None means there isn't any kind of configuration. @@ -75,6 +83,11 @@ impl BlockBatchRecord { std::mem::take(&mut self.receipts) } + /// Returns all recorded requests. + pub fn take_requests(&mut self) -> Vec { + std::mem::take(&mut self.requests) + } + /// Returns the [BundleRetention] for the given block based on the configured prune modes. 
pub fn bundle_retention(&self, block_number: BlockNumber) -> BundleRetention { if self.tip.map_or(true, |tip| { @@ -155,6 +168,11 @@ impl BlockBatchRecord { Ok(()) } + + /// Save EIP-7685 requests to the executor. + pub fn save_requests(&mut self, requests: Vec) { + self.requests.push(requests.into()); + } } /// Block execution statistics. Contains duration of each step of block execution. diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index be20a4fbe088d..e858f62df0d54 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -9,8 +9,8 @@ use reth_primitives::{Address, BlockHash, BlockId, BlockNumberOrTag, Bytes, B256 use reth_rpc_types::{ engine::{ ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadV1, - ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, - TransitionConfiguration, + ExecutionPayloadV3, ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, PayloadId, + PayloadStatus, TransitionConfiguration, }, state::StateOverride, BlockOverrides, Filter, Log, RichBlock, SyncStatus, TransactionRequest, @@ -46,6 +46,17 @@ pub trait EngineApi { parent_beacon_block_root: B256, ) -> RpcResult; + /// Post Prague payload handler + /// + /// See also + #[method(name = "newPayloadV4")] + async fn new_payload_v4( + &self, + payload: ExecutionPayloadV4, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + ) -> RpcResult; + /// See also /// /// Caution: This should not accept the `withdrawals` field in the payload attributes. @@ -116,6 +127,16 @@ pub trait EngineApi { #[method(name = "getPayloadV3")] async fn get_payload_v3(&self, payload_id: PayloadId) -> RpcResult; + /// Post Prague payload handler. + /// + /// See also + /// + /// Returns the most recent version of the payload that is available in the corresponding + /// payload build process at the time of receiving this call. 
Note: + /// > Provider software MAY stop the corresponding build process after serving this call. + #[method(name = "getPayloadV4")] + async fn get_payload_v4(&self, payload_id: PayloadId) -> RpcResult; + /// See also #[method(name = "getPayloadBodiesByHashV1")] async fn get_payload_bodies_by_hash_v1( diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 8d51884d59887..c3713f731fad3 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -24,6 +24,10 @@ use std::{sync::Arc, time::Instant}; use tokio::sync::oneshot; use tracing::{trace, warn}; +/// The list of additional V4 caps +// TODO(mattsse): move to alloy +const V4_CAPABILITIES: [&str; 2] = ["engine_getPayloadV4", "engine_newPayloadV4"]; + /// The Engine API response sender. pub type EngineApiSender = oneshot::Sender>; @@ -331,7 +335,7 @@ where pub async fn get_payload_v4( &self, payload_id: PayloadId, - ) -> EngineApiResult { + ) -> EngineApiResult { // First we fetch the payload attributes to check the timestamp let attributes = self.get_payload_attributes(payload_id).await?; @@ -508,7 +512,7 @@ where /// validated according to the Shanghai rules, as well as the validity changes from cancun: /// /// - /// * If the version is [EngineApiMessageVersion::V3], then the payload attributes will be + /// * If the version above [EngineApiMessageVersion::V3], then the payload attributes will be /// validated according to the Cancun rules. async fn validate_and_execute_forkchoice( &self, @@ -595,6 +599,22 @@ where Ok(res?) 
} + async fn new_payload_v4( + &self, + payload: ExecutionPayloadV4, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + ) -> RpcResult { + trace!(target: "rpc::engine", "Serving engine_newPayloadV4"); + let start = Instant::now(); + let res = + EngineApi::new_payload_v4(self, payload, versioned_hashes, parent_beacon_block_root) + .await; + self.inner.metrics.latency.new_payload_v4.record(start.elapsed()); + self.inner.metrics.new_payload_response.update_response_metrics(&res); + Ok(res?) + } + /// Handler for `engine_forkchoiceUpdatedV1` /// See also /// @@ -708,6 +728,26 @@ where Ok(res?) } + /// Handler for `engine_getPayloadV4` + /// + /// Returns the most recent version of the payload that is available in the corresponding + /// payload build process at the time of receiving this call. + /// + /// See also + /// + /// Note: + /// > Provider software MAY stop the corresponding build process after serving this call. + async fn get_payload_v4( + &self, + payload_id: PayloadId, + ) -> RpcResult { + trace!(target: "rpc::engine", "Serving engine_getPayloadV4"); + let start = Instant::now(); + let res = EngineApi::get_payload_v4(self, payload_id).await; + self.inner.metrics.latency.get_payload_v4.record(start.elapsed()); + Ok(res?) 
+ } + /// Handler for `engine_getPayloadBodiesByHashV1` /// See also async fn get_payload_bodies_by_hash_v1( @@ -777,7 +817,7 @@ where /// Handler for `engine_exchangeCapabilitiesV1` /// See also async fn exchange_capabilities(&self, _capabilities: Vec) -> RpcResult> { - Ok(CAPABILITIES.into_iter().map(str::to_owned).collect()) + Ok(CAPABILITIES.into_iter().chain(V4_CAPABILITIES.into_iter()).map(str::to_owned).collect()) } } diff --git a/crates/rpc/rpc-engine-api/src/metrics.rs b/crates/rpc/rpc-engine-api/src/metrics.rs index d63611f7da584..b8679c84e8a7c 100644 --- a/crates/rpc/rpc-engine-api/src/metrics.rs +++ b/crates/rpc/rpc-engine-api/src/metrics.rs @@ -24,6 +24,8 @@ pub(crate) struct EngineApiLatencyMetrics { pub(crate) new_payload_v2: Histogram, /// Latency for `engine_newPayloadV3` pub(crate) new_payload_v3: Histogram, + /// Latency for `engine_newPayloadV4` + pub(crate) new_payload_v4: Histogram, /// Latency for `engine_forkchoiceUpdatedV1` pub(crate) fork_choice_updated_v1: Histogram, /// Latency for `engine_forkchoiceUpdatedV2` @@ -36,6 +38,8 @@ pub(crate) struct EngineApiLatencyMetrics { pub(crate) get_payload_v2: Histogram, /// Latency for `engine_getPayloadV3` pub(crate) get_payload_v3: Histogram, + /// Latency for `engine_getPayloadV4` + pub(crate) get_payload_v4: Histogram, /// Latency for `engine_getPayloadBodiesByRangeV1` pub(crate) get_payload_bodies_by_range_v1: Histogram, /// Latency for `engine_getPayloadBodiesByHashV1` diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index e2b39691afea3..a68eef6393bc7 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -28,6 +28,7 @@ fn transform_block Block>(src: SealedBlock, f: F) -> Executi body: transformed.body, ommers: transformed.ommers, withdrawals: transformed.withdrawals, + requests: transformed.requests, }) .0 } diff --git a/crates/rpc/rpc-types-compat/src/block.rs 
b/crates/rpc/rpc-types-compat/src/block.rs index 1c2a44ebb6b18..d3840b24e4ee2 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -90,9 +90,11 @@ pub fn from_block_full( )) } -/// Converts from a [reth_primitives::SealedHeader] to a [reth_rpc_types::BlockNumberOrTag] +/// Converts from a [reth_primitives::SealedHeader] to a [reth_rpc_types::Header] /// -/// Note: This does not set the `totalDifficulty` field. +/// # Note +/// +/// This does not set the `totalDifficulty` field. pub fn from_primitive_with_hash(primitive_header: reth_primitives::SealedHeader) -> Header { let (header, hash) = primitive_header.split(); let PrimitiveHeader { @@ -116,6 +118,7 @@ pub fn from_primitive_with_hash(primitive_header: reth_primitives::SealedHeader) blob_gas_used, excess_blob_gas, parent_beacon_block_root, + requests_root, } = header; Header { @@ -141,7 +144,7 @@ pub fn from_primitive_with_hash(primitive_header: reth_primitives::SealedHeader) excess_blob_gas: excess_blob_gas.map(u128::from), parent_beacon_block_root, total_difficulty: None, - requests_root: None, + requests_root, } } diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index 9f968a1a4ebd6..e68cee2bbc43f 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -4,7 +4,7 @@ use reth_primitives::{ constants::{EMPTY_OMMER_ROOT_HASH, MAXIMUM_EXTRA_DATA_SIZE, MIN_PROTOCOL_BASE_FEE_U256}, proofs::{self}, - Block, Header, SealedBlock, TransactionSigned, UintTryTo, Withdrawals, B256, U256, + Block, Header, Request, SealedBlock, TransactionSigned, UintTryTo, Withdrawals, B256, U256, }; use reth_rpc_types::engine::{ payload::{ExecutionPayloadBodyV1, ExecutionPayloadFieldV2, ExecutionPayloadInputV2}, @@ -55,6 +55,7 @@ pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result Result Result Result { - // this performs the same conversion 
as the underlying V3 payload. - // - // the new request lists (`deposit_requests`, `withdrawal_requests`) are EL -> CL only, so we do - // not do anything special here to handle them - try_payload_v3_to_block(payload.payload_inner) + let ExecutionPayloadV4 { payload_inner, deposit_requests, withdrawal_requests } = payload; + let mut block = try_payload_v3_to_block(payload_inner)?; + + // attach requests with asc type identifiers + let requests = deposit_requests + .into_iter() + .map(Request::DepositRequest) + .chain(withdrawal_requests.into_iter().map(Request::WithdrawalRequest)) + .collect::>(); + + let requests_root = proofs::calculate_requests_root(&requests); + block.header.requests_root = Some(requests_root); + block.requests = Some(requests.into()); + + Ok(block) } -/// Converts [SealedBlock] to [ExecutionPayload], returning additional data (the parent beacon block -/// root) if the block is a V3 payload +/// Converts [SealedBlock] to [ExecutionPayload] pub fn block_to_payload(value: SealedBlock) -> (ExecutionPayload, Option) { - // todo(onbjerg): check for requests_root here and return payload v4 - if value.header.parent_beacon_block_root.is_some() { + if value.header.requests_root.is_some() { + (ExecutionPayload::V4(block_to_payload_v4(value)), None) + } else if value.header.parent_beacon_block_root.is_some() { // block with parent beacon block root: V3 let (payload, beacon_block_root) = block_to_payload_v3(value); (ExecutionPayload::V3(payload), beacon_block_root) @@ -192,6 +209,33 @@ pub fn block_to_payload_v3(value: SealedBlock) -> (ExecutionPayloadV3, Option ExecutionPayloadV4 { + let (deposit_requests, withdrawal_requests) = + value.requests.take().unwrap_or_default().into_iter().fold( + (Vec::new(), Vec::new()), + |(mut deposits, mut withdrawals), request| { + match request { + Request::DepositRequest(r) => { + deposits.push(r); + } + Request::WithdrawalRequest(r) => { + withdrawals.push(r); + } + _ => {} + }; + + (deposits, withdrawals) + }, + ); + 
+ ExecutionPayloadV4 { + deposit_requests, + withdrawal_requests, + payload_inner: block_to_payload_v3(value).0, + } +} + /// Converts [SealedBlock] to [ExecutionPayloadFieldV2] pub fn convert_block_to_payload_field_v2(value: SealedBlock) -> ExecutionPayloadFieldV2 { // if there are withdrawals, return V2 diff --git a/crates/rpc/rpc/src/eth/api/pending_block.rs b/crates/rpc/rpc/src/eth/api/pending_block.rs index dbb148981bf7c..b58949a67548b 100644 --- a/crates/rpc/rpc/src/eth/api/pending_block.rs +++ b/crates/rpc/rpc/src/eth/api/pending_block.rs @@ -8,8 +8,9 @@ use reth_primitives::{ revm_primitives::{ BlockEnv, CfgEnvWithHandlerCfg, EVMError, Env, InvalidTransaction, ResultAndState, SpecId, }, + trie::EMPTY_ROOT_HASH, Block, BlockId, BlockNumberOrTag, ChainSpec, Header, IntoRecoveredTransaction, Receipt, - Receipts, SealedBlockWithSenders, SealedHeader, B256, EMPTY_OMMER_ROOT_HASH, U256, + Receipts, Requests, SealedBlockWithSenders, SealedHeader, B256, EMPTY_OMMER_ROOT_HASH, U256, }; use reth_provider::{BundleStateWithReceipts, ChainSpecProvider, StateProviderFactory}; use reth_revm::{ @@ -240,6 +241,15 @@ impl PendingBlockEnv { let blob_gas_used = if cfg.handler_cfg.spec_id >= SpecId::CANCUN { Some(sum_blob_gas_used) } else { None }; + // note(onbjerg): the rpc spec has not been changed to include requests, so for now we just + // set these to empty + let (requests, requests_root) = + if chain_spec.is_prague_active_at_timestamp(block_env.timestamp.to::()) { + (Some(Requests::default()), Some(EMPTY_ROOT_HASH)) + } else { + (None, None) + }; + let header = Header { parent_hash, ommers_hash: EMPTY_OMMER_ROOT_HASH, @@ -261,10 +271,11 @@ impl PendingBlockEnv { excess_blob_gas: block_env.get_blob_excess_gas(), extra_data: Default::default(), parent_beacon_block_root, + requests_root, }; // seal the block - let block = Block { header, body: executed_txs, ommers: vec![], withdrawals }; + let block = Block { header, body: executed_txs, ommers: vec![], withdrawals, 
requests }; Ok(SealedBlockWithSenders { block: block.seal_slow(), senders }) } } diff --git a/crates/stages/src/stages/bodies.rs b/crates/stages/src/stages/bodies.rs index bce56880a9537..cc5a291fed50c 100644 --- a/crates/stages/src/stages/bodies.rs +++ b/crates/stages/src/stages/bodies.rs @@ -127,6 +127,7 @@ impl Stage for BodyStage { let mut tx_block_cursor = tx.cursor_write::()?; let mut ommers_cursor = tx.cursor_write::()?; let mut withdrawals_cursor = tx.cursor_write::()?; + let mut requests_cursor = tx.cursor_write::()?; // Get id for the next tx_num of zero if there are no transactions. let mut next_tx_num = tx_block_cursor.last()?.map(|(id, _)| id + 1).unwrap_or_default(); @@ -238,6 +239,13 @@ impl Stage for BodyStage { .append(block_number, StoredBlockWithdrawals { withdrawals })?; } } + + // Write requests if any + if let Some(requests) = block.requests { + if !requests.0.is_empty() { + requests_cursor.append(block_number, requests)?; + } + } } BlockResponse::Empty(_) => {} }; @@ -273,6 +281,7 @@ impl Stage for BodyStage { let mut body_cursor = tx.cursor_write::()?; let mut ommers_cursor = tx.cursor_write::()?; let mut withdrawals_cursor = tx.cursor_write::()?; + let mut requests_cursor = tx.cursor_write::()?; // Cursors to unwind transitions let mut tx_block_cursor = tx.cursor_write::()?; @@ -292,6 +301,11 @@ impl Stage for BodyStage { withdrawals_cursor.delete_current()?; } + // Delete the requests entry if any + if requests_cursor.seek_exact(number)?.is_some() { + requests_cursor.delete_current()?; + } + // Delete all transaction to block values. 
if !block_meta.is_empty() && tx_block_cursor.seek_exact(block_meta.last_tx_num())?.is_some() @@ -663,6 +677,7 @@ mod tests { transactions: block.body.clone(), ommers: block.ommers.clone(), withdrawals: block.withdrawals.clone(), + requests: block.requests.clone(), }, ) } @@ -941,6 +956,7 @@ mod tests { body: body.transactions, ommers: body.ommers, withdrawals: body.withdrawals, + requests: body.requests, })); } diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index d3bcfba171191..2c03e34a434fa 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -273,7 +273,8 @@ where } } let time = Instant::now(); - let BatchBlockExecutionOutput { bundle, receipts, first_block } = executor.finalize(); + let BatchBlockExecutionOutput { bundle, receipts, requests: _, first_block } = + executor.finalize(); let state = BundleStateWithReceipts::new(bundle, receipts, first_block); let write_preparation_duration = time.elapsed(); diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index cdf33b40f2f0a..7590f9d066aac 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -516,7 +516,7 @@ mod tests { accounts.iter().map(|(addr, acc)| (*addr, (*acc, std::iter::empty()))), )?; - let SealedBlock { header, body, ommers, withdrawals } = random_block( + let SealedBlock { header, body, ommers, withdrawals, requests } = random_block( &mut rng, stage_progress, preblocks.last().map(|b| b.hash()), @@ -531,7 +531,8 @@ mod tests { .into_iter() .map(|(address, account)| (address, (account, std::iter::empty()))), ); - let sealed_head = SealedBlock { header: header.seal_slow(), body, ommers, withdrawals }; + let sealed_head = + SealedBlock { header: header.seal_slow(), body, ommers, withdrawals, requests }; let head_hash = sealed_head.hash(); let mut blocks = vec![sealed_head]; diff --git a/crates/storage/codecs/Cargo.toml 
b/crates/storage/codecs/Cargo.toml index 958ccf9174eaa..e370233d1c7d7 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -15,8 +15,9 @@ workspace = true reth-codecs-derive = { path = "./derive", default-features = false } # eth +alloy-consensus = { workspace = true, optional = true } alloy-eips = { workspace = true, optional = true } -alloy-genesis = { workspace = true, optional = true } +alloy-genesis = { workspace = true, optional = true } alloy-primitives.workspace = true # misc @@ -25,7 +26,10 @@ modular-bitfield = { workspace = true, optional = true } serde.workspace = true [dev-dependencies] -alloy-eips = { workspace = true, default-features = false, features = ["arbitrary", "serde"] } +alloy-eips = { workspace = true, default-features = false, features = [ + "arbitrary", + "serde", +] } alloy-primitives = { workspace = true, features = ["arbitrary", "serde"] } test-fuzz.workspace = true serde_json.workspace = true @@ -37,5 +41,10 @@ proptest-derive.workspace = true [features] default = ["std", "alloy"] std = ["alloy-primitives/std", "bytes/std"] -alloy = ["dep:alloy-eips", "dep:alloy-genesis", "dep:modular-bitfield"] +alloy = [ + "dep:alloy-consensus", + "dep:alloy-eips", + "dep:alloy-genesis", + "dep:modular-bitfield", +] optimism = ["reth-codecs-derive/optimism"] diff --git a/crates/storage/codecs/src/alloy/mod.rs b/crates/storage/codecs/src/alloy/mod.rs index 664ab26077cde..b36f4c94312b6 100644 --- a/crates/storage/codecs/src/alloy/mod.rs +++ b/crates/storage/codecs/src/alloy/mod.rs @@ -1,5 +1,6 @@ mod access_list; mod genesis_account; mod log; +mod request; mod txkind; mod withdrawal; diff --git a/crates/storage/codecs/src/alloy/request.rs b/crates/storage/codecs/src/alloy/request.rs new file mode 100644 index 0000000000000..d5d4daa4af94c --- /dev/null +++ b/crates/storage/codecs/src/alloy/request.rs @@ -0,0 +1,39 @@ +//! Native Compact codec impl for EIP-7685 requests. 
+ +use crate::Compact; +use alloy_consensus::Request; +use alloy_eips::eip7685::{Decodable7685, Encodable7685}; +use alloy_primitives::Bytes; +use bytes::BufMut; + +impl Compact for Request { + fn to_compact(self, buf: &mut B) -> usize + where + B: BufMut + AsMut<[u8]>, + { + let encoded: Bytes = self.encoded_7685().into(); + encoded.to_compact(buf) + } + + fn from_compact(buf: &[u8], _: usize) -> (Self, &[u8]) { + let (raw, buf) = Bytes::from_compact(buf, buf.len()); + + (Request::decode_7685(&mut raw.as_ref()).expect("invalid eip-7685 request in db"), buf) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use proptest::proptest; + + proptest! { + #[test] + fn roundtrip(request: Request) { + let mut buf = Vec::::new(); + request.to_compact(&mut buf); + let (decoded, _) = Request::from_compact(&buf, buf.len()); + assert_eq!(request, decoded); + } + } +} diff --git a/crates/storage/db/src/tables/codecs/compact.rs b/crates/storage/db/src/tables/codecs/compact.rs index aed8d97efee93..907fb21465287 100644 --- a/crates/storage/db/src/tables/codecs/compact.rs +++ b/crates/storage/db/src/tables/codecs/compact.rs @@ -51,6 +51,7 @@ impl_compression_for_compact!( StageCheckpoint, PruneCheckpoint, ClientVersion, + Requests, // Non-DB GenesisAccount ); diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index b106623259c83..cc420fabcc93e 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -41,7 +41,7 @@ use reth_primitives::{ stage::StageCheckpoint, trie::{StorageTrieEntry, StoredBranchNode, StoredNibbles, StoredNibblesSubKey}, Account, Address, BlockHash, BlockNumber, Bytecode, Header, IntegerList, PruneCheckpoint, - PruneSegment, Receipt, StorageEntry, TransactionSignedNoHash, TxHash, TxNumber, B256, + PruneSegment, Receipt, Requests, StorageEntry, TransactionSignedNoHash, TxHash, TxNumber, B256, }; use std::fmt; @@ -377,6 +377,9 @@ tables! 
{ /// Stores the history of client versions that have accessed the database with write privileges by unix timestamp in seconds. table VersionHistory; + + /// Stores EIP-7685 EL -> CL requests, indexed by block number. + table BlockRequests; } // Alias types. diff --git a/crates/storage/provider/src/providers/database/metrics.rs b/crates/storage/provider/src/providers/database/metrics.rs index c103ae5f6d051..0eb21aed8a141 100644 --- a/crates/storage/provider/src/providers/database/metrics.rs +++ b/crates/storage/provider/src/providers/database/metrics.rs @@ -56,6 +56,7 @@ pub(crate) enum Action { InsertTransactions, InsertTransactionHashNumbers, InsertBlockWithdrawals, + InsertBlockRequests, InsertBlockBodyIndices, InsertTransactionBlocks, @@ -83,6 +84,7 @@ impl Action { Action::InsertTransactions => "insert transactions", Action::InsertTransactionHashNumbers => "insert transaction hash numbers", Action::InsertBlockWithdrawals => "insert block withdrawals", + Action::InsertBlockRequests => "insert block requests", Action::InsertBlockBodyIndices => "insert block body indices", Action::InsertTransactionBlocks => "insert transaction blocks", Action::GetNextTxNum => "get next tx num", diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index e8987b7d44c31..354b5693701b7 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -4,8 +4,9 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory, EvmEnvProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HeaderSyncMode, - ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProviderBox, - StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, + ProviderError, PruneCheckpointReader, RequestsProvider, StageCheckpointReader, + 
StateProviderBox, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, + WithdrawalsProvider, }; use reth_db::{ database::Database, init_db, mdbx::DatabaseArguments, models::StoredBlockBodyIndices, @@ -465,6 +466,19 @@ impl WithdrawalsProvider for ProviderFactory { } } +impl RequestsProvider for ProviderFactory +where + DB: Database, +{ + fn requests_by_block( + &self, + id: BlockHashOrNumber, + timestamp: u64, + ) -> ProviderResult> { + self.provider()?.requests_by_block(id, timestamp) + } +} + impl StageCheckpointReader for ProviderFactory { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { self.provider()?.get_stage_checkpoint(id) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index b3c2608bf7226..3a571bbd0dc2b 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -9,8 +9,9 @@ use crate::{ Chain, EvmEnvProvider, HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HeaderSyncMode, HistoricalStateProvider, HistoryWriter, LatestStateProvider, OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, - StageCheckpointReader, StateProviderBox, StateWriter, StatsReader, StorageReader, - TransactionVariant, TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, + RequestsProvider, StageCheckpointReader, StateProviderBox, StateWriter, StatsReader, + StorageReader, TransactionVariant, TransactionsProvider, TransactionsProviderExt, + WithdrawalsProvider, }; use itertools::{izip, Itertools}; use reth_db::{ @@ -35,9 +36,10 @@ use reth_primitives::{ trie::Nibbles, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, ChainInfo, ChainSpec, GotExpected, Head, Header, PruneCheckpoint, PruneLimiter, PruneModes, - PruneSegment, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, 
StaticFileSegment, - StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, - TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, + PruneSegment, Receipt, Requests, SealedBlock, SealedBlockWithSenders, SealedHeader, + StaticFileSegment, StorageEntry, TransactionMeta, TransactionSigned, + TransactionSignedEcRecovered, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, + Withdrawals, B256, U256, }; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use reth_trie::{ @@ -712,7 +714,14 @@ impl DatabaseProvider { &self, range: impl RangeBounds + Clone, ) -> ProviderResult> { - // For block we need Headers, Bodies, Uncles, withdrawals, Transactions, Signers + // For blocks we need: + // + // - Headers + // - Bodies (transactions) + // - Uncles/ommers + // - Withdrawals + // - Requests + // - Signers let block_headers = self.get_or_take::(range.clone())?; if block_headers.is_empty() { @@ -724,6 +733,7 @@ impl DatabaseProvider { let block_ommers = self.get_or_take::(range.clone())?; let block_withdrawals = self.get_or_take::(range.clone())?; + let block_requests = self.get_or_take::(range.clone())?; let block_tx = self.get_take_block_transaction_range::(range.clone())?; @@ -747,8 +757,10 @@ impl DatabaseProvider { // Ommers can be empty for some blocks let mut block_ommers_iter = block_ommers.into_iter(); let mut block_withdrawals_iter = block_withdrawals.into_iter(); + let mut block_requests_iter = block_requests.into_iter(); let mut block_ommers = block_ommers_iter.next(); let mut block_withdrawals = block_withdrawals_iter.next(); + let mut block_requests = block_requests_iter.next(); let mut blocks = Vec::new(); for ((main_block_number, header), (_, header_hash), (_, tx)) in @@ -782,8 +794,22 @@ impl DatabaseProvider { withdrawals = None } + // requests can be missing + let prague_is_active = self.chain_spec.is_prague_active_at_timestamp(header.timestamp); + let mut requests = 
Some(Requests::default()); + if prague_is_active { + if let Some((block_number, _)) = block_requests.as_ref() { + if *block_number == main_block_number { + requests = Some(block_requests.take().unwrap().1); + block_requests = block_requests_iter.next(); + } + } + } else { + requests = None; + } + blocks.push(SealedBlockWithSenders { - block: SealedBlock { header, body, ommers, withdrawals }, + block: SealedBlock { header, body, ommers, withdrawals, requests }, senders, }) } @@ -1297,7 +1323,13 @@ impl DatabaseProvider { mut assemble_block: F, ) -> ProviderResult> where - F: FnMut(Range, Header, Vec
, Option) -> ProviderResult, + F: FnMut( + Range, + Header, + Vec
, + Option, + Option, + ) -> ProviderResult, { if range.is_empty() { return Ok(Vec::new()) @@ -1309,6 +1341,7 @@ impl DatabaseProvider { let headers = self.headers_range(range)?; let mut ommers_cursor = self.tx.cursor_read::()?; let mut withdrawals_cursor = self.tx.cursor_read::()?; + let mut requests_cursor = self.tx.cursor_read::()?; let mut block_body_cursor = self.tx.cursor_read::()?; for header in headers { @@ -1333,6 +1366,11 @@ impl DatabaseProvider { } else { None }; + let requests = if self.chain_spec.is_prague_active_at_timestamp(header.timestamp) { + Some(requests_cursor.seek_exact(header.number)?.unwrap_or_default().1) + } else { + None + }; let ommers = if self.chain_spec.final_paris_total_difficulty(header.number).is_some() { Vec::new() @@ -1342,7 +1380,7 @@ impl DatabaseProvider { .map(|(_, o)| o.ommers) .unwrap_or_default() }; - if let Ok(b) = assemble_block(tx_range, header, ommers, withdrawals) { + if let Ok(b) = assemble_block(tx_range, header, ommers, withdrawals, requests) { blocks.push(b); } } @@ -1370,6 +1408,7 @@ impl BlockReader for DatabaseProvider { if let Some(header) = self.header_by_number(number)? { let withdrawals = self.withdrawals_by_block(number.into(), header.timestamp)?; let ommers = self.ommers(number.into())?.unwrap_or_default(); + let requests = self.requests_by_block(number.into(), header.timestamp)?; // If the body indices are not found, this means that the transactions either do not // exist in the database yet, or they do exit but are not indexed. 
// If they exist but are not indexed, we don't have enough @@ -1379,7 +1418,7 @@ impl BlockReader for DatabaseProvider { None => return Ok(None), }; - return Ok(Some(Block { header, body: transactions, ommers, withdrawals })) + return Ok(Some(Block { header, body: transactions, ommers, withdrawals, requests })) } } @@ -1435,6 +1474,7 @@ impl BlockReader for DatabaseProvider { let ommers = self.ommers(block_number.into())?.unwrap_or_default(); let withdrawals = self.withdrawals_by_block(block_number.into(), header.timestamp)?; + let requests = self.requests_by_block(block_number.into(), header.timestamp)?; // Get the block body // @@ -1465,7 +1505,7 @@ impl BlockReader for DatabaseProvider { }) .collect(); - Block { header, body, ommers, withdrawals } + Block { header, body, ommers, withdrawals, requests } // Note: we're using unchecked here because we know the block contains valid txs wrt to // its height and can ignore the s value check so pre EIP-2 txs are allowed .try_with_senders_unchecked(senders) @@ -1475,7 +1515,7 @@ impl BlockReader for DatabaseProvider { fn block_range(&self, range: RangeInclusive) -> ProviderResult> { let mut tx_cursor = self.tx.cursor_read::()?; - self.process_block_range(range, |tx_range, header, ommers, withdrawals| { + self.process_block_range(range, |tx_range, header, ommers, withdrawals, requests| { let body = if tx_range.is_empty() { Vec::new() } else { @@ -1484,7 +1524,7 @@ impl BlockReader for DatabaseProvider { .map(Into::into) .collect() }; - Ok(Block { header, body, ommers, withdrawals }) + Ok(Block { header, body, ommers, withdrawals, requests }) }) } @@ -1495,7 +1535,7 @@ impl BlockReader for DatabaseProvider { let mut tx_cursor = self.tx.cursor_read::()?; let mut senders_cursor = self.tx.cursor_read::()?; - self.process_block_range(range, |tx_range, header, ommers, withdrawals| { + self.process_block_range(range, |tx_range, header, ommers, withdrawals, requests| { let (body, senders) = if tx_range.is_empty() { (Vec::new(), 
Vec::new()) } else { @@ -1527,7 +1567,7 @@ impl BlockReader for DatabaseProvider { (body, senders) }; - Block { header, body, ommers, withdrawals } + Block { header, body, ommers, withdrawals, requests } .try_with_senders_unchecked(senders) .map_err(|_| ProviderError::SenderRecoveryError) }) @@ -1835,6 +1875,24 @@ impl WithdrawalsProvider for DatabaseProvider { } } +impl RequestsProvider for DatabaseProvider { + fn requests_by_block( + &self, + id: BlockHashOrNumber, + timestamp: u64, + ) -> ProviderResult> { + if self.chain_spec.is_prague_active_at_timestamp(timestamp) { + if let Some(number) = self.convert_hash_or_number(id)? { + // If we are past Prague, then all blocks should have a requests list, even if + // empty + let requests = self.tx.get::(number)?.unwrap_or_default(); + return Ok(Some(requests)) + } + } + Ok(None) + } +} + impl EvmEnvProvider for DatabaseProvider { fn fill_env_at( &self, @@ -2587,6 +2645,13 @@ impl BlockWriter for DatabaseProvider { } } + if let Some(requests) = block.block.requests { + if !requests.0.is_empty() { + self.tx.put::(block_number, requests)?; + durations_recorder.record_relative(metrics::Action::InsertBlockRequests); + } + } + let block_indices = StoredBlockBodyIndices { first_tx_num, tx_count }; self.tx.put::(block_number, block_indices.clone())?; durations_recorder.record_relative(metrics::Action::InsertBlockBodyIndices); diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index d6a7d34c816ac..a8db59b0620f9 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -3,9 +3,9 @@ use crate::{ BlockSource, BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, FullBundleStateDataProvider, HeaderProvider, ProviderError, - PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, 
StageCheckpointReader, - StateProviderBox, StateProviderFactory, StaticFileProviderFactory, TransactionVariant, - TransactionsProvider, TreeViewer, WithdrawalsProvider, + PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, RequestsProvider, + StageCheckpointReader, StateProviderBox, StateProviderFactory, StaticFileProviderFactory, + TransactionVariant, TransactionsProvider, TreeViewer, WithdrawalsProvider, }; use reth_blockchain_tree_api::{ error::{CanonicalError, InsertBlockError}, @@ -465,6 +465,19 @@ where } } +impl RequestsProvider for BlockchainProvider +where + DB: Database, +{ + fn requests_by_block( + &self, + id: BlockHashOrNumber, + timestamp: u64, + ) -> ProviderResult> { + self.database.requests_by_block(id, timestamp) + } +} + impl StageCheckpointReader for BlockchainProvider where DB: Database, diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 275c8935e1af3..1dfd15cd77783 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -4,7 +4,7 @@ use super::{ }; use crate::{ to_range, BlockHashReader, BlockNumReader, BlockReader, BlockSource, HeaderProvider, - ReceiptProvider, StatsReader, TransactionVariant, TransactionsProvider, + ReceiptProvider, RequestsProvider, StatsReader, TransactionVariant, TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, }; use dashmap::{mapref::entry::Entry as DashMapEntry, DashMap}; @@ -1128,6 +1128,17 @@ impl WithdrawalsProvider for StaticFileProvider { } } +impl RequestsProvider for StaticFileProvider { + fn requests_by_block( + &self, + _id: BlockHashOrNumber, + _timestamp: u64, + ) -> ProviderResult> { + // Required data not present in static_files + Err(ProviderError::UnsupportedProvider) + } +} + impl StatsReader for StaticFileProvider { fn count_entries(&self) -> ProviderResult { match T::NAME { diff --git 
a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 32ecb489758a2..a1e1134d397b2 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -8,8 +8,8 @@ use reth_primitives::{ hex_literal::hex, proofs::{state_root_unhashed, storage_root_unhashed}, revm::compat::into_reth_acc, - Address, BlockNumber, Bytes, Header, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, - TxType, Withdrawal, Withdrawals, B256, U256, + Address, BlockNumber, Bytes, Header, Receipt, Receipts, Requests, SealedBlock, + SealedBlockWithSenders, TxType, Withdrawal, Withdrawals, B256, U256, }; use revm::{ db::BundleState, @@ -37,6 +37,7 @@ pub fn assert_genesis_block(provider: &DatabaseProviderRW, g: ); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); + assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); @@ -108,6 +109,7 @@ pub fn genesis() -> SealedBlock { body: vec![], ommers: vec![], withdrawals: Some(Withdrawals::default()), + requests: Some(Requests::default()), } } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 7dd7c5b4dc5b4..3498677c0f340 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -2,8 +2,9 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, FullBundleStateDataProvider, - HeaderProvider, ReceiptProviderIdExt, StateProvider, StateProviderBox, StateProviderFactory, - StateRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, + HeaderProvider, ReceiptProviderIdExt, RequestsProvider, StateProvider, 
StateProviderBox, + StateProviderFactory, StateRootProvider, TransactionVariant, TransactionsProvider, + WithdrawalsProvider, }; use parking_lot::Mutex; use reth_db::models::{AccountBeforeTx, StoredBlockBodyIndices}; @@ -679,6 +680,16 @@ impl WithdrawalsProvider for MockEthProvider { } } +impl RequestsProvider for MockEthProvider { + fn requests_by_block( + &self, + _id: BlockHashOrNumber, + _timestamp: u64, + ) -> ProviderResult> { + Ok(None) + } +} + impl ChangeSetReader for MockEthProvider { fn account_block_changeset( &self, diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 6593b74ccfa54..c97195f946446 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -2,7 +2,7 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, PruneCheckpointReader, - ReceiptProviderIdExt, StageCheckpointReader, StateProvider, StateProviderBox, + ReceiptProviderIdExt, RequestsProvider, StageCheckpointReader, StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; @@ -448,6 +448,16 @@ impl WithdrawalsProvider for NoopProvider { } } +impl RequestsProvider for NoopProvider { + fn requests_by_block( + &self, + _id: BlockHashOrNumber, + _timestamp: u64, + ) -> ProviderResult> { + Ok(None) + } +} + impl PruneCheckpointReader for NoopProvider { fn get_prune_checkpoint( &self, diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 070e31ed99329..bac6053f789bd 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -1,6 +1,6 @@ use crate::{ BlockIdReader, BlockNumReader, HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, - 
TransactionsProvider, WithdrawalsProvider, + RequestsProvider, TransactionsProvider, WithdrawalsProvider, }; use reth_db::models::StoredBlockBodyIndices; use reth_primitives::{ @@ -61,6 +61,7 @@ pub trait BlockReader: + HeaderProvider + TransactionsProvider + ReceiptProvider + + RequestsProvider + WithdrawalsProvider + Send + Sync diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index fdbb5e9a0ae55..814b9ac7b7f77 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -26,6 +26,9 @@ pub use header::*; mod receipts; pub use receipts::*; +mod requests; +pub use requests::*; + mod state; pub use state::*; diff --git a/crates/storage/storage-api/src/requests.rs b/crates/storage/storage-api/src/requests.rs new file mode 100644 index 0000000000000..c8b13dc05d327 --- /dev/null +++ b/crates/storage/storage-api/src/requests.rs @@ -0,0 +1,13 @@ +use reth_primitives::{BlockHashOrNumber, Requests}; +use reth_storage_errors::provider::ProviderResult; + +/// Client trait for fetching EIP-7685 [Requests] for blocks. +#[auto_impl::auto_impl(&, Arc)] +pub trait RequestsProvider: Send + Sync { + /// Get requests by block id. 
+ fn requests_by_block( + &self, + id: BlockHashOrNumber, + timestamp: u64, + ) -> ProviderResult>; +} diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs index 5f5f4cbf1eb0a..70c6c98a9fd09 100644 --- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -103,7 +103,7 @@ where let mut actions_to_queue: Vec = Vec::new(); if txs.is_empty() { - return; + return } match self.pool.get_all_blobs_exact(txs.iter().map(|(tx, _)| tx.hash()).collect()) { @@ -157,7 +157,7 @@ where // Request locally first, otherwise request from CL loop { if let Some(mined_sidecar) = this.queued_actions.pop_front() { - return Poll::Ready(Some(Ok(mined_sidecar))); + return Poll::Ready(Some(Ok(mined_sidecar))) } // Check if any pending requests are ready and append to buffer @@ -243,7 +243,7 @@ async fn fetch_blobs_for_block( response.status().as_u16(), "Unhandled HTTP status.".to_string(), )), - }; + } } let bytes = match response.bytes().await { diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index d16146420d0d9..7156440db6f2c 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -47,7 +47,7 @@ use reth_payload_builder::{ use reth_primitives::{Address, Chain, ChainSpec, Genesis, Header, Withdrawals, B256}; use reth_rpc_types::{ engine::{ - ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, + ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, PayloadAttributes as EthPayloadAttributes, PayloadId, }, ExecutionPayloadV1, Withdrawal, @@ -167,6 +167,7 @@ impl EngineTypes for CustomEngineTypes { type ExecutionPayloadV1 = ExecutionPayloadV1; type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; type ExecutionPayloadV3 = ExecutionPayloadEnvelopeV3; + type ExecutionPayloadV4 = ExecutionPayloadEnvelopeV4; fn 
validate_version_specific_fields( chain_spec: &ChainSpec, diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index a30aa3d930f24..c40f89b13f6c1 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -86,6 +86,8 @@ pub struct Header { pub excess_blob_gas: Option, /// Parent beacon block root. pub parent_beacon_block_root: Option, + /// Requests root. + pub requests_root: Option, } impl From
for SealedHeader { @@ -111,6 +113,7 @@ impl From
for SealedHeader { blob_gas_used: value.blob_gas_used.map(|v| v.to::()), excess_blob_gas: value.excess_blob_gas.map(|v| v.to::()), parent_beacon_block_root: value.parent_beacon_block_root, + requests_root: value.requests_root, }; header.seal(value.hash) } From c54276e740d0691ee9fb59041bd36d9477ac07f9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 May 2024 17:25:19 +0200 Subject: [PATCH 656/700] chore: misc storage api (#8432) --- .../bundle_state_with_receipts.rs | 34 ++++++++----------- crates/storage/storage-api/src/block.rs | 12 +------ .../storage/storage-api/src/transactions.rs | 16 +++++++-- 3 files changed, 30 insertions(+), 32 deletions(-) diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index 90f7e5f8afc4d..37cdc9484fc20 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -65,21 +65,8 @@ impl StateWriter for BundleStateWithReceipts { #[cfg(test)] mod tests { - use std::collections::{BTreeMap, HashMap}; - - use revm::{ - db::{ - states::{ - bundle_state::BundleRetention, changes::PlainStorageRevert, PlainStorageChangeset, - }, - BundleState, EmptyDB, - }, - primitives::{ - Account as RevmAccount, AccountInfo as RevmAccountInfo, AccountStatus, StorageSlot, - }, - DatabaseCommit, State, - }; - + use super::*; + use crate::{test_utils::create_test_provider_factory, AccountReader}; use reth_db::{ cursor::DbDupCursorRO, database::Database, @@ -92,10 +79,19 @@ mod tests { Account, Address, Receipt, Receipts, StorageEntry, B256, U256, }; use reth_trie::{test_utils::state_root, StateRoot}; - - use crate::{test_utils::create_test_provider_factory, AccountReader}; - - use super::*; + use revm::{ + db::{ + states::{ + bundle_state::BundleRetention, changes::PlainStorageRevert, PlainStorageChangeset, + }, + BundleState, EmptyDB, + }, + 
primitives::{ + Account as RevmAccount, AccountInfo as RevmAccountInfo, AccountStatus, StorageSlot, + }, + DatabaseCommit, State, + }; + use std::collections::{BTreeMap, HashMap}; #[test] fn write_to_db_account_info() { diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index bac6053f789bd..c85b15e39bd7a 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -1,6 +1,6 @@ use crate::{ BlockIdReader, BlockNumReader, HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, - RequestsProvider, TransactionsProvider, WithdrawalsProvider, + RequestsProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use reth_db::models::StoredBlockBodyIndices; use reth_primitives::{ @@ -10,16 +10,6 @@ use reth_primitives::{ use reth_storage_errors::provider::ProviderResult; use std::ops::RangeInclusive; -/// Enum to control transaction hash inclusion. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)] -pub enum TransactionVariant { - /// Indicates that transactions should be processed without including their hashes. - NoHash, - /// Indicates that transactions should be processed along with their hashes. - #[default] - WithHash, -} - /// A helper enum that represents the origin of the requested block. 
/// /// This helper type's sole purpose is to give the caller more control over from where blocks can be diff --git a/crates/storage/storage-api/src/transactions.rs b/crates/storage/storage-api/src/transactions.rs index 763632a38331d..0553ef787f055 100644 --- a/crates/storage/storage-api/src/transactions.rs +++ b/crates/storage/storage-api/src/transactions.rs @@ -1,11 +1,23 @@ -use std::ops::{Range, RangeBounds, RangeInclusive}; - use crate::{BlockNumReader, BlockReader}; use reth_primitives::{ Address, BlockHashOrNumber, BlockNumber, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; +use std::ops::{Range, RangeBounds, RangeInclusive}; + +/// Enum to control transaction hash inclusion. +/// +/// This serves as a hint to the provider to include or omit hashes because hashes are +/// stored separately and are not always needed. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)] +pub enum TransactionVariant { + /// Indicates that transactions should be processed without including their hashes. + NoHash, + /// Indicates that transactions should be processed along with their hashes. + #[default] + WithHash, +} /// Client trait for fetching [TransactionSigned] related data. 
#[auto_impl::auto_impl(&, Arc)] From 9874a1b34eddc0fe7453f57864172915daecab48 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 May 2024 17:27:44 +0200 Subject: [PATCH 657/700] chore: move Chain type (#8433) --- Cargo.lock | 2 +- crates/blockchain-tree/src/blockchain_tree.rs | 5 ++--- crates/evm/execution-types/Cargo.toml | 4 ++++ .../{storage/provider => evm/execution-types}/src/chain.rs | 2 +- crates/evm/execution-types/src/lib.rs | 3 +++ crates/storage/provider/Cargo.toml | 1 - crates/storage/provider/src/lib.rs | 5 ++--- crates/storage/provider/src/traits/chain.rs | 2 +- crates/transaction-pool/src/blobstore/tracker.rs | 2 +- 9 files changed, 15 insertions(+), 11 deletions(-) rename crates/{storage/provider => evm/execution-types}/src/chain.rs (99%) diff --git a/Cargo.lock b/Cargo.lock index 031ff14752e3e..266786d85f57c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7066,6 +7066,7 @@ name = "reth-execution-types" version = "0.2.0-beta.7" dependencies = [ "reth-evm", + "reth-execution-errors", "reth-primitives", "reth-trie", "revm", @@ -7678,7 +7679,6 @@ dependencies = [ "reth-codecs", "reth-db", "reth-evm", - "reth-execution-errors", "reth-execution-types", "reth-fs-util", "reth-interfaces", diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 0f5a249a4591d..c653e9374b3c6 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -18,11 +18,10 @@ use reth_primitives::{ SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, B256, U256, }; use reth_provider::{ - chain::{ChainSplit, ChainSplitTarget}, BlockExecutionWriter, BlockNumReader, BlockWriter, BundleStateWithReceipts, CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, Chain, - ChainSpecProvider, DisplayBlocksChain, HeaderProvider, ProviderError, - StaticFileProviderFactory, + ChainSpecProvider, ChainSplit, ChainSplitTarget, DisplayBlocksChain, 
HeaderProvider, + ProviderError, StaticFileProviderFactory, }; use reth_stages_api::{MetricEvent, MetricEventsSender}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index c998f9a9aa5cd..f24c84be314cc 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -12,10 +12,14 @@ workspace = true [dependencies] reth-primitives.workspace = true +reth-execution-errors.workspace = true reth-trie.workspace = true reth-evm.workspace = true revm.workspace = true +[dev-dependencies] +reth-primitives = { workspace = true, features = ["test-utils"] } + [features] optimism = [] \ No newline at end of file diff --git a/crates/storage/provider/src/chain.rs b/crates/evm/execution-types/src/chain.rs similarity index 99% rename from crates/storage/provider/src/chain.rs rename to crates/evm/execution-types/src/chain.rs index a1064c4dce0d6..8b5240719b714 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -1,6 +1,6 @@ //! Contains [Chain], a chain of blocks and their final state. 
-use crate::bundle_state::BundleStateWithReceipts; +use crate::BundleStateWithReceipts; use reth_execution_errors::BlockExecutionError; use reth_primitives::{ Address, BlockHash, BlockNumHash, BlockNumber, ForkBlock, Receipt, SealedBlock, diff --git a/crates/evm/execution-types/src/lib.rs b/crates/evm/execution-types/src/lib.rs index 576913997fc2f..7680e70852d3b 100644 --- a/crates/evm/execution-types/src/lib.rs +++ b/crates/evm/execution-types/src/lib.rs @@ -10,3 +10,6 @@ mod bundle; pub use bundle::*; + +mod chain; +pub use chain::*; diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 5fde8879769e5..1bc9ff7ce4aab 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -14,7 +14,6 @@ workspace = true [dependencies] # reth reth-blockchain-tree-api.workspace = true -reth-execution-errors.workspace = true reth-execution-types.workspace = true reth-primitives.workspace = true reth-fs-util.workspace = true diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 864a962414e94..0d232531889bd 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -31,11 +31,10 @@ pub mod test_utils; /// Re-export provider error. pub use reth_storage_errors::provider::ProviderError; -pub mod chain; -pub use chain::{Chain, DisplayBlocksChain}; +pub use reth_execution_types::*; pub mod bundle_state; -pub use bundle_state::{BundleStateWithReceipts, OriginalValuesKnown, StateChanges, StateReverts}; +pub use bundle_state::{OriginalValuesKnown, StateChanges, StateReverts}; pub(crate) fn to_range>(bounds: R) -> std::ops::Range { let start = match bounds.start_bound() { diff --git a/crates/storage/provider/src/traits/chain.rs b/crates/storage/provider/src/traits/chain.rs index df51aecb771cc..6ed498a38cb01 100644 --- a/crates/storage/provider/src/traits/chain.rs +++ b/crates/storage/provider/src/traits/chain.rs @@ -1,6 +1,6 @@ //! 
Canonical chain state notification trait and types. -use crate::{chain::BlockReceipts, Chain}; +use crate::{BlockReceipts, Chain}; use auto_impl::auto_impl; use reth_primitives::SealedBlockWithSenders; use std::{ diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index c9221002ad46c..96cb5552fc1e5 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -1,7 +1,7 @@ //! Support for maintaining the blob pool. use reth_primitives::{BlockNumber, B256}; -use reth_provider::chain::ChainBlocks; +use reth_provider::ChainBlocks; use std::collections::BTreeMap; /// The type that is used to track canonical blob transactions. From 911a3a4a92cca5550a347e3fcd5561f6f97e3c43 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 May 2024 17:28:50 +0200 Subject: [PATCH 658/700] chore: replace reth-provider dep in reth-evm (#8430) --- Cargo.lock | 2 +- crates/revm/Cargo.toml | 2 +- crates/revm/src/database.rs | 66 +++++++++++++++++++++++++++++++---- crates/revm/src/test_utils.rs | 2 +- 4 files changed, 63 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 266786d85f57c..ac41650c8f828 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7727,7 +7727,7 @@ dependencies = [ "reth-consensus-common", "reth-execution-errors", "reth-primitives", - "reth-provider", + "reth-storage-api", "reth-storage-errors", "reth-trie", "revm", diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index fe93edd506f4b..715e306f3c1f8 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -14,10 +14,10 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true -reth-provider.workspace = true reth-storage-errors.workspace = true reth-execution-errors.workspace = true reth-consensus-common.workspace = true +reth-storage-api.workspace = true reth-trie = { workspace = true, optional = true } # revm diff --git 
a/crates/revm/src/database.rs b/crates/revm/src/database.rs index b7cf362fee11d..02a9cdd2b2b03 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -1,6 +1,6 @@ -use reth_primitives::{Address, B256, KECCAK_EMPTY, U256}; -use reth_provider::StateProvider; -use reth_storage_errors::provider::ProviderError; +use crate::primitives::alloy_primitives::{BlockNumber, StorageKey, StorageValue}; +use reth_primitives::{Account, Address, B256, KECCAK_EMPTY, U256}; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use revm::{ db::DatabaseRef, primitives::{AccountInfo, Bytecode}, @@ -8,7 +8,61 @@ use revm::{ }; use std::ops::{Deref, DerefMut}; -/// Wrapper around StateProvider that implements revm database trait +/// A helper trait responsible for providing that necessary state for the EVM execution. +/// +/// This servers as the data layer for [Database]. +pub trait EvmStateProvider: Send + Sync { + /// Get basic account information. + /// + /// Returns `None` if the account doesn't exist. + fn basic_account(&self, address: Address) -> ProviderResult>; + + /// Get the hash of the block with the given number. Returns `None` if no block with this number + /// exists. + fn block_hash(&self, number: BlockNumber) -> ProviderResult>; + + /// Get account code by its hash + fn bytecode_by_hash( + &self, + code_hash: B256, + ) -> ProviderResult>; + + /// Get storage of given account. + fn storage( + &self, + account: Address, + storage_key: StorageKey, + ) -> ProviderResult>; +} + +// Blanket implementation of EvmStateProvider for any type that implements StateProvider. 
+impl EvmStateProvider for T { + fn basic_account(&self, address: Address) -> ProviderResult> { + ::basic_account(self, address) + } + + fn block_hash(&self, number: BlockNumber) -> ProviderResult> { + ::block_hash(self, number) + } + + fn bytecode_by_hash( + &self, + code_hash: B256, + ) -> ProviderResult> { + ::bytecode_by_hash(self, code_hash) + } + + fn storage( + &self, + account: Address, + storage_key: StorageKey, + ) -> ProviderResult> { + ::storage(self, account, storage_key) + } +} + +/// A [Database] and [DatabaseRef] implementation that uses [EvmStateProvider] as the underlying +/// data source. #[derive(Debug, Clone)] pub struct StateProviderDatabase(pub DB); @@ -38,7 +92,7 @@ impl DerefMut for StateProviderDatabase { } } -impl Database for StateProviderDatabase { +impl Database for StateProviderDatabase { type Error = ProviderError; /// Retrieves basic account information for a given address. @@ -72,7 +126,7 @@ impl Database for StateProviderDatabase { } } -impl DatabaseRef for StateProviderDatabase { +impl DatabaseRef for StateProviderDatabase { type Error = ::Error; /// Retrieves basic account information for a given address. 
diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index d2045c45932b9..a10cb80d5ae06 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -2,7 +2,7 @@ use reth_primitives::{ keccak256, trie::AccountProof, Account, Address, BlockNumber, Bytecode, Bytes, StorageKey, B256, U256, }; -use reth_provider::{AccountReader, BlockHashReader, StateProvider, StateRootProvider}; +use reth_storage_api::{AccountReader, BlockHashReader, StateProvider, StateRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::updates::TrieUpdates; use revm::db::BundleState; From 329634256a9d2ddadc07b0a73e29bb9a3a7867cf Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 May 2024 18:04:46 +0200 Subject: [PATCH 659/700] chore: move generators module (#8429) --- Cargo.lock | 2 ++ crates/interfaces/Cargo.toml | 4 +++- crates/interfaces/src/lib.rs | 5 ++++- crates/net/p2p/Cargo.toml | 8 +------- crates/net/p2p/src/test_utils/mod.rs | 3 --- testing/testing-utils/Cargo.toml | 7 +++++-- .../testing-utils/src}/generators.rs | 2 ++ testing/testing-utils/src/lib.rs | 2 ++ 8 files changed, 19 insertions(+), 14 deletions(-) rename {crates/net/p2p/src/test_utils => testing/testing-utils/src}/generators.rs (99%) diff --git a/Cargo.lock b/Cargo.lock index ac41650c8f828..b19b4d6e81c3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7110,6 +7110,7 @@ dependencies = [ "reth-fs-util", "reth-network-p2p", "reth-storage-errors", + "reth-testing-utils", "thiserror", ] @@ -8072,6 +8073,7 @@ name = "reth-testing-utils" version = "0.2.0-beta.7" dependencies = [ "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "rand 0.8.5", "reth-primitives", "secp256k1 0.28.2", ] diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index 836ddebb5d34b..1c07af335ccd6 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -18,9 +18,11 @@ reth-fs-util.workspace = true 
reth-network-p2p.workspace = true reth-storage-errors.workspace = true +reth-testing-utils = { workspace = true, optional = true } + # misc thiserror.workspace = true [features] -test-utils = ["reth-consensus/test-utils", "reth-network-p2p/test-utils"] +test-utils = ["reth-consensus/test-utils", "reth-network-p2p/test-utils", "reth-testing-utils"] clap = ["reth-storage-errors/clap"] \ No newline at end of file diff --git a/crates/interfaces/src/lib.rs b/crates/interfaces/src/lib.rs index 651283bb8bd67..0a649e557a26e 100644 --- a/crates/interfaces/src/lib.rs +++ b/crates/interfaces/src/lib.rs @@ -36,4 +36,7 @@ pub use reth_blockchain_tree_api as blockchain_tree; /// Common test helpers for mocking out Consensus, Downloaders and Header Clients. #[cfg(feature = "test-utils")] -pub use reth_network_p2p::test_utils; +pub mod test_utils { + pub use reth_network_p2p::test_utils::*; + pub use reth_testing_utils::generators; +} diff --git a/crates/net/p2p/Cargo.toml b/crates/net/p2p/Cargo.toml index 34705d78e5225..0e3146ab6f39d 100644 --- a/crates/net/p2p/Cargo.toml +++ b/crates/net/p2p/Cargo.toml @@ -28,13 +28,7 @@ auto_impl.workspace = true thiserror.workspace = true tracing.workspace = true -secp256k1 = { workspace = true, default-features = false, features = [ - "alloc", - "recovery", - "rand", -], optional = true } parking_lot = { workspace = true, optional = true } -rand = { workspace = true, optional = true } [dev-dependencies] reth-consensus = { workspace = true, features = ["test-utils"] } @@ -45,4 +39,4 @@ tokio = { workspace = true, features = ["full"] } secp256k1 = { workspace = true, features = ["alloc", "recovery", "rand"] } [features] -test-utils = ["reth-consensus/test-utils", "secp256k1", "rand", "parking_lot"] +test-utils = ["reth-consensus/test-utils", "parking_lot"] diff --git a/crates/net/p2p/src/test_utils/mod.rs b/crates/net/p2p/src/test_utils/mod.rs index e56cd8f00f6b1..1abd215d02458 100644 --- a/crates/net/p2p/src/test_utils/mod.rs +++ 
b/crates/net/p2p/src/test_utils/mod.rs @@ -2,9 +2,6 @@ mod bodies; mod full_block; mod headers; -/// Generators for different data structures like block headers, block bodies and ranges of those. -pub mod generators; - pub use bodies::*; pub use full_block::*; pub use headers::*; diff --git a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml index 97a4c78dfcc37..815976dea562d 100644 --- a/testing/testing-utils/Cargo.toml +++ b/testing/testing-utils/Cargo.toml @@ -12,6 +12,9 @@ repository.workspace = true workspace = true [dependencies] -secp256k1.workspace = true -alloy-genesis.workspace = true reth-primitives.workspace = true + +alloy-genesis.workspace = true + +secp256k1.workspace = true +rand.workspace = true \ No newline at end of file diff --git a/crates/net/p2p/src/test_utils/generators.rs b/testing/testing-utils/src/generators.rs similarity index 99% rename from crates/net/p2p/src/test_utils/generators.rs rename to testing/testing-utils/src/generators.rs index 8056e90ff260d..551aa80e47225 100644 --- a/crates/net/p2p/src/test_utils/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -1,3 +1,5 @@ +//! Generators for different data structures like block headers, block bodies and ranges of those. 
+ pub use rand::Rng; use rand::{ distributions::uniform::SampleRange, rngs::StdRng, seq::SliceRandom, thread_rng, SeedableRng, diff --git a/testing/testing-utils/src/lib.rs b/testing/testing-utils/src/lib.rs index 27b54b19e5ba4..c593d306468ff 100644 --- a/testing/testing-utils/src/lib.rs +++ b/testing/testing-utils/src/lib.rs @@ -11,3 +11,5 @@ pub mod genesis_allocator; pub use genesis_allocator::GenesisAllocator; + +pub mod generators; From a5c3c63fe44dc9409a2687604147a228a8bfc056 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 May 2024 17:56:48 +0200 Subject: [PATCH 660/700] chore: rm reth-interfaces from auto-seal (#8434) --- Cargo.lock | 3 ++- crates/consensus/auto-seal/Cargo.toml | 3 ++- crates/consensus/auto-seal/src/client.rs | 2 +- crates/consensus/auto-seal/src/lib.rs | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b19b4d6e81c3e..e2e9e95557047 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6458,7 +6458,8 @@ dependencies = [ "reth-consensus", "reth-engine-primitives", "reth-evm", - "reth-interfaces", + "reth-execution-errors", + "reth-network-p2p", "reth-network-types", "reth-primitives", "reth-provider", diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml index ccbc1e06a32a7..21e192e282ec5 100644 --- a/crates/consensus/auto-seal/Cargo.toml +++ b/crates/consensus/auto-seal/Cargo.toml @@ -15,7 +15,8 @@ workspace = true # reth reth-beacon-consensus.workspace = true reth-primitives.workspace = true -reth-interfaces.workspace = true +reth-execution-errors.workspace = true +reth-network-p2p.workspace = true reth-provider.workspace = true reth-stages-api.workspace = true reth-revm.workspace = true diff --git a/crates/consensus/auto-seal/src/client.rs b/crates/consensus/auto-seal/src/client.rs index 67a84d5d9ebdb..d89061e9612b0 100644 --- a/crates/consensus/auto-seal/src/client.rs +++ b/crates/consensus/auto-seal/src/client.rs @@ -1,7 +1,7 @@ //! 
This includes download client implementations for auto sealing miners. use crate::Storage; -use reth_interfaces::p2p::{ +use reth_network_p2p::{ bodies::client::{BodiesClient, BodiesFut}, download::DownloadClient, headers::client::{HeadersClient, HeadersFut, HeadersRequest}, diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 69ca048e155e7..912b9feafba1d 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -18,7 +18,7 @@ use reth_beacon_consensus::BeaconEngineMessage; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_engine_primitives::EngineTypes; -use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; +use reth_execution_errors::{BlockExecutionError, BlockValidationError}; use reth_primitives::{ constants::{EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT}, eip4844::calculate_excess_blob_gas, From de4e0ba04175463353cd55507915411be9d098d4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 May 2024 18:11:08 +0200 Subject: [PATCH 661/700] chore: rm reth-interfaces from network (#8435) --- Cargo.lock | 2 +- crates/net/network/Cargo.toml | 4 ++-- crates/net/network/src/eth_requests.rs | 2 +- crates/net/network/src/fetch/client.rs | 4 ++-- crates/net/network/src/fetch/mod.rs | 4 ++-- crates/net/network/src/message.rs | 2 +- crates/net/network/src/network.rs | 2 +- crates/net/network/src/session/active.rs | 2 +- crates/net/network/src/state.rs | 2 +- crates/net/network/src/transactions/fetcher.rs | 2 +- crates/net/network/src/transactions/mod.rs | 13 ++++++++----- crates/net/network/tests/it/big_pooled_txs_req.rs | 2 +- crates/net/network/tests/it/connect.rs | 8 ++++---- crates/net/network/tests/it/requests.rs | 8 ++++---- 14 files changed, 30 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e2e9e95557047..40c83863b3ad6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7246,11 +7246,11 @@ dependencies = [ 
"reth-dns-discovery", "reth-ecies", "reth-eth-wire", - "reth-interfaces", "reth-metrics", "reth-net-common", "reth-network", "reth-network-api", + "reth-network-p2p", "reth-network-types", "reth-primitives", "reth-provider", diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index aa6da6ea29138..0c8e079f4da78 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -13,10 +13,10 @@ workspace = true [dependencies] # reth -reth-interfaces.workspace = true reth-primitives.workspace = true reth-net-common.workspace = true reth-network-api.workspace = true +reth-network-p2p.workspace = true reth-discv4.workspace = true reth-discv5.workspace = true reth-dns-discovery.workspace = true @@ -71,12 +71,12 @@ smallvec.workspace = true [dev-dependencies] # reth reth-discv4 = { workspace = true, features = ["test-utils"] } -reth-interfaces = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true, features = ["test-utils"] } # we need to enable the test-utils feature in our own crate to use utils in # integration tests reth-network = { workspace = true, features = ["test-utils"] } +reth-network-p2p = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index 10821dd804cfe..4f54cbdba7be7 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -10,7 +10,7 @@ use reth_eth_wire::{ BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, GetNodeData, GetReceipts, NodeData, Receipts, }; -use reth_interfaces::p2p::error::RequestResult; +use reth_network_p2p::error::RequestResult; use reth_network_types::PeerId; use reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection}; use reth_provider::{BlockReader, HeaderProvider, ReceiptProvider}; diff --git 
a/crates/net/network/src/fetch/client.rs b/crates/net/network/src/fetch/client.rs index 63e22abe00fa0..a8a4b9f07c1bc 100644 --- a/crates/net/network/src/fetch/client.rs +++ b/crates/net/network/src/fetch/client.rs @@ -3,14 +3,14 @@ use crate::{fetch::DownloadRequest, flattened_response::FlattenedResponse, peers::PeersHandle}; use futures::{future, future::Either}; -use reth_interfaces::p2p::{ +use reth_network_api::ReputationChangeKind; +use reth_network_p2p::{ bodies::client::{BodiesClient, BodiesFut}, download::DownloadClient, error::{PeerRequestResult, RequestError}, headers::client::{HeadersClient, HeadersRequest}, priority::Priority, }; -use reth_network_api::ReputationChangeKind; use reth_network_types::PeerId; use reth_primitives::{Header, B256}; use std::sync::{ diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index 9ad50edb0a276..3a5ebf14b5130 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -3,12 +3,12 @@ use crate::{message::BlockRequest, peers::PeersHandle}; use futures::StreamExt; use reth_eth_wire::{GetBlockBodies, GetBlockHeaders}; -use reth_interfaces::p2p::{ +use reth_network_api::ReputationChangeKind; +use reth_network_p2p::{ error::{EthResponseValidator, PeerRequestResult, RequestError, RequestResult}, headers::client::HeadersRequest, priority::Priority, }; -use reth_network_api::ReputationChangeKind; use reth_network_types::PeerId; use reth_primitives::{BlockBody, Header, B256}; use std::{ diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index 2086fd60ea39b..861fb304e736a 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -10,7 +10,7 @@ use reth_eth_wire::{ NewBlockHashes, NewPooledTransactionHashes, NodeData, PooledTransactions, Receipts, SharedTransactions, Transactions, }; -use reth_interfaces::p2p::error::{RequestError, RequestResult}; +use reth_network_p2p::error::{RequestError, 
RequestResult}; use reth_network_types::PeerId; use reth_primitives::{ BlockBody, Bytes, Header, PooledTransactionsElement, ReceiptWithBloom, B256, diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 8d9b277f41918..b3ec4c7611210 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -7,12 +7,12 @@ use enr::Enr; use parking_lot::Mutex; use reth_discv4::Discv4; use reth_eth_wire::{DisconnectReason, NewBlock, NewPooledTransactionHashes, SharedTransactions}; -use reth_interfaces::sync::{NetworkSyncUpdater, SyncState, SyncStateProvider}; use reth_net_common::bandwidth_meter::BandwidthMeter; use reth_network_api::{ NetworkError, NetworkInfo, PeerInfo, PeerKind, Peers, PeersInfo, Reputation, ReputationChangeKind, }; +use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState, SyncStateProvider}; use reth_network_types::PeerId; use reth_primitives::{Head, NodeRecord, TransactionSigned, B256}; use reth_rpc_types::NetworkStatus; diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 32bfb72acb310..ce726a78a24e7 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -18,8 +18,8 @@ use reth_eth_wire::{ message::{EthBroadcastMessage, RequestPair}, DisconnectP2P, DisconnectReason, EthMessage, }; -use reth_interfaces::p2p::error::RequestError; use reth_metrics::common::mpsc::MeteredPollSender; +use reth_network_p2p::error::RequestError; use reth_network_types::PeerId; use std::{ collections::VecDeque, diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index b0824e0f348a5..e5b477152f959 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -536,7 +536,7 @@ mod tests { capability::{Capabilities, Capability}, BlockBodies, EthVersion, }; - use reth_interfaces::p2p::{bodies::client::BodiesClient, error::RequestError}; + use 
reth_network_p2p::{bodies::client::BodiesClient, error::RequestError}; use reth_network_types::PeerId; use reth_primitives::{BlockBody, Header, B256}; use reth_provider::test_utils::NoopProvider; diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index c5272ecc3aaae..e10cee9bd1185 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -40,7 +40,7 @@ use reth_eth_wire::{ DedupPayload, EthVersion, GetPooledTransactions, HandleMempoolData, HandleVersionedMempoolData, PartiallyValidData, RequestTxHashes, ValidAnnouncementData, }; -use reth_interfaces::p2p::error::{RequestError, RequestResult}; +use reth_network_p2p::error::{RequestError, RequestResult}; use reth_network_types::PeerId; use reth_primitives::{PooledTransactionsElement, TxHash}; use schnellru::ByLength; diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index b6b2328e4f8be..7a0fe600da547 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -20,12 +20,12 @@ use reth_eth_wire::{ NewPooledTransactionHashes, NewPooledTransactionHashes66, NewPooledTransactionHashes68, PooledTransactions, RequestTxHashes, Transactions, }; -use reth_interfaces::{ - p2p::error::{RequestError, RequestResult}, - sync::SyncStateProvider, -}; use reth_metrics::common::mpsc::UnboundedMeteredReceiver; use reth_network_api::{Peers, ReputationChangeKind}; +use reth_network_p2p::{ + error::{RequestError, RequestResult}, + sync::SyncStateProvider, +}; use reth_network_types::PeerId; use reth_primitives::{ FromRecoveredPooledTransaction, PooledTransactionsElement, TransactionSigned, TxHash, B256, @@ -1619,8 +1619,11 @@ mod tests { use alloy_rlp::Decodable; use constants::tx_fetcher::DEFAULT_MAX_COUNT_FALLBACK_PEERS; use futures::FutureExt; - use reth_interfaces::sync::{NetworkSyncUpdater, SyncState}; use 
reth_network_api::NetworkInfo; + use reth_network_p2p::{ + error::{RequestError, RequestResult}, + sync::{NetworkSyncUpdater, SyncState}, + }; use reth_primitives::hex; use reth_provider::test_utils::NoopProvider; use reth_transaction_pool::test_utils::{testing_pool, MockTransaction}; diff --git a/crates/net/network/tests/it/big_pooled_txs_req.rs b/crates/net/network/tests/it/big_pooled_txs_req.rs index 5b3a2a0d0049d..4ddc0cdd36eee 100644 --- a/crates/net/network/tests/it/big_pooled_txs_req.rs +++ b/crates/net/network/tests/it/big_pooled_txs_req.rs @@ -1,10 +1,10 @@ use reth_eth_wire::{GetPooledTransactions, PooledTransactions}; -use reth_interfaces::sync::{NetworkSyncUpdater, SyncState}; use reth_network::{ test_utils::{NetworkEventStream, Testnet}, NetworkEvents, PeerRequest, }; use reth_network_api::{NetworkInfo, Peers}; +use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState}; use reth_primitives::{Signature, TransactionSigned, B256}; use reth_provider::test_utils::MockEthProvider; use reth_transaction_pool::{ diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index 7b9c785ebdbe2..dc51174334834 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -5,16 +5,16 @@ use alloy_provider::{ext::AdminApi, ProviderBuilder}; use futures::StreamExt; use reth_discv4::Discv4Config; use reth_eth_wire::DisconnectReason; -use reth_interfaces::{ - p2p::headers::client::{HeadersClient, HeadersRequest}, - sync::{NetworkSyncUpdater, SyncState}, -}; use reth_net_common::ban_list::BanList; use reth_network::{ test_utils::{enr_to_peer_id, NetworkEventStream, PeerConfig, Testnet, GETH_TIMEOUT}, NetworkConfigBuilder, NetworkEvent, NetworkEvents, NetworkManager, PeersConfig, }; use reth_network_api::{NetworkInfo, Peers, PeersInfo}; +use reth_network_p2p::{ + headers::client::{HeadersClient, HeadersRequest}, + sync::{NetworkSyncUpdater, SyncState}, +}; use reth_primitives::{mainnet_nodes, 
HeadersDirection, NodeRecord}; use reth_provider::test_utils::NoopProvider; use reth_transaction_pool::test_utils::testing_pool; diff --git a/crates/net/network/tests/it/requests.rs b/crates/net/network/tests/it/requests.rs index 45c86cb647e8e..555acd08b2485 100644 --- a/crates/net/network/tests/it/requests.rs +++ b/crates/net/network/tests/it/requests.rs @@ -2,15 +2,15 @@ //! Tests for eth related requests use rand::Rng; -use reth_interfaces::p2p::{ - bodies::client::BodiesClient, - headers::client::{HeadersClient, HeadersRequest}, -}; use reth_network::{ test_utils::{NetworkEventStream, Testnet}, NetworkEvents, }; use reth_network_api::{NetworkInfo, Peers}; +use reth_network_p2p::{ + bodies::client::BodiesClient, + headers::client::{HeadersClient, HeadersRequest}, +}; use reth_primitives::{ Block, BlockBody, Bytes, Header, HeadersDirection, Signature, Transaction, TransactionSigned, TxEip2930, TxKind, U256, From 3b7089782ff0c5039bdead28dddf503a9aa6bd27 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 May 2024 18:33:47 +0200 Subject: [PATCH 662/700] chore: rm reth-interfaces from downloaders (#8436) --- Cargo.lock | 3 ++- crates/net/downloaders/Cargo.toml | 17 +++++++++++------ crates/net/downloaders/src/bodies/bodies.rs | 4 ++-- crates/net/downloaders/src/bodies/noop.rs | 2 +- crates/net/downloaders/src/bodies/queue.rs | 2 +- crates/net/downloaders/src/bodies/request.rs | 4 ++-- crates/net/downloaders/src/bodies/task.rs | 6 +++--- crates/net/downloaders/src/bodies/test_utils.rs | 10 ++++------ crates/net/downloaders/src/file_client.rs | 4 ++-- crates/net/downloaders/src/headers/noop.rs | 2 +- .../downloaders/src/headers/reverse_headers.rs | 5 ++--- crates/net/downloaders/src/headers/task.rs | 6 +++--- crates/net/downloaders/src/metrics.rs | 6 +++--- .../downloaders/src/test_utils/bodies_client.rs | 2 +- crates/net/downloaders/src/test_utils/mod.rs | 2 +- 15 files changed, 39 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 
40c83863b3ad6..32f4cadd7d94e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6797,12 +6797,13 @@ dependencies = [ "reth-config", "reth-consensus", "reth-db", - "reth-interfaces", "reth-metrics", + "reth-network-p2p", "reth-network-types", "reth-primitives", "reth-provider", "reth-tasks", + "reth-testing-utils", "reth-tracing", "tempfile", "thiserror", diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 353956d3bd9b9..069fc052eca58 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -13,14 +13,21 @@ workspace = true [dependencies] # reth -reth-interfaces.workspace = true reth-primitives.workspace = true +reth-network-p2p.workspace = true reth-tasks.workspace = true reth-provider.workspace = true reth-config.workspace = true reth-consensus.workspace = true reth-network-types.workspace = true +# optional deps for the test-utils feature +reth-db = { workspace = true, optional = true } +reth-testing-utils = { workspace = true, optional = true } + +# eth +alloy-rlp.workspace = true + # async futures.workspace = true futures-util.workspace = true @@ -38,17 +45,15 @@ tracing.workspace = true rayon.workspace = true thiserror.workspace = true -# optional deps for the test-utils feature -reth-db = { workspace = true, optional = true } -alloy-rlp.workspace = true tempfile = { workspace = true, optional = true } itertools.workspace = true [dev-dependencies] reth-db = { workspace = true, features = ["test-utils"] } reth-consensus = { workspace = true, features = ["test-utils"] } -reth-interfaces = { workspace = true, features = ["test-utils"] } +reth-network-p2p = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } +reth-testing-utils.workspace = true reth-tracing.workspace = true assert_matches.workspace = true @@ -60,5 +65,5 @@ rand.workspace = true tempfile.workspace = true [features] -test-utils = ["dep:tempfile", "reth-db/test-utils", 
"reth-consensus/test-utils", "reth-interfaces/test-utils"] +test-utils = ["dep:tempfile", "reth-db/test-utils", "reth-consensus/test-utils", "reth-network-p2p/test-utils", "reth-testing-utils"] diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 33139ab501d55..ed5cdf64a5e31 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -4,7 +4,7 @@ use futures::Stream; use futures_util::StreamExt; use reth_config::BodiesConfig; use reth_consensus::Consensus; -use reth_interfaces::p2p::{ +use reth_network_p2p::{ bodies::{ client::BodiesClient, downloader::{BodyDownloader, BodyDownloaderResult}, @@ -606,9 +606,9 @@ mod tests { use assert_matches::assert_matches; use reth_consensus::test_utils::TestConsensus; use reth_db::test_utils::{create_test_rw_db, create_test_static_files_dir}; - use reth_interfaces::test_utils::{generators, generators::random_block_range}; use reth_primitives::{BlockBody, B256, MAINNET}; use reth_provider::ProviderFactory; + use reth_testing_utils::{generators, generators::random_block_range}; use std::collections::HashMap; // Check that the blocks are emitted in order of block number, not in order of diff --git a/crates/net/downloaders/src/bodies/noop.rs b/crates/net/downloaders/src/bodies/noop.rs index 5885a17c11d2f..2ad60d481719a 100644 --- a/crates/net/downloaders/src/bodies/noop.rs +++ b/crates/net/downloaders/src/bodies/noop.rs @@ -1,5 +1,5 @@ use futures::Stream; -use reth_interfaces::p2p::{ +use reth_network_p2p::{ bodies::{downloader::BodyDownloader, response::BlockResponse}, error::{DownloadError, DownloadResult}, }; diff --git a/crates/net/downloaders/src/bodies/queue.rs b/crates/net/downloaders/src/bodies/queue.rs index 072e059a481c0..47f2a19605fec 100644 --- a/crates/net/downloaders/src/bodies/queue.rs +++ b/crates/net/downloaders/src/bodies/queue.rs @@ -3,7 +3,7 @@ use crate::metrics::BodyDownloaderMetrics; use 
futures::{stream::FuturesUnordered, Stream}; use futures_util::StreamExt; use reth_consensus::Consensus; -use reth_interfaces::p2p::{ +use reth_network_p2p::{ bodies::{client::BodiesClient, response::BlockResponse}, error::DownloadResult, }; diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index 593c738e0bba1..905c199feac54 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -1,7 +1,7 @@ use crate::metrics::{BodyDownloaderMetrics, ResponseMetrics}; use futures::{Future, FutureExt}; use reth_consensus::Consensus; -use reth_interfaces::p2p::{ +use reth_network_p2p::{ bodies::{client::BodiesClient, response::BlockResponse}, error::{DownloadError, DownloadResult}, priority::Priority, @@ -255,7 +255,7 @@ mod tests { test_utils::{generate_bodies, TestBodiesClient}, }; use reth_consensus::test_utils::TestConsensus; - use reth_interfaces::test_utils::{generators, generators::random_header_range}; + use reth_testing_utils::{generators, generators::random_header_range}; /// Check if future returns empty bodies without dispatching any requests. 
#[tokio::test] diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index f8815bcb05791..9dfb747a66040 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -1,7 +1,7 @@ use futures::Stream; use futures_util::{FutureExt, StreamExt}; use pin_project::pin_project; -use reth_interfaces::p2p::{ +use reth_network_p2p::{ bodies::downloader::{BodyDownloader, BodyDownloaderResult}, error::DownloadResult, }; @@ -44,7 +44,7 @@ impl TaskDownloader { /// ``` /// use reth_consensus::Consensus; /// use reth_downloaders::bodies::{bodies::BodiesDownloaderBuilder, task::TaskDownloader}; - /// use reth_interfaces::p2p::bodies::client::BodiesClient; + /// use reth_network_p2p::bodies::client::BodiesClient; /// use reth_provider::HeaderProvider; /// use std::sync::Arc; /// @@ -171,7 +171,7 @@ mod tests { }; use assert_matches::assert_matches; use reth_consensus::test_utils::TestConsensus; - use reth_interfaces::p2p::error::DownloadError; + use reth_network_p2p::error::DownloadError; use reth_provider::test_utils::create_test_provider_factory; use std::sync::Arc; diff --git a/crates/net/downloaders/src/bodies/test_utils.rs b/crates/net/downloaders/src/bodies/test_utils.rs index dadd4b3bdd7bf..591e312edcf19 100644 --- a/crates/net/downloaders/src/bodies/test_utils.rs +++ b/crates/net/downloaders/src/bodies/test_utils.rs @@ -3,7 +3,7 @@ #![allow(dead_code)] use reth_db::{database::Database, tables, transaction::DbTxMut, DatabaseEnv}; -use reth_interfaces::{db, p2p::bodies::response::BlockResponse}; +use reth_network_p2p::bodies::response::BlockResponse; use reth_primitives::{Block, BlockBody, SealedBlock, SealedHeader, B256}; use std::collections::HashMap; @@ -45,13 +45,11 @@ pub(crate) fn create_raw_bodies<'a>( #[inline] pub(crate) fn insert_headers(db: &DatabaseEnv, headers: &[SealedHeader]) { - db.update(|tx| -> Result<(), db::DatabaseError> { + db.update(|tx| { for header in headers { - 
tx.put::(header.number, header.hash())?; - tx.put::(header.number, header.clone().unseal())?; + tx.put::(header.number, header.hash()).unwrap(); + tx.put::(header.number, header.clone().unseal()).unwrap(); } - Ok(()) }) .expect("failed to commit") - .expect("failed to insert headers"); } diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 6dc07c0da91a3..9411099e8557e 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -1,7 +1,7 @@ use super::file_codec::BlockFileCodec; use futures::Future; use itertools::Either; -use reth_interfaces::p2p::{ +use reth_network_p2p::{ bodies::client::{BodiesClient, BodiesFut}, download::DownloadClient, error::RequestError, @@ -483,7 +483,7 @@ mod tests { use futures_util::stream::StreamExt; use rand::Rng; use reth_consensus::test_utils::TestConsensus; - use reth_interfaces::p2p::{ + use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::{HeaderDownloader, SyncTarget}, }; diff --git a/crates/net/downloaders/src/headers/noop.rs b/crates/net/downloaders/src/headers/noop.rs index 8127cc2324304..093d87f8fef41 100644 --- a/crates/net/downloaders/src/headers/noop.rs +++ b/crates/net/downloaders/src/headers/noop.rs @@ -1,5 +1,5 @@ use futures::Stream; -use reth_interfaces::p2p::headers::{ +use reth_network_p2p::headers::{ downloader::{HeaderDownloader, SyncTarget}, error::HeadersDownloaderError, }; diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index a5cdb145b01ae..2889eb84aa07c 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -7,7 +7,7 @@ use futures_util::{stream::FuturesUnordered, StreamExt}; use rayon::prelude::*; use reth_config::config::HeadersConfig; use reth_consensus::Consensus; -use reth_interfaces::p2p::{ +use reth_network_p2p::{ 
error::{DownloadError, DownloadResult, PeerRequestResult}, headers::{ client::{HeadersClient, HeadersRequest}, @@ -1220,11 +1220,10 @@ fn calc_next_request( #[cfg(test)] mod tests { use super::*; - use crate::headers::test_utils::child_header; use assert_matches::assert_matches; use reth_consensus::test_utils::TestConsensus; - use reth_interfaces::test_utils::TestHeadersClient; + use reth_network_p2p::test_utils::TestHeadersClient; /// Tests that `replace_number` works the same way as Option::replace #[test] diff --git a/crates/net/downloaders/src/headers/task.rs b/crates/net/downloaders/src/headers/task.rs index aa079dad26235..1d99c3b75f99d 100644 --- a/crates/net/downloaders/src/headers/task.rs +++ b/crates/net/downloaders/src/headers/task.rs @@ -1,7 +1,7 @@ use futures::{FutureExt, Stream}; use futures_util::StreamExt; use pin_project::pin_project; -use reth_interfaces::p2p::headers::{ +use reth_network_p2p::headers::{ downloader::{HeaderDownloader, SyncTarget}, error::HeadersDownloaderResult, }; @@ -45,7 +45,7 @@ impl TaskDownloader { /// # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloader; /// # use reth_downloaders::headers::task::TaskDownloader; /// # use reth_consensus::Consensus; - /// # use reth_interfaces::p2p::headers::client::HeadersClient; + /// # use reth_network_p2p::headers::client::HeadersClient; /// # fn t(consensus:Arc, client: Arc) { /// let downloader = ReverseHeadersDownloader::::builder().build( /// client, @@ -184,7 +184,7 @@ mod tests { reverse_headers::ReverseHeadersDownloaderBuilder, test_utils::child_header, }; use reth_consensus::test_utils::TestConsensus; - use reth_interfaces::test_utils::TestHeadersClient; + use reth_network_p2p::test_utils::TestHeadersClient; use std::sync::Arc; #[tokio::test(flavor = "multi_thread")] diff --git a/crates/net/downloaders/src/metrics.rs b/crates/net/downloaders/src/metrics.rs index 629243ee66b45..5f705ea48e631 100644 --- a/crates/net/downloaders/src/metrics.rs +++ 
b/crates/net/downloaders/src/metrics.rs @@ -1,15 +1,15 @@ -use reth_interfaces::p2p::error::DownloadError; use reth_metrics::{ metrics::{Counter, Gauge}, Metrics, }; +use reth_network_p2p::error::DownloadError; /// Common body downloader metrics. /// /// These metrics will be initialized with the `downloaders.bodies` scope. /// ``` /// use reth_downloaders::metrics::BodyDownloaderMetrics; -/// use reth_interfaces::p2p::error::DownloadError; +/// use reth_network_p2p::error::DownloadError; /// /// // Initialize metrics. /// let metrics = BodyDownloaderMetrics::default(); @@ -80,7 +80,7 @@ pub struct ResponseMetrics { /// These metrics will be initialized with the `downloaders.headers` scope. /// ``` /// use reth_downloaders::metrics::HeaderDownloaderMetrics; -/// use reth_interfaces::p2p::error::DownloadError; +/// use reth_network_p2p::error::DownloadError; /// /// // Initialize metrics. /// let metrics = HeaderDownloaderMetrics::default(); diff --git a/crates/net/downloaders/src/test_utils/bodies_client.rs b/crates/net/downloaders/src/test_utils/bodies_client.rs index a7387fa88f22d..9200b4f945ce2 100644 --- a/crates/net/downloaders/src/test_utils/bodies_client.rs +++ b/crates/net/downloaders/src/test_utils/bodies_client.rs @@ -1,4 +1,4 @@ -use reth_interfaces::p2p::{ +use reth_network_p2p::{ bodies::client::{BodiesClient, BodiesFut}, download::DownloadClient, priority::Priority, diff --git a/crates/net/downloaders/src/test_utils/mod.rs b/crates/net/downloaders/src/test_utils/mod.rs index 97e30a02dd8b9..fdbcfea23013d 100644 --- a/crates/net/downloaders/src/test_utils/mod.rs +++ b/crates/net/downloaders/src/test_utils/mod.rs @@ -4,8 +4,8 @@ use crate::{bodies::test_utils::create_raw_bodies, file_codec::BlockFileCodec}; use futures::SinkExt; -use reth_interfaces::test_utils::{generators, generators::random_block_range}; use reth_primitives::{BlockBody, SealedHeader, B256}; +use reth_testing_utils::{generators, generators::random_block_range}; use 
std::{collections::HashMap, io::SeekFrom, ops::RangeInclusive}; use tokio::{fs::File, io::AsyncSeekExt}; use tokio_util::codec::FramedWrite; From 03c3d5182f6dff6b902e55fbf1ec6a681ec3d8c8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 May 2024 18:35:32 +0200 Subject: [PATCH 663/700] chore: rm reth-interfaces from node-builder (#8438) --- Cargo.lock | 2 +- crates/node/builder/Cargo.toml | 2 +- crates/node/builder/src/launch/common.rs | 8 +++----- crates/node/builder/src/setup.rs | 2 +- 4 files changed, 6 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 32f4cadd7d94e..9571e1083ef93 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7378,8 +7378,8 @@ dependencies = [ "reth-downloaders", "reth-evm", "reth-exex", - "reth-interfaces", "reth-network", + "reth-network-p2p", "reth-node-api", "reth-node-core", "reth-node-events", diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 55b0094a63163..b3297f4859a01 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -32,7 +32,7 @@ reth-payload-builder.workspace = true reth-transaction-pool.workspace = true reth-tasks.workspace = true reth-tracing.workspace = true -reth-interfaces.workspace = true +reth-network-p2p.workspace = true reth-static-file.workspace = true reth-prune.workspace = true reth-stages.workspace = true diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index b6b0a03c83f71..5b66362878dd3 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -1,16 +1,12 @@ //! Helper types that can be used by launchers. 
-use std::{cmp::max, sync::Arc, thread::available_parallelism}; - use eyre::Context; use rayon::ThreadPoolBuilder; -use tokio::sync::mpsc::Receiver; - use reth_auto_seal_consensus::MiningMode; use reth_config::{config::EtlConfig, PruneConfig}; use reth_db::{database::Database, database_metrics::DatabaseMetrics}; use reth_db_common::init::{init_genesis, InitDatabaseError}; -use reth_interfaces::p2p::headers::client::HeadersClient; +use reth_network_p2p::headers::client::HeadersClient; use reth_node_core::{ cli::config::RethRpcConfig, dirs::{ChainPath, DataDirPath}, @@ -23,6 +19,8 @@ use reth_rpc_layer::JwtSecret; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{error, info, warn}; +use std::{cmp::max, sync::Arc, thread::available_parallelism}; +use tokio::sync::mpsc::Receiver; /// Reusable setup for launching a node. /// diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 3314891fe9695..cce709e84d752 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -9,7 +9,7 @@ use reth_downloaders::{ }; use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; -use reth_interfaces::p2p::{ +use reth_network_p2p::{ bodies::{client::BodiesClient, downloader::BodyDownloader}, headers::{client::HeadersClient, downloader::HeaderDownloader}, }; From d78c5b71fd4a2cf26d2a20a4f595865597e2bd02 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 May 2024 18:47:58 +0200 Subject: [PATCH 664/700] chore: extract retherror to reth-errors (#8439) --- Cargo.lock | 16 ++++++++++++--- Cargo.toml | 2 ++ crates/errors/Cargo.toml | 21 +++++++++++++++++++ crates/{interfaces => errors}/src/error.rs | 2 +- crates/errors/src/lib.rs | 24 ++++++++++++++++++++++ crates/interfaces/Cargo.toml | 8 ++------ crates/interfaces/src/lib.rs | 3 +-- 7 files changed, 64 insertions(+), 12 deletions(-) create mode 100644 crates/errors/Cargo.toml rename crates/{interfaces 
=> errors}/src/error.rs (97%) create mode 100644 crates/errors/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 9571e1083ef93..46a14684c08ff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6887,6 +6887,18 @@ dependencies = [ "thiserror", ] +[[package]] +name = "reth-errors" +version = "0.2.0-beta.7" +dependencies = [ + "reth-blockchain-tree-api", + "reth-consensus", + "reth-execution-errors", + "reth-fs-util", + "reth-storage-errors", + "thiserror", +] + [[package]] name = "reth-eth-wire" version = "0.2.0-beta.7" @@ -7107,13 +7119,11 @@ name = "reth-interfaces" version = "0.2.0-beta.7" dependencies = [ "reth-blockchain-tree-api", - "reth-consensus", + "reth-errors", "reth-execution-errors", - "reth-fs-util", "reth-network-p2p", "reth-storage-errors", "reth-testing-utils", - "thiserror", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 4b34cbb5cb8a7..4726b63e07390 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,6 +11,7 @@ members = [ "crates/consensus/consensus/", "crates/e2e-test-utils/", "crates/engine-primitives/", + "crates/errors/", "crates/ethereum-forks/", "crates/ethereum/consensus/", "crates/ethereum/engine-primitives/", @@ -236,6 +237,7 @@ reth-downloaders = { path = "crates/net/downloaders" } reth-e2e-test-utils = { path = "crates/e2e-test-utils" } reth-ecies = { path = "crates/net/ecies" } reth-engine-primitives = { path = "crates/engine-primitives" } +reth-errors = { path = "crates/errors" } reth-eth-wire = { path = "crates/net/eth-wire" } reth-eth-wire-types = { path = "crates/net/eth-wire-types" } reth-ethereum-consensus = { path = "crates/ethereum/consensus" } diff --git a/crates/errors/Cargo.toml b/crates/errors/Cargo.toml new file mode 100644 index 0000000000000..bb56a8bace560 --- /dev/null +++ b/crates/errors/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "reth-errors" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + 
+[lints] +workspace = true + +[dependencies] +reth-blockchain-tree-api.workspace = true +reth-consensus.workspace = true +reth-execution-errors.workspace = true +reth-fs-util.workspace = true +reth-storage-errors.workspace = true + +# misc +thiserror.workspace = true diff --git a/crates/interfaces/src/error.rs b/crates/errors/src/error.rs similarity index 97% rename from crates/interfaces/src/error.rs rename to crates/errors/src/error.rs index ddb4e151f4661..4017be351aa9d 100644 --- a/crates/interfaces/src/error.rs +++ b/crates/errors/src/error.rs @@ -1,4 +1,4 @@ -use crate::blockchain_tree::error::{BlockchainTreeError, CanonicalError}; +use reth_blockchain_tree_api::error::{BlockchainTreeError, CanonicalError}; use reth_consensus::ConsensusError; use reth_execution_errors::BlockExecutionError; use reth_fs_util::FsPathError; diff --git a/crates/errors/src/lib.rs b/crates/errors/src/lib.rs new file mode 100644 index 0000000000000..4b8b96fba4956 --- /dev/null +++ b/crates/errors/src/lib.rs @@ -0,0 +1,24 @@ +//! High level error types for the reth in general. +//! +//! ## Feature Flags +//! +//! 
- `test-utils`: Export utilities for testing + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +mod error; +pub use error::{RethError, RethResult}; + +pub use reth_blockchain_tree_api::error::{BlockchainTreeError, CanonicalError}; +pub use reth_consensus::ConsensusError; +pub use reth_execution_errors::BlockExecutionError; +pub use reth_storage_errors::{ + db::DatabaseError, + provider::{ProviderError, ProviderResult}, +}; diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index 1c07af335ccd6..b12b84a71b6e3 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -12,17 +12,13 @@ workspace = true [dependencies] reth-blockchain-tree-api.workspace = true -reth-consensus.workspace = true reth-execution-errors.workspace = true -reth-fs-util.workspace = true reth-network-p2p.workspace = true reth-storage-errors.workspace = true +reth-errors.workspace = true reth-testing-utils = { workspace = true, optional = true } -# misc -thiserror.workspace = true - [features] -test-utils = ["reth-consensus/test-utils", "reth-network-p2p/test-utils", "reth-testing-utils"] +test-utils = ["reth-network-p2p/test-utils", "reth-testing-utils"] clap = ["reth-storage-errors/clap"] \ No newline at end of file diff --git a/crates/interfaces/src/lib.rs b/crates/interfaces/src/lib.rs index 0a649e557a26e..f056e3c92cb83 100644 --- a/crates/interfaces/src/lib.rs +++ b/crates/interfaces/src/lib.rs @@ -19,8 +19,7 @@ pub use reth_storage_errors::{db, provider}; pub use reth_execution_errors as executor; /// Possible errors when interacting with the chain. 
-mod error; -pub use error::{RethError, RethResult}; +pub use reth_errors::{RethError, RethResult}; /// P2P traits. pub use reth_network_p2p as p2p; From 3e7f1377ed4480593db8a169f35643e626e45386 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 28 May 2024 18:53:43 +0200 Subject: [PATCH 665/700] fix(op): filter out receipts for dup txns (#8400) --- Cargo.lock | 6 + Cargo.toml | 2 + bin/reth/Cargo.toml | 1 + bin/reth/src/cli/mod.rs | 18 +- bin/reth/src/commands/import.rs | 2 +- bin/reth/src/commands/import_op.rs | 8 +- bin/reth/src/commands/import_receipts.rs | 168 ------------- bin/reth/src/commands/import_receipts_op.rs | 230 ++++++++++++++++++ bin/reth/src/commands/mod.rs | 2 +- book/SUMMARY.md | 1 - book/cli/SUMMARY.md | 1 - book/cli/op-reth.md | 96 ++++++++ book/cli/reth.md | 27 +- book/cli/reth/import-op.md | 134 ++++++++++ ...port-receipts.md => import-receipts-op.md} | 35 +-- book/cli/reth/import.md | 2 +- book/run/sync-op-mainnet.md | 10 +- crates/consensus/common/Cargo.toml | 1 + crates/consensus/common/src/validation.rs | 2 +- .../downloaders/src/receipt_file_client.rs | 13 +- crates/optimism/primitives/Cargo.toml | 12 + .../primitives/src/bedrock_import.rs} | 2 +- crates/optimism/primitives/src/lib.rs | 10 + crates/primitives/src/lib.rs | 1 - 24 files changed, 552 insertions(+), 232 deletions(-) delete mode 100644 bin/reth/src/commands/import_receipts.rs create mode 100644 bin/reth/src/commands/import_receipts_op.rs create mode 100644 book/cli/op-reth.md create mode 100644 book/cli/reth/import-op.md rename book/cli/reth/{import-receipts.md => import-receipts-op.md} (78%) create mode 100644 crates/optimism/primitives/Cargo.toml rename crates/{primitives/src/op_mainnet.rs => optimism/primitives/src/bedrock_import.rs} (94%) create mode 100644 crates/optimism/primitives/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 46a14684c08ff..bd3da5f3fad82 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6423,6 +6423,7 @@ dependencies = [ "reth-node-ethereum", 
"reth-node-events", "reth-node-optimism", + "reth-optimism-primitives", "reth-payload-builder", "reth-payload-validator", "reth-primitives", @@ -6645,6 +6646,7 @@ dependencies = [ "mockall", "reth-consensus", "reth-interfaces", + "reth-optimism-primitives", "reth-primitives", "reth-provider", ] @@ -7593,6 +7595,10 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-optimism-primitives" +version = "0.2.0-beta.7" + [[package]] name = "reth-payload-builder" version = "0.2.0-beta.7" diff --git a/Cargo.toml b/Cargo.toml index 4726b63e07390..946682343d177 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,6 +46,7 @@ members = [ "crates/optimism/evm/", "crates/optimism/node/", "crates/optimism/payload/", + "crates/optimism/primitives/", "crates/payload/basic/", "crates/payload/builder/", "crates/payload/ethereum/", @@ -273,6 +274,7 @@ reth-node-events = { path = "crates/node/events" } reth-node-optimism = { path = "crates/optimism/node" } reth-optimism-consensus = { path = "crates/optimism/consensus" } reth-optimism-payload-builder = { path = "crates/optimism/payload" } +reth-optimism-primitives = { path = "crates/optimism/primitives" } reth-payload-builder = { path = "crates/payload/builder" } reth-payload-validator = { path = "crates/payload/validator" } reth-primitives = { path = "crates/primitives" } diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index ab1e9927adade..35c5c814aa55e 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -58,6 +58,7 @@ reth-db-common.workspace = true reth-node-builder.workspace = true reth-node-events.workspace = true reth-consensus.workspace = true +reth-optimism-primitives.workspace = true # crypto alloy-rlp.workspace = true diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index deece5b62e56f..4bf413acd9013 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -2,13 +2,15 @@ #[cfg(feature = "optimism")] use crate::commands::import_op; +#[cfg(feature = "optimism")] +use 
crate::commands::import_receipts_op; use crate::{ args::{ utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, LogArgs, }, commands::{ - config_cmd, db, debug_cmd, dump_genesis, import, import_receipts, init_cmd, init_state, + config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, init_state, node::{self, NoArgs}, p2p, recover, stage, test_vectors, }, @@ -150,11 +152,12 @@ impl Cli { Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::InitState(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Import(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::ImportReceipts(command) => { - runner.run_blocking_until_ctrl_c(command.execute()) - } #[cfg(feature = "optimism")] Commands::ImportOp(command) => runner.run_blocking_until_ctrl_c(command.execute()), + #[cfg(feature = "optimism")] + Commands::ImportReceiptsOp(command) => { + runner.run_blocking_until_ctrl_c(command.execute()) + } Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Stage(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), @@ -191,13 +194,14 @@ pub enum Commands { /// This syncs RLP encoded blocks from a file. #[command(name = "import")] Import(import::ImportCommand), - /// This imports RLP encoded receipts from a file. - #[command(name = "import-receipts")] - ImportReceipts(import_receipts::ImportReceiptsCommand), /// This syncs RLP encoded OP blocks below Bedrock from a file, without executing. #[cfg(feature = "optimism")] #[command(name = "import-op")] ImportOp(import_op::ImportOpCommand), + /// This imports RLP encoded receipts from a file. + #[cfg(feature = "optimism")] + #[command(name = "import-receipts-op")] + ImportReceiptsOp(import_receipts_op::ImportReceiptsOpCommand), /// Dumps genesis block JSON configuration to stdout. 
DumpGenesis(dump_genesis::DumpGenesisCommand), /// Database debugging utilities diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 70a2c339cada1..71268fa8e1bbb 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -71,7 +71,7 @@ pub struct ImportCommand { #[arg(long, verbatim_doc_comment)] no_state: bool, - /// Chunk byte length. + /// Chunk byte length to read from file. #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] chunk_len: Option, diff --git a/bin/reth/src/commands/import_op.rs b/bin/reth/src/commands/import_op.rs index a85fc4e3dcd39..a1b23bda82e8b 100644 --- a/bin/reth/src/commands/import_op.rs +++ b/bin/reth/src/commands/import_op.rs @@ -18,7 +18,8 @@ use reth_db_common::init::init_genesis; use reth_downloaders::file_client::{ ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE, }; -use reth_primitives::{op_mainnet::is_dup_tx, stage::StageId, PruneModes}; +use reth_optimism_primitives::bedrock_import::is_dup_tx; +use reth_primitives::{stage::StageId, PruneModes}; use reth_provider::{ProviderFactory, StageCheckpointReader, StaticFileProviderFactory}; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; @@ -41,7 +42,7 @@ pub struct ImportOpCommand { #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)] datadir: MaybePlatformPath, - /// Chunk byte length. + /// Chunk byte length to read from file. 
#[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] chunk_len: Option, @@ -187,6 +188,9 @@ impl ImportOpCommand { info!(target: "reth::cli", total_imported_blocks, total_imported_txns, + total_decoded_blocks, + total_decoded_txns, + total_filtered_out_dup_txns, "Chain file imported" ); diff --git a/bin/reth/src/commands/import_receipts.rs b/bin/reth/src/commands/import_receipts.rs deleted file mode 100644 index 018ff132b94b4..0000000000000 --- a/bin/reth/src/commands/import_receipts.rs +++ /dev/null @@ -1,168 +0,0 @@ -//! Command that imports receipts from a file. - -use crate::{ - args::{ - utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, - DatabaseArgs, - }, - dirs::{DataDirPath, MaybePlatformPath}, -}; -use clap::Parser; -use reth_db::{database::Database, init_db, transaction::DbTx, DatabaseEnv}; -use reth_downloaders::{ - file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, - receipt_file_client::ReceiptFileClient, -}; -use reth_node_core::version::SHORT_VERSION; -use reth_primitives::{stage::StageId, ChainSpec, StaticFileSegment}; -use reth_provider::{ - BundleStateWithReceipts, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, - StateWriter, StaticFileProviderFactory, StaticFileWriter, -}; -use tracing::{debug, error, info, trace}; - -use std::{path::PathBuf, sync::Arc}; - -/// Initializes the database with the genesis block. -#[derive(Debug, Parser)] -pub struct ImportReceiptsCommand { - /// The path to the data dir for all reth files and subdirectories. - /// - /// Defaults to the OS-specific data directory: - /// - /// - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - /// - Windows: `{FOLDERID_RoamingAppData}/reth/` - /// - macOS: `$HOME/Library/Application Support/reth/` - #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)] - datadir: MaybePlatformPath, - - /// The chain this node is running. 
- /// - /// Possible values are either a built-in chain or the path to a chain specification file. - #[arg( - long, - value_name = "CHAIN_OR_PATH", - long_help = chain_help(), - default_value = SUPPORTED_CHAINS[0], - value_parser = genesis_value_parser - )] - chain: Arc, - - /// Chunk byte length. - #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] - chunk_len: Option, - - #[command(flatten)] - db: DatabaseArgs, - - /// The path to a receipts file for import. File must use `HackReceiptCodec` (used for - /// exporting OP chain segment below Bedrock block via testinprod/op-geth). - /// - /// - #[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)] - path: PathBuf, -} - -impl ImportReceiptsCommand { - /// Execute `import` command - pub async fn execute(self) -> eyre::Result<()> { - info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); - - debug!(target: "reth::cli", - chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), - "Chunking receipts import" - ); - - // add network name to data dir - let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - - let db_path = data_dir.db(); - info!(target: "reth::cli", path = ?db_path, "Opening database"); - - let db = Arc::new(init_db(db_path, self.db.database_args())?); - info!(target: "reth::cli", "Database opened"); - let provider_factory = - ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; - - let provider = provider_factory.provider_rw()?; - let static_file_provider = provider_factory.static_file_provider(); - - for stage in StageId::ALL { - let checkpoint = provider.get_stage_checkpoint(stage)?; - trace!(target: "reth::cli", - ?stage, - ?checkpoint, - "Read stage checkpoints from db" - ); - } - - // prepare the tx for `write_to_storage` - let tx = provider.into_tx(); - let mut total_decoded_receipts = 0; - - // open file - let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?; - - while let Some(file_client) = 
reader.next_chunk::().await? { - // create a new file client from chunk read from file - let ReceiptFileClient { receipts, first_block, total_receipts: total_receipts_chunk } = - file_client; - - // mark these as decoded - total_decoded_receipts += total_receipts_chunk; - - info!(target: "reth::cli", - first_receipts_block=?first_block, - total_receipts_chunk, - "Importing receipt file chunk" - ); - - // We're reusing receipt writing code internal to - // `BundleStateWithReceipts::write_to_storage`, so we just use a default empty - // `BundleState`. - let bundled_state = - BundleStateWithReceipts::new(Default::default(), receipts, first_block); - - let static_file_producer = - static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)?; - - // finally, write the receipts - bundled_state.write_to_storage::<::TXMut>( - &tx, - Some(static_file_producer), - OriginalValuesKnown::Yes, - )?; - } - - tx.commit()?; - // as static files works in file ranges, internally it will be committing when creating the - // next file range already, so we only need to call explicitly at the end. - static_file_provider.commit()?; - - if total_decoded_receipts == 0 { - error!(target: "reth::cli", "No receipts were imported, ensure the receipt file is valid and not empty"); - return Ok(()) - } - - // compare the highest static file block to the number of receipts we decoded - // - // `HeaderNumbers` and `TransactionHashNumbers` tables serve as additional indexes, but - // nothing like this needs to exist for Receipts. So `tx.entries::` would - // return zero here. 
- let total_imported_receipts = static_file_provider - .get_highest_static_file_block(StaticFileSegment::Receipts) - .expect("static files must exist after ensuring we decoded more than zero"); - - if total_imported_receipts != total_decoded_receipts as u64 { - error!(target: "reth::cli", - total_decoded_receipts, - total_imported_receipts, - "Receipts were partially imported" - ); - } - - info!(target: "reth::cli", total_imported_receipts, "Receipt file imported"); - - Ok(()) - } -} diff --git a/bin/reth/src/commands/import_receipts_op.rs b/bin/reth/src/commands/import_receipts_op.rs new file mode 100644 index 0000000000000..44c79cd2fe6a2 --- /dev/null +++ b/bin/reth/src/commands/import_receipts_op.rs @@ -0,0 +1,230 @@ +//! Command that imports OP mainnet receipts from Bedrock datadir, exported via +//! . + +use std::{ + path::{Path, PathBuf}, + sync::Arc, +}; + +use clap::Parser; +use reth_db::{database::Database, init_db, tables, transaction::DbTx}; +use reth_downloaders::{ + file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, + receipt_file_client::ReceiptFileClient, +}; +use reth_node_core::version::SHORT_VERSION; +use reth_optimism_primitives::bedrock_import::is_dup_tx; +use reth_primitives::{stage::StageId, Receipts, StaticFileSegment}; +use reth_provider::{ + BundleStateWithReceipts, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, + StateWriter, StaticFileProviderFactory, StaticFileWriter, StatsReader, +}; +use tracing::{debug, error, info, trace}; + +use crate::{ + args::{ + utils::{genesis_value_parser, SUPPORTED_CHAINS}, + DatabaseArgs, + }, + dirs::{DataDirPath, MaybePlatformPath}, +}; + +/// Initializes the database with the genesis block. +#[derive(Debug, Parser)] +pub struct ImportReceiptsOpCommand { + /// The path to the data dir for all reth files and subdirectories. 
+ /// + /// Defaults to the OS-specific data directory: + /// + /// - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + /// - Windows: `{FOLDERID_RoamingAppData}/reth/` + /// - macOS: `$HOME/Library/Application Support/reth/` + #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)] + datadir: MaybePlatformPath, + + /// Chunk byte length to read from file. + #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] + chunk_len: Option, + + #[command(flatten)] + db: DatabaseArgs, + + /// The path to a receipts file for import. File must use `HackReceiptFileCodec` (used for + /// exporting OP chain segment below Bedrock block via testinprod/op-geth). + /// + /// + #[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)] + path: PathBuf, +} + +impl ImportReceiptsOpCommand { + /// Execute `import` command + pub async fn execute(self) -> eyre::Result<()> { + info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); + + debug!(target: "reth::cli", + chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), + "Chunking receipts import" + ); + + let chain_spec = genesis_value_parser(SUPPORTED_CHAINS[0])?; + + // add network name to data dir + let data_dir = self.datadir.unwrap_or_chain_default(chain_spec.chain); + + let db_path = data_dir.db(); + info!(target: "reth::cli", path = ?db_path, "Opening database"); + + let db = Arc::new(init_db(db_path, self.db.database_args())?); + info!(target: "reth::cli", "Database opened"); + let provider_factory = + ProviderFactory::new(db.clone(), chain_spec.clone(), data_dir.static_files())?; + + import_receipts_from_file( + provider_factory, + self.path, + self.chunk_len, + |first_block, receipts: &mut Receipts| { + let mut total_filtered_out_dup_txns = 0; + for (index, receipts_for_block) in receipts.iter_mut().enumerate() { + if is_dup_tx(first_block + index as u64) { + receipts_for_block.clear(); + total_filtered_out_dup_txns += 1; + } + } + + total_filtered_out_dup_txns + 
}, + ) + .await + } +} + +/// Imports receipts to static files. Takes a filter callback as parameter, that returns the total +/// number of filtered out receipts. +/// +/// Caution! Filter callback must replace completely filtered out receipts for a block, with empty +/// vectors, rather than `vec!(None)`. This is since the code for writing to static files, expects +/// indices in the [`Receipts`] list, to map to sequential block numbers. +pub async fn import_receipts_from_file( + provider_factory: ProviderFactory, + path: P, + chunk_len: Option, + mut filter: F, +) -> eyre::Result<()> +where + DB: Database, + P: AsRef, + F: FnMut(u64, &mut Receipts) -> usize, +{ + let provider = provider_factory.provider_rw()?; + let static_file_provider = provider_factory.static_file_provider(); + + let total_imported_txns = static_file_provider + .count_entries::() + .expect("transaction static files must exist before importing receipts"); + let highest_block_transactions = static_file_provider + .get_highest_static_file_block(StaticFileSegment::Transactions) + .expect("transaction static files must exist before importing receipts"); + + for stage in StageId::ALL { + let checkpoint = provider.get_stage_checkpoint(stage)?; + trace!(target: "reth::cli", + ?stage, + ?checkpoint, + "Read stage checkpoints from db" + ); + } + + // prepare the tx for `write_to_storage` + let tx = provider.into_tx(); + let mut total_decoded_receipts = 0; + let mut total_filtered_out_dup_txns = 0; + + // open file + let mut reader = ChunkedFileReader::new(path, chunk_len).await?; + + while let Some(file_client) = reader.next_chunk::().await? 
{ + // create a new file client from chunk read from file + let ReceiptFileClient { mut receipts, first_block, total_receipts: total_receipts_chunk } = + file_client; + + // mark these as decoded + total_decoded_receipts += total_receipts_chunk; + + total_filtered_out_dup_txns += filter(first_block, &mut receipts); + + info!(target: "reth::cli", + first_receipts_block=?first_block, + total_receipts_chunk, + "Importing receipt file chunk" + ); + + // We're reusing receipt writing code internal to + // `BundleStateWithReceipts::write_to_storage`, so we just use a default empty + // `BundleState`. + let bundled_state = BundleStateWithReceipts::new(Default::default(), receipts, first_block); + + let static_file_producer = + static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)?; + + // finally, write the receipts + bundled_state.write_to_storage::( + &tx, + Some(static_file_producer), + OriginalValuesKnown::Yes, + )?; + } + + tx.commit()?; + // as static files works in file ranges, internally it will be committing when creating the + // next file range already, so we only need to call explicitly at the end. 
+ static_file_provider.commit()?; + + if total_decoded_receipts == 0 { + error!(target: "reth::cli", "No receipts were imported, ensure the receipt file is valid and not empty"); + return Ok(()) + } + + let total_imported_receipts = static_file_provider + .count_entries::() + .expect("static files must exist after ensuring we decoded more than zero"); + + if total_imported_receipts + total_filtered_out_dup_txns != total_decoded_receipts { + error!(target: "reth::cli", + total_decoded_receipts, + total_imported_receipts, + total_filtered_out_dup_txns, + "Receipts were partially imported" + ); + } + + if total_imported_receipts != total_imported_txns { + error!(target: "reth::cli", + total_imported_receipts, + total_imported_txns, + "Receipts inconsistent with transactions" + ); + } + + let highest_block_receipts = static_file_provider + .get_highest_static_file_block(StaticFileSegment::Receipts) + .expect("static files must exist after ensuring we decoded more than zero"); + + if highest_block_receipts != highest_block_transactions { + error!(target: "reth::cli", + highest_block_receipts, + highest_block_transactions, + "Height of receipts inconsistent with transactions" + ); + } + + info!(target: "reth::cli", + total_imported_receipts, + total_decoded_receipts, + total_filtered_out_dup_txns, + "Receipt file imported" + ); + + Ok(()) +} diff --git a/bin/reth/src/commands/mod.rs b/bin/reth/src/commands/mod.rs index 9e6ff8f840247..789724e0d784f 100644 --- a/bin/reth/src/commands/mod.rs +++ b/bin/reth/src/commands/mod.rs @@ -6,7 +6,7 @@ pub mod debug_cmd; pub mod dump_genesis; pub mod import; pub mod import_op; -pub mod import_receipts; +pub mod import_receipts_op; pub mod init_cmd; pub mod init_state; diff --git a/book/SUMMARY.md b/book/SUMMARY.md index eaa7210cff118..fc6deb28295aa 100644 --- a/book/SUMMARY.md +++ b/book/SUMMARY.md @@ -32,7 +32,6 @@ - [`reth init`](./cli/reth/init.md) - [`reth init-state`](./cli/reth/init-state.md) - [`reth 
import`](./cli/reth/import.md) - - [`reth import-receipts`](./cli/reth/import-receipts.md) - [`reth dump-genesis`](./cli/reth/dump-genesis.md) - [`reth db`](./cli/reth/db.md) - [`reth db stats`](./cli/reth/db/stats.md) diff --git a/book/cli/SUMMARY.md b/book/cli/SUMMARY.md index 8c8ea2f42ccb2..ee3d714b2bb5f 100644 --- a/book/cli/SUMMARY.md +++ b/book/cli/SUMMARY.md @@ -3,7 +3,6 @@ - [`reth init`](./reth/init.md) - [`reth init-state`](./reth/init-state.md) - [`reth import`](./reth/import.md) - - [`reth import-receipts`](./reth/import-receipts.md) - [`reth dump-genesis`](./reth/dump-genesis.md) - [`reth db`](./reth/db.md) - [`reth db stats`](./reth/db/stats.md) diff --git a/book/cli/op-reth.md b/book/cli/op-reth.md new file mode 100644 index 0000000000000..2b56fa662c21e --- /dev/null +++ b/book/cli/op-reth.md @@ -0,0 +1,96 @@ +# op-reth + +Additional op-reth commands. + +```bash +$ op-reth --help +Usage: op-reth [OPTIONS] + +Commands: + import-op Imports the Bedrock datadir blocks from a file + import-receipts-op Imports the Bedrock datadir receipts from a file + +Options: + -h, --help + Print help (see a summary with '-h') + + -V, --version + Print version + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + [default: always] + + Possible values: + - always: Colors on + - auto: Colors on + - never: Colors off + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) 
+ + -q, --quiet + Silence all log output +``` \ No newline at end of file diff --git a/book/cli/reth.md b/book/cli/reth.md index 2e3ebef31d75b..ae17281718eea 100644 --- a/book/cli/reth.md +++ b/book/cli/reth.md @@ -7,20 +7,19 @@ $ reth --help Usage: reth [OPTIONS] Commands: - node Start the node - init Initialize the database from a genesis file - init-state Initialize the database from a state dump file - import This syncs RLP encoded blocks from a file - import-receipts This imports RLP encoded receipts from a file - dump-genesis Dumps genesis block JSON configuration to stdout - db Database debugging utilities - stage Manipulate individual stages - p2p P2P Debugging utilities - test-vectors Generate Test Vectors - config Write config to stdout - debug Various debug routines - recover Scripts for node recovery - help Print this message or the help of the given subcommand(s) + node Start the node + init Initialize the database from a genesis file + init-state Initialize the database from a state dump file + import This syncs RLP encoded blocks from a file + dump-genesis Dumps genesis block JSON configuration to stdout + db Database debugging utilities + stage Manipulate individual stages + p2p P2P Debugging utilities + test-vectors Generate Test Vectors + config Write config to stdout + debug Various debug routines + recover Scripts for node recovery + help Print this message or the help of the given subcommand(s) Options: --chain diff --git a/book/cli/reth/import-op.md b/book/cli/reth/import-op.md new file mode 100644 index 0000000000000..d2d81980ce33c --- /dev/null +++ b/book/cli/reth/import-op.md @@ -0,0 +1,134 @@ +# op-reth import + +This syncs RLP encoded blocks from a file. Supports import of OVM blocks +from the Bedrock datadir. Requires blocks, up to same height as receipts +file, to already be imported. + +```bash +$ op-reth import-op --help +Usage: op-reth import-op [OPTIONS] + +Options: + --config + The path to the configuration file to use. 
+ + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --chunk-len + Chunk byte length to read from file. + + [default: 1GB] + + -h, --help + Print help (see a summary with '-h') + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + + The path to a `.rlp` block file for import. + + The online sync pipeline stages (headers and bodies) are replaced by a file import. Skips block execution since blocks below Bedrock are built on OVM. + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + [default: always] + + Possible values: + - always: Colors on + - auto: Colors on + - never: Colors off + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) 
+ + -q, --quiet + Silence all log output +``` \ No newline at end of file diff --git a/book/cli/reth/import-receipts.md b/book/cli/reth/import-receipts-op.md similarity index 78% rename from book/cli/reth/import-receipts.md rename to book/cli/reth/import-receipts-op.md index 7cea21d791956..a5e1863cea207 100644 --- a/book/cli/reth/import-receipts.md +++ b/book/cli/reth/import-receipts-op.md @@ -1,10 +1,13 @@ -# reth import-receipts +# op-reth import-receipts-op -This imports RLP encoded receipts from a file +This imports non-standard RLP encoded receipts from a file. +The supported RLP encoding, is the non-standard encoding used +for receipt export in . +Supports import of OVM receipts from the Bedrock datadir. ```bash -$ reth import-receipts --help -Usage: reth import-receipts [OPTIONS] +$ op-reth import-receipts-op --help +Usage: op-reth import-receipts-op [OPTIONS] Options: --datadir @@ -18,28 +21,10 @@ Options: [default: default] - --chain - The chain this node is running. - Possible values are either a built-in chain or the path to a chain specification file. - - Built-in chains: - mainnet, sepolia, goerli, holesky, dev - - [default: mainnet] - --chunk-len - Chunk byte length. - - --instance - Add a new instance of a node. - - Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - - Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Chunk byte length to read from file. - [default: 1] + [default: 1GB] -h, --help Print help (see a summary with '-h') @@ -64,7 +49,7 @@ Database: [possible values: true, false] - The path to a receipts file for import. 
File must use `HackReceiptCodec` (used for + The path to a receipts file for import. File must use `HackReceiptFileCodec` (used for exporting OP chain segment below Bedrock block via testinprod/op-geth). diff --git a/book/cli/reth/import.md b/book/cli/reth/import.md index 8493a88f23829..9c320d0b684df 100644 --- a/book/cli/reth/import.md +++ b/book/cli/reth/import.md @@ -34,7 +34,7 @@ Options: Disables stages that require state. --chunk-len - Chunk byte length. + Chunk byte length to read from file. --instance Add a new instance of a node. diff --git a/book/run/sync-op-mainnet.md b/book/run/sync-op-mainnet.md index b50a32fb4b7a1..8c54aa87757aa 100644 --- a/book/run/sync-op-mainnet.md +++ b/book/run/sync-op-mainnet.md @@ -22,7 +22,7 @@ Output from running the command to export state, can also be downloaded from ). +Import of >100 million OVM blocks, from genesis to Bedrock, completes in 6 hours. ```bash ./op-reth import-op @@ -36,14 +36,18 @@ corresponding transactions must already be imported (see [step 1](#1-import-bloc Imports a `.rlp` file of receipts, that has been exported with command specified in (command for exporting receipts uses custom RLP-encoding). +Import of >100 million OVM receipts, from genesis to Bedrock, completes in 30 minutes. + ```bash -./op-reth import-receipts --chain optimism +./op-reth import-receipts-op ``` ### 3. Import State Imports a `.jsonl` state dump. The block at which the state dump is made, must be the latest block in -reth's database. +reth's database. This should be block 105 235 063, the first Bedrock block (see [step 1](#1-import-blocks)). + +Import of >4 million OP mainnet accounts at Bedrock, completes in 10 minutes. 
```bash ./op-reth init-state --chain optimism diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index accf2d08e0e88..b18b4dcf6ddbc 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true +reth-optimism-primitives.workspace = true reth-consensus.workspace=true [dev-dependencies] diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index ad1e11643d8a0..74c515c204b4e 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,12 +1,12 @@ //! Collection of methods for block validation. use reth_consensus::ConsensusError; +use reth_optimism_primitives::bedrock_import::is_dup_tx; use reth_primitives::{ constants::{ eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, MAXIMUM_EXTRA_DATA_SIZE, }, - op_mainnet::is_dup_tx, ChainSpec, GotExpected, Hardfork, Header, SealedBlock, SealedHeader, }; diff --git a/crates/net/downloaders/src/receipt_file_client.rs b/crates/net/downloaders/src/receipt_file_client.rs index 0eaa4ff1b7763..b7231889c6e44 100644 --- a/crates/net/downloaders/src/receipt_file_client.rs +++ b/crates/net/downloaders/src/receipt_file_client.rs @@ -75,10 +75,10 @@ impl FromReader for ReceiptFileClient { Err(err) => return Err(err), }; - total_receipts += 1; - match receipt { Some(ReceiptWithBlockNumber { receipt, number }) => { + total_receipts += 1; + if first_block.is_none() { first_block = Some(number); block_number = number; @@ -202,7 +202,8 @@ mod test { let (ReceiptFileClient { receipts, first_block, total_receipts }, _remaining_bytes) = ReceiptFileClient::from_reader(reader, encoded_byte_len).await.unwrap(); - assert_eq!(4, total_receipts); + // 2 non-empty receipt objects + assert_eq!(2, total_receipts); assert_eq!(0, first_block); assert!(receipts[0].is_empty()); 
assert_eq!(op_mainnet_receipt_block_1().receipt, receipts[1][0].clone().unwrap()); @@ -229,7 +230,8 @@ mod test { let (ReceiptFileClient { receipts, first_block, total_receipts }, _remaining_bytes) = ReceiptFileClient::from_reader(reader, encoded_byte_len).await.unwrap(); - assert_eq!(4, total_receipts); + // 2 non-empty receipt objects + assert_eq!(2, total_receipts); assert_eq!(0, first_block); assert!(receipts[0].is_empty()); assert_eq!(op_mainnet_receipt_block_1().receipt, receipts[1][0].clone().unwrap()); @@ -257,7 +259,8 @@ mod test { let (ReceiptFileClient { receipts, first_block, total_receipts }, _remaining_bytes) = ReceiptFileClient::from_reader(reader, encoded_byte_len).await.unwrap(); - assert_eq!(5, total_receipts); + // 4 non-empty receipt objects + assert_eq!(4, total_receipts); assert_eq!(0, first_block); assert!(receipts[0].is_empty()); assert_eq!(op_mainnet_receipt_block_1().receipt, receipts[1][0].clone().unwrap()); diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml new file mode 100644 index 0000000000000..0acd2f1ebdca7 --- /dev/null +++ b/crates/optimism/primitives/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "reth-optimism-primitives" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "OP primitive types" + +[lints] +workspace = true \ No newline at end of file diff --git a/crates/primitives/src/op_mainnet.rs b/crates/optimism/primitives/src/bedrock_import.rs similarity index 94% rename from crates/primitives/src/op_mainnet.rs rename to crates/optimism/primitives/src/bedrock_import.rs index c60504e92b4c3..17020f9f2b902 100644 --- a/crates/primitives/src/op_mainnet.rs +++ b/crates/optimism/primitives/src/bedrock_import.rs @@ -1,4 +1,4 @@ -//! Helpers for working with replayed OP mainnet OVM transactions (in blocks below Bedrock). +//! 
Replayed OP mainnet OVM transactions (in blocks below Bedrock). /// Transaction 0x9ed8f713b2cc6439657db52dcd2fdb9cc944915428f3c6e2a7703e242b259cb9 in block 985, /// replayed in blocks: diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs new file mode 100644 index 0000000000000..5cdb53def7fc6 --- /dev/null +++ b/crates/optimism/primitives/src/lib.rs @@ -0,0 +1,10 @@ +//! Standalone crate for Optimism-specific Reth primitive types. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +pub mod bedrock_import; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 7681ddb9b2874..d3ea340377c55 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -35,7 +35,6 @@ mod header; mod integer_list; mod log; mod net; -pub mod op_mainnet; pub mod proofs; mod prune; mod receipt; From 409c15dde45d4256537b231ca833fddd79dafdfd Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 May 2024 19:05:47 +0200 Subject: [PATCH 666/700] chore: rm reth error variant (#8440) --- .../beacon/src/engine/hooks/prune.rs | 1 - crates/prune/src/error.rs | 5 +---- crates/prune/src/segments/mod.rs | 20 +++++++++---------- 3 files changed, 10 insertions(+), 16 deletions(-) diff --git a/crates/consensus/beacon/src/engine/hooks/prune.rs b/crates/consensus/beacon/src/engine/hooks/prune.rs index d2c2e2d33a1e9..ea9078aca919e 100644 --- a/crates/consensus/beacon/src/engine/hooks/prune.rs +++ b/crates/consensus/beacon/src/engine/hooks/prune.rs @@ -169,7 +169,6 @@ impl From for EngineHookError { PrunerError::PruneSegment(_) | PrunerError::InconsistentData(_) => { EngineHookError::Internal(Box::new(err)) } - PrunerError::Interface(err) => err.into(), 
PrunerError::Database(err) => RethError::Database(err).into(), PrunerError::Provider(err) => RethError::Provider(err).into(), } diff --git a/crates/prune/src/error.rs b/crates/prune/src/error.rs index 49333b4db437c..348b12f674c20 100644 --- a/crates/prune/src/error.rs +++ b/crates/prune/src/error.rs @@ -4,6 +4,7 @@ use reth_primitives::PruneSegmentError; use reth_provider::ProviderError; use thiserror::Error; +/// Errors that can occur during pruning. #[derive(Error, Debug)] pub enum PrunerError { #[error(transparent)] @@ -12,9 +13,6 @@ pub enum PrunerError { #[error("inconsistent data: {0}")] InconsistentData(&'static str), - #[error(transparent)] - Interface(#[from] RethError), - #[error(transparent)] Database(#[from] DatabaseError), @@ -28,7 +26,6 @@ impl From for RethError { PrunerError::PruneSegment(_) | PrunerError::InconsistentData(_) => { RethError::other(err) } - PrunerError::Interface(err) => err, PrunerError::Database(err) => RethError::Database(err), PrunerError::Provider(err) => RethError::Provider(err), } diff --git a/crates/prune/src/segments/mod.rs b/crates/prune/src/segments/mod.rs index 82b95bc07d08a..d0d66e817600d 100644 --- a/crates/prune/src/segments/mod.rs +++ b/crates/prune/src/segments/mod.rs @@ -9,27 +9,25 @@ mod storage_history; mod transaction_lookup; mod transactions; +use crate::PrunerError; pub use account_history::AccountHistory; pub use headers::Headers; pub use receipts::Receipts; pub use receipts_by_logs::ReceiptsByLogs; -pub use sender_recovery::SenderRecovery; -pub use set::SegmentSet; -use std::fmt::Debug; -pub use storage_history::StorageHistory; -pub use transaction_lookup::TransactionLookup; -pub use transactions::Transactions; - -use crate::PrunerError; use reth_db::database::Database; -use reth_interfaces::{provider::ProviderResult, RethResult}; +use reth_interfaces::provider::ProviderResult; use reth_primitives::{ BlockNumber, PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, 
TxNumber, }; use reth_provider::{BlockReader, DatabaseProviderRW, PruneCheckpointWriter}; -use std::ops::RangeInclusive; +pub use sender_recovery::SenderRecovery; +pub use set::SegmentSet; +use std::{fmt::Debug, ops::RangeInclusive}; +pub use storage_history::StorageHistory; use tracing::error; +pub use transaction_lookup::TransactionLookup; +pub use transactions::Transactions; /// A segment represents a pruning of some portion of the data. /// @@ -85,7 +83,7 @@ impl PruneInput { pub(crate) fn get_next_tx_num_range( &self, provider: &DatabaseProviderRW, - ) -> RethResult>> { + ) -> ProviderResult>> { let from_tx_number = self.previous_checkpoint // Checkpoint exists, prune from the next transaction after the highest pruned one .and_then(|checkpoint| match checkpoint.tx_number { From 02322217170086810b4d03a0a5bd2711d8473626 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 May 2024 19:50:58 +0200 Subject: [PATCH 667/700] chore(deps): rm reth-interfaces from rpc (#8444) --- Cargo.lock | 3 ++- crates/rpc/rpc/Cargo.toml | 5 +++-- crates/rpc/rpc/src/eth/api/mod.rs | 3 +-- crates/rpc/rpc/src/eth/api/server.rs | 2 +- crates/rpc/rpc/src/eth/cache/mod.rs | 2 +- crates/rpc/rpc/src/eth/error.rs | 8 ++++---- crates/rpc/rpc/src/result.rs | 6 +++--- crates/rpc/rpc/src/reth.rs | 2 +- 8 files changed, 16 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bd3da5f3fad82..74d80396a5b04 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7776,10 +7776,10 @@ dependencies = [ "pin-project", "rand 0.8.5", "reth-consensus-common", + "reth-errors", "reth-evm", "reth-evm-ethereum", "reth-evm-optimism", - "reth-interfaces", "reth-metrics", "reth-network-api", "reth-network-types", @@ -7791,6 +7791,7 @@ dependencies = [ "reth-rpc-types", "reth-rpc-types-compat", "reth-tasks", + "reth-testing-utils", "reth-transaction-pool", "revm", "revm-inspectors", diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 224866be6e232..53225f2b57d63 100644 --- 
a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -13,10 +13,10 @@ workspace = true [dependencies] # reth -reth-interfaces.workspace = true reth-primitives.workspace = true reth-rpc-api.workspace = true reth-rpc-types.workspace = true +reth-errors.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-network-api.workspace = true @@ -81,10 +81,11 @@ dyn-clone.workspace = true [dev-dependencies] reth-evm-ethereum.workspace = true +reth-testing-utils.workspace = true + jsonrpsee = { workspace = true, features = ["client"] } assert_matches.workspace = true tempfile.workspace = true -reth-interfaces = { workspace = true, features = ["test-utils"] } [features] optimism = [ diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index d7ec6a7db4573..484cf73c91a87 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -11,10 +11,9 @@ use crate::eth::{ gas_oracle::GasPriceOracle, signer::EthSigner, }; - use async_trait::async_trait; +use reth_errors::{RethError, RethResult}; use reth_evm::ConfigureEvm; -use reth_interfaces::{RethError, RethResult}; use reth_network_api::NetworkInfo; use reth_primitives::{ revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index a1796a71dd49e..7fd358c1328cb 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -435,7 +435,6 @@ mod tests { }; use jsonrpsee::types::error::INVALID_PARAMS_CODE; use reth_evm_ethereum::EthEvmConfig; - use reth_interfaces::test_utils::{generators, generators::Rng}; use reth_network_api::noop::NoopNetwork; use reth_primitives::{ constants::ETHEREUM_BLOCK_GAS_LIMIT, BaseFeeParams, Block, BlockNumberOrTag, Header, @@ -448,6 +447,7 @@ mod tests { use reth_rpc_api::EthApiServer; use reth_rpc_types::FeeHistory; use 
reth_tasks::pool::BlockingTaskPool; + use reth_testing_utils::{generators, generators::Rng}; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; fn build_test_eth_api< diff --git a/crates/rpc/rpc/src/eth/cache/mod.rs b/crates/rpc/rpc/src/eth/cache/mod.rs index 45f9f8c7e40bc..a3e4011073183 100644 --- a/crates/rpc/rpc/src/eth/cache/mod.rs +++ b/crates/rpc/rpc/src/eth/cache/mod.rs @@ -1,8 +1,8 @@ //! Async caching support for eth RPC use futures::{future::Either, Stream, StreamExt}; +use reth_errors::{ProviderError, ProviderResult}; use reth_evm::ConfigureEvm; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ Block, BlockHashOrNumber, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, TransactionSigned, TransactionSignedEcRecovered, B256, diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 90ed87facc0c1..26a4efa48f657 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -3,7 +3,7 @@ use crate::result::{internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code}; use alloy_sol_types::decode_revert_reason; use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObject}; -use reth_interfaces::RethError; +use reth_errors::RethError; use reth_primitives::{revm_primitives::InvalidHeader, Address, Bytes}; use reth_rpc_types::{ error::EthRpcErrorCode, request::TransactionInputError, BlockError, ToRpcError, @@ -193,9 +193,9 @@ impl From for EthApiError { } } -impl From for EthApiError { - fn from(error: reth_interfaces::provider::ProviderError) -> Self { - use reth_interfaces::provider::ProviderError; +impl From for EthApiError { + fn from(error: reth_errors::ProviderError) -> Self { + use reth_errors::ProviderError; match error { ProviderError::HeaderNotFound(_) | ProviderError::BlockHashNotFound(_) | diff --git a/crates/rpc/rpc/src/result.rs b/crates/rpc/rpc/src/result.rs index 677b537adea65..406d59ce13bad 100644 --- 
a/crates/rpc/rpc/src/result.rs +++ b/crates/rpc/rpc/src/result.rs @@ -99,8 +99,8 @@ macro_rules! impl_to_rpc_result { } impl_to_rpc_result!(PayloadError); -impl_to_rpc_result!(reth_interfaces::RethError); -impl_to_rpc_result!(reth_interfaces::provider::ProviderError); +impl_to_rpc_result!(reth_errors::RethError); +impl_to_rpc_result!(reth_errors::ProviderError); impl_to_rpc_result!(reth_network_api::NetworkError); /// Constructs an invalid params JSON-RPC error. @@ -152,7 +152,7 @@ pub(crate) fn rpc_err( #[cfg(test)] mod tests { use super::*; - use reth_interfaces::{RethError, RethResult}; + use reth_errors::{RethError, RethResult}; fn assert_rpc_result>() {} diff --git a/crates/rpc/rpc/src/reth.rs b/crates/rpc/rpc/src/reth.rs index f1ff402aa75a6..43890bea06ae8 100644 --- a/crates/rpc/rpc/src/reth.rs +++ b/crates/rpc/rpc/src/reth.rs @@ -1,7 +1,7 @@ use crate::eth::error::{EthApiError, EthResult}; use async_trait::async_trait; use jsonrpsee::core::RpcResult; -use reth_interfaces::RethResult; +use reth_errors::RethResult; use reth_primitives::{Address, BlockId, U256}; use reth_provider::{BlockReaderIdExt, ChangeSetReader, StateProviderFactory}; use reth_rpc_api::RethApiServer; From 5dd1d88a47d4c6778c4a73561988a91f5f84cd1c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 May 2024 19:51:16 +0200 Subject: [PATCH 668/700] chore: rm reth-interfaces from trie parallel (#8442) --- Cargo.lock | 2 +- crates/trie-parallel/Cargo.toml | 2 +- crates/trie-parallel/src/async_root.rs | 2 +- crates/trie-parallel/src/parallel_root.rs | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 74d80396a5b04..2be2f5be15f39 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8200,7 +8200,7 @@ dependencies = [ "rand 0.8.5", "rayon", "reth-db", - "reth-interfaces", + "reth-execution-errors", "reth-metrics", "reth-primitives", "reth-provider", diff --git a/crates/trie-parallel/Cargo.toml b/crates/trie-parallel/Cargo.toml index 
649e089739efe..9c005c415e452 100644 --- a/crates/trie-parallel/Cargo.toml +++ b/crates/trie-parallel/Cargo.toml @@ -16,7 +16,7 @@ workspace = true reth-primitives.workspace = true reth-db.workspace = true reth-trie.workspace = true -reth-interfaces.workspace = true +reth-execution-errors.workspace = true reth-provider.workspace = true # alloy diff --git a/crates/trie-parallel/src/async_root.rs b/crates/trie-parallel/src/async_root.rs index 9665c09295cd1..dec284bee383d 100644 --- a/crates/trie-parallel/src/async_root.rs +++ b/crates/trie-parallel/src/async_root.rs @@ -2,7 +2,7 @@ use crate::{stats::ParallelTrieTracker, storage_root_targets::StorageRootTargets use alloy_rlp::{BufMut, Encodable}; use itertools::Itertools; use reth_db::database::Database; -use reth_interfaces::trie::StorageRootError; +use reth_execution_errors::StorageRootError; use reth_primitives::{ trie::{HashBuilder, Nibbles, TrieAccount}, B256, diff --git a/crates/trie-parallel/src/parallel_root.rs b/crates/trie-parallel/src/parallel_root.rs index 58957765201ff..04417360567ef 100644 --- a/crates/trie-parallel/src/parallel_root.rs +++ b/crates/trie-parallel/src/parallel_root.rs @@ -2,7 +2,7 @@ use crate::{stats::ParallelTrieTracker, storage_root_targets::StorageRootTargets use alloy_rlp::{BufMut, Encodable}; use rayon::prelude::*; use reth_db::database::Database; -use reth_interfaces::trie::StorageRootError; +use reth_execution_errors::StorageRootError; use reth_primitives::{ trie::{HashBuilder, Nibbles, TrieAccount}, B256, From 9f38a67fa30aa39b4dd9dc1cfd1231bb2a55a03f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 May 2024 19:54:54 +0200 Subject: [PATCH 669/700] chore(deps): txpool deps cleanup (#8443) --- Cargo.lock | 5 +---- crates/transaction-pool/Cargo.toml | 7 +++---- crates/transaction-pool/src/lib.rs | 3 ++- crates/transaction-pool/src/noop.rs | 2 +- crates/transaction-pool/src/pool/mod.rs | 2 +- crates/transaction-pool/src/test_utils/mock.rs | 2 +- 
crates/transaction-pool/src/traits.rs | 6 ++++-- 7 files changed, 13 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2be2f5be15f39..fa5c92937d68e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8138,16 +8138,13 @@ dependencies = [ "pprof", "proptest", "rand 0.8.5", - "reth-eth-wire", + "reth-eth-wire-types", "reth-fs-util", "reth-metrics", - "reth-network-types", "reth-primitives", "reth-provider", - "reth-revm", "reth-tasks", "reth-tracing", - "revm", "rustc-hash", "schnellru", "serde", diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 82020e17257a1..e742c569b76a4 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -13,15 +13,14 @@ workspace = true [dependencies] # reth -reth-eth-wire.workspace = true +reth-eth-wire-types.workspace = true reth-primitives.workspace = true reth-fs-util.workspace = true reth-provider.workspace = true reth-tasks.workspace = true -revm.workspace = true + +# ethereum alloy-rlp.workspace = true -reth-revm = { workspace = true, optional = true } -reth-network-types.workspace = true # async/futures futures-util.workspace = true diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 45ab93444e7a7..0ddc8ec016fc6 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -147,11 +147,12 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] #![warn(clippy::missing_const_for_fn)] use crate::{identifier::TransactionId, pool::PoolInner}; use aquamarine as _; -use reth_eth_wire::HandleMempoolData; +use reth_eth_wire_types::HandleMempoolData; use reth_primitives::{Address, BlobTransactionSidecar, PooledTransactionsElement, TxHash, U256}; use reth_provider::StateProviderFactory; use std::{collections::HashSet, sync::Arc}; diff --git 
a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index b550a2bc160c6..8c3a52f4c348c 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -16,7 +16,7 @@ use crate::{ PooledTransactionsElement, PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; -use reth_eth_wire::HandleMempoolData; +use reth_eth_wire_types::HandleMempoolData; use reth_primitives::{Address, BlobTransactionSidecar, TxHash, U256}; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; use tokio::sync::{mpsc, mpsc::Receiver}; diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 163f30ea6b498..fe84e6e90b0b0 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -82,7 +82,7 @@ use crate::{ }; use best::BestTransactions; use parking_lot::{Mutex, RwLock, RwLockReadGuard}; -use reth_eth_wire::HandleMempoolData; +use reth_eth_wire_types::HandleMempoolData; use reth_primitives::{ Address, BlobTransaction, BlobTransactionSidecar, IntoRecoveredTransaction, PooledTransactionsElement, TransactionSigned, TxHash, B256, diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index dded89a0ed6f5..15755b4e602f4 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -760,7 +760,7 @@ impl EthPoolTransaction for MockTransaction { fn validate_blob( &self, _blob: &BlobTransactionSidecar, - _settings: &revm::primitives::KzgSettings, + _settings: &reth_primitives::kzg::KzgSettings, ) -> Result<(), reth_primitives::BlobTransactionValidationError> { match &self { Self::Eip4844 { .. 
} => Ok(()), diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index fba43e899c20a..dd465a77b9fff 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -8,8 +8,7 @@ use crate::{ AllTransactionsEvents, }; use futures_util::{ready, Stream}; -use reth_eth_wire::HandleMempoolData; -use reth_network_types::PeerId; +use reth_eth_wire_types::HandleMempoolData; use reth_primitives::{ kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, AccessList, Address, BlobTransactionSidecar, BlobTransactionValidationError, FromRecoveredPooledTransaction, @@ -29,6 +28,9 @@ use std::{ }; use tokio::sync::mpsc::Receiver; +/// The PeerId type. +pub type PeerId = reth_primitives::B512; + /// General purpose abstraction of a transaction-pool. /// /// This is intended to be used by API-consumers such as RPC that need inject new incoming, From c67ed1d5a9cd469c273e81da8ebeabb469fef44a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 May 2024 19:55:39 +0200 Subject: [PATCH 670/700] chore(deps): rm reth-interfaces from examples (#8445) --- Cargo.lock | 3 ++- examples/exex/rollup/Cargo.toml | 4 ++-- examples/exex/rollup/src/execution.rs | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fa5c92937d68e..002e5e397067d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2985,14 +2985,15 @@ dependencies = [ "once_cell", "reth", "reth-cli-runner", + "reth-execution-errors", "reth-exex", - "reth-interfaces", "reth-node-api", "reth-node-core", "reth-node-ethereum", "reth-primitives", "reth-provider", "reth-revm", + "reth-testing-utils", "reth-tracing", "reth-trie", "rusqlite", diff --git a/examples/exex/rollup/Cargo.toml b/examples/exex/rollup/Cargo.toml index f32a7762926d8..bea03566618e8 100644 --- a/examples/exex/rollup/Cargo.toml +++ b/examples/exex/rollup/Cargo.toml @@ -10,11 +10,11 @@ license.workspace = true reth.workspace = true 
reth-cli-runner.workspace = true reth-exex.workspace = true -reth-interfaces.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true reth-node-ethereum.workspace = true reth-primitives.workspace = true +reth-execution-errors.workspace = true reth-provider.workspace = true reth-revm.workspace = true reth-tracing.workspace = true @@ -35,6 +35,6 @@ rusqlite = { version = "0.31.0", features = ["bundled"] } serde_json.workspace = true [dev-dependencies] -reth-interfaces = { workspace = true, features = ["test-utils"] } +reth-testing-utils.workspace = true secp256k1.workspace = true diff --git a/examples/exex/rollup/src/execution.rs b/examples/exex/rollup/src/execution.rs index 1403833d30ec3..e1fcfb5c485b4 100644 --- a/examples/exex/rollup/src/execution.rs +++ b/examples/exex/rollup/src/execution.rs @@ -3,7 +3,7 @@ use alloy_consensus::{Blob, SidecarCoder, SimpleCoder}; use alloy_rlp::Decodable as _; use eyre::OptionExt; use reth::transaction_pool::TransactionPool; -use reth_interfaces::executor::BlockValidationError; +use reth_execution_errors::BlockValidationError; use reth_node_api::{ConfigureEvm, ConfigureEvmEnv}; use reth_node_ethereum::EthEvmConfig; use reth_primitives::{ @@ -273,7 +273,6 @@ mod tests { test_utils::{testing_pool, MockTransaction}, TransactionOrigin, TransactionPool, }; - use reth_interfaces::test_utils::generators::{self, sign_tx_with_key_pair}; use reth_primitives::{ bytes, constants::ETH_TO_WEI, @@ -282,6 +281,7 @@ mod tests { BlockNumber, Receipt, SealedBlockWithSenders, Transaction, TxEip2930, TxKind, U256, }; use reth_revm::Evm; + use reth_testing_utils::generators::{self, sign_tx_with_key_pair}; use rusqlite::Connection; use secp256k1::{Keypair, Secp256k1}; From 1f2b68b8eaa73b5946715ce149807ecac97da67c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 May 2024 19:56:03 +0200 Subject: [PATCH 671/700] chore: rm reth-interfaces from static file (#8441) --- Cargo.lock | 2 +- crates/static-file/Cargo.toml | 2 +- 
crates/static-file/src/static_file_producer.rs | 13 +++++-------- 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 002e5e397067d..727d632b87e80 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8025,12 +8025,12 @@ dependencies = [ "parking_lot 0.12.3", "rayon", "reth-db", - "reth-interfaces", "reth-nippy-jar", "reth-primitives", "reth-provider", "reth-stages", "reth-storage-errors", + "reth-testing-utils", "reth-tokio-util", "tempfile", "tokio", diff --git a/crates/static-file/Cargo.toml b/crates/static-file/Cargo.toml index f73300841609e..290ffa4eb220f 100644 --- a/crates/static-file/Cargo.toml +++ b/crates/static-file/Cargo.toml @@ -30,9 +30,9 @@ rayon.workspace = true parking_lot = { workspace = true, features = ["send_guard", "arc_lock"] } [dev-dependencies] -reth-interfaces.workspace = true reth-db = { workspace = true, features = ["test-utils"] } reth-stages = { workspace = true, features = ["test-utils"] } +reth-testing-utils.workspace = true assert_matches.workspace = true tempfile.workspace = true diff --git a/crates/static-file/src/static_file_producer.rs b/crates/static-file/src/static_file_producer.rs index ef66a12ed1784..1f9cfd12914b4 100644 --- a/crates/static-file/src/static_file_producer.rs +++ b/crates/static-file/src/static_file_producer.rs @@ -246,21 +246,18 @@ mod tests { }; use assert_matches::assert_matches; use reth_db::{database::Database, test_utils::TempDatabase, transaction::DbTx, DatabaseEnv}; - use reth_interfaces::{ - provider::ProviderError, - test_utils::{ - generators, - generators::{random_block_range, random_receipt}, - }, - }; use reth_primitives::{ static_file::HighestStaticFiles, PruneModes, StaticFileSegment, B256, U256, }; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, - ProviderFactory, StaticFileProviderFactory, + ProviderError, ProviderFactory, StaticFileProviderFactory, }; use reth_stages::test_utils::{StorageKind, TestStageDB}; + use reth_testing_utils::{ + 
generators, + generators::{random_block_range, random_receipt}, + }; use std::{ sync::{mpsc::channel, Arc}, time::Duration, From c6b18ff90bde4843922decaa1d50ed0f0ffbfdf8 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 28 May 2024 21:30:26 +0200 Subject: [PATCH 672/700] chore(tree): remove unused `BlockchainTreeViewer` methods (#8426) --- crates/blockchain-tree-api/src/lib.rs | 22 +----------------- crates/blockchain-tree/src/block_indices.rs | 8 ------- crates/blockchain-tree/src/noop.rs | 18 +-------------- crates/blockchain-tree/src/shareable.rs | 24 +------------------- crates/storage/provider/src/providers/mod.rs | 18 +-------------- 5 files changed, 4 insertions(+), 86 deletions(-) diff --git a/crates/blockchain-tree-api/src/lib.rs b/crates/blockchain-tree-api/src/lib.rs index e17b790a42f10..19fa256ebc73d 100644 --- a/crates/blockchain-tree-api/src/lib.rs +++ b/crates/blockchain-tree-api/src/lib.rs @@ -14,7 +14,7 @@ use reth_primitives::{ SealedHeader, }; use reth_storage_errors::provider::ProviderError; -use std::collections::{BTreeMap, HashSet}; +use std::collections::BTreeMap; pub mod error; @@ -266,11 +266,6 @@ pub enum InsertPayloadOk { /// * Pending blocks that extend the canonical chain but are not yet included. /// * Future pending blocks that extend the pending blocks. pub trait BlockchainTreeViewer: Send + Sync { - /// Returns both pending and side-chain block numbers and their hashes. - /// - /// Caution: This will not return blocks from the canonical chain. - fn blocks(&self) -> BTreeMap>; - /// Returns the header with matching hash from the tree, if it exists. /// /// Caution: This will not return headers from the canonical chain. @@ -288,13 +283,6 @@ pub trait BlockchainTreeViewer: Send + Sync { /// disconnected from the canonical chain. fn block_with_senders_by_hash(&self, hash: BlockHash) -> Option; - /// Returns the _buffered_ (disconnected) block with matching hash from the internal buffer if - /// it exists. 
- /// - /// Caution: Unlike [Self::block_by_hash] this will only return blocks that are currently - /// disconnected from the canonical chain. - fn buffered_block_by_hash(&self, block_hash: BlockHash) -> Option; - /// Returns the _buffered_ (disconnected) header with matching hash from the internal buffer if /// it exists. /// @@ -307,9 +295,6 @@ pub trait BlockchainTreeViewer: Send + Sync { self.block_by_hash(hash).is_some() } - /// Canonical block number and hashes best known by the tree. - fn canonical_blocks(&self) -> BTreeMap; - /// Return whether or not the block is known and in the canonical chain. fn is_canonical(&self, hash: BlockHash) -> Result; @@ -322,11 +307,6 @@ pub trait BlockchainTreeViewer: Send + Sync { /// Return BlockchainTree best known canonical chain tip (BlockHash, BlockNumber) fn canonical_tip(&self) -> BlockNumHash; - /// Return block hashes that extends the canonical chain tip by one. - /// This is used to fetch what is considered the pending blocks, blocks that - /// has best chance to become canonical. - fn pending_blocks(&self) -> (BlockNumber, Vec); - /// Return block number and hash that extends the canonical chain tip by one. /// /// If there is no such block, this returns `None`. diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index 875b6fbe1010e..7df9736824b69 100644 --- a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -57,14 +57,6 @@ impl BlockIndices { } } - /// Return internal index that maps all pending block number to their hashes. - /// - /// This essentially contains all possible branches. Given a parent block, then the child block - /// number as the key has all possible block hashes as the value. 
- pub fn block_number_to_block_hashes(&self) -> &BTreeMap> { - &self.block_number_to_block_hashes - } - /// Return fork to child indices pub fn fork_to_child(&self) -> &HashMap> { &self.fork_to_child diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index f4d27272f4411..c6dedfa643f76 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -12,7 +12,7 @@ use reth_provider::{ BlockchainTreePendingStateProvider, CanonStateNotificationSender, CanonStateNotifications, CanonStateSubscriptions, FullBundleStateDataProvider, }; -use std::collections::{BTreeMap, HashSet}; +use std::collections::BTreeMap; /// A BlockchainTree that does nothing. /// @@ -74,10 +74,6 @@ impl BlockchainTreeEngine for NoopBlockchainTree { } impl BlockchainTreeViewer for NoopBlockchainTree { - fn blocks(&self) -> BTreeMap> { - Default::default() - } - fn header_by_hash(&self, _hash: BlockHash) -> Option { None } @@ -90,18 +86,10 @@ impl BlockchainTreeViewer for NoopBlockchainTree { None } - fn buffered_block_by_hash(&self, _block_hash: BlockHash) -> Option { - None - } - fn buffered_header_by_hash(&self, _block_hash: BlockHash) -> Option { None } - fn canonical_blocks(&self) -> BTreeMap { - Default::default() - } - fn is_canonical(&self, _block_hash: BlockHash) -> Result { Ok(false) } @@ -114,10 +102,6 @@ impl BlockchainTreeViewer for NoopBlockchainTree { Default::default() } - fn pending_blocks(&self) -> (BlockNumber, Vec) { - (0, vec![]) - } - fn pending_block_num_hash(&self) -> Option { None } diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 0c9d803d5662a..fc323fd267964 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -17,10 +17,7 @@ use reth_provider::{ BlockchainTreePendingStateProvider, CanonStateSubscriptions, FullBundleStateDataProvider, ProviderError, }; -use std::{ - collections::{BTreeMap, HashSet}, - sync::Arc, 
-}; +use std::{collections::BTreeMap, sync::Arc}; use tracing::trace; /// Shareable blockchain tree that is behind a RwLock @@ -111,11 +108,6 @@ where DB: Database + Clone, E: BlockExecutorProvider, { - fn blocks(&self) -> BTreeMap> { - trace!(target: "blockchain_tree", "Returning all blocks in blockchain tree"); - self.tree.read().block_indices().block_number_to_block_hashes().clone() - } - fn header_by_hash(&self, hash: BlockHash) -> Option { trace!(target: "blockchain_tree", ?hash, "Returning header by hash"); self.tree.read().sidechain_block_by_hash(hash).map(|b| b.header.clone()) @@ -131,19 +123,10 @@ where self.tree.read().block_with_senders_by_hash(block_hash).cloned() } - fn buffered_block_by_hash(&self, block_hash: BlockHash) -> Option { - self.tree.read().get_buffered_block(&block_hash).map(|b| b.block.clone()) - } - fn buffered_header_by_hash(&self, block_hash: BlockHash) -> Option { self.tree.read().get_buffered_block(&block_hash).map(|b| b.header.clone()) } - fn canonical_blocks(&self) -> BTreeMap { - trace!(target: "blockchain_tree", "Returning canonical blocks in tree"); - self.tree.read().block_indices().canonical_chain().inner().clone() - } - fn is_canonical(&self, hash: BlockHash) -> Result { trace!(target: "blockchain_tree", ?hash, "Checking if block is canonical"); self.tree.read().is_block_hash_canonical(&hash) @@ -159,11 +142,6 @@ where self.tree.read().block_indices().canonical_tip() } - fn pending_blocks(&self) -> (BlockNumber, Vec) { - trace!(target: "blockchain_tree", "Returning all pending blocks"); - self.tree.read().block_indices().pending_blocks() - } - fn pending_block_num_hash(&self) -> Option { trace!(target: "blockchain_tree", "Returning first pending block"); self.tree.read().block_indices().pending_block_num_hash() diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index a8db59b0620f9..eda49830dd78c 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ 
b/crates/storage/provider/src/providers/mod.rs @@ -28,7 +28,7 @@ use reth_primitives::{ use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ - collections::{BTreeMap, HashSet}, + collections::BTreeMap, ops::{RangeBounds, RangeInclusive}, sync::Arc, time::Instant, @@ -705,10 +705,6 @@ impl BlockchainTreeViewer for BlockchainProvider where DB: Send + Sync, { - fn blocks(&self) -> BTreeMap> { - self.tree.blocks() - } - fn header_by_hash(&self, hash: BlockHash) -> Option { self.tree.header_by_hash(hash) } @@ -721,18 +717,10 @@ where self.tree.block_with_senders_by_hash(block_hash) } - fn buffered_block_by_hash(&self, block_hash: BlockHash) -> Option { - self.tree.buffered_block_by_hash(block_hash) - } - fn buffered_header_by_hash(&self, block_hash: BlockHash) -> Option { self.tree.buffered_header_by_hash(block_hash) } - fn canonical_blocks(&self) -> BTreeMap { - self.tree.canonical_blocks() - } - fn is_canonical(&self, hash: BlockHash) -> Result { self.tree.is_canonical(hash) } @@ -745,10 +733,6 @@ where self.tree.canonical_tip() } - fn pending_blocks(&self) -> (BlockNumber, Vec) { - self.tree.pending_blocks() - } - fn pending_block_num_hash(&self) -> Option { self.tree.pending_block_num_hash() } From 76b32c8b5f3b9937ca507c75b4ba53cb0c83915e Mon Sep 17 00:00:00 2001 From: rakita Date: Wed, 29 May 2024 00:08:06 +0200 Subject: [PATCH 673/700] chore: bump revm/alloy (#8447) Co-authored-by: Matthias Seitz --- Cargo.lock | 155 ++++++++++-------- Cargo.toml | 35 ++-- crates/optimism/evm/src/l1.rs | 8 +- crates/primitives/src/revm/compat.rs | 4 +- crates/primitives/src/revm/env.rs | 3 - crates/rpc/rpc-testing-util/src/debug.rs | 4 +- crates/rpc/rpc/src/debug.rs | 2 +- crates/rpc/rpc/src/eth/api/transactions.rs | 16 +- crates/rpc/rpc/src/eth/error.rs | 3 - crates/rpc/rpc/src/eth/revm_utils.rs | 13 +- .../bundle_state_with_receipts.rs | 39 ++--- deny.toml | 1 + 12 files changed, 155 insertions(+), 128 deletions(-) 
diff --git a/Cargo.lock b/Cargo.lock index 727d632b87e80..d3d4dda04c13a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -133,12 +133,12 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" +source = "git+https://github.com/alloy-rs/alloy?rev=bd39117#bd391179fb4ebf64fe13484e44f2e432baf9b6c2" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "arbitrary", "c-kzg", "proptest", @@ -180,11 +180,11 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" +source = "git+https://github.com/alloy-rs/alloy?rev=bd39117#bd391179fb4ebf64fe13484e44f2e432baf9b6c2" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "arbitrary", "c-kzg", "derive_more", @@ -212,10 +212,10 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" +source = "git+https://github.com/alloy-rs/alloy?rev=bd39117#bd391179fb4ebf64fe13484e44f2e432baf9b6c2" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "serde", "serde_json", ] @@ -246,7 +246,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" +source = "git+https://github.com/alloy-rs/alloy?rev=bd39117#bd391179fb4ebf64fe13484e44f2e432baf9b6c2" dependencies = [ "alloy-primitives", "serde", @@ -258,16 +258,17 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" +source = "git+https://github.com/alloy-rs/alloy?rev=bd39117#bd391179fb4ebf64fe13484e44f2e432baf9b6c2" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-signer", "alloy-sol-types", "async-trait", + "auto_impl", "futures-utils-wasm", "thiserror", ] @@ -275,9 +276,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" +source = "git+https://github.com/alloy-rs/alloy?rev=bd39117#bd391179fb4ebf64fe13484e44f2e432baf9b6c2" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-primitives", "k256", "serde_json", @@ -317,14 +318,16 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" +source = "git+https://github.com/alloy-rs/alloy?rev=bd39117#bd391179fb4ebf64fe13484e44f2e432baf9b6c2" dependencies = [ - 
"alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-chains", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-rpc-types-trace", "alloy-transport", "alloy-transport-http", @@ -337,6 +340,7 @@ dependencies = [ "lru", "pin-project", "reqwest 0.12.4", + "serde", "serde_json", "tokio", "tracing", @@ -368,7 +372,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" +source = "git+https://github.com/alloy-rs/alloy?rev=bd39117#bd391179fb4ebf64fe13484e44f2e432baf9b6c2" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -388,14 +392,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" +source = "git+https://github.com/alloy-rs/alloy?rev=bd39117#bd391179fb4ebf64fe13484e44f2e432baf9b6c2" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-sol-types", 
"arbitrary", "itertools 0.12.1", @@ -428,19 +432,19 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" +source = "git+https://github.com/alloy-rs/alloy?rev=bd39117#bd391179fb4ebf64fe13484e44f2e432baf9b6c2" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "serde", ] [[package]] name = "alloy-rpc-types-beacon" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" +source = "git+https://github.com/alloy-rs/alloy?rev=bd39117#bd391179fb4ebf64fe13484e44f2e432baf9b6c2" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-primitives", "alloy-rpc-types-engine", "serde", @@ -451,14 +455,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" +source = "git+https://github.com/alloy-rs/alloy?rev=bd39117#bd391179fb4ebf64fe13484e44f2e432baf9b6c2" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", 
"jsonrpsee-types", "jsonwebtoken 9.3.0", "rand 0.8.5", @@ -469,11 +473,11 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" +source = "git+https://github.com/alloy-rs/alloy?rev=bd39117#bd391179fb4ebf64fe13484e44f2e432baf9b6c2" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "serde", "serde_json", ] @@ -481,7 +485,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" +source = "git+https://github.com/alloy-rs/alloy?rev=bd39117#bd391179fb4ebf64fe13484e44f2e432baf9b6c2" dependencies = [ "alloy-primitives", "serde", @@ -501,7 +505,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" +source = "git+https://github.com/alloy-rs/alloy?rev=bd39117#bd391179fb4ebf64fe13484e44f2e432baf9b6c2" dependencies = [ "alloy-primitives", "async-trait", @@ -514,9 +518,9 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" +source = "git+https://github.com/alloy-rs/alloy?rev=bd39117#bd391179fb4ebf64fe13484e44f2e432baf9b6c2" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-network", "alloy-primitives", "alloy-signer", @@ -603,7 +607,7 @@ dependencies = [ [[package]] 
name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" +source = "git+https://github.com/alloy-rs/alloy?rev=bd39117#bd391179fb4ebf64fe13484e44f2e432baf9b6c2" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -621,7 +625,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=64feb9b#64feb9bc51c8021ea08535694c44de84222f474e" +source = "git+https://github.com/alloy-rs/alloy?rev=bd39117#bd391179fb4ebf64fe13484e44f2e432baf9b6c2" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -2976,7 +2980,7 @@ dependencies = [ name = "exex-rollup" version = "0.0.0" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-rlp", "alloy-sol-types", "eyre", @@ -5462,6 +5466,18 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "p256" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2 0.10.8", +] + [[package]] name = "page_size" version = "0.6.0" @@ -5871,6 +5887,15 @@ dependencies = [ "syn 2.0.66", ] +[[package]] +name = "primeorder" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +dependencies = [ + "elliptic-curve", +] + [[package]] name = "primitive-types" version = "0.12.2" @@ -6592,9 +6617,9 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.7" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", 
- "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-primitives", "arbitrary", "bytes", @@ -6820,9 +6845,9 @@ dependencies = [ name = "reth-e2e-test-utils" version = "0.2.0-beta.7" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-network", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-signer", "alloy-signer-wallet", "eyre", @@ -7043,7 +7068,7 @@ dependencies = [ name = "reth-evm-ethereum" version = "0.2.0-beta.7" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "reth-ethereum-consensus", "reth-evm", "reth-primitives", @@ -7636,12 +7661,12 @@ name = "reth-primitives" version = "0.2.0-beta.7" dependencies = [ "alloy-chains", - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-trie", 
"arbitrary", "assert_matches", @@ -7743,7 +7768,7 @@ dependencies = [ name = "reth-revm" version = "0.2.0-beta.7" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "reth-consensus-common", "reth-execution-errors", "reth-primitives", @@ -7928,7 +7953,7 @@ name = "reth-rpc-types" version = "0.2.0-beta.7" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-rpc-types-anvil", "alloy-rpc-types-beacon", "alloy-rpc-types-engine", @@ -7949,7 +7974,7 @@ name = "reth-rpc-types-compat" version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "reth-primitives", "reth-rpc-types", "serde_json", @@ -8092,7 +8117,7 @@ dependencies = [ name = "reth-testing-utils" version = "0.2.0-beta.7" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "rand 0.8.5", "reth-primitives", "secp256k1 0.28.2", @@ -8212,8 +8237,7 @@ dependencies = [ [[package]] name = "revm" version = "9.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a2c336f9921588e50871c00024feb51a521eca50ce6d01494bb9c50f837c8ed" +source = "git+https://github.com/bluealloy/revm?rev=a28a543#a28a5439b9cfb7494cbd670da10cbedcfe6c5854" dependencies = [ "auto_impl", "cfg-if", @@ -8227,10 +8251,10 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=5a4fd5e#5a4fd5e394d8bdf1337ac076d0b5fde4f2dd617c" +source = 
"git+https://github.com/paradigmxyz/evm-inspectors?rev=ed5450e#ed5450e7169ce0237e791fed661688c55997a0ac" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bd39117)", "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", @@ -8245,8 +8269,7 @@ dependencies = [ [[package]] name = "revm-interpreter" version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a58182c7454179826f9dad2ca577661963092ce9d0fd0c9d682c1e9215a72e70" +source = "git+https://github.com/bluealloy/revm?rev=a28a543#a28a5439b9cfb7494cbd670da10cbedcfe6c5854" dependencies = [ "revm-primitives", "serde", @@ -8255,13 +8278,14 @@ dependencies = [ [[package]] name = "revm-precompile" version = "7.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc8af9aa737eef0509a50d9f3cc1a631557a00ef2e70a3aa8a75d9ee0ed275bb" +source = "git+https://github.com/bluealloy/revm?rev=a28a543#a28a5439b9cfb7494cbd670da10cbedcfe6c5854" dependencies = [ "aurora-engine-modexp", + "blst", "c-kzg", "k256", "once_cell", + "p256", "revm-primitives", "ripemd", "secp256k1 0.29.0", @@ -8272,8 +8296,7 @@ dependencies = [ [[package]] name = "revm-primitives" version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9bf5d465e64b697da6a111cb19e798b5b2ebb18e5faf2ad48e9e8d47c64add2" +source = "git+https://github.com/bluealloy/revm?rev=a28a543#a28a5439b9cfb7494cbd670da10cbedcfe6c5854" dependencies = [ "alloy-primitives", "auto_impl", diff --git a/Cargo.toml b/Cargo.toml index 946682343d177..a94a65c658660 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -307,11 +307,12 @@ reth-trie-parallel = { path = "crates/trie-parallel" } revm = { version = "9.0.0", features = [ "std", "secp256k1", + "blst", ], default-features = false } revm-primitives = { version = "4.0.0", features = [ "std", ], default-features = false } 
-revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "5a4fd5e" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "ed5450e" } # eth alloy-chains = "0.1.15" @@ -320,21 +321,21 @@ alloy-dyn-abi = "0.7.2" alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" alloy-trie = "0.4" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } -alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "bd39117" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "bd39117" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "bd39117" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "bd39117" } +alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "bd39117" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "bd39117" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "bd39117" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "bd39117", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "64feb9b" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } -alloy-signer-wallet = { git = 
"https://github.com/alloy-rs/alloy", rev = "64feb9b" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "bd39117" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "bd39117" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "bd39117" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "bd39117" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "bd39117" } # misc auto_impl = "1" @@ -434,3 +435,9 @@ proptest-derive = "0.4" serial_test = "3" similar-asserts = "1.5.0" test-fuzz = "5" + +[patch.crates-io] +revm = { git = "https://github.com/bluealloy/revm", rev = "a28a543" } +revm-interpreter = { git = "https://github.com/bluealloy/revm", rev = "a28a543" } +revm-precompile = { git = "https://github.com/bluealloy/revm", rev = "a28a543" } +revm-primitives = { git = "https://github.com/bluealloy/revm", rev = "a28a543" } \ No newline at end of file diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index bd0313087b089..62b87678b4eb4 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -190,7 +190,9 @@ impl RethL1BlockInfo for L1BlockInfo { return Ok(U256::ZERO) } - let spec_id = if chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, timestamp) { + let spec_id = if chain_spec.is_fork_active_at_timestamp(Hardfork::Fjord, timestamp) { + SpecId::FJORD + } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, timestamp) { SpecId::ECOTONE } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, timestamp) { SpecId::REGOLITH @@ -211,7 +213,9 @@ impl RethL1BlockInfo for L1BlockInfo { timestamp: u64, input: &[u8], ) -> Result { - let spec_id = if chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, timestamp) { 
+ let spec_id = if chain_spec.is_fork_active_at_timestamp(Hardfork::Fjord, timestamp) { + SpecId::FJORD + } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, timestamp) { SpecId::REGOLITH } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Bedrock, timestamp) { SpecId::BEDROCK diff --git a/crates/primitives/src/revm/compat.rs b/crates/primitives/src/revm/compat.rs index a153e86e9e734..705fc188065ca 100644 --- a/crates/primitives/src/revm/compat.rs +++ b/crates/primitives/src/revm/compat.rs @@ -36,7 +36,5 @@ pub fn calculate_intrinsic_gas_after_merge( is_shanghai: bool, ) -> u64 { let spec_id = if is_shanghai { SpecId::SHANGHAI } else { SpecId::MERGE }; - // TODO(EOF) - let initcodes = &[]; - validate_initial_tx_gas(spec_id, input, kind.is_create(), access_list, initcodes) + validate_initial_tx_gas(spec_id, input, kind.is_create(), access_list) } diff --git a/crates/primitives/src/revm/env.rs b/crates/primitives/src/revm/env.rs index 49b9f609c2b20..1691e09e7d7ad 100644 --- a/crates/primitives/src/revm/env.rs +++ b/crates/primitives/src/revm/env.rs @@ -171,9 +171,6 @@ pub fn fill_tx_env_with_beacon_root_contract_call(env: &mut Env, parent_beacon_b // enveloped tx size. 
enveloped_tx: Some(Bytes::default()), }, - // TODO(EOF) - eof_initcodes: vec![], - eof_initcodes_hashed: Default::default(), }; // ensure the block gas limit is >= the tx diff --git a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs index bdb549c30370f..f409c3ce3d971 100644 --- a/crates/rpc/rpc-testing-util/src/debug.rs +++ b/crates/rpc/rpc-testing-util/src/debug.rs @@ -413,8 +413,8 @@ mod tests { let url = parse_env_url("RETH_RPC_TEST_NODE_URL").unwrap(); let client = HttpClientBuilder::default().build(url).unwrap(); - let opts = - GethDebugTracingOptions::default().call_config(CallConfig::default().only_top_call()); + let opts = GethDebugTracingOptions::default() + .with_call_config(CallConfig::default().only_top_call()); let mut stream = client.debug_trace_transactions_in_block(block, opts).await.unwrap(); while let Some(res) = stream.next().await { diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index ebc52877d848f..f5c9ed2fe4d4e 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -519,7 +519,7 @@ where env: EnvWithHandlerCfg, db: &mut CacheDB>, transaction_context: Option, - ) -> EthResult<(GethTrace, revm_primitives::State)> { + ) -> EthResult<(GethTrace, revm_primitives::EvmState)> { let GethDebugTracingOptions { config, tracer, tracer_config, .. 
} = opts; if let Some(tracer) = tracer { diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 538aa606b8168..6e01ce3a2c57b 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -39,8 +39,8 @@ use reth_transaction_pool::{TransactionOrigin, TransactionPool}; use revm::{ db::CacheDB, primitives::{ - db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, - ResultAndState, SpecId, State, + db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, EvmState, + ExecutionResult, ResultAndState, SpecId, }, GetInspector, Inspector, }; @@ -425,7 +425,7 @@ pub trait EthTransactions: Send + Sync { TransactionInfo, TracingInspector, ExecutionResult, - &'a State, + &'a EvmState, &'a StateCacheDB, ) -> EthResult + Send @@ -443,7 +443,7 @@ pub trait EthTransactions: Send + Sync { /// 2. configures the EVM evn /// 3. loops over all transactions and executes them /// 4. 
calls the callback with the transaction info, the execution result, the changed state - /// _after_ the transaction [State] and the database that points to the state + /// _after_ the transaction [EvmState] and the database that points to the state /// right _before_ the transaction, in other words the state the transaction was /// executed on: `changed_state = tx(cached_state)` /// @@ -462,7 +462,7 @@ pub trait EthTransactions: Send + Sync { TransactionInfo, Insp, ExecutionResult, - &'a State, + &'a EvmState, &'a StateCacheDB, ) -> EthResult + Send @@ -491,7 +491,7 @@ pub trait EthTransactions: Send + Sync { TransactionInfo, TracingInspector, ExecutionResult, - &'a State, + &'a EvmState, &'a StateCacheDB, ) -> EthResult + Send @@ -529,7 +529,7 @@ pub trait EthTransactions: Send + Sync { TransactionInfo, Insp, ExecutionResult, - &'a State, + &'a EvmState, &'a StateCacheDB, ) -> EthResult + Send @@ -1278,7 +1278,7 @@ where TransactionInfo, Insp, ExecutionResult, - &'a State, + &'a EvmState, &'a StateCacheDB, ) -> EthResult + Send diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 26a4efa48f657..ff12130b19f82 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -487,9 +487,6 @@ impl From for RpcInvalidTransactionError { OptimismInvalidTransactionError::HaltedDepositPostRegolith, ), // TODO(EOF) - InvalidTransaction::EofInitcodesNotSupported => todo!("EOF"), - InvalidTransaction::EofInitcodesNumberLimit => todo!("EOF"), - InvalidTransaction::EofInitcodesSizeLimit => todo!("EOF"), InvalidTransaction::EofCrateShouldHaveToAddress => todo!("EOF"), } } diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index e0cb8e14d9cf4..fcd132bfec1af 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -272,9 +272,6 @@ pub(crate) fn create_txn_env( max_fee_per_blob_gas, #[cfg(feature = "optimism")] optimism: OptimismFields { enveloped_tx: 
Some(Bytes::new()), ..Default::default() }, - // TODO(EOF) - eof_initcodes: Default::default(), - eof_initcodes_hashed: Default::default(), }; Ok(env) @@ -540,13 +537,19 @@ where account, new_account_state .into_iter() - .map(|(slot, value)| (U256::from_be_bytes(slot.0), value)) + .map(|(slot, value)| { + (U256::from_be_bytes(slot.0), U256::from_be_bytes(value.0)) + }) .collect(), )?; } (None, Some(account_state_diff)) => { for (slot, value) in account_state_diff { - db.insert_account_storage(account, U256::from_be_bytes(slot.0), value)?; + db.insert_account_storage( + account, + U256::from_be_bytes(slot.0), + U256::from_be_bytes(value.0), + )?; } } }; diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index 37cdc9484fc20..71992152ed4fd 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -87,7 +87,7 @@ mod tests { BundleState, EmptyDB, }, primitives::{ - Account as RevmAccount, AccountInfo as RevmAccountInfo, AccountStatus, StorageSlot, + Account as RevmAccount, AccountInfo as RevmAccountInfo, AccountStatus, EvmStorageSlot, }, DatabaseCommit, State, }; @@ -260,11 +260,11 @@ mod tests { storage: HashMap::from([ ( U256::from(0), - StorageSlot { present_value: U256::from(1), ..Default::default() }, + EvmStorageSlot { present_value: U256::from(1), ..Default::default() }, ), ( U256::from(1), - StorageSlot { present_value: U256::from(2), ..Default::default() }, + EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, ), ]), }, @@ -277,9 +277,9 @@ mod tests { // 0x01 => 1 => 2 storage: HashMap::from([( U256::from(1), - StorageSlot { + EvmStorageSlot { present_value: U256::from(2), - previous_or_original_value: U256::from(1), + original_value: U256::from(1), }, )]), }, @@ -440,11 +440,11 @@ mod tests { storage: HashMap::from([ ( U256::ZERO, - 
StorageSlot { present_value: U256::from(1), ..Default::default() }, + EvmStorageSlot { present_value: U256::from(1), ..Default::default() }, ), ( U256::from(1), - StorageSlot { present_value: U256::from(2), ..Default::default() }, + EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, ), ]), }, @@ -470,10 +470,7 @@ mod tests { // 0x00 => 1 => 2 storage: HashMap::from([( U256::ZERO, - StorageSlot { - previous_or_original_value: U256::from(1), - present_value: U256::from(2), - }, + EvmStorageSlot { original_value: U256::from(1), present_value: U256::from(2) }, )]), }, )])); @@ -513,15 +510,15 @@ mod tests { storage: HashMap::from([ ( U256::ZERO, - StorageSlot { present_value: U256::from(2), ..Default::default() }, + EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, ), ( U256::from(2), - StorageSlot { present_value: U256::from(4), ..Default::default() }, + EvmStorageSlot { present_value: U256::from(4), ..Default::default() }, ), ( U256::from(6), - StorageSlot { present_value: U256::from(6), ..Default::default() }, + EvmStorageSlot { present_value: U256::from(6), ..Default::default() }, ), ]), }, @@ -556,7 +553,7 @@ mod tests { // 0x00 => 0 => 2 storage: HashMap::from([( U256::ZERO, - StorageSlot { present_value: U256::from(2), ..Default::default() }, + EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, )]), }, )])); @@ -587,7 +584,7 @@ mod tests { // 0x00 => 0 => 9 storage: HashMap::from([( U256::ZERO, - StorageSlot { present_value: U256::from(9), ..Default::default() }, + EvmStorageSlot { present_value: U256::from(9), ..Default::default() }, )]), }, )])); @@ -748,11 +745,11 @@ mod tests { storage: HashMap::from([ ( U256::ZERO, - StorageSlot { present_value: U256::from(1), ..Default::default() }, + EvmStorageSlot { present_value: U256::from(1), ..Default::default() }, ), ( U256::from(1), - StorageSlot { present_value: U256::from(2), ..Default::default() }, + EvmStorageSlot { present_value: U256::from(2), 
..Default::default() }, ), ]), }, @@ -796,7 +793,7 @@ mod tests { // 0x01 => 0 => 5 storage: HashMap::from([( U256::from(1), - StorageSlot { present_value: U256::from(5), ..Default::default() }, + EvmStorageSlot { present_value: U256::from(5), ..Default::default() }, )]), }, )])); @@ -949,7 +946,7 @@ mod tests { info: into_revm_acc(account2.0), storage: HashMap::from_iter([( slot2, - StorageSlot::new_changed(account2_slot2_old_value, account2_slot2_new_value), + EvmStorageSlot::new_changed(account2_slot2_old_value, account2_slot2_new_value), )]), }, )])); @@ -1017,7 +1014,7 @@ mod tests { info: into_revm_acc(account1_new), storage: HashMap::from_iter([( slot20, - StorageSlot::new_changed(U256::ZERO, account1_slot20_value), + EvmStorageSlot::new_changed(U256::ZERO, account1_slot20_value), )]), }, )])); diff --git a/deny.toml b/deny.toml index 99b2c8d4f97f5..c6532bbf57aa8 100644 --- a/deny.toml +++ b/deny.toml @@ -92,6 +92,7 @@ allow-git = [ # TODO: remove, see ./Cargo.toml "https://github.com/alloy-rs/alloy", "https://github.com/foundry-rs/block-explorers", + "https://github.com/bluealloy/revm", "https://github.com/paradigmxyz/evm-inspectors", "https://github.com/sigp/discv5", ] From 7843ce9949474f028669ab6dfc24c41cfaf97863 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 May 2024 00:34:22 +0200 Subject: [PATCH 674/700] chore: rm reth-interfaces from payload basic (#8450) --- Cargo.lock | 1 - crates/payload/basic/Cargo.toml | 1 - crates/payload/basic/src/lib.rs | 3 +-- 3 files changed, 1 insertion(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d3d4dda04c13a..6159cfd3d6b0d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6509,7 +6509,6 @@ dependencies = [ "futures-util", "metrics", "reth-engine-primitives", - "reth-interfaces", "reth-metrics", "reth-payload-builder", "reth-primitives", diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index dd2b824192ea1..b5c795815cff1 100644 --- a/crates/payload/basic/Cargo.toml +++ 
b/crates/payload/basic/Cargo.toml @@ -19,7 +19,6 @@ reth-transaction-pool.workspace = true reth-provider.workspace = true reth-payload-builder.workspace = true reth-tasks.workspace = true -reth-interfaces.workspace = true reth-engine-primitives.workspace = true # ethereum diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 6529710ca4f07..71945b83ab606 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -12,7 +12,6 @@ use crate::metrics::PayloadBuilderMetrics; use futures_core::ready; use futures_util::FutureExt; use reth_engine_primitives::{BuiltPayload, PayloadBuilderAttributes}; -use reth_interfaces::RethResult; use reth_payload_builder::{ database::CachedReads, error::PayloadBuilderError, KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJobGenerator, @@ -823,7 +822,7 @@ pub fn commit_withdrawals>( chain_spec: &ChainSpec, timestamp: u64, withdrawals: Withdrawals, -) -> RethResult { +) -> Result { if !chain_spec.is_shanghai_active_at_timestamp(timestamp) { return Ok(WithdrawalsOutcome::pre_shanghai()) } From d777d5f2705d416e1e42c883c744483df1de81c1 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 May 2024 00:34:36 +0200 Subject: [PATCH 675/700] chore: rm reth-interfaces from node-core (#8449) --- Cargo.lock | 3 ++- crates/node-core/Cargo.toml | 3 ++- crates/node-core/src/args/database.rs | 5 ++--- crates/node-core/src/node_config.rs | 13 +++++++------ crates/node-core/src/utils.rs | 4 ++-- 5 files changed, 15 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6159cfd3d6b0d..845a1a20765fd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7471,10 +7471,10 @@ dependencies = [ "reth-discv5", "reth-engine-primitives", "reth-fs-util", - "reth-interfaces", "reth-metrics", "reth-net-nat", "reth-network", + "reth-network-p2p", "reth-primitives", "reth-provider", "reth-rpc", @@ -7482,6 +7482,7 @@ dependencies = [ "reth-rpc-builder", "reth-rpc-types", "reth-rpc-types-compat", + 
"reth-storage-errors", "reth-tasks", "reth-tracing", "reth-transaction-pool", diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index 787c68c9d1b66..6b8a35c7994ab 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -15,9 +15,10 @@ workspace = true reth-primitives.workspace = true reth-fs-util.workspace = true reth-db = { workspace = true, features = ["mdbx"] } -reth-interfaces = { workspace = true, features = ["clap"] } +reth-storage-errors = { workspace = true, features = ["clap"] } reth-provider.workspace = true reth-network = { workspace = true, features = ["serde"] } +reth-network-p2p.workspace = true reth-rpc-builder.workspace = true reth-rpc.workspace = true reth-rpc-types.workspace = true diff --git a/crates/node-core/src/args/database.rs b/crates/node-core/src/args/database.rs index 1c14c99db6eb6..77a458cb3492a 100644 --- a/crates/node-core/src/args/database.rs +++ b/crates/node-core/src/args/database.rs @@ -1,9 +1,8 @@ //! clap [Args](clap::Args) for database configuration -use clap::Args; -use reth_interfaces::db::LogLevel; - use crate::version::default_client_version; +use clap::Args; +use reth_storage_errors::db::LogLevel; /// Parameters for database configuration #[derive(Debug, Args, PartialEq, Eq, Default, Clone, Copy)] diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 5cb251f8afe0a..02b3ee985c12e 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -14,8 +14,8 @@ use metrics_exporter_prometheus::PrometheusHandle; use once_cell::sync::Lazy; use reth_config::{config::PruneConfig, Config}; use reth_db::{database::Database, database_metrics::DatabaseMetrics}; -use reth_interfaces::{p2p::headers::client::HeadersClient, RethResult}; use reth_network::{NetworkBuilder, NetworkConfig, NetworkManager}; +use reth_network_p2p::headers::client::HeadersClient; use reth_primitives::{ constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, 
kzg::KzgSettings, stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, Head, SealedHeader, B256, MAINNET, @@ -24,6 +24,7 @@ use reth_provider::{ providers::StaticFileProvider, BlockHashReader, BlockNumReader, HeaderProvider, ProviderFactory, StageCheckpointReader, }; +use reth_storage_errors::provider::ProviderResult; use reth_tasks::TaskExecutor; use secp256k1::SecretKey; use std::{ @@ -366,7 +367,7 @@ impl NodeConfig { /// Fetches the head block from the database. /// /// If the database is empty, returns the genesis block. - pub fn lookup_head(&self, factory: ProviderFactory) -> RethResult { + pub fn lookup_head(&self, factory: ProviderFactory) -> ProviderResult { let provider = factory.provider()?; let head = provider.get_stage_checkpoint(StageId::Finish)?.unwrap_or_default().block_number; @@ -401,7 +402,7 @@ impl NodeConfig { provider: Provider, client: Client, tip: B256, - ) -> RethResult + ) -> ProviderResult where Provider: HeaderProvider, Client: HeadersClient, @@ -414,7 +415,7 @@ impl NodeConfig { return Ok(header.number) } - Ok(self.fetch_tip_from_network(client, tip.into()).await?.number) + Ok(self.fetch_tip_from_network(client, tip.into()).await.number) } /// Attempt to look up the block with the given number and return the header. 
@@ -424,7 +425,7 @@ impl NodeConfig { &self, client: Client, tip: BlockHashOrNumber, - ) -> RethResult + ) -> SealedHeader where Client: HeadersClient, { @@ -434,7 +435,7 @@ impl NodeConfig { match get_single_header(&client, tip).await { Ok(tip_header) => { info!(target: "reth::cli", ?tip, "Successfully fetched tip"); - return Ok(tip_header) + return tip_header } Err(error) => { fetch_failures += 1; diff --git a/crates/node-core/src/utils.rs b/crates/node-core/src/utils.rs index 13ed6a2471ef4..eba840b6ec88e 100644 --- a/crates/node-core/src/utils.rs +++ b/crates/node-core/src/utils.rs @@ -4,12 +4,12 @@ use eyre::Result; use reth_consensus_common::validation::validate_block_pre_execution; use reth_fs_util as fs; -use reth_interfaces::p2p::{ +use reth_network::NetworkManager; +use reth_network_p2p::{ bodies::client::BodiesClient, headers::client::{HeadersClient, HeadersRequest}, priority::Priority, }; -use reth_network::NetworkManager; use reth_primitives::{BlockHashOrNumber, ChainSpec, HeadersDirection, SealedBlock, SealedHeader}; use reth_provider::BlockReader; use reth_rpc_types::engine::{JwtError, JwtSecret}; From 2bfe5a9f98586221a8aee40da1208150b8861a4e Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Wed, 29 May 2024 11:42:25 +0300 Subject: [PATCH 676/700] perf(mdbx): propagate `-C target-cpu` (#8452) --- crates/storage/libmdbx-rs/mdbx-sys/build.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/crates/storage/libmdbx-rs/mdbx-sys/build.rs b/crates/storage/libmdbx-rs/mdbx-sys/build.rs index c561cd3af02d6..5f82d02b4c54e 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/build.rs +++ b/crates/storage/libmdbx-rs/mdbx-sys/build.rs @@ -83,22 +83,27 @@ fn main() { let mut cc_builder = cc::Build::new(); cc_builder.flag_if_supported("-Wno-unused-parameter").flag_if_supported("-Wuninitialized"); - #[cfg(not(target_os = "linux"))] - cc_builder.flag_if_supported("-Wbad-function-cast"); + if 
env::var("CARGO_CFG_TARGET_OS").unwrap() != "linux" { + cc_builder.flag_if_supported("-Wbad-function-cast"); + } let flags = format!("{:?}", cc_builder.get_compiler().cflags_env()); cc_builder.define("MDBX_BUILD_FLAGS", flags.as_str()).define("MDBX_TXN_CHECKOWNER", "0"); // Enable debugging on debug builds #[cfg(debug_assertions)] - { - cc_builder.define("MDBX_DEBUG", "1").define("MDBX_ENABLE_PROFGC", "1"); - } + cc_builder.define("MDBX_DEBUG", "1").define("MDBX_ENABLE_PROFGC", "1"); // Disables debug logging on optimized builds #[cfg(not(debug_assertions))] + cc_builder.define("MDBX_DEBUG", "0").define("NDEBUG", None); + + // Propagate `-C target-cpu=native` + let rustflags = env::var("CARGO_ENCODED_RUSTFLAGS").unwrap(); + if rustflags.contains("target-cpu=native") && + env::var("CARGO_CFG_TARGET_ENV").unwrap() != "msvc" { - cc_builder.define("MDBX_DEBUG", "0").define("NDEBUG", None); + cc_builder.flag("-march=native"); } cc_builder.file(mdbx.join("mdbx.c")).compile("libmdbx.a"); From 25e2b24257182e78398dd9b7261f9e99c818e68d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 May 2024 11:28:25 +0200 Subject: [PATCH 677/700] chore: rm reth-interfaces from provider (#8457) --- Cargo.lock | 4 +++- crates/storage/provider/Cargo.toml | 5 +++-- crates/storage/provider/src/providers/database/mod.rs | 10 +++++----- .../provider/src/providers/database/provider.rs | 2 +- .../storage/provider/src/providers/static_file/mod.rs | 2 +- crates/storage/provider/src/traits/header_sync_gap.rs | 5 ++--- 6 files changed, 15 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 845a1a20765fd..e0477cb292643 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7723,15 +7723,17 @@ dependencies = [ "reth-blockchain-tree-api", "reth-codecs", "reth-db", + "reth-errors", "reth-evm", "reth-execution-types", "reth-fs-util", - "reth-interfaces", "reth-metrics", + "reth-network-p2p", "reth-nippy-jar", "reth-primitives", "reth-storage-api", "reth-storage-errors", + 
"reth-testing-utils", "reth-trie", "revm", "strum", diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 1bc9ff7ce4aab..c9482ce58f764 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -17,9 +17,10 @@ reth-blockchain-tree-api.workspace = true reth-execution-types.workspace = true reth-primitives.workspace = true reth-fs-util.workspace = true +reth-errors.workspace = true reth-storage-errors.workspace = true -reth-interfaces.workspace = true reth-storage-api.workspace = true +reth-network-p2p.workspace = true reth-db.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true @@ -59,7 +60,7 @@ rayon.workspace = true reth-db = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true, features = ["arbitrary", "test-utils"] } reth-trie = { workspace = true, features = ["test-utils"] } -reth-interfaces = { workspace = true, features = ["test-utils"] } +reth-testing-utils.workspace = true alloy-rlp.workspace = true parking_lot.workspace = true diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 354b5693701b7..f7ab2e574928f 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -12,8 +12,8 @@ use reth_db::{ database::Database, init_db, mdbx::DatabaseArguments, models::StoredBlockBodyIndices, DatabaseEnv, }; +use reth_errors::{RethError, RethResult}; use reth_evm::ConfigureEvmEnv; -use reth_interfaces::{RethError, RethResult}; use reth_primitives::{ stage::{StageCheckpoint, StageId}, Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, ChainInfo, @@ -599,15 +599,15 @@ mod tests { tables, test_utils::{create_test_static_files_dir, ERROR_TEMPDIR}, }; - use reth_interfaces::test_utils::{ - generators, - generators::{random_block, random_header}, - }; 
use reth_primitives::{ hex_literal::hex, ChainSpecBuilder, PruneMode, PruneModes, SealedBlock, StaticFileSegment, TxNumber, B256, U256, }; use reth_storage_errors::provider::ProviderError; + use reth_testing_utils::{ + generators, + generators::{random_block, random_header}, + }; use std::{ops::RangeInclusive, sync::Arc}; use tokio::sync::watch; diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 3a571bbd0dc2b..0a0d318e610c1 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -28,7 +28,7 @@ use reth_db::{ BlockNumberList, DatabaseError, }; use reth_evm::ConfigureEvmEnv; -use reth_interfaces::p2p::headers::downloader::SyncTarget; +use reth_network_p2p::headers::downloader::SyncTarget; use reth_primitives::{ keccak256, revm::{config::revm_spec, env::fill_block_env}, diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index cb9f879dde6af..9243afff9716b 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -65,8 +65,8 @@ mod tests { transaction::{DbTx, DbTxMut}, CanonicalHeaders, HeaderNumbers, HeaderTerminalDifficulties, Headers, RawTable, }; - use reth_interfaces::test_utils::generators::{self, random_header_range}; use reth_primitives::{static_file::find_fixed_range, BlockNumber, B256, U256}; + use reth_testing_utils::generators::{self, random_header_range}; #[test] fn test_snap() { diff --git a/crates/storage/provider/src/traits/header_sync_gap.rs b/crates/storage/provider/src/traits/header_sync_gap.rs index 8a60eb15a77d3..54556101aec5e 100644 --- a/crates/storage/provider/src/traits/header_sync_gap.rs +++ b/crates/storage/provider/src/traits/header_sync_gap.rs @@ -1,5 +1,4 @@ -use auto_impl::auto_impl; -use 
reth_interfaces::p2p::headers::downloader::SyncTarget; +use reth_network_p2p::headers::downloader::SyncTarget; use reth_primitives::{BlockHashOrNumber, BlockNumber, SealedHeader, B256}; use reth_storage_errors::provider::ProviderResult; use tokio::sync::watch; @@ -37,7 +36,7 @@ impl HeaderSyncGap { } /// Client trait for determining the current headers sync gap. -#[auto_impl(&, Arc)] +#[auto_impl::auto_impl(&, Arc)] pub trait HeaderSyncGapProvider: Send + Sync { /// Find a current sync gap for the headers depending on the [HeaderSyncMode] and the last /// uninterrupted block number. Last uninterrupted block represents the block number before From 97cb38354395f3b425f383aa7bdab39b35735aad Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 May 2024 11:28:52 +0200 Subject: [PATCH 678/700] chore: rm reth-interfaces from stages (#8455) --- Cargo.lock | 4 +++ crates/stages/Cargo.toml | 24 ++++++++++------- crates/stages/src/lib.rs | 2 +- crates/stages/src/sets.rs | 4 +-- crates/stages/src/stages/bodies.rs | 26 ++++++++----------- crates/stages/src/stages/execution.rs | 2 +- crates/stages/src/stages/finish.rs | 6 ++--- crates/stages/src/stages/hashing_account.rs | 8 +++--- crates/stages/src/stages/hashing_storage.rs | 8 +++--- crates/stages/src/stages/headers.rs | 10 +++---- .../src/stages/index_account_history.rs | 6 ++--- .../src/stages/index_storage_history.rs | 6 ++--- crates/stages/src/stages/merkle.rs | 10 +++---- crates/stages/src/stages/mod.rs | 2 +- crates/stages/src/stages/sender_recovery.rs | 8 +++--- crates/stages/src/stages/tx_lookup.rs | 8 +++--- crates/stages/src/test_utils/runner.rs | 2 +- crates/stages/src/test_utils/test_db.rs | 3 ++- 18 files changed, 71 insertions(+), 68 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e0477cb292643..8ce071a3992e7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8004,14 +8004,18 @@ dependencies = [ "reth-etl", "reth-evm", "reth-evm-ethereum", + "reth-execution-errors", "reth-exex", "reth-interfaces", + 
"reth-network-p2p", "reth-network-types", "reth-primitives", "reth-provider", "reth-revm", "reth-stages-api", "reth-static-file", + "reth-storage-errors", + "reth-testing-utils", "reth-trie", "serde_json", "tempfile", diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index ef91b2be2371a..c78d10488e93f 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -13,19 +13,22 @@ workspace = true [dependencies] # reth -reth-exex.workspace = true -reth-primitives.workspace = true -reth-interfaces.workspace = true -reth-db.workspace = true reth-codecs.workspace = true -reth-provider.workspace = true -reth-trie = { workspace = true, features = ["metrics"] } -reth-etl.workspace = true reth-config.workspace = true -reth-stages-api = { workspace = true, features = ["test-utils"] } reth-consensus.workspace = true +reth-db.workspace = true +reth-etl.workspace = true reth-evm.workspace = true +reth-exex.workspace = true +reth-network-p2p.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true +reth-storage-errors.workspace = true reth-revm.workspace = true +reth-stages-api = { workspace = true, features = ["test-utils"] } +reth-trie = { workspace = true, features = ["metrics"] } + +reth-testing-utils = { workspace = true, optional = true } # async tokio = { workspace = true, features = ["sync"] } @@ -46,11 +49,13 @@ tempfile = { workspace = true, optional = true } reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } reth-db = { workspace = true, features = ["test-utils", "mdbx"] } reth-evm-ethereum.workspace = true +reth-execution-errors.workspace = true reth-interfaces = { workspace = true, features = ["test-utils"] } reth-consensus = { workspace = true, features = ["test-utils"] } reth-downloaders.workspace = true reth-revm.workspace = true reth-static-file.workspace = true +reth-testing-utils.workspace = true reth-trie = { workspace = true, features = ["test-utils"] } reth-provider = { workspace 
= true, features = ["test-utils"] } reth-network-types.workspace = true @@ -78,10 +83,11 @@ pprof = { workspace = true, features = [ [features] test-utils = [ - "reth-interfaces/test-utils", + "reth-network-p2p/test-utils", "reth-db/test-utils", "reth-provider/test-utils", "reth-stages-api/test-utils", + "dep:reth-testing-utils", "dep:tempfile", ] diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index 370dd18aca6d5..37b8d0f331eb4 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -15,7 +15,7 @@ //! # use std::sync::Arc; //! # use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; //! # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloaderBuilder; -//! # use reth_interfaces::test_utils::{TestBodiesClient, TestHeadersClient}; +//! # use reth_network_p2p::test_utils::{TestBodiesClient, TestHeadersClient}; //! # use reth_evm_ethereum::execute::EthExecutorProvider; //! # use reth_primitives::{MAINNET, B256, PruneModes}; //! # use reth_network_types::PeerId; diff --git a/crates/stages/src/sets.rs b/crates/stages/src/sets.rs index e8257047e5825..1f88b29aad646 100644 --- a/crates/stages/src/sets.rs +++ b/crates/stages/src/sets.rs @@ -47,9 +47,7 @@ use reth_config::config::StageConfig; use reth_consensus::Consensus; use reth_db::database::Database; use reth_evm::execute::BlockExecutorProvider; -use reth_interfaces::p2p::{ - bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, -}; +use reth_network_p2p::{bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader}; use reth_primitives::PruneModes; use reth_provider::{HeaderSyncGapProvider, HeaderSyncMode}; use std::sync::Arc; diff --git a/crates/stages/src/stages/bodies.rs b/crates/stages/src/stages/bodies.rs index cc5a291fed50c..19d9cdcf883ee 100644 --- a/crates/stages/src/stages/bodies.rs +++ b/crates/stages/src/stages/bodies.rs @@ -13,10 +13,7 @@ use reth_db::{ tables, transaction::DbTxMut, }; -use reth_interfaces::{ - 
p2p::bodies::{downloader::BodyDownloader, response::BlockResponse}, - provider::ProviderResult, -}; +use reth_network_p2p::bodies::{downloader::BodyDownloader, response::BlockResponse}; use reth_primitives::{ stage::{EntitiesCheckpoint, StageCheckpoint, StageId}, StaticFileSegment, TxNumber, @@ -28,6 +25,7 @@ use reth_provider::{ use reth_stages_api::{ExecInput, ExecOutput, StageError, UnwindInput, UnwindOutput}; use reth_stages_api::Stage; +use reth_storage_errors::provider::ProviderResult; // TODO(onbjerg): Metrics and events (gradual status for e.g. CLI) /// The body stage downloads block bodies. @@ -635,18 +633,12 @@ mod tests { transaction::{DbTx, DbTxMut}, DatabaseEnv, }; - use reth_interfaces::{ - p2p::{ - bodies::{ - downloader::{BodyDownloader, BodyDownloaderResult}, - response::BlockResponse, - }, - error::DownloadResult, - }, - test_utils::{ - generators, - generators::{random_block_range, random_signed_tx}, + use reth_network_p2p::{ + bodies::{ + downloader::{BodyDownloader, BodyDownloaderResult}, + response::BlockResponse, }, + error::DownloadResult, }; use reth_primitives::{ BlockBody, BlockHash, BlockNumber, Header, SealedBlock, SealedHeader, @@ -657,6 +649,10 @@ mod tests { StaticFileProviderFactory, TransactionsProvider, }; use reth_stages_api::{ExecInput, ExecOutput, UnwindInput}; + use reth_testing_utils::{ + generators, + generators::{random_block_range, random_signed_tx}, + }; use crate::{ stages::bodies::BodyStage, diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 2c03e34a434fa..b7057ab501619 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -655,7 +655,7 @@ mod tests { use assert_matches::assert_matches; use reth_db::{models::AccountBeforeTx, transaction::DbTxMut}; use reth_evm_ethereum::execute::EthExecutorProvider; - use reth_interfaces::executor::BlockValidationError; + use reth_execution_errors::BlockValidationError; use reth_primitives::{ 
address, hex_literal::hex, keccak256, stage::StageUnitCheckpoint, Account, Address, Bytecode, ChainSpecBuilder, PruneMode, ReceiptsLogPruneConfig, SealedBlock, StorageEntry, diff --git a/crates/stages/src/stages/finish.rs b/crates/stages/src/stages/finish.rs index c7b2f5a8efaec..cb906396078ba 100644 --- a/crates/stages/src/stages/finish.rs +++ b/crates/stages/src/stages/finish.rs @@ -40,12 +40,12 @@ mod tests { stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError, TestStageDB, UnwindStageTestRunner, }; - use reth_interfaces::test_utils::{ + use reth_primitives::SealedHeader; + use reth_provider::providers::StaticFileWriter; + use reth_testing_utils::{ generators, generators::{random_header, random_header_range}, }; - use reth_primitives::SealedHeader; - use reth_provider::providers::StaticFileWriter; stage_test_suite_ext!(FinishTestRunner, finish); diff --git a/crates/stages/src/stages/hashing_account.rs b/crates/stages/src/stages/hashing_account.rs index 6ae7fc5221b94..3dcb4cd3b3928 100644 --- a/crates/stages/src/stages/hashing_account.rs +++ b/crates/stages/src/stages/hashing_account.rs @@ -8,7 +8,6 @@ use reth_db::{ RawKey, RawTable, RawValue, }; use reth_etl::Collector; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{ keccak256, stage::{AccountHashingCheckpoint, EntitiesCheckpoint, StageCheckpoint, StageId}, @@ -16,6 +15,7 @@ use reth_primitives::{ }; use reth_provider::{AccountExtReader, DatabaseProviderRW, HashingWriter, StatsReader}; use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; +use reth_storage_errors::provider::ProviderResult; use std::{ fmt::Debug, ops::{Range, RangeInclusive}, @@ -65,12 +65,12 @@ impl AccountHashingStage { opts: SeedOpts, ) -> Result, StageError> { use reth_db::models::AccountBeforeTx; - use reth_interfaces::test_utils::{ + use reth_primitives::U256; + use reth_provider::providers::StaticFileWriter; + use reth_testing_utils::{ generators, 
generators::{random_block_range, random_eoa_accounts}, }; - use reth_primitives::U256; - use reth_provider::providers::StaticFileWriter; let mut rng = generators::rng(); diff --git a/crates/stages/src/stages/hashing_storage.rs b/crates/stages/src/stages/hashing_storage.rs index 97f9154c3829b..0ab52f32fcfec 100644 --- a/crates/stages/src/stages/hashing_storage.rs +++ b/crates/stages/src/stages/hashing_storage.rs @@ -10,7 +10,6 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, }; use reth_etl::Collector; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{ keccak256, stage::{EntitiesCheckpoint, StageCheckpoint, StageId, StorageHashingCheckpoint}, @@ -18,6 +17,7 @@ use reth_primitives::{ }; use reth_provider::{DatabaseProviderRW, HashingWriter, StatsReader, StorageReader}; use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; +use reth_storage_errors::provider::ProviderResult; use std::{ fmt::Debug, sync::mpsc::{self, Receiver}, @@ -223,12 +223,12 @@ mod tests { cursor::{DbCursorRW, DbDupCursorRO}, models::StoredBlockBodyIndices, }; - use reth_interfaces::test_utils::{ + use reth_primitives::{Address, SealedBlock, U256}; + use reth_provider::providers::StaticFileWriter; + use reth_testing_utils::{ generators, generators::{random_block_range, random_contract_account_range}, }; - use reth_primitives::{Address, SealedBlock, U256}; - use reth_provider::providers::StaticFileWriter; stage_test_suite_ext!(StorageHashingTestRunner, storage_hashing); diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index f0a8c181177b3..6ca02a4aa5dab 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -10,10 +10,7 @@ use reth_db::{ RawKey, RawTable, RawValue, }; use reth_etl::Collector; -use reth_interfaces::{ - p2p::headers::{downloader::HeaderDownloader, error::HeadersDownloaderError}, - provider::ProviderError, -}; +use 
reth_network_p2p::headers::{downloader::HeaderDownloader, error::HeadersDownloaderError}; use reth_primitives::{ stage::{ CheckpointBlockRange, EntitiesCheckpoint, HeadersCheckpoint, StageCheckpoint, StageId, @@ -28,6 +25,7 @@ use reth_provider::{ use reth_stages_api::{ BlockErrorKind, ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput, }; +use reth_storage_errors::provider::ProviderError; use std::{ sync::Arc, task::{ready, Context, Poll}, @@ -383,13 +381,13 @@ mod tests { stage_test_suite, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, }; use assert_matches::assert_matches; - use reth_interfaces::test_utils::generators::{self, random_header, random_header_range}; use reth_primitives::{ stage::StageUnitCheckpoint, BlockBody, SealedBlock, SealedBlockWithSenders, B256, }; use reth_provider::{ BlockWriter, BundleStateWithReceipts, ProviderFactory, StaticFileProviderFactory, }; + use reth_testing_utils::generators::{self, random_header, random_header_range}; use reth_trie::{updates::TrieUpdates, HashedPostState}; use test_runner::HeadersTestRunner; @@ -401,7 +399,7 @@ mod tests { use reth_downloaders::headers::reverse_headers::{ ReverseHeadersDownloader, ReverseHeadersDownloaderBuilder, }; - use reth_interfaces::test_utils::{TestHeaderDownloader, TestHeadersClient}; + use reth_network_p2p::test_utils::{TestHeaderDownloader, TestHeadersClient}; use reth_provider::BlockNumReader; use tokio::sync::watch; diff --git a/crates/stages/src/stages/index_account_history.rs b/crates/stages/src/stages/index_account_history.rs index 6c313f0d3e005..0bdc179679ddd 100644 --- a/crates/stages/src/stages/index_account_history.rs +++ b/crates/stages/src/stages/index_account_history.rs @@ -157,12 +157,12 @@ mod tests { transaction::DbTx, BlockNumberList, }; - use reth_interfaces::test_utils::{ + use reth_primitives::{address, BlockNumber, B256}; + use reth_provider::providers::StaticFileWriter; + use reth_testing_utils::{ generators, 
generators::{random_block_range, random_changeset_range, random_contract_account_range}, }; - use reth_primitives::{address, BlockNumber, B256}; - use reth_provider::providers::StaticFileWriter; use std::collections::BTreeMap; const ADDRESS: Address = address!("0000000000000000000000000000000000000001"); diff --git a/crates/stages/src/stages/index_storage_history.rs b/crates/stages/src/stages/index_storage_history.rs index 51fc92f18b145..70a8520016253 100644 --- a/crates/stages/src/stages/index_storage_history.rs +++ b/crates/stages/src/stages/index_storage_history.rs @@ -164,12 +164,12 @@ mod tests { transaction::DbTx, BlockNumberList, }; - use reth_interfaces::test_utils::{ + use reth_primitives::{address, b256, Address, BlockNumber, StorageEntry, B256, U256}; + use reth_provider::providers::StaticFileWriter; + use reth_testing_utils::{ generators, generators::{random_block_range, random_changeset_range, random_contract_account_range}, }; - use reth_primitives::{address, b256, Address, BlockNumber, StorageEntry, B256, U256}; - use reth_provider::providers::StaticFileWriter; use std::collections::BTreeMap; const ADDRESS: Address = address!("0000000000000000000000000000000000000001"); diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index 7590f9d066aac..cb50dd39bd816 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -368,16 +368,16 @@ mod tests { }; use assert_matches::assert_matches; use reth_db::cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}; - use reth_interfaces::test_utils::{ + use reth_primitives::{ + keccak256, stage::StageUnitCheckpoint, SealedBlock, StaticFileSegment, StorageEntry, U256, + }; + use reth_provider::{providers::StaticFileWriter, StaticFileProviderFactory}; + use reth_testing_utils::{ generators, generators::{ random_block, random_block_range, random_changeset_range, random_contract_account_range, }, }; - use reth_primitives::{ - keccak256, 
stage::StageUnitCheckpoint, SealedBlock, StaticFileSegment, StorageEntry, U256, - }; - use reth_provider::{providers::StaticFileWriter, StaticFileProviderFactory}; use reth_trie::test_utils::{state_root, state_root_prehashed}; use std::collections::BTreeMap; diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index 7bb88ff96e47b..9af26247348e8 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -52,7 +52,6 @@ mod tests { }; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_exex::ExExManagerHandle; - use reth_interfaces::test_utils::generators::{self, random_block}; use reth_primitives::{ address, hex_literal::hex, keccak256, Account, Bytecode, ChainSpecBuilder, PruneMode, PruneModes, SealedBlock, StaticFileSegment, U256, @@ -62,6 +61,7 @@ mod tests { StorageReader, }; use reth_stages_api::{ExecInput, Stage}; + use reth_testing_utils::generators::{self, random_block}; use std::sync::Arc; #[tokio::test] diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index 2695fb074c27e..28e3df51e7f3f 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -285,10 +285,6 @@ struct FailedSenderRecoveryError { mod tests { use assert_matches::assert_matches; use reth_db::cursor::DbCursorRO; - use reth_interfaces::test_utils::{ - generators, - generators::{random_block, random_block_range}, - }; use reth_primitives::{ stage::StageUnitCheckpoint, BlockNumber, PruneCheckpoint, PruneMode, SealedBlock, TransactionSigned, B256, @@ -297,6 +293,10 @@ mod tests { providers::StaticFileWriter, PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider, }; + use reth_testing_utils::{ + generators, + generators::{random_block, random_block_range}, + }; use super::*; use crate::test_utils::{ diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index 
332bcf8e70008..9f1704958ab21 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -8,7 +8,6 @@ use reth_db::{ RawKey, RawValue, }; use reth_etl::Collector; -use reth_interfaces::provider::ProviderError; use reth_primitives::{ stage::{EntitiesCheckpoint, StageCheckpoint, StageId}, PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, TxHash, TxNumber, @@ -18,6 +17,7 @@ use reth_provider::{ TransactionsProvider, TransactionsProviderExt, }; use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; +use reth_storage_errors::provider::ProviderError; use tracing::*; /// The transaction lookup stage. @@ -242,12 +242,12 @@ mod tests { TestRunnerError, TestStageDB, UnwindStageTestRunner, }; use assert_matches::assert_matches; - use reth_interfaces::test_utils::{ + use reth_primitives::{stage::StageUnitCheckpoint, BlockNumber, SealedBlock, B256}; + use reth_provider::{providers::StaticFileWriter, StaticFileProviderFactory}; + use reth_testing_utils::{ generators, generators::{random_block, random_block_range}, }; - use reth_primitives::{stage::StageUnitCheckpoint, BlockNumber, SealedBlock, B256}; - use reth_provider::{providers::StaticFileWriter, StaticFileProviderFactory}; use std::ops::Sub; // Implement stage test suite. 
diff --git a/crates/stages/src/test_utils/runner.rs b/crates/stages/src/test_utils/runner.rs index fd2064ac47f99..17650287eb54d 100644 --- a/crates/stages/src/test_utils/runner.rs +++ b/crates/stages/src/test_utils/runner.rs @@ -1,10 +1,10 @@ use super::TestStageDB; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; -use reth_interfaces::db::DatabaseError; use reth_provider::ProviderError; use reth_stages_api::{ ExecInput, ExecOutput, Stage, StageError, StageExt, UnwindInput, UnwindOutput, }; +use reth_storage_errors::db::DatabaseError; use std::sync::Arc; use tokio::sync::oneshot; diff --git a/crates/stages/src/test_utils/test_db.rs b/crates/stages/src/test_utils/test_db.rs index 5fe65a737349b..5998fc878734e 100644 --- a/crates/stages/src/test_utils/test_db.rs +++ b/crates/stages/src/test_utils/test_db.rs @@ -11,7 +11,6 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, DatabaseEnv, DatabaseError as DbError, }; -use reth_interfaces::{provider::ProviderResult, test_utils::generators::ChangeSet}; use reth_primitives::{ keccak256, Account, Address, BlockNumber, Receipt, SealedBlock, SealedHeader, StaticFileSegment, StorageEntry, TxHash, TxNumber, B256, MAINNET, U256, @@ -20,6 +19,8 @@ use reth_provider::{ providers::{StaticFileProviderRWRefMut, StaticFileWriter}, HistoryWriter, ProviderError, ProviderFactory, StaticFileProviderFactory, }; +use reth_storage_errors::provider::ProviderResult; +use reth_testing_utils::generators::ChangeSet; use std::{collections::BTreeMap, path::Path, sync::Arc}; use tempfile::TempDir; From e21a2578d56e28bd0dc252fddcd627c600dd1a05 Mon Sep 17 00:00:00 2001 From: jakevin Date: Wed, 29 May 2024 17:34:52 +0800 Subject: [PATCH 679/700] chore(deps): remove useless deps (#8453) --- Cargo.lock | 47 ---------------------------- crates/e2e-test-utils/Cargo.toml | 4 --- crates/net/eth-wire-types/Cargo.toml | 5 --- crates/net/network/Cargo.toml | 2 -- crates/net/p2p/Cargo.toml | 2 -- crates/node-core/Cargo.toml | 3 -- crates/optimism/evm/Cargo.toml 
| 1 - crates/optimism/node/Cargo.toml | 1 - crates/prune/Cargo.toml | 3 -- crates/static-file/Cargo.toml | 4 --- crates/trie/Cargo.toml | 1 - examples/exex/minimal/Cargo.toml | 3 -- examples/exex/op-bridge/Cargo.toml | 3 -- examples/exex/rollup/Cargo.toml | 4 --- examples/node-custom-rpc/Cargo.toml | 1 - testing/ef-tests/Cargo.toml | 2 -- 16 files changed, 86 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8ce071a3992e7..841f67254c1a8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2703,12 +2703,10 @@ dependencies = [ "reth-interfaces", "reth-primitives", "reth-provider", - "reth-revm", "reth-stages", "serde", "serde_json", "thiserror", - "tokio", "walkdir", ] @@ -2949,11 +2947,8 @@ dependencies = [ "reth", "reth-exex", "reth-node-api", - "reth-node-core", "reth-node-ethereum", - "reth-primitives", "reth-tracing", - "tokio", ] [[package]] @@ -2963,17 +2958,14 @@ dependencies = [ "alloy-sol-types", "eyre", "futures", - "itertools 0.12.1", "reth", "reth-exex", "reth-node-api", - "reth-node-core", "reth-node-ethereum", "reth-primitives", "reth-provider", "reth-tracing", "rusqlite", - "tokio", ] [[package]] @@ -2985,21 +2977,17 @@ dependencies = [ "alloy-sol-types", "eyre", "foundry-blob-explorers", - "futures", "once_cell", "reth", - "reth-cli-runner", "reth-execution-errors", "reth-exex", "reth-node-api", - "reth-node-core", "reth-node-ethereum", "reth-primitives", "reth-provider", "reth-revm", "reth-testing-utils", "reth-tracing", - "reth-trie", "rusqlite", "secp256k1 0.28.2", "serde_json", @@ -3260,23 +3248,6 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" -[[package]] -name = "futures-test" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce388237b32ac42eca0df1ba55ed3bbda4eaf005d7d4b5dbc0b20ab962928ac9" -dependencies = [ - "futures-core", - "futures-executor", - "futures-io", - "futures-macro", 
- "futures-sink", - "futures-task", - "futures-util", - "pin-project", - "pin-utils", -] - [[package]] name = "futures-timer" version = "3.0.3" @@ -5231,7 +5202,6 @@ name = "node-custom-rpc" version = "0.0.0" dependencies = [ "clap", - "eyre", "jsonrpsee", "reth", "reth-node-ethereum", @@ -6852,12 +6822,9 @@ dependencies = [ "eyre", "futures-util", "jsonrpsee", - "rand 0.8.5", "reth", "reth-db", "reth-node-builder", - "reth-node-core", - "reth-node-ethereum", "reth-payload-builder", "reth-primitives", "reth-provider", @@ -6865,7 +6832,6 @@ dependencies = [ "reth-rpc-layer", "reth-tokio-util", "reth-tracing", - "secp256k1 0.28.2", "serde_json", "tokio", "tokio-stream", @@ -6977,7 +6943,6 @@ dependencies = [ "reth-net-common", "reth-primitives", "reth-tracing", - "secp256k1 0.28.2", "serde", "test-fuzz", "thiserror", @@ -7269,10 +7234,8 @@ dependencies = [ "enr", "fnv", "futures", - "futures-test", "humantime-serde", "itertools 0.12.1", - "linked_hash_set", "metrics", "parking_lot 0.12.3", "pin-project", @@ -7332,14 +7295,12 @@ dependencies = [ "auto_impl", "futures", "parking_lot 0.12.3", - "rand 0.8.5", "reth-consensus", "reth-eth-wire-types", "reth-network-api", "reth-network-types", "reth-primitives", "reth-storage-errors", - "secp256k1 0.28.2", "thiserror", "tokio", "tracing", @@ -7443,7 +7404,6 @@ name = "reth-node-core" version = "0.2.0-beta.7" dependencies = [ "alloy-rpc-types-engine", - "assert_matches", "clap", "const-str", "derive_more", @@ -7453,7 +7413,6 @@ dependencies = [ "futures", "humantime", "hyper 0.14.28", - "jsonrpsee", "metrics", "metrics-exporter-prometheus", "metrics-process", @@ -7490,7 +7449,6 @@ dependencies = [ "serde", "serde_json", "shellexpand", - "tempfile", "thiserror", "tikv-jemalloc-ctl", "tokio", @@ -7581,7 +7539,6 @@ dependencies = [ "reth-rpc-types-compat", "reth-tracing", "reth-transaction-pool", - "revm", "revm-primitives", "serde", "serde_json", @@ -7748,7 +7705,6 @@ name = "reth-prune" version = "0.2.0-beta.7" dependencies = 
[ "assert_matches", - "derive_more", "itertools 0.12.1", "metrics", "rayon", @@ -8064,8 +8020,6 @@ dependencies = [ "reth-testing-utils", "reth-tokio-util", "tempfile", - "tokio", - "tokio-stream", "tracing", ] @@ -8209,7 +8163,6 @@ dependencies = [ "revm", "serde_json", "similar-asserts", - "thiserror", "tokio", "tokio-stream", "tracing", diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 4165044ae2e59..19fafd5536824 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -10,9 +10,7 @@ repository.workspace = true [dependencies] reth.workspace = true -reth-node-core.workspace = true reth-primitives.workspace = true -reth-node-ethereum.workspace = true reth-tracing.workspace = true reth-db.workspace = true reth-rpc.workspace = true @@ -29,8 +27,6 @@ eyre.workspace = true tokio.workspace = true tokio-stream.workspace = true serde_json.workspace = true -rand.workspace = true -secp256k1.workspace = true alloy-signer.workspace = true alloy-signer-wallet = { workspace = true, features = ["mnemonic"] } alloy-rpc-types.workspace = true diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index 9954dba10fc00..0ed24a64e10e4 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -35,11 +35,6 @@ reth-tracing.workspace = true test-fuzz.workspace = true tokio-util = { workspace = true, features = ["io", "codec"] } rand.workspace = true -secp256k1 = { workspace = true, features = [ - "global-context", - "rand-std", - "recovery", -] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 0c8e079f4da78..e70958450a3d4 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -41,7 +41,6 @@ pin-project.workspace = true tokio = { workspace = true, features = ["io-util", "net", "macros", "rt-multi-thread", 
"time"] } tokio-stream.workspace = true tokio-util = { workspace = true, features = ["codec"] } -futures-test = "0.3.30" # io serde = { workspace = true, optional = true } @@ -59,7 +58,6 @@ tracing.workspace = true fnv = "1.0" thiserror.workspace = true parking_lot.workspace = true -linked_hash_set = "0.1" rand.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } derive_more.workspace = true diff --git a/crates/net/p2p/Cargo.toml b/crates/net/p2p/Cargo.toml index 0e3146ab6f39d..46ce37a384914 100644 --- a/crates/net/p2p/Cargo.toml +++ b/crates/net/p2p/Cargo.toml @@ -34,9 +34,7 @@ parking_lot = { workspace = true, optional = true } reth-consensus = { workspace = true, features = ["test-utils"] } parking_lot.workspace = true -rand.workspace = true tokio = { workspace = true, features = ["full"] } -secp256k1 = { workspace = true, features = ["alloc", "recovery", "rand"] } [features] test-utils = ["reth-consensus/test-utils", "parking_lot"] diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index 6b8a35c7994ab..8f536e471ab9b 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -94,9 +94,6 @@ procfs = "0.16.0" [dev-dependencies] # test vectors generation proptest.workspace = true -tempfile.workspace = true -jsonrpsee.workspace = true -assert_matches.workspace = true [features] optimism = [ diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 0423f1bd7dd6c..c3f4643b22710 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -37,6 +37,5 @@ reth-revm = { workspace = true, features = ["test-utils"] } optimism = [ "reth-primitives/optimism", "reth-provider/optimism", - "revm-primitives/optimism", "reth-optimism-consensus/optimism", ] diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 9432ce9edb5b5..ddd2fb283896d 100644 --- a/crates/optimism/node/Cargo.toml +++ 
b/crates/optimism/node/Cargo.toml @@ -30,7 +30,6 @@ reth-evm.workspace = true reth-revm.workspace = true reth-evm-optimism.workspace = true reth-beacon-consensus.workspace = true -revm.workspace = true revm-primitives.workspace = true # async diff --git a/crates/prune/Cargo.toml b/crates/prune/Cargo.toml index 65b4ba19c6c1f..a645dd40a44a7 100644 --- a/crates/prune/Cargo.toml +++ b/crates/prune/Cargo.toml @@ -37,7 +37,4 @@ reth-db = { workspace = true, features = ["test-utils"] } reth-stages = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true -# misc -derive_more.workspace = true - assert_matches.workspace = true diff --git a/crates/static-file/Cargo.toml b/crates/static-file/Cargo.toml index 290ffa4eb220f..67da49960b64c 100644 --- a/crates/static-file/Cargo.toml +++ b/crates/static-file/Cargo.toml @@ -20,10 +20,6 @@ reth-storage-errors.workspace = true reth-nippy-jar.workspace = true reth-tokio-util.workspace = true -# async -tokio.workspace = true -tokio-stream.workspace = true - # misc tracing.workspace = true rayon.workspace = true diff --git a/crates/trie/Cargo.toml b/crates/trie/Cargo.toml index e20e6dd6fd770..a09615d2203bb 100644 --- a/crates/trie/Cargo.toml +++ b/crates/trie/Cargo.toml @@ -26,7 +26,6 @@ alloy-rlp.workspace = true tracing.workspace = true # misc -thiserror.workspace = true derive_more.workspace = true auto_impl.workspace = true diff --git a/examples/exex/minimal/Cargo.toml b/examples/exex/minimal/Cargo.toml index a7bcc327ac163..afbafce8b71ac 100644 --- a/examples/exex/minimal/Cargo.toml +++ b/examples/exex/minimal/Cargo.toml @@ -9,11 +9,8 @@ license.workspace = true reth.workspace = true reth-exex.workspace = true reth-node-api.workspace = true -reth-node-core.workspace = true reth-node-ethereum.workspace = true -reth-primitives.workspace = true reth-tracing.workspace = true eyre.workspace = true -tokio.workspace = true futures.workspace = true diff --git a/examples/exex/op-bridge/Cargo.toml 
b/examples/exex/op-bridge/Cargo.toml index d8669e9147377..5145080c1b11e 100644 --- a/examples/exex/op-bridge/Cargo.toml +++ b/examples/exex/op-bridge/Cargo.toml @@ -9,15 +9,12 @@ license.workspace = true reth.workspace = true reth-exex.workspace = true reth-node-api.workspace = true -reth-node-core.workspace = true reth-node-ethereum.workspace = true reth-primitives.workspace = true reth-provider.workspace = true reth-tracing.workspace = true eyre.workspace = true -tokio.workspace = true futures.workspace = true alloy-sol-types = { workspace = true, features = ["json"] } -itertools.workspace = true rusqlite = { version = "0.31.0", features = ["bundled"] } diff --git a/examples/exex/rollup/Cargo.toml b/examples/exex/rollup/Cargo.toml index bea03566618e8..5a4dcb5f4fea6 100644 --- a/examples/exex/rollup/Cargo.toml +++ b/examples/exex/rollup/Cargo.toml @@ -8,20 +8,16 @@ license.workspace = true [dependencies] # reth reth.workspace = true -reth-cli-runner.workspace = true reth-exex.workspace = true reth-node-api.workspace = true -reth-node-core.workspace = true reth-node-ethereum.workspace = true reth-primitives.workspace = true reth-execution-errors.workspace = true reth-provider.workspace = true reth-revm.workspace = true reth-tracing.workspace = true -reth-trie.workspace = true # async -futures.workspace = true tokio.workspace = true # misc diff --git a/examples/node-custom-rpc/Cargo.toml b/examples/node-custom-rpc/Cargo.toml index f1c5d95d9f1d2..473e9acaf9fad 100644 --- a/examples/node-custom-rpc/Cargo.toml +++ b/examples/node-custom-rpc/Cargo.toml @@ -12,7 +12,6 @@ reth-node-ethereum.workspace = true clap = { workspace = true, features = ["derive"] } jsonrpsee = { workspace = true, features = ["server", "macros"] } -eyre.workspace = true [dev-dependencies] tokio.workspace = true diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml index 2584c42d67c5b..d89baf4108f32 100644 --- a/testing/ef-tests/Cargo.toml +++ b/testing/ef-tests/Cargo.toml @@ 
-21,11 +21,9 @@ reth-db = { workspace = true, features = ["mdbx", "test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-stages.workspace = true reth-interfaces.workspace = true -reth-revm.workspace = true reth-evm-ethereum.workspace = true alloy-rlp.workspace = true -tokio.workspace = true walkdir = "2.3.3" serde.workspace = true serde_json.workspace = true From dd2113173e4408e0565c5edda439c1b15092ed8e Mon Sep 17 00:00:00 2001 From: Luca Provini Date: Wed, 29 May 2024 11:39:11 +0200 Subject: [PATCH 680/700] Rexport tx pool identifiers (#8458) --- crates/transaction-pool/src/identifier.rs | 1 + crates/transaction-pool/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/identifier.rs b/crates/transaction-pool/src/identifier.rs index 4e4bec4d1b1bf..ecc46cae8adfd 100644 --- a/crates/transaction-pool/src/identifier.rs +++ b/crates/transaction-pool/src/identifier.rs @@ -1,3 +1,4 @@ +//! Identifier types for transactions and senders. 
use reth_primitives::Address; use rustc_hash::FxHashMap; use std::collections::HashMap; diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 0ddc8ec016fc6..f81977d67153b 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -188,7 +188,7 @@ pub mod validate; pub mod blobstore; mod config; -mod identifier; +pub mod identifier; mod ordering; mod traits; From efccfbfc65014e3d6ac71a7df66a11d9752a49cf Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 May 2024 11:59:00 +0200 Subject: [PATCH 681/700] chore: rm reth-interfaces from beacon (#8456) --- Cargo.lock | 4 +++- crates/consensus/beacon/Cargo.toml | 7 ++++-- crates/consensus/beacon/src/engine/error.rs | 10 ++++---- crates/consensus/beacon/src/engine/handle.rs | 2 +- .../beacon/src/engine/hooks/controller.rs | 2 +- .../consensus/beacon/src/engine/hooks/mod.rs | 2 +- .../beacon/src/engine/hooks/prune.rs | 2 +- .../beacon/src/engine/hooks/static_file.rs | 2 +- crates/consensus/beacon/src/engine/message.rs | 2 +- crates/consensus/beacon/src/engine/mod.rs | 24 +++++++++---------- crates/consensus/beacon/src/engine/sync.rs | 4 ++-- .../consensus/beacon/src/engine/test_utils.rs | 5 ++-- crates/errors/src/lib.rs | 2 +- 13 files changed, 35 insertions(+), 33 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 841f67254c1a8..3020465a05dc9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6499,17 +6499,19 @@ dependencies = [ "futures", "metrics", "reth-blockchain-tree", + "reth-blockchain-tree-api", "reth-config", "reth-consensus", "reth-db", "reth-downloaders", "reth-engine-primitives", + "reth-errors", "reth-ethereum-consensus", "reth-ethereum-engine-primitives", "reth-evm", "reth-evm-ethereum", - "reth-interfaces", "reth-metrics", + "reth-network-p2p", "reth-payload-builder", "reth-payload-validator", "reth-primitives", diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index a5cef8e342777..67f6558583c9b 
100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -13,9 +13,10 @@ workspace = true [dependencies] # reth reth-ethereum-consensus.workspace = true +reth-blockchain-tree-api.workspace = true reth-primitives.workspace = true -reth-interfaces.workspace = true reth-stages-api.workspace = true +reth-errors.workspace = true reth-db.workspace = true reth-provider.workspace = true reth-rpc-types.workspace = true @@ -26,6 +27,8 @@ reth-prune.workspace = true reth-static-file.workspace = true reth-tokio-util.workspace = true reth-engine-primitives.workspace = true +reth-network-p2p.workspace = true + # async tokio = { workspace = true, features = ["sync"] } @@ -46,12 +49,12 @@ schnellru.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true, features = ["test-utils"] } reth-consensus = { workspace = true, features = ["test-utils"] } -reth-interfaces = { workspace = true, features = ["test-utils"] } reth-stages = { workspace = true, features = ["test-utils"] } reth-blockchain-tree = { workspace = true, features = ["test-utils"] } reth-db = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-evm = { workspace = true, features = ["test-utils"] } +reth-network-p2p = { workspace = true, features = ["test-utils"] } reth-rpc-types-compat.workspace = true reth-rpc.workspace = true reth-tracing.workspace = true diff --git a/crates/consensus/beacon/src/engine/error.rs b/crates/consensus/beacon/src/engine/error.rs index 92bd031e5666c..aa37e8482a35e 100644 --- a/crates/consensus/beacon/src/engine/error.rs +++ b/crates/consensus/beacon/src/engine/error.rs @@ -1,5 +1,5 @@ use crate::engine::hooks::EngineHookError; -use reth_interfaces::RethError; +use reth_errors::{DatabaseError, RethError}; use reth_rpc_types::engine::ForkchoiceUpdateError; use reth_stages_api::PipelineError; @@ -37,8 +37,8 @@ impl From for 
BeaconConsensusEngineError { } // for convenience in the beacon engine -impl From for BeaconConsensusEngineError { - fn from(e: reth_interfaces::db::DatabaseError) -> Self { +impl From for BeaconConsensusEngineError { + fn from(e: DatabaseError) -> Self { Self::Common(e.into()) } } @@ -72,8 +72,8 @@ impl From for BeaconForkChoiceUpdateError { Self::internal(e) } } -impl From for BeaconForkChoiceUpdateError { - fn from(e: reth_interfaces::db::DatabaseError) -> Self { +impl From for BeaconForkChoiceUpdateError { + fn from(e: DatabaseError) -> Self { Self::internal(e) } } diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index bec289bf4a7fa..4208714873d2b 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -6,7 +6,7 @@ use crate::{ }; use futures::TryFutureExt; use reth_engine_primitives::EngineTypes; -use reth_interfaces::RethResult; +use reth_errors::RethResult; use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; diff --git a/crates/consensus/beacon/src/engine/hooks/controller.rs b/crates/consensus/beacon/src/engine/hooks/controller.rs index a2845c9cce175..41519b456c9b7 100644 --- a/crates/consensus/beacon/src/engine/hooks/controller.rs +++ b/crates/consensus/beacon/src/engine/hooks/controller.rs @@ -176,7 +176,7 @@ mod tests { EngineHooksController, }; use futures::poll; - use reth_interfaces::{RethError, RethResult}; + use reth_errors::{RethError, RethResult}; use std::{ collections::VecDeque, future::poll_fn, diff --git a/crates/consensus/beacon/src/engine/hooks/mod.rs b/crates/consensus/beacon/src/engine/hooks/mod.rs index 3e78e484817b8..0777186b67568 100644 --- a/crates/consensus/beacon/src/engine/hooks/mod.rs +++ b/crates/consensus/beacon/src/engine/hooks/mod.rs @@ -1,4 +1,4 @@ -use reth_interfaces::{RethError, RethResult}; +use reth_errors::{RethError, RethResult}; use 
reth_primitives::BlockNumber; use std::{ fmt, diff --git a/crates/consensus/beacon/src/engine/hooks/prune.rs b/crates/consensus/beacon/src/engine/hooks/prune.rs index ea9078aca919e..e6b5306e3bc6b 100644 --- a/crates/consensus/beacon/src/engine/hooks/prune.rs +++ b/crates/consensus/beacon/src/engine/hooks/prune.rs @@ -7,7 +7,7 @@ use crate::{ use futures::FutureExt; use metrics::Counter; use reth_db::database::Database; -use reth_interfaces::{RethError, RethResult}; +use reth_errors::{RethError, RethResult}; use reth_primitives::BlockNumber; use reth_prune::{Pruner, PrunerError, PrunerWithResult}; use reth_tasks::TaskSpawner; diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index 3d78d51d93cc4..5cf643216f1c9 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -6,7 +6,7 @@ use crate::{ }; use futures::FutureExt; use reth_db::database::Database; -use reth_interfaces::RethResult; +use reth_errors::RethResult; use reth_primitives::{static_file::HighestStaticFiles, BlockNumber}; use reth_static_file::{StaticFileProducer, StaticFileProducerWithResult}; use reth_tasks::TaskSpawner; diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index 108dab41eb0f4..e3433062aa9b0 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -1,7 +1,7 @@ use crate::engine::{error::BeaconOnNewPayloadError, forkchoice::ForkchoiceStatus}; use futures::{future::Either, FutureExt}; use reth_engine_primitives::EngineTypes; -use reth_interfaces::RethResult; +use reth_errors::RethResult; use reth_payload_builder::error::PayloadBuilderError; use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceState, diff --git a/crates/consensus/beacon/src/engine/mod.rs 
b/crates/consensus/beacon/src/engine/mod.rs index e29ddd62495ba..bcf6770973f04 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1,16 +1,15 @@ use futures::{stream::BoxStream, Future, StreamExt}; +use reth_blockchain_tree_api::{ + error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, + BlockStatus, BlockValidationKind, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, +}; use reth_db::database::Database; use reth_engine_primitives::{EngineTypes, PayloadAttributes, PayloadBuilderAttributes}; -use reth_interfaces::{ - blockchain_tree::{ - error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, - BlockStatus, BlockValidationKind, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, - }, - executor::BlockValidationError, - p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}, - provider::ProviderResult, +use reth_errors::{BlockValidationError, ProviderResult, RethError, RethResult}; +use reth_network_p2p::{ + bodies::client::BodiesClient, + headers::client::HeadersClient, sync::{NetworkSyncUpdater, SyncState}, - RethError, RethResult, }; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_validator::ExecutionPayloadValidator; @@ -1957,12 +1956,12 @@ mod tests { BeaconForkChoiceUpdateError, }; use assert_matches::assert_matches; - use reth_interfaces::test_utils::generators::{self, Rng}; use reth_primitives::{stage::StageCheckpoint, ChainSpecBuilder, MAINNET}; use reth_provider::{BlockWriter, ProviderFactory}; use reth_rpc_types::engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; use reth_rpc_types_compat::engine::payload::block_to_payload_v1; use reth_stages::{ExecOutput, PipelineError, StageError}; + use reth_testing_utils::generators::{self, Rng}; use std::{collections::VecDeque, sync::Arc}; use tokio::sync::oneshot::error::TryRecvError; @@ -2152,9 +2151,9 @@ mod tests { mod fork_choice_updated { use 
super::*; use reth_db::{tables, test_utils::create_test_static_files_dir, transaction::DbTxMut}; - use reth_interfaces::test_utils::generators::random_block; use reth_primitives::U256; use reth_rpc_types::engine::ForkchoiceUpdateError; + use reth_testing_utils::generators::random_block; #[tokio::test] async fn empty_head() { @@ -2452,10 +2451,9 @@ mod tests { mod new_payload { use super::*; use reth_db::test_utils::create_test_static_files_dir; - use reth_interfaces::test_utils::generators::random_block; use reth_primitives::{genesis::Genesis, Hardfork, U256}; use reth_provider::test_utils::blocks::BlockchainTestData; - use reth_testing_utils::GenesisAllocator; + use reth_testing_utils::{generators::random_block, GenesisAllocator}; #[tokio::test] async fn new_payload_before_forkchoice() { diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 441c3ce0362cd..810e3e7483676 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -6,7 +6,7 @@ use crate::{ }; use futures::FutureExt; use reth_db::database::Database; -use reth_interfaces::p2p::{ +use reth_network_p2p::{ bodies::client::BodiesClient, full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, headers::client::HeadersClient, @@ -429,7 +429,7 @@ mod tests { use assert_matches::assert_matches; use futures::poll; use reth_db::{mdbx::DatabaseEnv, test_utils::TempDatabase}; - use reth_interfaces::{p2p::either::Either, test_utils::TestFullBlockClient}; + use reth_network_p2p::{either::Either, test_utils::TestFullBlockClient}; use reth_primitives::{ constants::ETHEREUM_BLOCK_GAS_LIMIT, stage::StageCheckpoint, BlockBody, ChainSpecBuilder, Header, PruneModes, SealedHeader, MAINNET, diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 13ffd0c4fff78..73777d94d5c34 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ 
b/crates/consensus/beacon/src/engine/test_utils.rs @@ -16,9 +16,8 @@ use reth_downloaders::{ use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm::{either::Either, test_utils::MockExecutorProvider}; use reth_evm_ethereum::execute::EthExecutorProvider; -use reth_interfaces::{ - p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}, - sync::NoopSyncStateUpdater, +use reth_network_p2p::{ + bodies::client::BodiesClient, headers::client::HeadersClient, sync::NoopSyncStateUpdater, test_utils::NoopFullBlockClient, }; use reth_payload_builder::test_utils::spawn_test_payload_service; diff --git a/crates/errors/src/lib.rs b/crates/errors/src/lib.rs index 4b8b96fba4956..9dc0ce0ca5bc4 100644 --- a/crates/errors/src/lib.rs +++ b/crates/errors/src/lib.rs @@ -17,7 +17,7 @@ pub use error::{RethError, RethResult}; pub use reth_blockchain_tree_api::error::{BlockchainTreeError, CanonicalError}; pub use reth_consensus::ConsensusError; -pub use reth_execution_errors::BlockExecutionError; +pub use reth_execution_errors::{BlockExecutionError, BlockValidationError}; pub use reth_storage_errors::{ db::DatabaseError, provider::{ProviderError, ProviderResult}, From fe823cba7eb609e62505812a0fc057f2bf41b7ed Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 May 2024 12:48:02 +0200 Subject: [PATCH 682/700] chore: rm reth-interfaces from consensus (#8461) --- Cargo.lock | 4 ++-- crates/consensus/common/Cargo.toml | 6 +++--- crates/consensus/common/src/validation.rs | 11 +++++------ crates/storage/storage-api/src/lib.rs | 2 ++ 4 files changed, 12 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3020465a05dc9..7b2fd0dbe7810 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6641,11 +6641,11 @@ name = "reth-consensus-common" version = "0.2.0-beta.7" dependencies = [ "mockall", + "rand 0.8.5", "reth-consensus", - "reth-interfaces", "reth-optimism-primitives", "reth-primitives", - "reth-provider", + "reth-storage-api", ] [[package]] diff --git 
a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index b18b4dcf6ddbc..f6f2b56dfae11 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -14,9 +14,9 @@ workspace = true # reth reth-primitives.workspace = true reth-optimism-primitives.workspace = true -reth-consensus.workspace=true +reth-consensus.workspace = true [dev-dependencies] -reth-interfaces = { workspace = true, features = ["test-utils"] } -reth-provider = { workspace = true, features = ["test-utils"] } +reth-storage-api.workspace = true +rand.workspace = true mockall = "0.12" diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 74c515c204b4e..ea80e8477806e 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -190,16 +190,15 @@ pub fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError> mod tests { use super::*; use mockall::mock; - use reth_interfaces::{ - provider::ProviderResult, - test_utils::generators::{self, Rng}, - }; + use rand::Rng; use reth_primitives::{ hex_literal::hex, proofs, Account, Address, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, Bytes, ChainSpecBuilder, Signature, Transaction, TransactionSigned, TxEip4844, Withdrawal, Withdrawals, U256, }; - use reth_provider::{AccountReader, HeaderProvider, WithdrawalsProvider}; + use reth_storage_api::{ + errors::provider::ProviderResult, AccountReader, HeaderProvider, WithdrawalsProvider, + }; use std::ops::RangeBounds; mock! 
{ @@ -300,7 +299,7 @@ mod tests { } fn mock_blob_tx(nonce: u64, num_blobs: usize) -> TransactionSigned { - let mut rng = generators::rng(); + let mut rng = rand::thread_rng(); let request = Transaction::Eip4844(TxEip4844 { chain_id: 1u64, nonce, diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index 814b9ac7b7f77..df420ec763569 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -8,6 +8,8 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// Re-export used error types. +pub use reth_storage_errors as errors; mod account; pub use account::*; From 7262d08f474d386b13830891842ec768f8bc921d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 May 2024 13:24:32 +0200 Subject: [PATCH 683/700] chore(rpc): rm unused fn (#8463) --- crates/rpc/rpc-builder/src/lib.rs | 37 ------------------------------- 1 file changed, 37 deletions(-) diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index c8976f03cf6a7..dfc52b67e1a59 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -715,43 +715,6 @@ impl RpcModuleSelection { } } - /// Creates a new [RpcModule] based on the configured reth modules. - /// - /// Note: This will always create new instance of the module handlers and is therefore only - /// recommended for launching standalone transports. If multiple transports need to be - /// configured it's recommended to use the [RpcModuleBuilder]. 
- #[allow(clippy::too_many_arguments)] - pub fn standalone_module( - &self, - provider: Provider, - pool: Pool, - network: Network, - executor: Tasks, - events: Events, - config: RpcModuleConfig, - evm_config: EvmConfig, - ) -> RpcModule<()> - where - Provider: BlockReaderIdExt - + AccountReader - + StateProviderFactory - + EvmEnvProvider - + ChainSpecProvider - + ChangeSetReader - + Clone - + Unpin - + 'static, - Pool: TransactionPool + Clone + 'static, - Network: NetworkInfo + Peers + Clone + 'static, - Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm + 'static, - { - let mut registry = - RethModuleRegistry::new(provider, pool, network, executor, events, config, evm_config); - registry.module_for(self) - } - /// Returns an iterator over all configured [RethRpcModule] pub fn iter_selection(&self) -> Box + '_> { match self { From 0cb5358fef8da1aa4b3dd3e6538a15ca8214614e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 May 2024 15:13:04 +0200 Subject: [PATCH 684/700] chore: use noop consensus for op import (#8462) --- Cargo.lock | 1 - bin/reth/src/commands/import_op.rs | 6 ++-- crates/consensus/common/Cargo.toml | 1 - crates/consensus/common/src/validation.rs | 7 ++-- crates/consensus/consensus/src/lib.rs | 3 ++ crates/consensus/consensus/src/noop.rs | 41 +++++++++++++++++++++++ 6 files changed, 49 insertions(+), 10 deletions(-) create mode 100644 crates/consensus/consensus/src/noop.rs diff --git a/Cargo.lock b/Cargo.lock index 7b2fd0dbe7810..7f764515ec5cb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6643,7 +6643,6 @@ dependencies = [ "mockall", "rand 0.8.5", "reth-consensus", - "reth-optimism-primitives", "reth-primitives", "reth-storage-api", ] diff --git a/bin/reth/src/commands/import_op.rs b/bin/reth/src/commands/import_op.rs index a1b23bda82e8b..1da75951b9910 100644 --- a/bin/reth/src/commands/import_op.rs +++ b/bin/reth/src/commands/import_op.rs @@ -11,8 +11,8 @@ use crate::{ 
version::SHORT_VERSION, }; use clap::Parser; -use reth_beacon_consensus::EthBeaconConsensus; use reth_config::{config::EtlConfig, Config}; +use reth_consensus::noop::NoopConsensus; use reth_db::{init_db, tables, transaction::DbTx}; use reth_db_common::init::init_genesis; use reth_downloaders::file_client::{ @@ -98,8 +98,8 @@ impl ImportOpCommand { init_genesis(provider_factory.clone())?; - let consensus = Arc::new(EthBeaconConsensus::new(chain_spec.clone())); - info!(target: "reth::cli", "Consensus engine initialized"); + // we use noop here because we expect the inputs to be valid + let consensus = Arc::new(NoopConsensus::default()); // open file let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?; diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index f6f2b56dfae11..fa2cac1acdf79 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true -reth-optimism-primitives.workspace = true reth-consensus.workspace = true [dev-dependencies] diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index ea80e8477806e..e06cceec91b4f 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,7 +1,6 @@ //! Collection of methods for block validation. 
use reth_consensus::ConsensusError; -use reth_optimism_primitives::bedrock_import::is_dup_tx; use reth_primitives::{ constants::{ eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, @@ -82,10 +81,8 @@ pub fn validate_block_pre_execution( } // Check transaction root - if !chain_spec.is_optimism_mainnet() || !is_dup_tx(block.number) { - if let Err(error) = block.ensure_transaction_root_valid() { - return Err(ConsensusError::BodyTransactionRootDiff(error.into())) - } + if let Err(error) = block.ensure_transaction_root_valid() { + return Err(ConsensusError::BodyTransactionRootDiff(error.into())) } // EIP-4895: Beacon chain push withdrawals as operations diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index d117b2ea2eb14..e90e73e932995 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -15,6 +15,9 @@ use reth_primitives::{ }; use std::fmt::Debug; +/// A consensus implementation that does nothing. +pub mod noop; + #[cfg(any(test, feature = "test-utils"))] /// test helpers for mocking consensus pub mod test_utils; diff --git a/crates/consensus/consensus/src/noop.rs b/crates/consensus/consensus/src/noop.rs new file mode 100644 index 0000000000000..31c168eb0c83d --- /dev/null +++ b/crates/consensus/consensus/src/noop.rs @@ -0,0 +1,41 @@ +use crate::{Consensus, ConsensusError, PostExecutionInput}; +use reth_primitives::{BlockWithSenders, Header, SealedBlock, SealedHeader, U256}; + +/// A Consensus implementation that does nothing. 
+#[derive(Debug, Copy, Clone, Default)] +#[non_exhaustive] +pub struct NoopConsensus; + +impl Consensus for NoopConsensus { + fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { + Ok(()) + } + + fn validate_header_against_parent( + &self, + _header: &SealedHeader, + _parent: &SealedHeader, + ) -> Result<(), ConsensusError> { + Ok(()) + } + + fn validate_header_with_total_difficulty( + &self, + _header: &Header, + _total_difficulty: U256, + ) -> Result<(), ConsensusError> { + Ok(()) + } + + fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { + Ok(()) + } + + fn validate_block_post_execution( + &self, + _block: &BlockWithSenders, + _input: PostExecutionInput<'_>, + ) -> Result<(), ConsensusError> { + Ok(()) + } +} From 19c529e8df96fc4913dd0a8d1e838fd52f44cac5 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 29 May 2024 15:14:14 +0200 Subject: [PATCH 685/700] add `use_self` clippy lint (#8325) --- Cargo.toml | 4 +- bin/reth/src/cli/mod.rs | 2 +- bin/reth/src/commands/db/tui.rs | 8 +- bin/reth/src/commands/stage/unwind.rs | 4 +- crates/blockchain-tree-api/src/error.rs | 41 +- crates/blockchain-tree-api/src/lib.rs | 18 +- crates/blockchain-tree/src/metrics.rs | 26 +- crates/blockchain-tree/src/state.rs | 2 +- crates/consensus/auto-seal/src/mode.rs | 16 +- .../consensus/beacon/src/engine/forkchoice.rs | 26 +- .../beacon/src/engine/hooks/prune.rs | 2 +- crates/consensus/beacon/src/engine/sync.rs | 2 +- crates/consensus/consensus/src/lib.rs | 2 +- crates/errors/src/error.rs | 8 +- crates/ethereum-forks/src/forkid.rs | 5 +- crates/ethereum-forks/src/hardfork.rs | 550 +++++++++--------- .../ethereum/engine-primitives/src/payload.rs | 9 +- crates/evm/execution-types/src/chain.rs | 6 +- crates/evm/src/either.rs | 28 +- crates/fs-util/src/lib.rs | 22 +- crates/metrics/metrics-derive/src/expand.rs | 2 +- crates/net/common/src/ratelimit.rs | 4 +- 
crates/net/discv4/src/error.rs | 2 +- crates/net/discv4/src/lib.rs | 2 +- crates/net/discv4/src/node.rs | 6 +- crates/net/discv4/src/proto.rs | 50 +- crates/net/discv5/src/filter.rs | 4 +- crates/net/discv5/src/lib.rs | 12 +- crates/net/dns/src/query.rs | 4 +- crates/net/dns/src/sync.rs | 2 +- crates/net/dns/src/tree.rs | 8 +- crates/net/downloaders/src/bodies/bodies.rs | 2 +- crates/net/downloaders/src/file_client.rs | 2 +- .../src/headers/reverse_headers.rs | 2 +- crates/net/ecies/src/algorithm.rs | 2 +- crates/net/ecies/src/error.rs | 2 +- crates/net/eth-wire-types/src/blocks.rs | 8 +- crates/net/eth-wire-types/src/broadcast.rs | 82 ++- crates/net/eth-wire-types/src/message.rs | 175 +++--- crates/net/eth-wire-types/src/status.rs | 6 +- crates/net/eth-wire-types/src/transactions.rs | 4 +- crates/net/eth-wire-types/src/version.rs | 30 +- crates/net/eth-wire/src/capability.rs | 30 +- crates/net/eth-wire/src/disconnect.rs | 62 +- crates/net/eth-wire/src/errors/eth.rs | 4 +- crates/net/eth-wire/src/errors/p2p.rs | 4 +- crates/net/eth-wire/src/p2pstream.rs | 52 +- crates/net/eth-wire/tests/fuzz_roundtrip.rs | 4 +- crates/net/nat/src/lib.rs | 20 +- crates/net/network-api/src/error.rs | 4 +- crates/net/network-api/src/lib.rs | 12 +- crates/net/network/src/builder.rs | 8 +- crates/net/network/src/cache.rs | 4 +- crates/net/network/src/config.rs | 2 +- crates/net/network/src/error.rs | 66 +-- crates/net/network/src/fetch/mod.rs | 14 +- crates/net/network/src/message.rs | 52 +- crates/net/network/src/peers/manager.rs | 21 +- crates/net/network/src/peers/reputation.rs | 2 +- crates/net/network/src/session/active.rs | 8 +- crates/net/network/src/session/config.rs | 2 +- crates/net/network/src/session/mod.rs | 2 +- crates/net/network/src/swarm.rs | 4 +- crates/net/network/src/test_utils/testnet.rs | 4 +- .../net/network/src/transactions/fetcher.rs | 2 +- crates/net/network/src/transactions/mod.rs | 20 +- crates/net/p2p/src/bodies/response.rs | 12 +- 
crates/net/p2p/src/either.rs | 16 +- crates/net/p2p/src/error.rs | 8 +- crates/net/p2p/src/headers/downloader.rs | 6 +- crates/net/p2p/src/priority.rs | 4 +- crates/net/p2p/src/sync.rs | 2 +- crates/net/types/src/lib.rs | 20 +- crates/net/types/src/node_record.rs | 2 +- crates/node-core/src/args/log.rs | 6 +- crates/node-core/src/cli/config.rs | 2 +- crates/node-core/src/dirs.rs | 6 +- crates/node/builder/src/builder/mod.rs | 2 +- crates/node/events/src/node.rs | 16 +- crates/optimism/evm/src/error.rs | 2 +- crates/optimism/node/src/rpc.rs | 4 +- crates/optimism/payload/src/payload.rs | 9 +- crates/payload/builder/src/error.rs | 6 +- crates/payload/builder/src/service.rs | 10 +- crates/primitives/benches/integer_list.rs | 2 +- crates/primitives/src/account.rs | 6 +- crates/primitives/src/alloy_compat.rs | 10 +- crates/primitives/src/chain/info.rs | 2 +- crates/primitives/src/chain/spec.rs | 28 +- crates/primitives/src/header.rs | 18 +- crates/primitives/src/integer_list.rs | 2 +- crates/primitives/src/log.rs | 4 +- crates/primitives/src/prune/mode.rs | 20 +- crates/primitives/src/prune/target.rs | 2 +- crates/primitives/src/receipt.rs | 4 +- crates/primitives/src/stage/checkpoints.rs | 2 +- crates/primitives/src/stage/id.rs | 76 +-- crates/primitives/src/stage/mod.rs | 8 +- crates/primitives/src/transaction/mod.rs | 315 +++++----- crates/primitives/src/transaction/pooled.rs | 52 +- crates/primitives/src/transaction/tx_type.rs | 54 +- crates/primitives/src/transaction/variant.rs | 52 +- crates/primitives/src/trie/subnode.rs | 2 +- crates/prune/src/builder.rs | 2 +- crates/prune/src/error.rs | 8 +- crates/prune/src/segments/set.rs | 2 +- crates/rpc/ipc/src/server/mod.rs | 4 +- crates/rpc/ipc/src/stream_codec.rs | 6 +- crates/rpc/rpc-builder/src/error.rs | 22 +- crates/rpc/rpc-builder/src/lib.rs | 66 +-- crates/rpc/rpc-builder/src/metrics.rs | 6 +- crates/rpc/rpc-engine-api/src/engine_api.rs | 35 +- crates/rpc/rpc-testing-util/src/debug.rs | 4 +- 
crates/rpc/rpc-testing-util/src/trace.rs | 2 +- crates/rpc/rpc-types/src/mev.rs | 12 +- crates/rpc/rpc/src/admin.rs | 2 +- crates/rpc/rpc/src/debug.rs | 13 +- crates/rpc/rpc/src/eth/api/fee_history.rs | 4 +- crates/rpc/rpc/src/eth/api/pending_block.rs | 16 +- crates/rpc/rpc/src/eth/api/server.rs | 46 +- crates/rpc/rpc/src/eth/api/transactions.rs | 4 +- crates/rpc/rpc/src/eth/bundle.rs | 2 +- crates/rpc/rpc/src/eth/cache/mod.rs | 2 +- crates/rpc/rpc/src/eth/error.rs | 202 +++---- crates/rpc/rpc/src/eth/filter.rs | 16 +- crates/rpc/rpc/src/eth/gas_oracle.rs | 2 +- crates/rpc/rpc/src/eth/optimism.rs | 6 +- crates/rpc/rpc/src/eth/revm_utils.rs | 8 +- crates/rpc/rpc/src/eth/signer.rs | 2 +- crates/rpc/rpc/src/reth.rs | 2 +- crates/rpc/rpc/src/rpc.rs | 2 +- crates/rpc/rpc/src/trace.rs | 22 +- crates/rpc/rpc/src/txpool.rs | 2 +- crates/rpc/rpc/src/web3.rs | 2 +- crates/stages-api/src/error.rs | 28 +- crates/stages-api/src/pipeline/ctrl.rs | 10 +- crates/stages-api/src/pipeline/set.rs | 2 +- crates/stages/src/stages/execution.rs | 2 +- crates/stages/src/stages/merkle.rs | 14 +- crates/static-file-types/src/segment.rs | 22 +- .../storage/codecs/src/alloy/access_list.rs | 4 +- .../codecs/src/alloy/genesis_account.rs | 2 +- crates/storage/codecs/src/alloy/log.rs | 4 +- crates/storage/codecs/src/alloy/request.rs | 2 +- crates/storage/codecs/src/alloy/txkind.rs | 6 +- crates/storage/codecs/src/alloy/withdrawal.rs | 2 +- crates/storage/codecs/src/lib.rs | 10 +- crates/storage/db/src/abstraction/mock.rs | 12 +- .../storage/db/src/implementation/mdbx/mod.rs | 4 +- crates/storage/db/src/metrics.rs | 40 +- .../db/src/tables/codecs/fuzz/inputs.rs | 2 +- .../storage/db/src/tables/models/accounts.rs | 6 +- .../db/src/tables/models/integer_list.rs | 2 +- crates/storage/db/src/tables/models/mod.rs | 8 +- .../db/src/tables/models/sharded_key.rs | 8 +- crates/storage/db/src/tables/raw.rs | 4 +- crates/storage/errors/src/provider.rs | 2 +- crates/storage/libmdbx-rs/src/database.rs | 2 +- 
crates/storage/libmdbx-rs/src/environment.rs | 14 +- crates/storage/libmdbx-rs/src/error.rs | 128 ++-- crates/storage/libmdbx-rs/src/transaction.rs | 4 +- .../storage/nippy-jar/src/compression/mod.rs | 24 +- crates/storage/nippy-jar/src/filter/cuckoo.rs | 4 +- crates/storage/nippy-jar/src/filter/mod.rs | 12 +- crates/storage/nippy-jar/src/lib.rs | 6 +- crates/storage/nippy-jar/src/phf/fmph.rs | 4 +- crates/storage/nippy-jar/src/phf/go_fmph.rs | 4 +- crates/storage/nippy-jar/src/phf/mod.rs | 8 +- .../src/providers/database/metrics.rs | 44 +- .../provider/src/providers/database/mod.rs | 6 +- .../provider/src/providers/static_file/jar.rs | 2 +- .../src/providers/static_file/manager.rs | 4 +- .../storage/provider/src/test_utils/mock.rs | 4 +- crates/storage/storage-api/src/block.rs | 4 +- crates/tasks/src/lib.rs | 8 +- crates/tasks/src/metrics.rs | 2 +- crates/tokio-util/src/event_stream.rs | 2 +- crates/tracing/src/formatter.rs | 12 +- crates/transaction-pool/src/blobstore/disk.rs | 2 +- crates/transaction-pool/src/error.rs | 27 +- crates/transaction-pool/src/identifier.rs | 16 +- crates/transaction-pool/src/maintain.rs | 2 +- crates/transaction-pool/src/ordering.rs | 2 +- crates/transaction-pool/src/pool/best.rs | 2 +- crates/transaction-pool/src/pool/events.rs | 7 +- crates/transaction-pool/src/pool/mod.rs | 26 +- crates/transaction-pool/src/pool/size.rs | 2 +- crates/transaction-pool/src/pool/state.rs | 24 +- crates/transaction-pool/src/pool/txpool.rs | 2 +- crates/transaction-pool/src/test_utils/gen.rs | 6 +- .../transaction-pool/src/test_utils/mock.rs | 208 +++---- crates/transaction-pool/src/traits.rs | 22 +- crates/transaction-pool/src/validate/task.rs | 2 +- crates/trie-parallel/src/parallel_root.rs | 2 +- crates/trie/benches/prefix_set.rs | 4 +- crates/trie/src/prefix_set/mod.rs | 2 +- crates/trie/src/trie_cursor/subnode.rs | 2 +- crates/trie/src/updates.rs | 2 +- testing/ef-tests/src/cases/blockchain_test.rs | 2 +- testing/ef-tests/src/result.rs | 2 +- 200 
files changed, 1817 insertions(+), 1954 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a94a65c658660..c64b0def995a0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -153,6 +153,7 @@ uninhabited_references = "warn" unused_peekable = "warn" unused_rounding = "warn" useless_let_if_seq = "warn" +use_self = "warn" # These are nursery lints which have findings. Allow them for now. Some are not # quite mature enough for use in our codebase and some we don't really want. @@ -175,7 +176,6 @@ significant_drop_tightening = "allow" string_lit_as_bytes = "allow" type_repetition_in_bounds = "allow" unnecessary_struct_initialization = "allow" -use_self = "allow" [workspace.package] version = "0.2.0-beta.7" @@ -440,4 +440,4 @@ test-fuzz = "5" revm = { git = "https://github.com/bluealloy/revm", rev = "a28a543" } revm-interpreter = { git = "https://github.com/bluealloy/revm", rev = "a28a543" } revm-precompile = { git = "https://github.com/bluealloy/revm", rev = "a28a543" } -revm-primitives = { git = "https://github.com/bluealloy/revm", rev = "a28a543" } \ No newline at end of file +revm-primitives = { git = "https://github.com/bluealloy/revm", rev = "a28a543" } diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 4bf413acd9013..69ff733b50300 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -86,7 +86,7 @@ impl Cli { I: IntoIterator, T: Into + Clone, { - Cli::try_parse_from(itr) + Self::try_parse_from(itr) } } diff --git a/bin/reth/src/commands/db/tui.rs b/bin/reth/src/commands/db/tui.rs index 841440a47098d..487ee48c6bbbc 100644 --- a/bin/reth/src/commands/db/tui.rs +++ b/bin/reth/src/commands/db/tui.rs @@ -67,19 +67,19 @@ impl Entries { /// if needed. 
fn set(&mut self, new_entries: Vec>) { match self { - Entries::RawValues(old_entries) => { + Self::RawValues(old_entries) => { *old_entries = new_entries.into_iter().map(|(key, value)| (key, value.into())).collect() } - Entries::Values(old_entries) => *old_entries = new_entries, + Self::Values(old_entries) => *old_entries = new_entries, } } /// Returns the length of internal [Vec]. fn len(&self) -> usize { match self { - Entries::RawValues(entries) => entries.len(), - Entries::Values(entries) => entries.len(), + Self::RawValues(entries) => entries.len(), + Self::Values(entries) => entries.len(), } } diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index d2ebe70db1850..3205392809752 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -197,13 +197,13 @@ impl Subcommands { let provider = factory.provider()?; let last = provider.last_block_number()?; let target = match self { - Subcommands::ToBlock { target } => match target { + Self::ToBlock { target } => match target { BlockHashOrNumber::Hash(hash) => provider .block_number(*hash)? .ok_or_else(|| eyre::eyre!("Block hash not found in database: {hash:?}"))?, BlockHashOrNumber::Number(num) => *num, }, - Subcommands::NumBlocks { amount } => last.saturating_sub(*amount), + Self::NumBlocks { amount } => last.saturating_sub(*amount), } + 1; if target > last { eyre::bail!("Target block number is higher than the latest block number") diff --git a/crates/blockchain-tree-api/src/error.rs b/crates/blockchain-tree-api/src/error.rs index c48a97676983e..784b8532edaba 100644 --- a/crates/blockchain-tree-api/src/error.rs +++ b/crates/blockchain-tree-api/src/error.rs @@ -81,17 +81,14 @@ impl CanonicalError { /// Returns `true` if the underlying error matches /// [BlockchainTreeError::BlockHashNotFoundInChain]. 
pub fn is_block_hash_not_found(&self) -> bool { - matches!( - self, - CanonicalError::BlockchainTree(BlockchainTreeError::BlockHashNotFoundInChain { .. }) - ) + matches!(self, Self::BlockchainTree(BlockchainTreeError::BlockHashNotFoundInChain { .. })) } /// Returns `Some(BlockNumber)` if the underlying error matches /// [CanonicalError::OptimisticTargetRevert]. pub fn optimistic_revert_block_number(&self) -> Option { match self { - CanonicalError::OptimisticTargetRevert(block_number) => Some(*block_number), + Self::OptimisticTargetRevert(block_number) => Some(*block_number), _ => None, } } @@ -242,25 +239,25 @@ pub enum InsertBlockErrorKind { impl InsertBlockErrorKind { /// Returns true if the error is a tree error pub fn is_tree_error(&self) -> bool { - matches!(self, InsertBlockErrorKind::Tree(_)) + matches!(self, Self::Tree(_)) } /// Returns true if the error is a consensus error pub fn is_consensus_error(&self) -> bool { - matches!(self, InsertBlockErrorKind::Consensus(_)) + matches!(self, Self::Consensus(_)) } /// Returns true if this error is a state root error pub fn is_state_root_error(&self) -> bool { // we need to get the state root errors inside of the different variant branches match self { - InsertBlockErrorKind::Execution(err) => { + Self::Execution(err) => { matches!( err, BlockExecutionError::Validation(BlockValidationError::StateRoot { .. }) ) } - InsertBlockErrorKind::Canonical(err) => { + Self::Canonical(err) => { matches!( err, CanonicalError::Validation(BlockValidationError::StateRoot { .. }) | @@ -270,7 +267,7 @@ impl InsertBlockErrorKind { ) ) } - InsertBlockErrorKind::Provider(err) => { + Self::Provider(err) => { matches!( err, ProviderError::StateRootMismatch(_) | ProviderError::UnwindStateRootMismatch(_) @@ -285,9 +282,9 @@ impl InsertBlockErrorKind { /// This is intended to be used to determine if the block should be marked as invalid. 
pub fn is_invalid_block(&self) -> bool { match self { - InsertBlockErrorKind::SenderRecovery | InsertBlockErrorKind::Consensus(_) => true, + Self::SenderRecovery | Self::Consensus(_) => true, // other execution errors that are considered internal errors - InsertBlockErrorKind::Execution(err) => { + Self::Execution(err) => { match err { BlockExecutionError::Validation(_) | BlockExecutionError::Consensus(_) => { // this is caused by an invalid block @@ -303,7 +300,7 @@ impl InsertBlockErrorKind { BlockExecutionError::Other(_) => false, } } - InsertBlockErrorKind::Tree(err) => { + Self::Tree(err) => { match err { BlockchainTreeError::PendingBlockIsFinalized { .. } => { // the block's number is lower than the finalized block's number @@ -317,11 +314,11 @@ impl InsertBlockErrorKind { BlockchainTreeError::GenesisBlockHasNoParent => false, } } - InsertBlockErrorKind::Provider(_) | InsertBlockErrorKind::Internal(_) => { + Self::Provider(_) | Self::Internal(_) => { // any other error, such as database errors, are considered internal errors false } - InsertBlockErrorKind::Canonical(err) => match err { + Self::Canonical(err) => match err { CanonicalError::BlockchainTree(_) | CanonicalError::CanonicalCommit(_) | CanonicalError::CanonicalRevert(_) | @@ -329,7 +326,7 @@ impl InsertBlockErrorKind { CanonicalError::Validation(_) => true, CanonicalError::Provider(_) => false, }, - InsertBlockErrorKind::BlockchainTree(_) => false, + Self::BlockchainTree(_) => false, } } @@ -337,7 +334,7 @@ impl InsertBlockErrorKind { pub fn is_block_pre_merge(&self) -> bool { matches!( self, - InsertBlockErrorKind::Execution(BlockExecutionError::Validation( + Self::Execution(BlockExecutionError::Validation( BlockValidationError::BlockPreMerge { .. 
} )) ) @@ -345,18 +342,18 @@ impl InsertBlockErrorKind { /// Returns true if the error is an execution error pub fn is_execution_error(&self) -> bool { - matches!(self, InsertBlockErrorKind::Execution(_)) + matches!(self, Self::Execution(_)) } /// Returns true if the error is an internal error pub fn is_internal(&self) -> bool { - matches!(self, InsertBlockErrorKind::Internal(_)) + matches!(self, Self::Internal(_)) } /// Returns the error if it is a tree error pub fn as_tree_error(&self) -> Option { match self { - InsertBlockErrorKind::Tree(err) => Some(*err), + Self::Tree(err) => Some(*err), _ => None, } } @@ -364,7 +361,7 @@ impl InsertBlockErrorKind { /// Returns the error if it is a consensus error pub fn as_consensus_error(&self) -> Option<&ConsensusError> { match self { - InsertBlockErrorKind::Consensus(err) => Some(err), + Self::Consensus(err) => Some(err), _ => None, } } @@ -372,7 +369,7 @@ impl InsertBlockErrorKind { /// Returns the error if it is an execution error pub fn as_execution_error(&self) -> Option<&BlockExecutionError> { match self { - InsertBlockErrorKind::Execution(err) => Some(err), + Self::Execution(err) => Some(err), _ => None, } } diff --git a/crates/blockchain-tree-api/src/lib.rs b/crates/blockchain-tree-api/src/lib.rs index 19fa256ebc73d..ce8bc1bebd25d 100644 --- a/crates/blockchain-tree-api/src/lib.rs +++ b/crates/blockchain-tree-api/src/lib.rs @@ -140,17 +140,17 @@ pub enum BlockValidationKind { impl BlockValidationKind { /// Returns true if the state root should be validated if possible. 
pub fn is_exhaustive(&self) -> bool { - matches!(self, BlockValidationKind::Exhaustive) + matches!(self, Self::Exhaustive) } } impl std::fmt::Display for BlockValidationKind { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - BlockValidationKind::Exhaustive => { + Self::Exhaustive => { write!(f, "Exhaustive") } - BlockValidationKind::SkipStateRootValidation => { + Self::SkipStateRootValidation => { write!(f, "SkipStateRootValidation") } } @@ -179,22 +179,22 @@ impl CanonicalOutcome { /// Returns the header of the block that was made canonical. pub fn header(&self) -> &SealedHeader { match self { - CanonicalOutcome::AlreadyCanonical { header, .. } => header, - CanonicalOutcome::Committed { head } => head, + Self::AlreadyCanonical { header, .. } => header, + Self::Committed { head } => head, } } /// Consumes the outcome and returns the header of the block that was made canonical. pub fn into_header(self) -> SealedHeader { match self { - CanonicalOutcome::AlreadyCanonical { header, .. } => header, - CanonicalOutcome::Committed { head } => head, + Self::AlreadyCanonical { header, .. } => header, + Self::Committed { head } => head, } } /// Returns true if the block was already canonical. pub fn is_already_canonical(&self) -> bool { - matches!(self, CanonicalOutcome::AlreadyCanonical { .. }) + matches!(self, Self::AlreadyCanonical { .. }) } } @@ -241,7 +241,7 @@ impl BlockAttachment { /// Returns `true` if the block is canonical or a descendant of the canonical head. 
#[inline] pub const fn is_canonical(&self) -> bool { - matches!(self, BlockAttachment::Canonical) + matches!(self, Self::Canonical) } } diff --git a/crates/blockchain-tree/src/metrics.rs b/crates/blockchain-tree/src/metrics.rs index 71a4475c5c585..403b3014c3e4d 100644 --- a/crates/blockchain-tree/src/metrics.rs +++ b/crates/blockchain-tree/src/metrics.rs @@ -92,21 +92,17 @@ pub(crate) enum MakeCanonicalAction { impl MakeCanonicalAction { fn as_str(&self) -> &'static str { match self { - MakeCanonicalAction::CloneOldBlocks => "clone old blocks", - MakeCanonicalAction::FindCanonicalHeader => "find canonical header", - MakeCanonicalAction::SplitChain => "split chain", - MakeCanonicalAction::SplitChainForks => "split chain forks", - MakeCanonicalAction::MergeAllChains => "merge all chains", - MakeCanonicalAction::UpdateCanonicalIndex => "update canonical index", - MakeCanonicalAction::RetrieveStateTrieUpdates => "retrieve state trie updates", - MakeCanonicalAction::CommitCanonicalChainToDatabase => { - "commit canonical chain to database" - } - MakeCanonicalAction::RevertCanonicalChainFromDatabase => { - "revert canonical chain from database" - } - MakeCanonicalAction::InsertOldCanonicalChain => "insert old canonical chain", - MakeCanonicalAction::ClearTrieUpdatesForOtherChilds => { + Self::CloneOldBlocks => "clone old blocks", + Self::FindCanonicalHeader => "find canonical header", + Self::SplitChain => "split chain", + Self::SplitChainForks => "split chain forks", + Self::MergeAllChains => "merge all chains", + Self::UpdateCanonicalIndex => "update canonical index", + Self::RetrieveStateTrieUpdates => "retrieve state trie updates", + Self::CommitCanonicalChainToDatabase => "commit canonical chain to database", + Self::RevertCanonicalChainFromDatabase => "revert canonical chain from database", + Self::InsertOldCanonicalChain => "insert old canonical chain", + Self::ClearTrieUpdatesForOtherChilds => { "clear trie updates of other childs chains after fork choice update" 
} } diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs index f02890654c3c4..86501a6d18b64 100644 --- a/crates/blockchain-tree/src/state.rs +++ b/crates/blockchain-tree/src/state.rs @@ -124,6 +124,6 @@ impl From for u64 { #[cfg(test)] impl From for BlockchainId { fn from(value: u64) -> Self { - BlockchainId(value) + Self(value) } } diff --git a/crates/consensus/auto-seal/src/mode.rs b/crates/consensus/auto-seal/src/mode.rs index b124010e62814..2ff918af63dc2 100644 --- a/crates/consensus/auto-seal/src/mode.rs +++ b/crates/consensus/auto-seal/src/mode.rs @@ -33,7 +33,7 @@ impl MiningMode { /// Creates a new instant mining mode that listens for new transactions and tries to build /// non-empty blocks as soon as transactions arrive. pub fn instant(max_transactions: usize, listener: Receiver) -> Self { - MiningMode::Auto(ReadyTransactionMiner { + Self::Auto(ReadyTransactionMiner { max_transactions, has_pending_txs: None, rx: ReceiverStream::new(listener).fuse(), @@ -42,7 +42,7 @@ impl MiningMode { /// Creates a new interval miner that builds a block ever `duration`. pub fn interval(duration: Duration) -> Self { - MiningMode::FixedBlockTime(FixedBlockTimeMiner::new(duration)) + Self::FixedBlockTime(FixedBlockTimeMiner::new(duration)) } /// polls the Pool and returns those transactions that should be put in a block, if any. 
@@ -55,9 +55,9 @@ impl MiningMode { Pool: TransactionPool, { match self { - MiningMode::None => Poll::Pending, - MiningMode::Auto(miner) => miner.poll(pool, cx), - MiningMode::FixedBlockTime(miner) => miner.poll(pool, cx), + Self::None => Poll::Pending, + Self::Auto(miner) => miner.poll(pool, cx), + Self::FixedBlockTime(miner) => miner.poll(pool, cx), } } } @@ -65,9 +65,9 @@ impl MiningMode { impl fmt::Display for MiningMode { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let kind = match self { - MiningMode::None => "None", - MiningMode::Auto(_) => "Auto", - MiningMode::FixedBlockTime(_) => "FixedBlockTime", + Self::None => "None", + Self::Auto(_) => "Auto", + Self::FixedBlockTime(_) => "FixedBlockTime", }; write!(f, "{kind}") } diff --git a/crates/consensus/beacon/src/engine/forkchoice.rs b/crates/consensus/beacon/src/engine/forkchoice.rs index 461881c6488cf..2eb115b1a933a 100644 --- a/crates/consensus/beacon/src/engine/forkchoice.rs +++ b/crates/consensus/beacon/src/engine/forkchoice.rs @@ -107,15 +107,15 @@ pub enum ForkchoiceStatus { impl ForkchoiceStatus { pub(crate) fn is_valid(&self) -> bool { - matches!(self, ForkchoiceStatus::Valid) + matches!(self, Self::Valid) } pub(crate) fn is_invalid(&self) -> bool { - matches!(self, ForkchoiceStatus::Invalid) + matches!(self, Self::Invalid) } pub(crate) fn is_syncing(&self) -> bool { - matches!(self, ForkchoiceStatus::Syncing) + matches!(self, Self::Syncing) } /// Converts the general purpose [PayloadStatusEnum] into a [ForkchoiceStatus]. @@ -123,17 +123,17 @@ impl ForkchoiceStatus { match status { PayloadStatusEnum::Valid | PayloadStatusEnum::Accepted => { // `Accepted` is only returned on `newPayload`. It would be a valid state here. - ForkchoiceStatus::Valid + Self::Valid } - PayloadStatusEnum::Invalid { .. } => ForkchoiceStatus::Invalid, - PayloadStatusEnum::Syncing => ForkchoiceStatus::Syncing, + PayloadStatusEnum::Invalid { .. 
} => Self::Invalid, + PayloadStatusEnum::Syncing => Self::Syncing, } } } impl From for ForkchoiceStatus { fn from(status: PayloadStatusEnum) -> Self { - ForkchoiceStatus::from_payload_status(&status) + Self::from_payload_status(&status) } } @@ -149,11 +149,11 @@ impl ForkchoiceStateHash { /// Tries to find a matching hash in the given [ForkchoiceState]. pub(crate) fn find(state: &ForkchoiceState, hash: B256) -> Option { if state.head_block_hash == hash { - Some(ForkchoiceStateHash::Head(hash)) + Some(Self::Head(hash)) } else if state.safe_block_hash == hash { - Some(ForkchoiceStateHash::Safe(hash)) + Some(Self::Safe(hash)) } else if state.finalized_block_hash == hash { - Some(ForkchoiceStateHash::Finalized(hash)) + Some(Self::Finalized(hash)) } else { None } @@ -161,16 +161,14 @@ impl ForkchoiceStateHash { /// Returns true if this is the head hash of the [ForkchoiceState] pub(crate) fn is_head(&self) -> bool { - matches!(self, ForkchoiceStateHash::Head(_)) + matches!(self, Self::Head(_)) } } impl AsRef for ForkchoiceStateHash { fn as_ref(&self) -> &B256 { match self { - ForkchoiceStateHash::Head(h) | - ForkchoiceStateHash::Safe(h) | - ForkchoiceStateHash::Finalized(h) => h, + Self::Head(h) | Self::Safe(h) | Self::Finalized(h) => h, } } } diff --git a/crates/consensus/beacon/src/engine/hooks/prune.rs b/crates/consensus/beacon/src/engine/hooks/prune.rs index e6b5306e3bc6b..b70bd6d18a3d5 100644 --- a/crates/consensus/beacon/src/engine/hooks/prune.rs +++ b/crates/consensus/beacon/src/engine/hooks/prune.rs @@ -167,7 +167,7 @@ impl From for EngineHookError { fn from(err: PrunerError) -> Self { match err { PrunerError::PruneSegment(_) | PrunerError::InconsistentData(_) => { - EngineHookError::Internal(Box::new(err)) + Self::Internal(Box::new(err)) } PrunerError::Database(err) => RethError::Database(err).into(), PrunerError::Provider(err) => RethError::Provider(err).into(), diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs 
index 810e3e7483676..730b0a4d035a8 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -419,7 +419,7 @@ enum PipelineState { impl PipelineState { /// Returns `true` if the state matches idle. fn is_idle(&self) -> bool { - matches!(self, PipelineState::Idle(_)) + matches!(self, Self::Idle(_)) } } diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index e90e73e932995..a84808800cc8e 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -328,7 +328,7 @@ pub enum ConsensusError { impl ConsensusError { /// Returns `true` if the error is a state root error. pub fn is_state_root_error(&self) -> bool { - matches!(self, ConsensusError::BodyStateRootDiff(_)) + matches!(self, Self::BodyStateRootDiff(_)) } } diff --git a/crates/errors/src/error.rs b/crates/errors/src/error.rs index 4017be351aa9d..e74d582759b79 100644 --- a/crates/errors/src/error.rs +++ b/crates/errors/src/error.rs @@ -46,24 +46,24 @@ impl RethError { where E: std::error::Error + Send + Sync + 'static, { - RethError::Other(Box::new(error)) + Self::Other(Box::new(error)) } /// Create a new `RethError` from a given message. 
pub fn msg(msg: impl Display) -> Self { - RethError::Other(msg.to_string().into()) + Self::Other(msg.to_string().into()) } } impl From for RethError { fn from(error: BlockchainTreeError) -> Self { - RethError::Canonical(CanonicalError::BlockchainTree(error)) + Self::Canonical(CanonicalError::BlockchainTree(error)) } } impl From for RethError { fn from(err: FsPathError) -> Self { - RethError::other(err) + Self::other(err) } } diff --git a/crates/ethereum-forks/src/forkid.rs b/crates/ethereum-forks/src/forkid.rs index b5d031c5e00e2..cf28cec88c031 100644 --- a/crates/ethereum-forks/src/forkid.rs +++ b/crates/ethereum-forks/src/forkid.rs @@ -87,9 +87,8 @@ impl PartialOrd for ForkFilterKey { impl Ord for ForkFilterKey { fn cmp(&self, other: &Self) -> Ordering { match (self, other) { - (ForkFilterKey::Block(a), ForkFilterKey::Block(b)) | - (ForkFilterKey::Time(a), ForkFilterKey::Time(b)) => a.cmp(b), - (ForkFilterKey::Block(_), ForkFilterKey::Time(_)) => Ordering::Less, + (Self::Block(a), Self::Block(b)) | (Self::Time(a), Self::Time(b)) => a.cmp(b), + (Self::Block(_), Self::Time(_)) => Ordering::Less, _ => Ordering::Greater, } } diff --git a/crates/ethereum-forks/src/hardfork.rs b/crates/ethereum-forks/src/hardfork.rs index 41d1f13021a09..11e9cac455f13 100644 --- a/crates/ethereum-forks/src/hardfork.rs +++ b/crates/ethereum-forks/src/hardfork.rs @@ -81,7 +81,7 @@ pub enum Hardfork { impl Hardfork { /// Retrieves the consensus type for the specified hardfork. 
pub fn consensus_type(&self) -> ConsensusType { - if *self >= Hardfork::Paris { + if *self >= Self::Paris { ConsensusType::ProofOfStake } else { ConsensusType::ProofOfWork @@ -127,23 +127,23 @@ impl Hardfork { pub fn mainnet_activation_block(&self) -> Option { #[allow(unreachable_patterns)] match self { - Hardfork::Frontier => Some(0), - Hardfork::Homestead => Some(1150000), - Hardfork::Dao => Some(1920000), - Hardfork::Tangerine => Some(2463000), - Hardfork::SpuriousDragon => Some(2675000), - Hardfork::Byzantium => Some(4370000), - Hardfork::Constantinople => Some(7280000), - Hardfork::Petersburg => Some(7280000), - Hardfork::Istanbul => Some(9069000), - Hardfork::MuirGlacier => Some(9200000), - Hardfork::Berlin => Some(12244000), - Hardfork::London => Some(12965000), - Hardfork::ArrowGlacier => Some(13773000), - Hardfork::GrayGlacier => Some(15050000), - Hardfork::Paris => Some(15537394), - Hardfork::Shanghai => Some(17034870), - Hardfork::Cancun => Some(19426587), + Self::Frontier => Some(0), + Self::Homestead => Some(1150000), + Self::Dao => Some(1920000), + Self::Tangerine => Some(2463000), + Self::SpuriousDragon => Some(2675000), + Self::Byzantium => Some(4370000), + Self::Constantinople => Some(7280000), + Self::Petersburg => Some(7280000), + Self::Istanbul => Some(9069000), + Self::MuirGlacier => Some(9200000), + Self::Berlin => Some(12244000), + Self::London => Some(12965000), + Self::ArrowGlacier => Some(13773000), + Self::GrayGlacier => Some(15050000), + Self::Paris => Some(15537394), + Self::Shanghai => Some(17034870), + Self::Cancun => Some(19426587), _ => None, } @@ -153,23 +153,23 @@ impl Hardfork { pub fn sepolia_activation_block(&self) -> Option { #[allow(unreachable_patterns)] match self { - Hardfork::Paris => Some(1735371), - Hardfork::Shanghai => Some(2990908), - Hardfork::Cancun => Some(5187023), - Hardfork::Frontier => Some(0), - Hardfork::Homestead => Some(0), - Hardfork::Dao => Some(0), - Hardfork::Tangerine => Some(0), - 
Hardfork::SpuriousDragon => Some(0), - Hardfork::Byzantium => Some(0), - Hardfork::Constantinople => Some(0), - Hardfork::Petersburg => Some(0), - Hardfork::Istanbul => Some(0), - Hardfork::MuirGlacier => Some(0), - Hardfork::Berlin => Some(0), - Hardfork::London => Some(0), - Hardfork::ArrowGlacier => Some(0), - Hardfork::GrayGlacier => Some(0), + Self::Paris => Some(1735371), + Self::Shanghai => Some(2990908), + Self::Cancun => Some(5187023), + Self::Frontier => Some(0), + Self::Homestead => Some(0), + Self::Dao => Some(0), + Self::Tangerine => Some(0), + Self::SpuriousDragon => Some(0), + Self::Byzantium => Some(0), + Self::Constantinople => Some(0), + Self::Petersburg => Some(0), + Self::Istanbul => Some(0), + Self::MuirGlacier => Some(0), + Self::Berlin => Some(0), + Self::London => Some(0), + Self::ArrowGlacier => Some(0), + Self::GrayGlacier => Some(0), _ => None, } } @@ -178,24 +178,24 @@ impl Hardfork { pub fn arbitrum_sepolia_activation_block(&self) -> Option { #[allow(unreachable_patterns)] match self { - Hardfork::Frontier => Some(0), - Hardfork::Homestead => Some(0), - Hardfork::Dao => Some(0), - Hardfork::Tangerine => Some(0), - Hardfork::SpuriousDragon => Some(0), - Hardfork::Byzantium => Some(0), - Hardfork::Constantinople => Some(0), - Hardfork::Petersburg => Some(0), - Hardfork::Istanbul => Some(0), - Hardfork::MuirGlacier => Some(0), - Hardfork::Berlin => Some(0), - Hardfork::London => Some(0), - Hardfork::ArrowGlacier => Some(0), - Hardfork::GrayGlacier => Some(0), - Hardfork::Paris => Some(0), - Hardfork::Shanghai => Some(10653737), + Self::Frontier => Some(0), + Self::Homestead => Some(0), + Self::Dao => Some(0), + Self::Tangerine => Some(0), + Self::SpuriousDragon => Some(0), + Self::Byzantium => Some(0), + Self::Constantinople => Some(0), + Self::Petersburg => Some(0), + Self::Istanbul => Some(0), + Self::MuirGlacier => Some(0), + Self::Berlin => Some(0), + Self::London => Some(0), + Self::ArrowGlacier => Some(0), + Self::GrayGlacier => 
Some(0), + Self::Paris => Some(0), + Self::Shanghai => Some(10653737), // Hardfork::ArbOS11 => Some(10653737), - Hardfork::Cancun => Some(18683405), + Self::Cancun => Some(18683405), // Hardfork::ArbOS20Atlas => Some(18683405), _ => None, } @@ -205,24 +205,24 @@ impl Hardfork { pub fn arbitrum_activation_block(&self) -> Option { #[allow(unreachable_patterns)] match self { - Hardfork::Frontier => Some(0), - Hardfork::Homestead => Some(0), - Hardfork::Dao => Some(0), - Hardfork::Tangerine => Some(0), - Hardfork::SpuriousDragon => Some(0), - Hardfork::Byzantium => Some(0), - Hardfork::Constantinople => Some(0), - Hardfork::Petersburg => Some(0), - Hardfork::Istanbul => Some(0), - Hardfork::MuirGlacier => Some(0), - Hardfork::Berlin => Some(0), - Hardfork::London => Some(0), - Hardfork::ArrowGlacier => Some(0), - Hardfork::GrayGlacier => Some(0), - Hardfork::Paris => Some(0), - Hardfork::Shanghai => Some(184097479), + Self::Frontier => Some(0), + Self::Homestead => Some(0), + Self::Dao => Some(0), + Self::Tangerine => Some(0), + Self::SpuriousDragon => Some(0), + Self::Byzantium => Some(0), + Self::Constantinople => Some(0), + Self::Petersburg => Some(0), + Self::Istanbul => Some(0), + Self::MuirGlacier => Some(0), + Self::Berlin => Some(0), + Self::London => Some(0), + Self::ArrowGlacier => Some(0), + Self::GrayGlacier => Some(0), + Self::Paris => Some(0), + Self::Shanghai => Some(184097479), // Hardfork::ArbOS11 => Some(184097479), - Hardfork::Cancun => Some(190301729), + Self::Cancun => Some(190301729), // Hardfork::ArbOS20Atlas => Some(190301729), _ => None, } @@ -233,27 +233,27 @@ impl Hardfork { pub fn base_sepolia_activation_block(&self) -> Option { #[allow(unreachable_patterns)] match self { - Hardfork::Frontier => Some(0), - Hardfork::Homestead => Some(0), - Hardfork::Dao => Some(0), - Hardfork::Tangerine => Some(0), - Hardfork::SpuriousDragon => Some(0), - Hardfork::Byzantium => Some(0), - Hardfork::Constantinople => Some(0), - Hardfork::Petersburg => 
Some(0), - Hardfork::Istanbul => Some(0), - Hardfork::MuirGlacier => Some(0), - Hardfork::Berlin => Some(0), - Hardfork::London => Some(0), - Hardfork::ArrowGlacier => Some(0), - Hardfork::GrayGlacier => Some(0), - Hardfork::Paris => Some(0), - Hardfork::Bedrock => Some(0), - Hardfork::Regolith => Some(0), - Hardfork::Shanghai => Some(2106456), - Hardfork::Canyon => Some(2106456), - Hardfork::Cancun => Some(6383256), - Hardfork::Ecotone => Some(6383256), + Self::Frontier => Some(0), + Self::Homestead => Some(0), + Self::Dao => Some(0), + Self::Tangerine => Some(0), + Self::SpuriousDragon => Some(0), + Self::Byzantium => Some(0), + Self::Constantinople => Some(0), + Self::Petersburg => Some(0), + Self::Istanbul => Some(0), + Self::MuirGlacier => Some(0), + Self::Berlin => Some(0), + Self::London => Some(0), + Self::ArrowGlacier => Some(0), + Self::GrayGlacier => Some(0), + Self::Paris => Some(0), + Self::Bedrock => Some(0), + Self::Regolith => Some(0), + Self::Shanghai => Some(2106456), + Self::Canyon => Some(2106456), + Self::Cancun => Some(6383256), + Self::Ecotone => Some(6383256), _ => None, } } @@ -263,27 +263,27 @@ impl Hardfork { pub fn base_mainnet_activation_block(&self) -> Option { #[allow(unreachable_patterns)] match self { - Hardfork::Frontier => Some(0), - Hardfork::Homestead => Some(0), - Hardfork::Dao => Some(0), - Hardfork::Tangerine => Some(0), - Hardfork::SpuriousDragon => Some(0), - Hardfork::Byzantium => Some(0), - Hardfork::Constantinople => Some(0), - Hardfork::Petersburg => Some(0), - Hardfork::Istanbul => Some(0), - Hardfork::MuirGlacier => Some(0), - Hardfork::Berlin => Some(0), - Hardfork::London => Some(0), - Hardfork::ArrowGlacier => Some(0), - Hardfork::GrayGlacier => Some(0), - Hardfork::Paris => Some(0), - Hardfork::Bedrock => Some(0), - Hardfork::Regolith => Some(0), - Hardfork::Shanghai => Some(9101527), - Hardfork::Canyon => Some(9101527), - Hardfork::Cancun => Some(11188936), - Hardfork::Ecotone => Some(11188936), + Self::Frontier 
=> Some(0), + Self::Homestead => Some(0), + Self::Dao => Some(0), + Self::Tangerine => Some(0), + Self::SpuriousDragon => Some(0), + Self::Byzantium => Some(0), + Self::Constantinople => Some(0), + Self::Petersburg => Some(0), + Self::Istanbul => Some(0), + Self::MuirGlacier => Some(0), + Self::Berlin => Some(0), + Self::London => Some(0), + Self::ArrowGlacier => Some(0), + Self::GrayGlacier => Some(0), + Self::Paris => Some(0), + Self::Bedrock => Some(0), + Self::Regolith => Some(0), + Self::Shanghai => Some(9101527), + Self::Canyon => Some(9101527), + Self::Cancun => Some(11188936), + Self::Ecotone => Some(11188936), _ => None, } } @@ -292,21 +292,21 @@ impl Hardfork { fn holesky_activation_block(&self) -> Option { #[allow(unreachable_patterns)] match self { - Hardfork::Dao => Some(0), - Hardfork::Tangerine => Some(0), - Hardfork::SpuriousDragon => Some(0), - Hardfork::Byzantium => Some(0), - Hardfork::Constantinople => Some(0), - Hardfork::Petersburg => Some(0), - Hardfork::Istanbul => Some(0), - Hardfork::MuirGlacier => Some(0), - Hardfork::Berlin => Some(0), - Hardfork::London => Some(0), - Hardfork::ArrowGlacier => Some(0), - Hardfork::GrayGlacier => Some(0), - Hardfork::Paris => Some(0), - Hardfork::Shanghai => Some(6698), - Hardfork::Cancun => Some(894733), + Self::Dao => Some(0), + Self::Tangerine => Some(0), + Self::SpuriousDragon => Some(0), + Self::Byzantium => Some(0), + Self::Constantinople => Some(0), + Self::Petersburg => Some(0), + Self::Istanbul => Some(0), + Self::MuirGlacier => Some(0), + Self::Berlin => Some(0), + Self::London => Some(0), + Self::ArrowGlacier => Some(0), + Self::GrayGlacier => Some(0), + Self::Paris => Some(0), + Self::Shanghai => Some(6698), + Self::Cancun => Some(894733), _ => None, } } @@ -339,23 +339,23 @@ impl Hardfork { pub fn mainnet_activation_timestamp(&self) -> Option { #[allow(unreachable_patterns)] match self { - Hardfork::Frontier => Some(1438226773), - Hardfork::Homestead => Some(1457938193), - Hardfork::Dao => 
Some(1468977640), - Hardfork::Tangerine => Some(1476753571), - Hardfork::SpuriousDragon => Some(1479788144), - Hardfork::Byzantium => Some(1508131331), - Hardfork::Constantinople => Some(1551340324), - Hardfork::Petersburg => Some(1551340324), - Hardfork::Istanbul => Some(1575807909), - Hardfork::MuirGlacier => Some(1577953849), - Hardfork::Berlin => Some(1618481223), - Hardfork::London => Some(1628166822), - Hardfork::ArrowGlacier => Some(1639036523), - Hardfork::GrayGlacier => Some(1656586444), - Hardfork::Paris => Some(1663224162), - Hardfork::Shanghai => Some(1681338455), - Hardfork::Cancun => Some(1710338135), + Self::Frontier => Some(1438226773), + Self::Homestead => Some(1457938193), + Self::Dao => Some(1468977640), + Self::Tangerine => Some(1476753571), + Self::SpuriousDragon => Some(1479788144), + Self::Byzantium => Some(1508131331), + Self::Constantinople => Some(1551340324), + Self::Petersburg => Some(1551340324), + Self::Istanbul => Some(1575807909), + Self::MuirGlacier => Some(1577953849), + Self::Berlin => Some(1618481223), + Self::London => Some(1628166822), + Self::ArrowGlacier => Some(1639036523), + Self::GrayGlacier => Some(1656586444), + Self::Paris => Some(1663224162), + Self::Shanghai => Some(1681338455), + Self::Cancun => Some(1710338135), // upcoming hardforks _ => None, @@ -366,23 +366,23 @@ impl Hardfork { pub fn sepolia_activation_timestamp(&self) -> Option { #[allow(unreachable_patterns)] match self { - Hardfork::Frontier => Some(1633267481), - Hardfork::Homestead => Some(1633267481), - Hardfork::Dao => Some(1633267481), - Hardfork::Tangerine => Some(1633267481), - Hardfork::SpuriousDragon => Some(1633267481), - Hardfork::Byzantium => Some(1633267481), - Hardfork::Constantinople => Some(1633267481), - Hardfork::Petersburg => Some(1633267481), - Hardfork::Istanbul => Some(1633267481), - Hardfork::MuirGlacier => Some(1633267481), - Hardfork::Berlin => Some(1633267481), - Hardfork::London => Some(1633267481), - Hardfork::ArrowGlacier => 
Some(1633267481), - Hardfork::GrayGlacier => Some(1633267481), - Hardfork::Paris => Some(1633267481), - Hardfork::Shanghai => Some(1677557088), - Hardfork::Cancun => Some(1706655072), + Self::Frontier => Some(1633267481), + Self::Homestead => Some(1633267481), + Self::Dao => Some(1633267481), + Self::Tangerine => Some(1633267481), + Self::SpuriousDragon => Some(1633267481), + Self::Byzantium => Some(1633267481), + Self::Constantinople => Some(1633267481), + Self::Petersburg => Some(1633267481), + Self::Istanbul => Some(1633267481), + Self::MuirGlacier => Some(1633267481), + Self::Berlin => Some(1633267481), + Self::London => Some(1633267481), + Self::ArrowGlacier => Some(1633267481), + Self::GrayGlacier => Some(1633267481), + Self::Paris => Some(1633267481), + Self::Shanghai => Some(1677557088), + Self::Cancun => Some(1706655072), _ => None, } } @@ -391,23 +391,23 @@ impl Hardfork { pub fn holesky_activation_timestamp(&self) -> Option { #[allow(unreachable_patterns)] match self { - Hardfork::Shanghai => Some(1696000704), - Hardfork::Cancun => Some(1707305664), - Hardfork::Frontier => Some(1695902100), - Hardfork::Homestead => Some(1695902100), - Hardfork::Dao => Some(1695902100), - Hardfork::Tangerine => Some(1695902100), - Hardfork::SpuriousDragon => Some(1695902100), - Hardfork::Byzantium => Some(1695902100), - Hardfork::Constantinople => Some(1695902100), - Hardfork::Petersburg => Some(1695902100), - Hardfork::Istanbul => Some(1695902100), - Hardfork::MuirGlacier => Some(1695902100), - Hardfork::Berlin => Some(1695902100), - Hardfork::London => Some(1695902100), - Hardfork::ArrowGlacier => Some(1695902100), - Hardfork::GrayGlacier => Some(1695902100), - Hardfork::Paris => Some(1695902100), + Self::Shanghai => Some(1696000704), + Self::Cancun => Some(1707305664), + Self::Frontier => Some(1695902100), + Self::Homestead => Some(1695902100), + Self::Dao => Some(1695902100), + Self::Tangerine => Some(1695902100), + Self::SpuriousDragon => Some(1695902100), + 
Self::Byzantium => Some(1695902100), + Self::Constantinople => Some(1695902100), + Self::Petersburg => Some(1695902100), + Self::Istanbul => Some(1695902100), + Self::MuirGlacier => Some(1695902100), + Self::Berlin => Some(1695902100), + Self::London => Some(1695902100), + Self::ArrowGlacier => Some(1695902100), + Self::GrayGlacier => Some(1695902100), + Self::Paris => Some(1695902100), _ => None, } } @@ -417,24 +417,24 @@ impl Hardfork { pub fn arbitrum_sepolia_activation_timestamp(&self) -> Option { #[allow(unreachable_patterns)] match self { - Hardfork::Frontier => Some(1692726996), - Hardfork::Homestead => Some(1692726996), - Hardfork::Dao => Some(1692726996), - Hardfork::Tangerine => Some(1692726996), - Hardfork::SpuriousDragon => Some(1692726996), - Hardfork::Byzantium => Some(1692726996), - Hardfork::Constantinople => Some(1692726996), - Hardfork::Petersburg => Some(1692726996), - Hardfork::Istanbul => Some(1692726996), - Hardfork::MuirGlacier => Some(1692726996), - Hardfork::Berlin => Some(1692726996), - Hardfork::London => Some(1692726996), - Hardfork::ArrowGlacier => Some(1692726996), - Hardfork::GrayGlacier => Some(1692726996), - Hardfork::Paris => Some(1692726996), - Hardfork::Shanghai => Some(1706634000), + Self::Frontier => Some(1692726996), + Self::Homestead => Some(1692726996), + Self::Dao => Some(1692726996), + Self::Tangerine => Some(1692726996), + Self::SpuriousDragon => Some(1692726996), + Self::Byzantium => Some(1692726996), + Self::Constantinople => Some(1692726996), + Self::Petersburg => Some(1692726996), + Self::Istanbul => Some(1692726996), + Self::MuirGlacier => Some(1692726996), + Self::Berlin => Some(1692726996), + Self::London => Some(1692726996), + Self::ArrowGlacier => Some(1692726996), + Self::GrayGlacier => Some(1692726996), + Self::Paris => Some(1692726996), + Self::Shanghai => Some(1706634000), // Hardfork::ArbOS11 => Some(1706634000), - Hardfork::Cancun => Some(1709229600), + Self::Cancun => Some(1709229600), // 
Hardfork::ArbOS20Atlas => Some(1709229600), _ => None, } @@ -444,24 +444,24 @@ impl Hardfork { pub fn arbitrum_activation_timestamp(&self) -> Option { #[allow(unreachable_patterns)] match self { - Hardfork::Frontier => Some(1622240000), - Hardfork::Homestead => Some(1622240000), - Hardfork::Dao => Some(1622240000), - Hardfork::Tangerine => Some(1622240000), - Hardfork::SpuriousDragon => Some(1622240000), - Hardfork::Byzantium => Some(1622240000), - Hardfork::Constantinople => Some(1622240000), - Hardfork::Petersburg => Some(1622240000), - Hardfork::Istanbul => Some(1622240000), - Hardfork::MuirGlacier => Some(1622240000), - Hardfork::Berlin => Some(1622240000), - Hardfork::London => Some(1622240000), - Hardfork::ArrowGlacier => Some(1622240000), - Hardfork::GrayGlacier => Some(1622240000), - Hardfork::Paris => Some(1622240000), - Hardfork::Shanghai => Some(1708804873), + Self::Frontier => Some(1622240000), + Self::Homestead => Some(1622240000), + Self::Dao => Some(1622240000), + Self::Tangerine => Some(1622240000), + Self::SpuriousDragon => Some(1622240000), + Self::Byzantium => Some(1622240000), + Self::Constantinople => Some(1622240000), + Self::Petersburg => Some(1622240000), + Self::Istanbul => Some(1622240000), + Self::MuirGlacier => Some(1622240000), + Self::Berlin => Some(1622240000), + Self::London => Some(1622240000), + Self::ArrowGlacier => Some(1622240000), + Self::GrayGlacier => Some(1622240000), + Self::Paris => Some(1622240000), + Self::Shanghai => Some(1708804873), // Hardfork::ArbOS11 => Some(1708804873), - Hardfork::Cancun => Some(1710424089), + Self::Cancun => Some(1710424089), // Hardfork::ArbOS20Atlas => Some(1710424089), _ => None, } @@ -472,27 +472,27 @@ impl Hardfork { pub fn base_sepolia_activation_timestamp(&self) -> Option { #[allow(unreachable_patterns)] match self { - Hardfork::Frontier => Some(1695768288), - Hardfork::Homestead => Some(1695768288), - Hardfork::Dao => Some(1695768288), - Hardfork::Tangerine => Some(1695768288), - 
Hardfork::SpuriousDragon => Some(1695768288), - Hardfork::Byzantium => Some(1695768288), - Hardfork::Constantinople => Some(1695768288), - Hardfork::Petersburg => Some(1695768288), - Hardfork::Istanbul => Some(1695768288), - Hardfork::MuirGlacier => Some(1695768288), - Hardfork::Berlin => Some(1695768288), - Hardfork::London => Some(1695768288), - Hardfork::ArrowGlacier => Some(1695768288), - Hardfork::GrayGlacier => Some(1695768288), - Hardfork::Paris => Some(1695768288), - Hardfork::Bedrock => Some(1695768288), - Hardfork::Regolith => Some(1695768288), - Hardfork::Shanghai => Some(1699981200), - Hardfork::Canyon => Some(1699981200), - Hardfork::Cancun => Some(1708534800), - Hardfork::Ecotone => Some(1708534800), + Self::Frontier => Some(1695768288), + Self::Homestead => Some(1695768288), + Self::Dao => Some(1695768288), + Self::Tangerine => Some(1695768288), + Self::SpuriousDragon => Some(1695768288), + Self::Byzantium => Some(1695768288), + Self::Constantinople => Some(1695768288), + Self::Petersburg => Some(1695768288), + Self::Istanbul => Some(1695768288), + Self::MuirGlacier => Some(1695768288), + Self::Berlin => Some(1695768288), + Self::London => Some(1695768288), + Self::ArrowGlacier => Some(1695768288), + Self::GrayGlacier => Some(1695768288), + Self::Paris => Some(1695768288), + Self::Bedrock => Some(1695768288), + Self::Regolith => Some(1695768288), + Self::Shanghai => Some(1699981200), + Self::Canyon => Some(1699981200), + Self::Cancun => Some(1708534800), + Self::Ecotone => Some(1708534800), _ => None, } } @@ -502,27 +502,27 @@ impl Hardfork { pub fn base_mainnet_activation_timestamp(&self) -> Option { #[allow(unreachable_patterns)] match self { - Hardfork::Frontier => Some(1686789347), - Hardfork::Homestead => Some(1686789347), - Hardfork::Dao => Some(1686789347), - Hardfork::Tangerine => Some(1686789347), - Hardfork::SpuriousDragon => Some(1686789347), - Hardfork::Byzantium => Some(1686789347), - Hardfork::Constantinople => Some(1686789347), - 
Hardfork::Petersburg => Some(1686789347), - Hardfork::Istanbul => Some(1686789347), - Hardfork::MuirGlacier => Some(1686789347), - Hardfork::Berlin => Some(1686789347), - Hardfork::London => Some(1686789347), - Hardfork::ArrowGlacier => Some(1686789347), - Hardfork::GrayGlacier => Some(1686789347), - Hardfork::Paris => Some(1686789347), - Hardfork::Bedrock => Some(1686789347), - Hardfork::Regolith => Some(1686789347), - Hardfork::Shanghai => Some(1704992401), - Hardfork::Canyon => Some(1704992401), - Hardfork::Cancun => Some(1710374401), - Hardfork::Ecotone => Some(1710374401), + Self::Frontier => Some(1686789347), + Self::Homestead => Some(1686789347), + Self::Dao => Some(1686789347), + Self::Tangerine => Some(1686789347), + Self::SpuriousDragon => Some(1686789347), + Self::Byzantium => Some(1686789347), + Self::Constantinople => Some(1686789347), + Self::Petersburg => Some(1686789347), + Self::Istanbul => Some(1686789347), + Self::MuirGlacier => Some(1686789347), + Self::Berlin => Some(1686789347), + Self::London => Some(1686789347), + Self::ArrowGlacier => Some(1686789347), + Self::GrayGlacier => Some(1686789347), + Self::Paris => Some(1686789347), + Self::Bedrock => Some(1686789347), + Self::Regolith => Some(1686789347), + Self::Shanghai => Some(1704992401), + Self::Canyon => Some(1704992401), + Self::Cancun => Some(1710374401), + Self::Ecotone => Some(1710374401), _ => None, } } @@ -533,32 +533,32 @@ impl FromStr for Hardfork { fn from_str(s: &str) -> Result { Ok(match s.to_lowercase().as_str() { - "frontier" => Hardfork::Frontier, - "homestead" => Hardfork::Homestead, - "dao" => Hardfork::Dao, - "tangerine" => Hardfork::Tangerine, - "spuriousdragon" => Hardfork::SpuriousDragon, - "byzantium" => Hardfork::Byzantium, - "constantinople" => Hardfork::Constantinople, - "petersburg" => Hardfork::Petersburg, - "istanbul" => Hardfork::Istanbul, - "muirglacier" => Hardfork::MuirGlacier, - "berlin" => Hardfork::Berlin, - "london" => Hardfork::London, - "arrowglacier" 
=> Hardfork::ArrowGlacier, - "grayglacier" => Hardfork::GrayGlacier, - "paris" => Hardfork::Paris, - "shanghai" => Hardfork::Shanghai, - "cancun" => Hardfork::Cancun, + "frontier" => Self::Frontier, + "homestead" => Self::Homestead, + "dao" => Self::Dao, + "tangerine" => Self::Tangerine, + "spuriousdragon" => Self::SpuriousDragon, + "byzantium" => Self::Byzantium, + "constantinople" => Self::Constantinople, + "petersburg" => Self::Petersburg, + "istanbul" => Self::Istanbul, + "muirglacier" => Self::MuirGlacier, + "berlin" => Self::Berlin, + "london" => Self::London, + "arrowglacier" => Self::ArrowGlacier, + "grayglacier" => Self::GrayGlacier, + "paris" => Self::Paris, + "shanghai" => Self::Shanghai, + "cancun" => Self::Cancun, #[cfg(feature = "optimism")] - "bedrock" => Hardfork::Bedrock, + "bedrock" => Self::Bedrock, #[cfg(feature = "optimism")] - "regolith" => Hardfork::Regolith, + "regolith" => Self::Regolith, #[cfg(feature = "optimism")] - "canyon" => Hardfork::Canyon, + "canyon" => Self::Canyon, #[cfg(feature = "optimism")] - "ecotone" => Hardfork::Ecotone, - "prague" => Hardfork::Prague, + "ecotone" => Self::Ecotone, + "prague" => Self::Prague, // "arbos11" => Hardfork::ArbOS11, // "arbos20atlas" => Hardfork::ArbOS20Atlas, _ => return Err(format!("Unknown hardfork: {s}")), diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 37261d9954784..4910412b5aafe 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -101,10 +101,7 @@ impl From for ExecutionPayloadEnvelopeV2 { fn from(value: EthBuiltPayload) -> Self { let EthBuiltPayload { block, fees, .. 
} = value; - ExecutionPayloadEnvelopeV2 { - block_value: fees, - execution_payload: convert_block_to_payload_field_v2(block), - } + Self { block_value: fees, execution_payload: convert_block_to_payload_field_v2(block) } } } @@ -112,7 +109,7 @@ impl From for ExecutionPayloadEnvelopeV3 { fn from(value: EthBuiltPayload) -> Self { let EthBuiltPayload { block, fees, sidecars, .. } = value; - ExecutionPayloadEnvelopeV3 { + Self { execution_payload: block_to_payload_v3(block).0, block_value: fees, // From the engine API spec: @@ -133,7 +130,7 @@ impl From for ExecutionPayloadEnvelopeV4 { fn from(value: EthBuiltPayload) -> Self { let EthBuiltPayload { block, fees, sidecars, .. } = value; - ExecutionPayloadEnvelopeV4 { + Self { execution_payload: block_to_payload_v4(block), block_value: fees, // From the engine API spec: diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 8b5240719b714..c6b441d66f3b2 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -240,7 +240,7 @@ impl Chain { /// Merge two chains by appending the given chain into the current one. /// /// The state of accounts for this chain is set to the state of the newest chain. - pub fn append_chain(&mut self, other: Chain) -> Result<(), BlockExecutionError> { + pub fn append_chain(&mut self, other: Self) -> Result<(), BlockExecutionError> { let chain_tip = self.tip(); let other_fork_block = other.fork_block(); if chain_tip.hash() != other_fork_block.hash { @@ -315,12 +315,12 @@ impl Chain { // TODO: Currently, trie updates are reset on chain split. // Add tests ensuring that it is valid to leave updates in the pending chain. 
ChainSplit::Split { - canonical: Chain { + canonical: Self { state: canonical_state.expect("split in range"), blocks: self.blocks, trie_updates: None, }, - pending: Chain { + pending: Self { state: pending_state, blocks: higher_number_blocks, trie_updates: None, diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs index 7d8320c315c0a..695da5d281305 100644 --- a/crates/evm/src/either.rs +++ b/crates/evm/src/either.rs @@ -26,8 +26,8 @@ where DB: Database, { match self { - Either::Left(a) => Either::Left(a.executor(db)), - Either::Right(b) => Either::Right(b.executor(db)), + Self::Left(a) => Either::Left(a.executor(db)), + Self::Right(b) => Either::Right(b.executor(db)), } } @@ -36,8 +36,8 @@ where DB: Database, { match self { - Either::Left(a) => Either::Left(a.batch_executor(db, prune_modes)), - Either::Right(b) => Either::Right(b.batch_executor(db, prune_modes)), + Self::Left(a) => Either::Left(a.batch_executor(db, prune_modes)), + Self::Right(b) => Either::Right(b.batch_executor(db, prune_modes)), } } } @@ -64,8 +64,8 @@ where fn execute(self, input: Self::Input<'_>) -> Result { match self { - Either::Left(a) => a.execute(input), - Either::Right(b) => b.execute(input), + Self::Left(a) => a.execute(input), + Self::Right(b) => b.execute(input), } } } @@ -92,29 +92,29 @@ where fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { match self { - Either::Left(a) => a.execute_and_verify_one(input), - Either::Right(b) => b.execute_and_verify_one(input), + Self::Left(a) => a.execute_and_verify_one(input), + Self::Right(b) => b.execute_and_verify_one(input), } } fn finalize(self) -> Self::Output { match self { - Either::Left(a) => a.finalize(), - Either::Right(b) => b.finalize(), + Self::Left(a) => a.finalize(), + Self::Right(b) => b.finalize(), } } fn set_tip(&mut self, tip: BlockNumber) { match self { - Either::Left(a) => a.set_tip(tip), - Either::Right(b) => b.set_tip(tip), + Self::Left(a) => a.set_tip(tip), + 
Self::Right(b) => b.set_tip(tip), } } fn size_hint(&self) -> Option { match self { - Either::Left(a) => a.size_hint(), - Either::Right(b) => b.size_hint(), + Self::Left(a) => a.size_hint(), + Self::Right(b) => b.size_hint(), } } } diff --git a/crates/fs-util/src/lib.rs b/crates/fs-util/src/lib.rs index c6c56dd44181b..6c5c2a72213dd 100644 --- a/crates/fs-util/src/lib.rs +++ b/crates/fs-util/src/lib.rs @@ -143,57 +143,57 @@ pub enum FsPathError { impl FsPathError { /// Returns the complementary error variant for [`std::fs::write`]. pub fn write(source: io::Error, path: impl Into) -> Self { - FsPathError::Write { source, path: path.into() } + Self::Write { source, path: path.into() } } /// Returns the complementary error variant for [`std::fs::read`]. pub fn read(source: io::Error, path: impl Into) -> Self { - FsPathError::Read { source, path: path.into() } + Self::Read { source, path: path.into() } } /// Returns the complementary error variant for [`std::fs::read_link`]. pub fn read_link(source: io::Error, path: impl Into) -> Self { - FsPathError::ReadLink { source, path: path.into() } + Self::ReadLink { source, path: path.into() } } /// Returns the complementary error variant for [`std::fs::File::create`]. pub fn create_file(source: io::Error, path: impl Into) -> Self { - FsPathError::CreateFile { source, path: path.into() } + Self::CreateFile { source, path: path.into() } } /// Returns the complementary error variant for [`std::fs::remove_file`]. pub fn remove_file(source: io::Error, path: impl Into) -> Self { - FsPathError::RemoveFile { source, path: path.into() } + Self::RemoveFile { source, path: path.into() } } /// Returns the complementary error variant for [`std::fs::create_dir`]. pub fn create_dir(source: io::Error, path: impl Into) -> Self { - FsPathError::CreateDir { source, path: path.into() } + Self::CreateDir { source, path: path.into() } } /// Returns the complementary error variant for [`std::fs::remove_dir`]. 
pub fn remove_dir(source: io::Error, path: impl Into) -> Self { - FsPathError::RemoveDir { source, path: path.into() } + Self::RemoveDir { source, path: path.into() } } /// Returns the complementary error variant for [`std::fs::read_dir`]. pub fn read_dir(source: io::Error, path: impl Into) -> Self { - FsPathError::ReadDir { source, path: path.into() } + Self::ReadDir { source, path: path.into() } } /// Returns the complementary error variant for [`std::fs::File::open`]. pub fn open(source: io::Error, path: impl Into) -> Self { - FsPathError::Open { source, path: path.into() } + Self::Open { source, path: path.into() } } /// Returns the complementary error variant for [`std::fs::rename`]. pub fn rename(source: io::Error, from: impl Into, to: impl Into) -> Self { - FsPathError::Rename { source, from: from.into(), to: to.into() } + Self::Rename { source, from: from.into(), to: to.into() } } /// Returns the complementary error variant for [`std::fs::File::metadata`]. pub fn metadata(source: io::Error, path: impl Into) -> Self { - FsPathError::Metadata { source, path: path.into() } + Self::Metadata { source, path: path.into() } } } diff --git a/crates/metrics/metrics-derive/src/expand.rs b/crates/metrics/metrics-derive/src/expand.rs index cc14fb1b6d575..a4bb61b48bb07 100644 --- a/crates/metrics/metrics-derive/src/expand.rs +++ b/crates/metrics/metrics-derive/src/expand.rs @@ -220,7 +220,7 @@ impl MetricsAttr { fn separator(&self) -> String { match &self.separator { Some(sep) => sep.value(), - None => MetricsAttr::DEFAULT_SEPARATOR.to_owned(), + None => Self::DEFAULT_SEPARATOR.to_owned(), } } } diff --git a/crates/net/common/src/ratelimit.rs b/crates/net/common/src/ratelimit.rs index 26440ae3ca929..2294e8552f8b3 100644 --- a/crates/net/common/src/ratelimit.rs +++ b/crates/net/common/src/ratelimit.rs @@ -24,7 +24,7 @@ impl RateLimit { let until = tokio::time::Instant::now(); let state = State::Ready { until, remaining: rate.limit() }; - RateLimit { rate, state, sleep: 
Box::pin(tokio::time::sleep_until(until)) } + Self { rate, state, sleep: Box::pin(tokio::time::sleep_until(until)) } } /// Returns the configured limit of the [RateLimit] @@ -107,7 +107,7 @@ pub struct Rate { impl Rate { /// Create a new [Rate] with the given `limit/duration` ratio. pub fn new(limit: u64, duration: Duration) -> Self { - Rate { limit, duration } + Self { limit, duration } } fn limit(&self) -> u64 { diff --git a/crates/net/discv4/src/error.rs b/crates/net/discv4/src/error.rs index 1fbd67d47e2bd..13ed56531b86e 100644 --- a/crates/net/discv4/src/error.rs +++ b/crates/net/discv4/src/error.rs @@ -40,6 +40,6 @@ pub enum Discv4Error { impl From> for Discv4Error { fn from(_: SendError) -> Self { - Discv4Error::Send + Self::Send } } diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 8e2ff1251351d..de6fd507abf0e 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -562,7 +562,7 @@ impl Discv4Service { let shared_node_record = Arc::new(Mutex::new(local_node_record)); - Discv4Service { + Self { local_address, local_eip_868_enr, local_node_record, diff --git a/crates/net/discv4/src/node.rs b/crates/net/discv4/src/node.rs index 62e45db0e1d69..c2e6d329e0d21 100644 --- a/crates/net/discv4/src/node.rs +++ b/crates/net/discv4/src/node.rs @@ -8,7 +8,7 @@ pub(crate) struct NodeKey(pub(crate) PeerId); impl From for NodeKey { fn from(value: PeerId) -> Self { - NodeKey(value) + Self(value) } } @@ -16,13 +16,13 @@ impl From for discv5::Key { fn from(value: NodeKey) -> Self { let hash = keccak256(value.0.as_slice()); let hash = *GenericArray::from_slice(hash.as_slice()); - discv5::Key::new_raw(value, hash) + Self::new_raw(value, hash) } } impl From<&NodeRecord> for NodeKey { fn from(node: &NodeRecord) -> Self { - NodeKey(node.id) + Self(node.id) } } diff --git a/crates/net/discv4/src/proto.rs b/crates/net/discv4/src/proto.rs index be26487a6907e..9131b91dab565 100644 --- a/crates/net/discv4/src/proto.rs +++ 
b/crates/net/discv4/src/proto.rs @@ -40,12 +40,12 @@ impl MessageId { /// Converts the byte that represents the message id to the enum. fn from_u8(msg: u8) -> Result { Ok(match msg { - 1 => MessageId::Ping, - 2 => MessageId::Pong, - 3 => MessageId::FindNode, - 4 => MessageId::Neighbours, - 5 => MessageId::EnrRequest, - 6 => MessageId::EnrResponse, + 1 => Self::Ping, + 2 => Self::Pong, + 3 => Self::FindNode, + 4 => Self::Neighbours, + 5 => Self::EnrRequest, + 6 => Self::EnrResponse, _ => return Err(msg), }) } @@ -74,12 +74,12 @@ impl Message { /// Returns the id for this type pub const fn msg_type(&self) -> MessageId { match self { - Message::Ping(_) => MessageId::Ping, - Message::Pong(_) => MessageId::Pong, - Message::FindNode(_) => MessageId::FindNode, - Message::Neighbours(_) => MessageId::Neighbours, - Message::EnrRequest(_) => MessageId::EnrRequest, - Message::EnrResponse(_) => MessageId::EnrResponse, + Self::Ping(_) => MessageId::Ping, + Self::Pong(_) => MessageId::Pong, + Self::FindNode(_) => MessageId::FindNode, + Self::Neighbours(_) => MessageId::Neighbours, + Self::EnrRequest(_) => MessageId::EnrRequest, + Self::EnrResponse(_) => MessageId::EnrResponse, } } @@ -101,12 +101,12 @@ impl Message { // Match the message type and encode the corresponding message into the payload match self { - Message::Ping(message) => message.encode(&mut payload), - Message::Pong(message) => message.encode(&mut payload), - Message::FindNode(message) => message.encode(&mut payload), - Message::Neighbours(message) => message.encode(&mut payload), - Message::EnrRequest(message) => message.encode(&mut payload), - Message::EnrResponse(message) => message.encode(&mut payload), + Self::Ping(message) => message.encode(&mut payload), + Self::Pong(message) => message.encode(&mut payload), + Self::FindNode(message) => message.encode(&mut payload), + Self::Neighbours(message) => message.encode(&mut payload), + Self::EnrRequest(message) => message.encode(&mut payload), + 
Self::EnrResponse(message) => message.encode(&mut payload), } // Sign the payload with the secret key using recoverable ECDSA @@ -165,12 +165,12 @@ impl Message { let payload = &mut &packet[98..]; let msg = match MessageId::from_u8(msg_type).map_err(DecodePacketError::UnknownMessage)? { - MessageId::Ping => Message::Ping(Ping::decode(payload)?), - MessageId::Pong => Message::Pong(Pong::decode(payload)?), - MessageId::FindNode => Message::FindNode(FindNode::decode(payload)?), - MessageId::Neighbours => Message::Neighbours(Neighbours::decode(payload)?), - MessageId::EnrRequest => Message::EnrRequest(EnrRequest::decode(payload)?), - MessageId::EnrResponse => Message::EnrResponse(EnrResponse::decode(payload)?), + MessageId::Ping => Self::Ping(Ping::decode(payload)?), + MessageId::Pong => Self::Pong(Pong::decode(payload)?), + MessageId::FindNode => Self::FindNode(FindNode::decode(payload)?), + MessageId::Neighbours => Self::Neighbours(Neighbours::decode(payload)?), + MessageId::EnrRequest => Self::EnrRequest(EnrRequest::decode(payload)?), + MessageId::EnrResponse => Self::EnrResponse(EnrResponse::decode(payload)?), }; Ok(Packet { msg, node_id, hash: header_hash }) @@ -210,7 +210,7 @@ impl From for NodeEndpoint { impl NodeEndpoint { /// Creates a new [`NodeEndpoint`] from a given UDP address and TCP port. pub fn from_udp_address(udp_address: &std::net::SocketAddr, tcp_port: u16) -> Self { - NodeEndpoint { address: udp_address.ip(), udp_port: udp_address.port(), tcp_port } + Self { address: udp_address.ip(), udp_port: udp_address.port(), tcp_port } } } diff --git a/crates/net/discv5/src/filter.rs b/crates/net/discv5/src/filter.rs index d62a7584a6328..8c8e1f72723ee 100644 --- a/crates/net/discv5/src/filter.rs +++ b/crates/net/discv5/src/filter.rs @@ -20,7 +20,7 @@ pub enum FilterOutcome { impl FilterOutcome { /// Returns `true` for [`FilterOutcome::Ok`]. 
pub fn is_ok(&self) -> bool { - matches!(self, FilterOutcome::Ok) + matches!(self, Self::Ok) } } @@ -58,7 +58,7 @@ impl MustNotIncludeKeys { _ = keys.insert(MustIncludeKey::new(key)); } - MustNotIncludeKeys { keys } + Self { keys } } } diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index ffa3c9caf4807..6f30244e38ab6 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -798,23 +798,23 @@ mod test { } impl PartialEq for Key { - fn eq(&self, other: &Key) -> bool { + fn eq(&self, other: &Self) -> bool { self.hash == other.hash } } impl Eq for Key {} - impl AsRef> for Key { - fn as_ref(&self) -> &Key { + impl AsRef for Key { + fn as_ref(&self) -> &Self { self } } impl Key { /// Construct a new `Key` by providing the raw 32 byte hash. - pub fn new_raw(preimage: T, hash: GenericArray) -> Key { - Key { preimage, hash } + pub fn new_raw(preimage: T, hash: GenericArray) -> Self { + Self { preimage, hash } } /// Borrows the preimage of the key. @@ -846,7 +846,7 @@ mod test { impl From for Key { fn from(node_id: NodeId) -> Self { - Key { preimage: node_id, hash: *GenericArray::from_slice(&node_id.raw()) } + Self { preimage: node_id, hash: *GenericArray::from_slice(&node_id.raw()) } } } diff --git a/crates/net/dns/src/query.rs b/crates/net/dns/src/query.rs index a3cba5caf1105..dece359252adf 100644 --- a/crates/net/dns/src/query.rs +++ b/crates/net/dns/src/query.rs @@ -135,11 +135,11 @@ impl Query { /// Advances the query fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { match self { - Query::Root(ref mut query) => { + Self::Root(ref mut query) => { let outcome = ready!(query.as_mut().poll(cx)); Poll::Ready(QueryOutcome::Root(outcome)) } - Query::Entry(ref mut query) => { + Self::Entry(ref mut query) => { let outcome = ready!(query.as_mut().poll(cx)); Poll::Ready(QueryOutcome::Entry(outcome)) } diff --git a/crates/net/dns/src/sync.rs b/crates/net/dns/src/sync.rs index 0174670b324e1..d33d0861c756f 100644 --- 
a/crates/net/dns/src/sync.rs +++ b/crates/net/dns/src/sync.rs @@ -157,6 +157,6 @@ pub(crate) enum ResolveKind { impl ResolveKind { pub(crate) fn is_link(&self) -> bool { - matches!(self, ResolveKind::Link) + matches!(self, Self::Link) } } diff --git a/crates/net/dns/src/tree.rs b/crates/net/dns/src/tree.rs index 614d5f1d23bab..1c8bb51cf7186 100644 --- a/crates/net/dns/src/tree.rs +++ b/crates/net/dns/src/tree.rs @@ -58,10 +58,10 @@ pub enum DnsEntry { impl fmt::Display for DnsEntry { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - DnsEntry::Root(entry) => entry.fmt(f), - DnsEntry::Link(entry) => entry.fmt(f), - DnsEntry::Branch(entry) => entry.fmt(f), - DnsEntry::Node(entry) => entry.fmt(f), + Self::Root(entry) => entry.fmt(f), + Self::Link(entry) => entry.fmt(f), + Self::Branch(entry) => entry.fmt(f), + Self::Node(entry) => entry.fmt(f), } } } diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index ed5cdf64a5e31..d8f0bfcbb0846 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -505,7 +505,7 @@ impl BodiesDownloaderBuilder { /// Creates a new [BodiesDownloaderBuilder] with configurations based on the provided /// [BodiesConfig]. pub fn new(config: BodiesConfig) -> Self { - BodiesDownloaderBuilder::default() + Self::default() .with_stream_batch_size(config.downloader_stream_batch_size) .with_request_limit(config.downloader_request_limit) .with_max_buffered_blocks_size_bytes(config.downloader_max_buffered_blocks_size_bytes) diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 9411099e8557e..462f935eb41df 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -74,7 +74,7 @@ impl FileClient { /// Create a new file client from a file path. 
pub async fn new>(path: P) -> Result { let file = File::open(path).await?; - FileClient::from_file(file).await + Self::from_file(file).await } /// Initialize the [`FileClient`] with a file directly. diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 2889eb84aa07c..605123cb11a51 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -1090,7 +1090,7 @@ impl ReverseHeadersDownloaderBuilder { /// Creates a new [ReverseHeadersDownloaderBuilder] with configurations based on the provided /// [HeadersConfig]. pub fn new(config: HeadersConfig) -> Self { - ReverseHeadersDownloaderBuilder::default() + Self::default() .request_limit(config.downloader_request_limit) .min_concurrent_requests(config.downloader_min_concurrent_requests) .max_concurrent_requests(config.downloader_max_concurrent_requests) diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index 65d74627e27f9..c1555c4685403 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -631,7 +631,7 @@ impl ECIES { self.egress_mac.as_mut().unwrap().update_header(&header); let tag = self.egress_mac.as_mut().unwrap().digest(); - out.reserve(ECIES::header_len()); + out.reserve(Self::header_len()); out.extend_from_slice(&header[..]); out.extend_from_slice(tag.as_slice()); } diff --git a/crates/net/ecies/src/error.rs b/crates/net/ecies/src/error.rs index 64526f16d6bad..c4b18a89f7982 100644 --- a/crates/net/ecies/src/error.rs +++ b/crates/net/ecies/src/error.rs @@ -88,7 +88,7 @@ pub enum ECIESErrorImpl { impl From for ECIESError { fn from(source: ECIESErrorImpl) -> Self { - ECIESError { inner: Box::new(source) } + Self { inner: Box::new(source) } } } diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index ae5cfee9e1e7e..3a949f686b424 100644 --- 
a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -79,13 +79,13 @@ impl<'a> arbitrary::Arbitrary<'a> for BlockHeaders { )) } - Ok(BlockHeaders(headers)) + Ok(Self(headers)) } } impl From> for BlockHeaders { fn from(headers: Vec
) -> Self { - BlockHeaders(headers) + Self(headers) } } @@ -100,7 +100,7 @@ pub struct GetBlockBodies( impl From> for GetBlockBodies { fn from(hashes: Vec) -> Self { - GetBlockBodies(hashes) + Self(hashes) } } @@ -122,7 +122,7 @@ pub struct BlockBodies( impl From> for BlockBodies { fn from(bodies: Vec) -> Self { - BlockBodies(bodies) + Self(bodies) } } diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index b648f5a22d609..86485e8e456d9 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -60,7 +60,7 @@ pub struct BlockHashNumber { impl From> for NewBlockHashes { fn from(v: Vec) -> Self { - NewBlockHashes(v) + Self(v) } } @@ -101,7 +101,7 @@ impl Transactions { impl From> for Transactions { fn from(txs: Vec) -> Self { - Transactions(txs) + Self(txs) } } @@ -139,18 +139,18 @@ impl NewPooledTransactionHashes { /// Returns the message [`EthVersion`]. pub fn version(&self) -> EthVersion { match self { - NewPooledTransactionHashes::Eth66(_) => EthVersion::Eth66, - NewPooledTransactionHashes::Eth68(_) => EthVersion::Eth68, + Self::Eth66(_) => EthVersion::Eth66, + Self::Eth68(_) => EthVersion::Eth68, } } /// Returns `true` if the payload is valid for the given version pub fn is_valid_for_version(&self, version: EthVersion) -> bool { match self { - NewPooledTransactionHashes::Eth66(_) => { + Self::Eth66(_) => { matches!(version, EthVersion::Eth67 | EthVersion::Eth66) } - NewPooledTransactionHashes::Eth68(_) => { + Self::Eth68(_) => { matches!(version, EthVersion::Eth68) } } @@ -159,40 +159,40 @@ impl NewPooledTransactionHashes { /// Returns an iterator over all transaction hashes. 
pub fn iter_hashes(&self) -> impl Iterator + '_ { match self { - NewPooledTransactionHashes::Eth66(msg) => msg.0.iter(), - NewPooledTransactionHashes::Eth68(msg) => msg.hashes.iter(), + Self::Eth66(msg) => msg.0.iter(), + Self::Eth68(msg) => msg.hashes.iter(), } } /// Returns an immutable reference to transaction hashes. pub fn hashes(&self) -> &Vec { match self { - NewPooledTransactionHashes::Eth66(msg) => &msg.0, - NewPooledTransactionHashes::Eth68(msg) => &msg.hashes, + Self::Eth66(msg) => &msg.0, + Self::Eth68(msg) => &msg.hashes, } } /// Returns a mutable reference to transaction hashes. pub fn hashes_mut(&mut self) -> &mut Vec { match self { - NewPooledTransactionHashes::Eth66(msg) => &mut msg.0, - NewPooledTransactionHashes::Eth68(msg) => &mut msg.hashes, + Self::Eth66(msg) => &mut msg.0, + Self::Eth68(msg) => &mut msg.hashes, } } /// Consumes the type and returns all hashes pub fn into_hashes(self) -> Vec { match self { - NewPooledTransactionHashes::Eth66(msg) => msg.0, - NewPooledTransactionHashes::Eth68(msg) => msg.hashes, + Self::Eth66(msg) => msg.0, + Self::Eth68(msg) => msg.hashes, } } /// Returns an iterator over all transaction hashes. pub fn into_iter_hashes(self) -> impl Iterator { match self { - NewPooledTransactionHashes::Eth66(msg) => msg.0.into_iter(), - NewPooledTransactionHashes::Eth68(msg) => msg.hashes.into_iter(), + Self::Eth66(msg) => msg.0.into_iter(), + Self::Eth68(msg) => msg.hashes.into_iter(), } } @@ -200,8 +200,8 @@ impl NewPooledTransactionHashes { /// the rest. If `len` is greater than the number of hashes, this has no effect. 
pub fn truncate(&mut self, len: usize) { match self { - NewPooledTransactionHashes::Eth66(msg) => msg.0.truncate(len), - NewPooledTransactionHashes::Eth68(msg) => { + Self::Eth66(msg) => msg.0.truncate(len), + Self::Eth68(msg) => { msg.types.truncate(len); msg.sizes.truncate(len); msg.hashes.truncate(len); @@ -212,56 +212,56 @@ impl NewPooledTransactionHashes { /// Returns true if the message is empty pub fn is_empty(&self) -> bool { match self { - NewPooledTransactionHashes::Eth66(msg) => msg.0.is_empty(), - NewPooledTransactionHashes::Eth68(msg) => msg.hashes.is_empty(), + Self::Eth66(msg) => msg.0.is_empty(), + Self::Eth68(msg) => msg.hashes.is_empty(), } } /// Returns the number of hashes in the message pub fn len(&self) -> usize { match self { - NewPooledTransactionHashes::Eth66(msg) => msg.0.len(), - NewPooledTransactionHashes::Eth68(msg) => msg.hashes.len(), + Self::Eth66(msg) => msg.0.len(), + Self::Eth68(msg) => msg.hashes.len(), } } /// Returns an immutable reference to the inner type if this an eth68 announcement. pub fn as_eth68(&self) -> Option<&NewPooledTransactionHashes68> { match self { - NewPooledTransactionHashes::Eth66(_) => None, - NewPooledTransactionHashes::Eth68(msg) => Some(msg), + Self::Eth66(_) => None, + Self::Eth68(msg) => Some(msg), } } /// Returns a mutable reference to the inner type if this an eth68 announcement. pub fn as_eth68_mut(&mut self) -> Option<&mut NewPooledTransactionHashes68> { match self { - NewPooledTransactionHashes::Eth66(_) => None, - NewPooledTransactionHashes::Eth68(msg) => Some(msg), + Self::Eth66(_) => None, + Self::Eth68(msg) => Some(msg), } } /// Returns a mutable reference to the inner type if this an eth66 announcement. 
pub fn as_eth66_mut(&mut self) -> Option<&mut NewPooledTransactionHashes66> { match self { - NewPooledTransactionHashes::Eth66(msg) => Some(msg), - NewPooledTransactionHashes::Eth68(_) => None, + Self::Eth66(msg) => Some(msg), + Self::Eth68(_) => None, } } /// Returns the inner type if this an eth68 announcement. pub fn take_eth68(&mut self) -> Option { match self { - NewPooledTransactionHashes::Eth66(_) => None, - NewPooledTransactionHashes::Eth68(msg) => Some(mem::take(msg)), + Self::Eth66(_) => None, + Self::Eth68(msg) => Some(mem::take(msg)), } } /// Returns the inner type if this an eth66 announcement. pub fn take_eth66(&mut self) -> Option { match self { - NewPooledTransactionHashes::Eth66(msg) => Some(mem::take(msg)), - NewPooledTransactionHashes::Eth68(_) => None, + Self::Eth66(msg) => Some(mem::take(msg)), + Self::Eth68(_) => None, } } } @@ -269,8 +269,8 @@ impl NewPooledTransactionHashes { impl From for EthMessage { fn from(value: NewPooledTransactionHashes) -> Self { match value { - NewPooledTransactionHashes::Eth66(msg) => EthMessage::NewPooledTransactionHashes66(msg), - NewPooledTransactionHashes::Eth68(msg) => EthMessage::NewPooledTransactionHashes68(msg), + NewPooledTransactionHashes::Eth66(msg) => Self::NewPooledTransactionHashes66(msg), + NewPooledTransactionHashes::Eth68(msg) => Self::NewPooledTransactionHashes68(msg), } } } @@ -301,7 +301,7 @@ pub struct NewPooledTransactionHashes66( impl From> for NewPooledTransactionHashes66 { fn from(v: Vec) -> Self { - NewPooledTransactionHashes66(v) + Self(v) } } @@ -359,11 +359,7 @@ impl Arbitrary for NewPooledTransactionHashes68 { (types_vec, sizes_vec, hashes_vec) }) - .prop_map(|(types, sizes, hashes)| NewPooledTransactionHashes68 { - types, - sizes, - hashes, - }) + .prop_map(|(types, sizes, hashes)| Self { types, sizes, hashes }) .boxed() } @@ -476,8 +472,8 @@ impl DedupPayload for NewPooledTransactionHashes { fn dedup(self) -> PartiallyValidData { match self { - NewPooledTransactionHashes::Eth66(msg) 
=> msg.dedup(), - NewPooledTransactionHashes::Eth68(msg) => msg.dedup(), + Self::Eth66(msg) => msg.dedup(), + Self::Eth68(msg) => msg.dedup(), } } } @@ -733,7 +729,7 @@ impl RequestTxHashes { impl FromIterator<(TxHash, Eth68TxMetadata)> for RequestTxHashes { fn from_iter>(iter: I) -> Self { - RequestTxHashes::new(iter.into_iter().map(|(hash, _)| hash).collect::>()) + Self::new(iter.into_iter().map(|(hash, _)| hash).collect::>()) } } diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index c4101e852d2cb..2239353f228b5 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -114,7 +114,7 @@ impl ProtocolMessage { EthMessage::Receipts(request_pair) } }; - Ok(ProtocolMessage { message_type, message }) + Ok(Self { message_type, message }) } } @@ -132,7 +132,7 @@ impl Encodable for ProtocolMessage { impl From for ProtocolMessage { fn from(message: EthMessage) -> Self { - ProtocolMessage { message_type: message.message_id(), message } + Self { message_type: message.message_id(), message } } } @@ -160,7 +160,7 @@ impl Encodable for ProtocolBroadcastMessage { impl From for ProtocolBroadcastMessage { fn from(message: EthBroadcastMessage) -> Self { - ProtocolBroadcastMessage { message_type: message.message_id(), message } + Self { message_type: message.message_id(), message } } } @@ -223,22 +223,23 @@ impl EthMessage { /// Returns the message's ID. 
pub fn message_id(&self) -> EthMessageID { match self { - EthMessage::Status(_) => EthMessageID::Status, - EthMessage::NewBlockHashes(_) => EthMessageID::NewBlockHashes, - EthMessage::NewBlock(_) => EthMessageID::NewBlock, - EthMessage::Transactions(_) => EthMessageID::Transactions, - EthMessage::NewPooledTransactionHashes66(_) | - EthMessage::NewPooledTransactionHashes68(_) => EthMessageID::NewPooledTransactionHashes, - EthMessage::GetBlockHeaders(_) => EthMessageID::GetBlockHeaders, - EthMessage::BlockHeaders(_) => EthMessageID::BlockHeaders, - EthMessage::GetBlockBodies(_) => EthMessageID::GetBlockBodies, - EthMessage::BlockBodies(_) => EthMessageID::BlockBodies, - EthMessage::GetPooledTransactions(_) => EthMessageID::GetPooledTransactions, - EthMessage::PooledTransactions(_) => EthMessageID::PooledTransactions, - EthMessage::GetNodeData(_) => EthMessageID::GetNodeData, - EthMessage::NodeData(_) => EthMessageID::NodeData, - EthMessage::GetReceipts(_) => EthMessageID::GetReceipts, - EthMessage::Receipts(_) => EthMessageID::Receipts, + Self::Status(_) => EthMessageID::Status, + Self::NewBlockHashes(_) => EthMessageID::NewBlockHashes, + Self::NewBlock(_) => EthMessageID::NewBlock, + Self::Transactions(_) => EthMessageID::Transactions, + Self::NewPooledTransactionHashes66(_) | Self::NewPooledTransactionHashes68(_) => { + EthMessageID::NewPooledTransactionHashes + } + Self::GetBlockHeaders(_) => EthMessageID::GetBlockHeaders, + Self::BlockHeaders(_) => EthMessageID::BlockHeaders, + Self::GetBlockBodies(_) => EthMessageID::GetBlockBodies, + Self::BlockBodies(_) => EthMessageID::BlockBodies, + Self::GetPooledTransactions(_) => EthMessageID::GetPooledTransactions, + Self::PooledTransactions(_) => EthMessageID::PooledTransactions, + Self::GetNodeData(_) => EthMessageID::GetNodeData, + Self::NodeData(_) => EthMessageID::NodeData, + Self::GetReceipts(_) => EthMessageID::GetReceipts, + Self::Receipts(_) => EthMessageID::Receipts, } } } @@ -246,42 +247,42 @@ impl EthMessage 
{ impl Encodable for EthMessage { fn encode(&self, out: &mut dyn BufMut) { match self { - EthMessage::Status(status) => status.encode(out), - EthMessage::NewBlockHashes(new_block_hashes) => new_block_hashes.encode(out), - EthMessage::NewBlock(new_block) => new_block.encode(out), - EthMessage::Transactions(transactions) => transactions.encode(out), - EthMessage::NewPooledTransactionHashes66(hashes) => hashes.encode(out), - EthMessage::NewPooledTransactionHashes68(hashes) => hashes.encode(out), - EthMessage::GetBlockHeaders(request) => request.encode(out), - EthMessage::BlockHeaders(headers) => headers.encode(out), - EthMessage::GetBlockBodies(request) => request.encode(out), - EthMessage::BlockBodies(bodies) => bodies.encode(out), - EthMessage::GetPooledTransactions(request) => request.encode(out), - EthMessage::PooledTransactions(transactions) => transactions.encode(out), - EthMessage::GetNodeData(request) => request.encode(out), - EthMessage::NodeData(data) => data.encode(out), - EthMessage::GetReceipts(request) => request.encode(out), - EthMessage::Receipts(receipts) => receipts.encode(out), + Self::Status(status) => status.encode(out), + Self::NewBlockHashes(new_block_hashes) => new_block_hashes.encode(out), + Self::NewBlock(new_block) => new_block.encode(out), + Self::Transactions(transactions) => transactions.encode(out), + Self::NewPooledTransactionHashes66(hashes) => hashes.encode(out), + Self::NewPooledTransactionHashes68(hashes) => hashes.encode(out), + Self::GetBlockHeaders(request) => request.encode(out), + Self::BlockHeaders(headers) => headers.encode(out), + Self::GetBlockBodies(request) => request.encode(out), + Self::BlockBodies(bodies) => bodies.encode(out), + Self::GetPooledTransactions(request) => request.encode(out), + Self::PooledTransactions(transactions) => transactions.encode(out), + Self::GetNodeData(request) => request.encode(out), + Self::NodeData(data) => data.encode(out), + Self::GetReceipts(request) => request.encode(out), + 
Self::Receipts(receipts) => receipts.encode(out), } } fn length(&self) -> usize { match self { - EthMessage::Status(status) => status.length(), - EthMessage::NewBlockHashes(new_block_hashes) => new_block_hashes.length(), - EthMessage::NewBlock(new_block) => new_block.length(), - EthMessage::Transactions(transactions) => transactions.length(), - EthMessage::NewPooledTransactionHashes66(hashes) => hashes.length(), - EthMessage::NewPooledTransactionHashes68(hashes) => hashes.length(), - EthMessage::GetBlockHeaders(request) => request.length(), - EthMessage::BlockHeaders(headers) => headers.length(), - EthMessage::GetBlockBodies(request) => request.length(), - EthMessage::BlockBodies(bodies) => bodies.length(), - EthMessage::GetPooledTransactions(request) => request.length(), - EthMessage::PooledTransactions(transactions) => transactions.length(), - EthMessage::GetNodeData(request) => request.length(), - EthMessage::NodeData(data) => data.length(), - EthMessage::GetReceipts(request) => request.length(), - EthMessage::Receipts(receipts) => receipts.length(), + Self::Status(status) => status.length(), + Self::NewBlockHashes(new_block_hashes) => new_block_hashes.length(), + Self::NewBlock(new_block) => new_block.length(), + Self::Transactions(transactions) => transactions.length(), + Self::NewPooledTransactionHashes66(hashes) => hashes.length(), + Self::NewPooledTransactionHashes68(hashes) => hashes.length(), + Self::GetBlockHeaders(request) => request.length(), + Self::BlockHeaders(headers) => headers.length(), + Self::GetBlockBodies(request) => request.length(), + Self::BlockBodies(bodies) => bodies.length(), + Self::GetPooledTransactions(request) => request.length(), + Self::PooledTransactions(transactions) => transactions.length(), + Self::GetNodeData(request) => request.length(), + Self::NodeData(data) => data.length(), + Self::GetReceipts(request) => request.length(), + Self::Receipts(receipts) => receipts.length(), } } } @@ -307,8 +308,8 @@ impl EthBroadcastMessage 
{ /// Returns the message's ID. pub fn message_id(&self) -> EthMessageID { match self { - EthBroadcastMessage::NewBlock(_) => EthMessageID::NewBlock, - EthBroadcastMessage::Transactions(_) => EthMessageID::Transactions, + Self::NewBlock(_) => EthMessageID::NewBlock, + Self::Transactions(_) => EthMessageID::Transactions, } } } @@ -316,15 +317,15 @@ impl EthBroadcastMessage { impl Encodable for EthBroadcastMessage { fn encode(&self, out: &mut dyn BufMut) { match self { - EthBroadcastMessage::NewBlock(new_block) => new_block.encode(out), - EthBroadcastMessage::Transactions(transactions) => transactions.encode(out), + Self::NewBlock(new_block) => new_block.encode(out), + Self::Transactions(transactions) => transactions.encode(out), } } fn length(&self) -> usize { match self { - EthBroadcastMessage::NewBlock(new_block) => new_block.length(), - EthBroadcastMessage::Transactions(transactions) => transactions.length(), + Self::NewBlock(new_block) => new_block.length(), + Self::Transactions(transactions) => transactions.length(), } } } @@ -385,21 +386,21 @@ impl Encodable for EthMessageID { impl Decodable for EthMessageID { fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { let id = match buf.first().ok_or(alloy_rlp::Error::InputTooShort)? 
{ - 0x00 => EthMessageID::Status, - 0x01 => EthMessageID::NewBlockHashes, - 0x02 => EthMessageID::Transactions, - 0x03 => EthMessageID::GetBlockHeaders, - 0x04 => EthMessageID::BlockHeaders, - 0x05 => EthMessageID::GetBlockBodies, - 0x06 => EthMessageID::BlockBodies, - 0x07 => EthMessageID::NewBlock, - 0x08 => EthMessageID::NewPooledTransactionHashes, - 0x09 => EthMessageID::GetPooledTransactions, - 0x0a => EthMessageID::PooledTransactions, - 0x0d => EthMessageID::GetNodeData, - 0x0e => EthMessageID::NodeData, - 0x0f => EthMessageID::GetReceipts, - 0x10 => EthMessageID::Receipts, + 0x00 => Self::Status, + 0x01 => Self::NewBlockHashes, + 0x02 => Self::Transactions, + 0x03 => Self::GetBlockHeaders, + 0x04 => Self::BlockHeaders, + 0x05 => Self::GetBlockBodies, + 0x06 => Self::BlockBodies, + 0x07 => Self::NewBlock, + 0x08 => Self::NewPooledTransactionHashes, + 0x09 => Self::GetPooledTransactions, + 0x0a => Self::PooledTransactions, + 0x0d => Self::GetNodeData, + 0x0e => Self::NodeData, + 0x0f => Self::GetReceipts, + 0x10 => Self::Receipts, _ => return Err(alloy_rlp::Error::Custom("Invalid message ID")), }; buf.advance(1); @@ -412,21 +413,21 @@ impl TryFrom for EthMessageID { fn try_from(value: usize) -> Result { match value { - 0x00 => Ok(EthMessageID::Status), - 0x01 => Ok(EthMessageID::NewBlockHashes), - 0x02 => Ok(EthMessageID::Transactions), - 0x03 => Ok(EthMessageID::GetBlockHeaders), - 0x04 => Ok(EthMessageID::BlockHeaders), - 0x05 => Ok(EthMessageID::GetBlockBodies), - 0x06 => Ok(EthMessageID::BlockBodies), - 0x07 => Ok(EthMessageID::NewBlock), - 0x08 => Ok(EthMessageID::NewPooledTransactionHashes), - 0x09 => Ok(EthMessageID::GetPooledTransactions), - 0x0a => Ok(EthMessageID::PooledTransactions), - 0x0d => Ok(EthMessageID::GetNodeData), - 0x0e => Ok(EthMessageID::NodeData), - 0x0f => Ok(EthMessageID::GetReceipts), - 0x10 => Ok(EthMessageID::Receipts), + 0x00 => Ok(Self::Status), + 0x01 => Ok(Self::NewBlockHashes), + 0x02 => Ok(Self::Transactions), + 0x03 => 
Ok(Self::GetBlockHeaders), + 0x04 => Ok(Self::BlockHeaders), + 0x05 => Ok(Self::GetBlockBodies), + 0x06 => Ok(Self::BlockBodies), + 0x07 => Ok(Self::NewBlock), + 0x08 => Ok(Self::NewPooledTransactionHashes), + 0x09 => Ok(Self::GetPooledTransactions), + 0x0a => Ok(Self::PooledTransactions), + 0x0d => Ok(Self::GetNodeData), + 0x0e => Ok(Self::NodeData), + 0x0f => Ok(Self::GetReceipts), + 0x10 => Ok(Self::Receipts), _ => Err("Invalid message ID"), } } diff --git a/crates/net/eth-wire-types/src/status.rs b/crates/net/eth-wire-types/src/status.rs index fc6f7fd2c7d7b..a28f4ec45cbfc 100644 --- a/crates/net/eth-wire-types/src/status.rs +++ b/crates/net/eth-wire-types/src/status.rs @@ -43,12 +43,12 @@ pub struct Status { } impl From for Status { - fn from(genesis: Genesis) -> Status { + fn from(genesis: Genesis) -> Self { let chain = genesis.config.chain_id; let total_difficulty = genesis.difficulty; let chainspec = ChainSpec::from(genesis); - Status { + Self { version: EthVersion::Eth68 as u8, chain: Chain::from_id(chain), total_difficulty, @@ -135,7 +135,7 @@ impl Debug for Status { impl Default for Status { fn default() -> Self { let mainnet_genesis = MAINNET.genesis_hash(); - Status { + Self { version: EthVersion::Eth68 as u8, chain: Chain::from_named(NamedChain::Mainnet), total_difficulty: U256::from(17_179_869_184u64), diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index 2a7313ad1f31c..d0a42d49beecc 100644 --- a/crates/net/eth-wire-types/src/transactions.rs +++ b/crates/net/eth-wire-types/src/transactions.rs @@ -24,7 +24,7 @@ where T: Into, { fn from(hashes: Vec) -> Self { - GetPooledTransactions(hashes.into_iter().map(|h| h.into()).collect()) + Self(hashes.into_iter().map(|h| h.into()).collect()) } } @@ -71,7 +71,7 @@ impl TryFrom> for PooledTransactions { impl FromIterator for PooledTransactions { fn from_iter>(iter: I) -> Self { - PooledTransactions(iter.into_iter().collect()) + 
Self(iter.into_iter().collect()) } } diff --git a/crates/net/eth-wire-types/src/version.rs b/crates/net/eth-wire-types/src/version.rs index e121ea6d7ab2f..add1ab3785892 100644 --- a/crates/net/eth-wire-types/src/version.rs +++ b/crates/net/eth-wire-types/src/version.rs @@ -25,13 +25,13 @@ pub enum EthVersion { impl EthVersion { /// The latest known eth version - pub const LATEST: EthVersion = EthVersion::Eth68; + pub const LATEST: Self = Self::Eth68; /// Returns the total number of messages the protocol version supports. pub const fn total_messages(&self) -> u8 { match self { - EthVersion::Eth66 => 15, - EthVersion::Eth67 | EthVersion::Eth68 => { + Self::Eth66 => 15, + Self::Eth67 | Self::Eth68 => { // eth/67,68 are eth/66 minus GetNodeData and NodeData messages 13 } @@ -40,17 +40,17 @@ impl EthVersion { /// Returns true if the version is eth/66 pub const fn is_eth66(&self) -> bool { - matches!(self, EthVersion::Eth66) + matches!(self, Self::Eth66) } /// Returns true if the version is eth/67 pub const fn is_eth67(&self) -> bool { - matches!(self, EthVersion::Eth67) + matches!(self, Self::Eth67) } /// Returns true if the version is eth/68 pub const fn is_eth68(&self) -> bool { - matches!(self, EthVersion::Eth68) + matches!(self, Self::Eth68) } } @@ -69,9 +69,9 @@ impl TryFrom<&str> for EthVersion { #[inline] fn try_from(s: &str) -> Result { match s { - "66" => Ok(EthVersion::Eth66), - "67" => Ok(EthVersion::Eth67), - "68" => Ok(EthVersion::Eth68), + "66" => Ok(Self::Eth66), + "67" => Ok(Self::Eth67), + "68" => Ok(Self::Eth68), _ => Err(ParseVersionError(s.to_string())), } } @@ -92,9 +92,9 @@ impl TryFrom for EthVersion { #[inline] fn try_from(u: u8) -> Result { match u { - 66 => Ok(EthVersion::Eth66), - 67 => Ok(EthVersion::Eth67), - 68 => Ok(EthVersion::Eth68), + 66 => Ok(Self::Eth66), + 67 => Ok(Self::Eth67), + 68 => Ok(Self::Eth68), _ => Err(ParseVersionError(u.to_string())), } } @@ -105,14 +105,14 @@ impl FromStr for EthVersion { #[inline] fn from_str(s: &str) 
-> Result { - EthVersion::try_from(s) + Self::try_from(s) } } impl From for u8 { #[inline] - fn from(v: EthVersion) -> u8 { - v as u8 + fn from(v: EthVersion) -> Self { + v as Self } } diff --git a/crates/net/eth-wire/src/capability.rs b/crates/net/eth-wire/src/capability.rs index 25e6bf0e2c655..657ee5b290bcd 100644 --- a/crates/net/eth-wire/src/capability.rs +++ b/crates/net/eth-wire/src/capability.rs @@ -116,7 +116,7 @@ impl fmt::Display for Capability { impl From for Capability { #[inline] fn from(value: EthVersion) -> Self { - Capability::eth(value) + Self::eth(value) } } @@ -137,12 +137,12 @@ impl proptest::arbitrary::Arbitrary for Capability { proptest::arbitrary::any_with::(args) // TODO: what possible values? .prop_flat_map(move |name| { proptest::arbitrary::any_with::(()) // TODO: What's the max? - .prop_map(move |version| Capability::new(name.clone(), version)) + .prop_map(move |version| Self::new(name.clone(), version)) }) .boxed() } - type Strategy = proptest::strategy::BoxedStrategy; + type Strategy = proptest::strategy::BoxedStrategy; } /// Represents all capabilities of a node. @@ -289,8 +289,8 @@ impl SharedCapability { /// Returns the capability. pub fn capability(&self) -> Cow<'_, Capability> { match self { - SharedCapability::Eth { version, .. } => Cow::Owned(Capability::eth(*version)), - SharedCapability::UnknownCapability { cap, .. } => Cow::Borrowed(cap), + Self::Eth { version, .. } => Cow::Owned(Capability::eth(*version)), + Self::UnknownCapability { cap, .. } => Cow::Borrowed(cap), } } @@ -298,29 +298,29 @@ impl SharedCapability { #[inline] pub fn name(&self) -> &str { match self { - SharedCapability::Eth { .. } => "eth", - SharedCapability::UnknownCapability { cap, .. } => cap.name.as_ref(), + Self::Eth { .. } => "eth", + Self::UnknownCapability { cap, .. } => cap.name.as_ref(), } } /// Returns true if the capability is eth. #[inline] pub fn is_eth(&self) -> bool { - matches!(self, SharedCapability::Eth { .. 
}) + matches!(self, Self::Eth { .. }) } /// Returns the version of the capability. pub fn version(&self) -> u8 { match self { - SharedCapability::Eth { version, .. } => *version as u8, - SharedCapability::UnknownCapability { cap, .. } => cap.version as u8, + Self::Eth { version, .. } => *version as u8, + Self::UnknownCapability { cap, .. } => cap.version as u8, } } /// Returns the eth version if it's the `eth` capability. pub fn eth_version(&self) -> Option { match self { - SharedCapability::Eth { version, .. } => Some(*version), + Self::Eth { version, .. } => Some(*version), _ => None, } } @@ -331,8 +331,8 @@ impl SharedCapability { /// message id space. pub fn message_id_offset(&self) -> u8 { match self { - SharedCapability::Eth { offset, .. } => *offset, - SharedCapability::UnknownCapability { offset, .. } => *offset, + Self::Eth { offset, .. } => *offset, + Self::UnknownCapability { offset, .. } => *offset, } } @@ -345,8 +345,8 @@ impl SharedCapability { /// Returns the number of protocol messages supported by this capability. pub fn num_messages(&self) -> u8 { match self { - SharedCapability::Eth { version: _version, .. } => EthMessageID::max() + 1, - SharedCapability::UnknownCapability { messages, .. } => *messages, + Self::Eth { version: _version, .. } => EthMessageID::max() + 1, + Self::UnknownCapability { messages, .. } => *messages, } } } diff --git a/crates/net/eth-wire/src/disconnect.rs b/crates/net/eth-wire/src/disconnect.rs index dbf24269c9d85..50ced132f2a61 100644 --- a/crates/net/eth-wire/src/disconnect.rs +++ b/crates/net/eth-wire/src/disconnect.rs @@ -50,25 +50,19 @@ pub enum DisconnectReason { impl Display for DisconnectReason { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let message = match self { - DisconnectReason::DisconnectRequested => "disconnect requested", - DisconnectReason::TcpSubsystemError => "TCP sub-system error", - DisconnectReason::ProtocolBreach => { - "breach of protocol, e.g. 
a malformed message, bad RLP, etc." - } - DisconnectReason::UselessPeer => "useless peer", - DisconnectReason::TooManyPeers => "too many peers", - DisconnectReason::AlreadyConnected => "already connected", - DisconnectReason::IncompatibleP2PProtocolVersion => "incompatible P2P protocol version", - DisconnectReason::NullNodeIdentity => { - "null node identity received - this is automatically invalid" - } - DisconnectReason::ClientQuitting => "client quitting", - DisconnectReason::UnexpectedHandshakeIdentity => "unexpected identity in handshake", - DisconnectReason::ConnectedToSelf => { - "identity is the same as this node (i.e. connected to itself)" - } - DisconnectReason::PingTimeout => "ping timeout", - DisconnectReason::SubprotocolSpecific => "some other reason specific to a subprotocol", + Self::DisconnectRequested => "disconnect requested", + Self::TcpSubsystemError => "TCP sub-system error", + Self::ProtocolBreach => "breach of protocol, e.g. a malformed message, bad RLP, etc.", + Self::UselessPeer => "useless peer", + Self::TooManyPeers => "too many peers", + Self::AlreadyConnected => "already connected", + Self::IncompatibleP2PProtocolVersion => "incompatible P2P protocol version", + Self::NullNodeIdentity => "null node identity received - this is automatically invalid", + Self::ClientQuitting => "client quitting", + Self::UnexpectedHandshakeIdentity => "unexpected identity in handshake", + Self::ConnectedToSelf => "identity is the same as this node (i.e. 
connected to itself)", + Self::PingTimeout => "ping timeout", + Self::SubprotocolSpecific => "some other reason specific to a subprotocol", }; f.write_str(message) } @@ -86,19 +80,19 @@ impl TryFrom for DisconnectReason { fn try_from(value: u8) -> Result { match value { - 0x00 => Ok(DisconnectReason::DisconnectRequested), - 0x01 => Ok(DisconnectReason::TcpSubsystemError), - 0x02 => Ok(DisconnectReason::ProtocolBreach), - 0x03 => Ok(DisconnectReason::UselessPeer), - 0x04 => Ok(DisconnectReason::TooManyPeers), - 0x05 => Ok(DisconnectReason::AlreadyConnected), - 0x06 => Ok(DisconnectReason::IncompatibleP2PProtocolVersion), - 0x07 => Ok(DisconnectReason::NullNodeIdentity), - 0x08 => Ok(DisconnectReason::ClientQuitting), - 0x09 => Ok(DisconnectReason::UnexpectedHandshakeIdentity), - 0x0a => Ok(DisconnectReason::ConnectedToSelf), - 0x0b => Ok(DisconnectReason::PingTimeout), - 0x10 => Ok(DisconnectReason::SubprotocolSpecific), + 0x00 => Ok(Self::DisconnectRequested), + 0x01 => Ok(Self::TcpSubsystemError), + 0x02 => Ok(Self::ProtocolBreach), + 0x03 => Ok(Self::UselessPeer), + 0x04 => Ok(Self::TooManyPeers), + 0x05 => Ok(Self::AlreadyConnected), + 0x06 => Ok(Self::IncompatibleP2PProtocolVersion), + 0x07 => Ok(Self::NullNodeIdentity), + 0x08 => Ok(Self::ClientQuitting), + 0x09 => Ok(Self::UnexpectedHandshakeIdentity), + 0x0a => Ok(Self::ConnectedToSelf), + 0x0b => Ok(Self::PingTimeout), + 0x10 => Ok(Self::SubprotocolSpecific), _ => Err(UnknownDisconnectReason(value)), } } @@ -143,9 +137,9 @@ impl Decodable for DisconnectReason { // string 0x80 if buf[0] == 0x00 { buf.advance(1); - Ok(DisconnectReason::DisconnectRequested) + Ok(Self::DisconnectRequested) } else { - DisconnectReason::try_from(u8::decode(buf)?) + Self::try_from(u8::decode(buf)?) 
.map_err(|_| RlpError::Custom("unknown disconnect reason")) } } diff --git a/crates/net/eth-wire/src/errors/eth.rs b/crates/net/eth-wire/src/errors/eth.rs index c9bf39882b83a..c7b0718fc248d 100644 --- a/crates/net/eth-wire/src/errors/eth.rs +++ b/crates/net/eth-wire/src/errors/eth.rs @@ -44,7 +44,7 @@ pub enum EthStreamError { impl EthStreamError { /// Returns the [`DisconnectReason`] if the error is a disconnect message pub fn as_disconnected(&self) -> Option { - if let EthStreamError::P2PStreamError(err) = self { + if let Self::P2PStreamError(err) = self { err.as_disconnected() } else { None @@ -53,7 +53,7 @@ impl EthStreamError { /// Returns the [io::Error] if it was caused by IO pub fn as_io(&self) -> Option<&io::Error> { - if let EthStreamError::P2PStreamError(P2PStreamError::Io(io)) = self { + if let Self::P2PStreamError(P2PStreamError::Io(io)) = self { return Some(io) } None diff --git a/crates/net/eth-wire/src/errors/p2p.rs b/crates/net/eth-wire/src/errors/p2p.rs index 90512bf069376..83a39e2b6912c 100644 --- a/crates/net/eth-wire/src/errors/p2p.rs +++ b/crates/net/eth-wire/src/errors/p2p.rs @@ -86,8 +86,8 @@ impl P2PStreamError { /// Returns the [`DisconnectReason`] if it is the `Disconnected` variant. pub fn as_disconnected(&self) -> Option { let reason = match self { - P2PStreamError::HandshakeError(P2PHandshakeError::Disconnected(reason)) => reason, - P2PStreamError::Disconnected(reason) => reason, + Self::HandshakeError(P2PHandshakeError::Disconnected(reason)) => reason, + Self::Disconnected(reason) => reason, _ => return None, }; diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index 3e8bb096a74fa..1523a3a3596eb 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -665,10 +665,10 @@ impl P2PMessage { /// Gets the [`P2PMessageID`] for the given message. 
pub fn message_id(&self) -> P2PMessageID { match self { - P2PMessage::Hello(_) => P2PMessageID::Hello, - P2PMessage::Disconnect(_) => P2PMessageID::Disconnect, - P2PMessage::Ping => P2PMessageID::Ping, - P2PMessage::Pong => P2PMessageID::Pong, + Self::Hello(_) => P2PMessageID::Hello, + Self::Disconnect(_) => P2PMessageID::Disconnect, + Self::Ping => P2PMessageID::Ping, + Self::Pong => P2PMessageID::Pong, } } } @@ -681,15 +681,15 @@ impl Encodable for P2PMessage { fn encode(&self, out: &mut dyn BufMut) { (self.message_id() as u8).encode(out); match self { - P2PMessage::Hello(msg) => msg.encode(out), - P2PMessage::Disconnect(msg) => msg.encode(out), - P2PMessage::Ping => { + Self::Hello(msg) => msg.encode(out), + Self::Disconnect(msg) => msg.encode(out), + Self::Ping => { // Ping payload is _always_ snappy encoded out.put_u8(0x01); out.put_u8(0x00); out.put_u8(EMPTY_LIST_CODE); } - P2PMessage::Pong => { + Self::Pong => { // Pong payload is _always_ snappy encoded out.put_u8(0x01); out.put_u8(0x00); @@ -700,11 +700,11 @@ impl Encodable for P2PMessage { fn length(&self) -> usize { let payload_len = match self { - P2PMessage::Hello(msg) => msg.length(), - P2PMessage::Disconnect(msg) => msg.length(), + Self::Hello(msg) => msg.length(), + Self::Disconnect(msg) => msg.length(), // id + snappy encoded payload - P2PMessage::Ping => 3, // len([0x01, 0x00, 0xc0]) = 3 - P2PMessage::Pong => 3, // len([0x01, 0x00, 0xc0]) = 3 + Self::Ping => 3, // len([0x01, 0x00, 0xc0]) = 3 + Self::Pong => 3, // len([0x01, 0x00, 0xc0]) = 3 }; payload_len + 1 // (1 for length of p2p message id) } @@ -735,15 +735,15 @@ impl Decodable for P2PMessage { .or(Err(RlpError::Custom("unknown p2p message id")))?; buf.advance(1); match id { - P2PMessageID::Hello => Ok(P2PMessage::Hello(HelloMessage::decode(buf)?)), - P2PMessageID::Disconnect => Ok(P2PMessage::Disconnect(DisconnectReason::decode(buf)?)), + P2PMessageID::Hello => Ok(Self::Hello(HelloMessage::decode(buf)?)), + P2PMessageID::Disconnect => 
Ok(Self::Disconnect(DisconnectReason::decode(buf)?)), P2PMessageID::Ping => { advance_snappy_ping_pong_payload(buf)?; - Ok(P2PMessage::Ping) + Ok(Self::Ping) } P2PMessageID::Pong => { advance_snappy_ping_pong_payload(buf)?; - Ok(P2PMessage::Pong) + Ok(Self::Pong) } } } @@ -768,10 +768,10 @@ pub enum P2PMessageID { impl From for P2PMessageID { fn from(msg: P2PMessage) -> Self { match msg { - P2PMessage::Hello(_) => P2PMessageID::Hello, - P2PMessage::Disconnect(_) => P2PMessageID::Disconnect, - P2PMessage::Ping => P2PMessageID::Ping, - P2PMessage::Pong => P2PMessageID::Pong, + P2PMessage::Hello(_) => Self::Hello, + P2PMessage::Disconnect(_) => Self::Disconnect, + P2PMessage::Ping => Self::Ping, + P2PMessage::Pong => Self::Pong, } } } @@ -781,10 +781,10 @@ impl TryFrom for P2PMessageID { fn try_from(id: u8) -> Result { match id { - 0x00 => Ok(P2PMessageID::Hello), - 0x01 => Ok(P2PMessageID::Disconnect), - 0x02 => Ok(P2PMessageID::Ping), - 0x03 => Ok(P2PMessageID::Pong), + 0x00 => Ok(Self::Hello), + 0x01 => Ok(Self::Disconnect), + 0x02 => Ok(Self::Ping), + 0x03 => Ok(Self::Pong), _ => Err(P2PStreamError::UnknownReservedMessageId(id)), } } @@ -822,8 +822,8 @@ impl Decodable for ProtocolVersion { fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { let version = u8::decode(buf)?; match version { - 4 => Ok(ProtocolVersion::V4), - 5 => Ok(ProtocolVersion::V5), + 4 => Ok(Self::V4), + 5 => Ok(Self::V5), _ => Err(RlpError::Custom("unknown p2p protocol version")), } } diff --git a/crates/net/eth-wire/tests/fuzz_roundtrip.rs b/crates/net/eth-wire/tests/fuzz_roundtrip.rs index 1fc5ea0bf578f..7bb3b7ad78e6e 100644 --- a/crates/net/eth-wire/tests/fuzz_roundtrip.rs +++ b/crates/net/eth-wire/tests/fuzz_roundtrip.rs @@ -96,7 +96,7 @@ pub mod fuzz_rlp { impl Default for HelloMessageWrapper { fn default() -> Self { - HelloMessageWrapper(HelloMessage { + Self(HelloMessage { client_version: Default::default(), capabilities: Default::default(), protocol_version: Default::default(), @@ -138,7 
+138,7 @@ pub mod fuzz_rlp { impl Default for GetBlockHeadersWrapper { fn default() -> Self { - GetBlockHeadersWrapper(GetBlockHeaders { + Self(GetBlockHeaders { start_block: BlockHashOrNumber::Number(0), limit: Default::default(), skip: Default::default(), diff --git a/crates/net/nat/src/lib.rs b/crates/net/nat/src/lib.rs index adc72aa83b4a9..33722bb5e5c11 100644 --- a/crates/net/nat/src/lib.rs +++ b/crates/net/nat/src/lib.rs @@ -57,11 +57,11 @@ impl NatResolver { impl fmt::Display for NatResolver { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - NatResolver::Any => f.write_str("any"), - NatResolver::Upnp => f.write_str("upnp"), - NatResolver::PublicIp => f.write_str("publicip"), - NatResolver::ExternalIp(ip) => write!(f, "extip:{ip}"), - NatResolver::None => f.write_str("none"), + Self::Any => f.write_str("any"), + Self::Upnp => f.write_str("upnp"), + Self::PublicIp => f.write_str("publicip"), + Self::ExternalIp(ip) => write!(f, "extip:{ip}"), + Self::None => f.write_str("none"), } } } @@ -82,17 +82,17 @@ impl FromStr for NatResolver { fn from_str(s: &str) -> Result { let r = match s { - "any" => NatResolver::Any, - "upnp" => NatResolver::Upnp, - "none" => NatResolver::None, - "publicip" | "public-ip" => NatResolver::PublicIp, + "any" => Self::Any, + "upnp" => Self::Upnp, + "none" => Self::None, + "publicip" | "public-ip" => Self::PublicIp, s => { let Some(ip) = s.strip_prefix("extip:") else { return Err(ParseNatResolverError::UnknownVariant(format!( "Unknown Nat Resolver: {s}" ))) }; - NatResolver::ExternalIp(ip.parse::()?) + Self::ExternalIp(ip.parse::()?) 
} }; Ok(r) diff --git a/crates/net/network-api/src/error.rs b/crates/net/network-api/src/error.rs index 4572145406e8f..a500c4d48a521 100644 --- a/crates/net/network-api/src/error.rs +++ b/crates/net/network-api/src/error.rs @@ -11,12 +11,12 @@ pub enum NetworkError { impl From> for NetworkError { fn from(_: mpsc::error::SendError) -> Self { - NetworkError::ChannelClosed + Self::ChannelClosed } } impl From for NetworkError { fn from(_: oneshot::error::RecvError) -> Self { - NetworkError::ChannelClosed + Self::ChannelClosed } } diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index 10ffddb6a3074..c197ecba5d291 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -154,12 +154,12 @@ pub enum PeerKind { impl PeerKind { /// Returns `true` if the peer is trusted. pub const fn is_trusted(&self) -> bool { - matches!(self, PeerKind::Trusted) + matches!(self, Self::Trusted) } /// Returns `true` if the peer is basic. pub const fn is_basic(&self) -> bool { - matches!(self, PeerKind::Basic) + matches!(self, Self::Basic) } } @@ -198,20 +198,20 @@ pub enum Direction { impl Direction { /// Returns `true` if this an incoming connection. pub fn is_incoming(&self) -> bool { - matches!(self, Direction::Incoming) + matches!(self, Self::Incoming) } /// Returns `true` if this an outgoing connection. 
pub fn is_outgoing(&self) -> bool { - matches!(self, Direction::Outgoing(_)) + matches!(self, Self::Outgoing(_)) } } impl std::fmt::Display for Direction { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Direction::Incoming => write!(f, "incoming"), - Direction::Outgoing(_) => write!(f, "outgoing"), + Self::Incoming => write!(f, "incoming"), + Self::Outgoing(_) => write!(f, "outgoing"), } } } diff --git a/crates/net/network/src/builder.rs b/crates/net/network/src/builder.rs index 5fb673707ddb8..ed74de94f92ce 100644 --- a/crates/net/network/src/builder.rs +++ b/crates/net/network/src/builder.rs @@ -25,7 +25,7 @@ pub struct NetworkBuilder { impl NetworkBuilder { /// Consumes the type and returns all fields. pub fn split(self) -> (NetworkManager, Tx, Eth) { - let NetworkBuilder { network, transactions, request_handler } = self; + let Self { network, transactions, request_handler } = self; (network, transactions, request_handler) } @@ -46,7 +46,7 @@ impl NetworkBuilder { /// Consumes the type and returns all fields and also return a [`NetworkHandle`]. pub fn split_with_handle(self) -> (NetworkHandle, NetworkManager, Tx, Eth) { - let NetworkBuilder { network, transactions, request_handler } = self; + let Self { network, transactions, request_handler } = self; let handle = network.handle().clone(); (handle, network, transactions, request_handler) } @@ -57,7 +57,7 @@ impl NetworkBuilder { pool: Pool, transactions_manager_config: TransactionsManagerConfig, ) -> NetworkBuilder, Eth> { - let NetworkBuilder { mut network, request_handler, .. } = self; + let Self { mut network, request_handler, .. } = self; let (tx, rx) = mpsc::unbounded_channel(); network.set_transactions(tx); let handle = network.handle().clone(); @@ -70,7 +70,7 @@ impl NetworkBuilder { self, client: Client, ) -> NetworkBuilder> { - let NetworkBuilder { mut network, transactions, .. } = self; + let Self { mut network, transactions, .. 
} = self; let (tx, rx) = mpsc::channel(ETH_REQUEST_CHANNEL_CAPACITY); network.set_eth_request_handler(tx); let peers = network.handle().peers_handle().clone(); diff --git a/crates/net/network/src/cache.rs b/crates/net/network/src/cache.rs index 2be4180d4886b..8f33127e7baee 100644 --- a/crates/net/network/src/cache.rs +++ b/crates/net/network/src/cache.rs @@ -152,7 +152,7 @@ where { /// Returns a new cache with default limiter and hash builder. pub fn new(max_length: u32) -> Self { - LruMap(schnellru::LruMap::new(ByLength::new(max_length))) + Self(schnellru::LruMap::new(ByLength::new(max_length))) } } @@ -162,7 +162,7 @@ where { /// Returns a new cache with [`Unlimited`] limiter and default hash builder. pub fn new_unlimited() -> Self { - LruMap(schnellru::LruMap::new(Unlimited)) + Self(schnellru::LruMap::new(Unlimited)) } } diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 368f958b2a309..6a5c16777790d 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -566,7 +566,7 @@ pub enum NetworkMode { impl NetworkMode { /// Returns true if network has entered proof-of-stake pub fn is_stake(&self) -> bool { - matches!(self, NetworkMode::Stake) + matches!(self, Self::Stake) } } diff --git a/crates/net/network/src/error.rs b/crates/net/network/src/error.rs index 2bfa9f9c108a7..95cb5c6305fb4 100644 --- a/crates/net/network/src/error.rs +++ b/crates/net/network/src/error.rs @@ -21,8 +21,8 @@ impl ServiceKind { /// Returns the appropriate flags for each variant. 
pub fn flags(&self) -> &'static str { match self { - ServiceKind::Listener(_) => "--port", - ServiceKind::Discovery(_) => "--discovery.port", + Self::Listener(_) => "--port", + Self::Discovery(_) => "--discovery.port", } } } @@ -30,8 +30,8 @@ impl ServiceKind { impl fmt::Display for ServiceKind { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - ServiceKind::Listener(addr) => write!(f, "{addr} (listener service)"), - ServiceKind::Discovery(addr) => write!(f, "{addr} (discovery service)"), + Self::Listener(addr) => write!(f, "{addr} (listener service)"), + Self::Discovery(addr) => write!(f, "{addr} (discovery service)"), } } } @@ -67,12 +67,12 @@ impl NetworkError { /// Converts a `std::io::Error` to a more descriptive `NetworkError`. pub fn from_io_error(err: io::Error, kind: ServiceKind) -> Self { match err.kind() { - ErrorKind::AddrInUse => NetworkError::AddressAlreadyInUse { kind, error: err }, + ErrorKind::AddrInUse => Self::AddressAlreadyInUse { kind, error: err }, _ => { if let ServiceKind::Discovery(_) = kind { - return NetworkError::Discovery(err) + return Self::Discovery(err) } - NetworkError::Io(err) + Self::Io(err) } } } @@ -128,27 +128,27 @@ pub enum BackoffKind { impl BackoffKind { /// Returns true if the backoff is considered severe. 
pub(crate) fn is_severe(&self) -> bool { - matches!(self, BackoffKind::Medium | BackoffKind::High) + matches!(self, Self::Medium | Self::High) } } impl SessionError for EthStreamError { fn merits_discovery_ban(&self) -> bool { match self { - EthStreamError::P2PStreamError(P2PStreamError::HandshakeError( + Self::P2PStreamError(P2PStreamError::HandshakeError( P2PHandshakeError::HelloNotInHandshake, )) | - EthStreamError::P2PStreamError(P2PStreamError::HandshakeError( + Self::P2PStreamError(P2PStreamError::HandshakeError( P2PHandshakeError::NonHelloMessageInHandshake, )) => true, - EthStreamError::EthHandshakeError(err) => !matches!(err, EthHandshakeError::NoResponse), + Self::EthHandshakeError(err) => !matches!(err, EthHandshakeError::NoResponse), _ => false, } } fn is_fatal_protocol_error(&self) -> bool { match self { - EthStreamError::P2PStreamError(err) => { + Self::P2PStreamError(err) => { matches!( err, P2PStreamError::HandshakeError(P2PHandshakeError::NoSharedCapabilities) | @@ -177,7 +177,7 @@ impl SessionError for EthStreamError { P2PStreamError::MismatchedProtocolVersion { .. 
} ) } - EthStreamError::EthHandshakeError(err) => !matches!(err, EthHandshakeError::NoResponse), + Self::EthHandshakeError(err) => !matches!(err, EthHandshakeError::NoResponse), _ => false, } } @@ -214,19 +214,17 @@ impl SessionError for EthStreamError { // [`SessionError::is_fatal_protocol_error`] match self { // timeouts - EthStreamError::EthHandshakeError(EthHandshakeError::NoResponse) | - EthStreamError::P2PStreamError(P2PStreamError::HandshakeError( - P2PHandshakeError::NoResponse, - )) | - EthStreamError::P2PStreamError(P2PStreamError::PingTimeout) => Some(BackoffKind::Low), + Self::EthHandshakeError(EthHandshakeError::NoResponse) | + Self::P2PStreamError(P2PStreamError::HandshakeError(P2PHandshakeError::NoResponse)) | + Self::P2PStreamError(P2PStreamError::PingTimeout) => Some(BackoffKind::Low), // malformed messages - EthStreamError::P2PStreamError(P2PStreamError::Rlp(_)) | - EthStreamError::P2PStreamError(P2PStreamError::UnknownReservedMessageId(_)) | - EthStreamError::P2PStreamError(P2PStreamError::UnknownDisconnectReason(_)) | - EthStreamError::P2PStreamError(P2PStreamError::MessageTooBig { .. }) | - EthStreamError::P2PStreamError(P2PStreamError::EmptyProtocolMessage) | - EthStreamError::P2PStreamError(P2PStreamError::PingerError(_)) | - EthStreamError::P2PStreamError(P2PStreamError::Snap(_)) => Some(BackoffKind::Medium), + Self::P2PStreamError(P2PStreamError::Rlp(_)) | + Self::P2PStreamError(P2PStreamError::UnknownReservedMessageId(_)) | + Self::P2PStreamError(P2PStreamError::UnknownDisconnectReason(_)) | + Self::P2PStreamError(P2PStreamError::MessageTooBig { .. 
}) | + Self::P2PStreamError(P2PStreamError::EmptyProtocolMessage) | + Self::P2PStreamError(P2PStreamError::PingerError(_)) | + Self::P2PStreamError(P2PStreamError::Snap(_)) => Some(BackoffKind::Medium), _ => None, } } @@ -235,25 +233,25 @@ impl SessionError for EthStreamError { impl SessionError for PendingSessionHandshakeError { fn merits_discovery_ban(&self) -> bool { match self { - PendingSessionHandshakeError::Eth(eth) => eth.merits_discovery_ban(), - PendingSessionHandshakeError::Ecies(_) => true, - PendingSessionHandshakeError::Timeout => false, + Self::Eth(eth) => eth.merits_discovery_ban(), + Self::Ecies(_) => true, + Self::Timeout => false, } } fn is_fatal_protocol_error(&self) -> bool { match self { - PendingSessionHandshakeError::Eth(eth) => eth.is_fatal_protocol_error(), - PendingSessionHandshakeError::Ecies(_) => true, - PendingSessionHandshakeError::Timeout => false, + Self::Eth(eth) => eth.is_fatal_protocol_error(), + Self::Ecies(_) => true, + Self::Timeout => false, } } fn should_backoff(&self) -> Option { match self { - PendingSessionHandshakeError::Eth(eth) => eth.should_backoff(), - PendingSessionHandshakeError::Ecies(_) => Some(BackoffKind::Low), - PendingSessionHandshakeError::Timeout => Some(BackoffKind::Medium), + Self::Eth(eth) => eth.should_backoff(), + Self::Ecies(_) => Some(BackoffKind::Low), + Self::Timeout => Some(BackoffKind::Medium), } } } diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index 3a5ebf14b5130..9844d2f91b5b9 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -372,7 +372,7 @@ enum PeerState { impl PeerState { /// Returns true if the peer is currently idle. fn is_idle(&self) -> bool { - matches!(self, PeerState::Idle) + matches!(self, Self::Idle) } /// Resets the state on a received response. @@ -381,8 +381,8 @@ impl PeerState { /// /// Returns `true` if the peer is ready for another request. 
fn on_request_finished(&mut self) -> bool { - if !matches!(self, PeerState::Closing) { - *self = PeerState::Idle; + if !matches!(self, Self::Closing) { + *self = Self::Idle; return true } false @@ -423,16 +423,16 @@ impl DownloadRequest { /// Returns the corresponding state for a peer that handles the request. fn peer_state(&self) -> PeerState { match self { - DownloadRequest::GetBlockHeaders { .. } => PeerState::GetBlockHeaders, - DownloadRequest::GetBlockBodies { .. } => PeerState::GetBlockBodies, + Self::GetBlockHeaders { .. } => PeerState::GetBlockHeaders, + Self::GetBlockBodies { .. } => PeerState::GetBlockBodies, } } /// Returns the requested priority of this request fn get_priority(&self) -> &Priority { match self { - DownloadRequest::GetBlockHeaders { priority, .. } => priority, - DownloadRequest::GetBlockBodies { priority, .. } => priority, + Self::GetBlockHeaders { priority, .. } => priority, + Self::GetBlockBodies { priority, .. } => priority, } } diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index 861fb304e736a..3a067cbfdfd63 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -135,33 +135,33 @@ impl PeerRequest { /// Send an error back to the receiver. pub(crate) fn send_err_response(self, err: RequestError) { let _ = match self { - PeerRequest::GetBlockHeaders { response, .. } => response.send(Err(err)).ok(), - PeerRequest::GetBlockBodies { response, .. } => response.send(Err(err)).ok(), - PeerRequest::GetPooledTransactions { response, .. } => response.send(Err(err)).ok(), - PeerRequest::GetNodeData { response, .. } => response.send(Err(err)).ok(), - PeerRequest::GetReceipts { response, .. } => response.send(Err(err)).ok(), + Self::GetBlockHeaders { response, .. } => response.send(Err(err)).ok(), + Self::GetBlockBodies { response, .. } => response.send(Err(err)).ok(), + Self::GetPooledTransactions { response, .. 
} => response.send(Err(err)).ok(), + Self::GetNodeData { response, .. } => response.send(Err(err)).ok(), + Self::GetReceipts { response, .. } => response.send(Err(err)).ok(), }; } /// Returns the [`EthMessage`] for this type pub fn create_request_message(&self, request_id: u64) -> EthMessage { match self { - PeerRequest::GetBlockHeaders { request, .. } => { + Self::GetBlockHeaders { request, .. } => { EthMessage::GetBlockHeaders(RequestPair { request_id, message: *request }) } - PeerRequest::GetBlockBodies { request, .. } => { + Self::GetBlockBodies { request, .. } => { EthMessage::GetBlockBodies(RequestPair { request_id, message: request.clone() }) } - PeerRequest::GetPooledTransactions { request, .. } => { + Self::GetPooledTransactions { request, .. } => { EthMessage::GetPooledTransactions(RequestPair { request_id, message: request.clone(), }) } - PeerRequest::GetNodeData { request, .. } => { + Self::GetNodeData { request, .. } => { EthMessage::GetNodeData(RequestPair { request_id, message: request.clone() }) } - PeerRequest::GetReceipts { request, .. } => { + Self::GetReceipts { request, .. } => { EthMessage::GetReceipts(RequestPair { request_id, message: request.clone() }) } } @@ -170,7 +170,7 @@ impl PeerRequest { /// Consumes the type and returns the inner [`GetPooledTransactions`] variant. pub fn into_get_pooled_transactions(self) -> Option { match self { - PeerRequest::GetPooledTransactions { request, .. } => Some(request), + Self::GetPooledTransactions { request, .. 
} => Some(request), _ => None, } } @@ -221,19 +221,19 @@ impl PeerResponse { } let res = match self { - PeerResponse::BlockHeaders { response } => { + Self::BlockHeaders { response } => { poll_request!(response, BlockHeaders, cx) } - PeerResponse::BlockBodies { response } => { + Self::BlockBodies { response } => { poll_request!(response, BlockBodies, cx) } - PeerResponse::PooledTransactions { response } => { + Self::PooledTransactions { response } => { poll_request!(response, PooledTransactions, cx) } - PeerResponse::NodeData { response } => { + Self::NodeData { response } => { poll_request!(response, NodeData, cx) } - PeerResponse::Receipts { response } => { + Self::Receipts { response } => { poll_request!(response, Receipts, cx) } }; @@ -273,19 +273,19 @@ impl PeerResponseResult { }; } match self { - PeerResponseResult::BlockHeaders(resp) => { + Self::BlockHeaders(resp) => { to_message!(resp, BlockHeaders, id) } - PeerResponseResult::BlockBodies(resp) => { + Self::BlockBodies(resp) => { to_message!(resp, BlockBodies, id) } - PeerResponseResult::PooledTransactions(resp) => { + Self::PooledTransactions(resp) => { to_message!(resp, PooledTransactions, id) } - PeerResponseResult::NodeData(resp) => { + Self::NodeData(resp) => { to_message!(resp, NodeData, id) } - PeerResponseResult::Receipts(resp) => { + Self::Receipts(resp) => { to_message!(resp, Receipts, id) } } @@ -294,11 +294,11 @@ impl PeerResponseResult { /// Returns the `Err` value if the result is an error. 
pub fn err(&self) -> Option<&RequestError> { match self { - PeerResponseResult::BlockHeaders(res) => res.as_ref().err(), - PeerResponseResult::BlockBodies(res) => res.as_ref().err(), - PeerResponseResult::PooledTransactions(res) => res.as_ref().err(), - PeerResponseResult::NodeData(res) => res.as_ref().err(), - PeerResponseResult::Receipts(res) => res.as_ref().err(), + Self::BlockHeaders(res) => res.as_ref().err(), + Self::BlockBodies(res) => res.as_ref().err(), + Self::PooledTransactions(res) => res.as_ref().err(), + Self::NodeData(res) => res.as_ref().err(), + Self::Receipts(res) => res.as_ref().err(), } } diff --git a/crates/net/network/src/peers/manager.rs b/crates/net/network/src/peers/manager.rs index e13b080afae71..e1ccb13be9e25 100644 --- a/crates/net/network/src/peers/manager.rs +++ b/crates/net/network/src/peers/manager.rs @@ -915,7 +915,7 @@ impl PeersManager { impl Default for PeersManager { fn default() -> Self { - PeersManager::new(Default::default()) + Self::new(Default::default()) } } @@ -1002,7 +1002,7 @@ impl ConnectionInfo { impl Default for ConnectionInfo { fn default() -> Self { - ConnectionInfo { + Self { num_outbound: 0, num_inbound: 0, max_outbound: DEFAULT_MAX_COUNT_PEERS_OUTBOUND as usize, @@ -1161,8 +1161,8 @@ impl PeerConnectionState { #[inline] fn disconnect(&mut self) { match self { - PeerConnectionState::In => *self = PeerConnectionState::DisconnectingIn, - PeerConnectionState::Out => *self = PeerConnectionState::DisconnectingOut, + Self::In => *self = Self::DisconnectingIn, + Self::Out => *self = Self::DisconnectingOut, _ => {} } } @@ -1170,28 +1170,25 @@ impl PeerConnectionState { /// Returns true if this is an active incoming connection. 
#[inline] fn is_incoming(&self) -> bool { - matches!(self, PeerConnectionState::In) + matches!(self, Self::In) } /// Returns whether we're currently connected with this peer #[inline] fn is_connected(&self) -> bool { - matches!( - self, - PeerConnectionState::In | PeerConnectionState::Out | PeerConnectionState::PendingOut - ) + matches!(self, Self::In | Self::Out | Self::PendingOut) } /// Returns if there's currently no connection to that peer. #[inline] fn is_unconnected(&self) -> bool { - matches!(self, PeerConnectionState::Idle) + matches!(self, Self::Idle) } /// Returns true if there's currently an outbound dial to that peer. #[inline] fn is_pending_out(&self) -> bool { - matches!(self, PeerConnectionState::PendingOut) + matches!(self, Self::PendingOut) } } @@ -1501,7 +1498,7 @@ impl PeerBackoffDurations { /// Returns durations for testing. #[cfg(test)] const fn test() -> Self { - PeerBackoffDurations { + Self { low: Duration::from_millis(200), medium: Duration::from_millis(200), high: Duration::from_millis(200), diff --git a/crates/net/network/src/peers/reputation.rs b/crates/net/network/src/peers/reputation.rs index 1a335adc69c4a..ad87446d089d5 100644 --- a/crates/net/network/src/peers/reputation.rs +++ b/crates/net/network/src/peers/reputation.rs @@ -135,6 +135,6 @@ impl From for Reputation { impl From for ReputationChange { fn from(value: Reputation) -> Self { - ReputationChange(value) + Self(value) } } diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index ce726a78a24e7..7fed5b311aecd 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -713,8 +713,8 @@ enum OnIncomingMessageOutcome { impl From> for OnIncomingMessageOutcome { fn from(res: Result<(), ActiveSessionMessage>) -> Self { match res { - Ok(_) => OnIncomingMessageOutcome::Ok, - Err(msg) => OnIncomingMessageOutcome::NoCapacity(msg), + Ok(_) => Self::Ok, + Err(msg) => Self::NoCapacity(msg), } } } @@ 
-736,13 +736,13 @@ pub(crate) enum OutgoingMessage { impl From for OutgoingMessage { fn from(value: EthMessage) -> Self { - OutgoingMessage::Eth(value) + Self::Eth(value) } } impl From for OutgoingMessage { fn from(value: EthBroadcastMessage) -> Self { - OutgoingMessage::Broadcast(value) + Self::Broadcast(value) } } diff --git a/crates/net/network/src/session/config.rs b/crates/net/network/src/session/config.rs index 7c21d232c70df..e285d2b7ae3fc 100644 --- a/crates/net/network/src/session/config.rs +++ b/crates/net/network/src/session/config.rs @@ -57,7 +57,7 @@ pub struct SessionsConfig { impl Default for SessionsConfig { fn default() -> Self { - SessionsConfig { + Self { // This should be sufficient to slots for handling commands sent to the session task, // since the manager is the sender. session_command_buffer: 32, diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 95f426c5424d3..5cbbc0dfec29b 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -755,7 +755,7 @@ impl PendingSessionHandshakeError { /// Returns the [`DisconnectReason`] if the error is a disconnect message pub fn as_disconnected(&self) -> Option { match self { - PendingSessionHandshakeError::Eth(eth_err) => eth_err.as_disconnected(), + Self::Eth(eth_err) => eth_err.as_disconnected(), _ => None, } } diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index 11ac5949aaebd..729c5ff8a4ae0 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -443,11 +443,11 @@ pub enum NetworkConnectionState { impl NetworkConnectionState { /// Returns true if the node is active. pub(crate) fn is_active(&self) -> bool { - matches!(self, NetworkConnectionState::Active) + matches!(self, Self::Active) } /// Returns true if the node is shutting down. 
pub(crate) fn is_shutting_down(&self) -> bool { - matches!(self, NetworkConnectionState::ShuttingDown) + matches!(self, Self::ShuttingDown) } } diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index 99c98db55d5dc..8fd3ac9c7ef6f 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -227,7 +227,7 @@ impl Testnet { /// Creates a new [`Testnet`] with the given number of peers pub async fn try_create(num_peers: usize) -> Result { - let mut this = Testnet::default(); + let mut this = Self::default(); this.extend_peer_with_config((0..num_peers).map(|_| Default::default())).await?; Ok(this) @@ -531,7 +531,7 @@ where { /// Launches the network and returns the [Peer] that manages it pub async fn launch(self) -> Result, NetworkError> { - let PeerConfig { config, client, secret_key } = self; + let Self { config, client, secret_key } = self; let network = NetworkManager::new(config).await?; let peer = Peer { network, diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index e10cee9bd1185..91ae18442257c 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -124,7 +124,7 @@ impl TransactionFetcher { /// Sets up transaction fetcher with config pub fn with_transaction_fetcher_config(config: &TransactionFetcherConfig) -> Self { - let mut tx_fetcher = TransactionFetcher::default(); + let mut tx_fetcher = Self::default(); tx_fetcher.info.soft_limit_byte_size_pooled_transactions_response = config.soft_limit_byte_size_pooled_transactions_response; diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 7a0fe600da547..545c6f3bb237a 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -1437,8 +1437,8 @@ impl PooledTransactionsHashesBuilder { /// Push a 
transaction from the pool to the list. fn push_pooled(&mut self, pooled_tx: Arc>) { match self { - PooledTransactionsHashesBuilder::Eth66(msg) => msg.0.push(*pooled_tx.hash()), - PooledTransactionsHashesBuilder::Eth68(msg) => { + Self::Eth66(msg) => msg.0.push(*pooled_tx.hash()), + Self::Eth68(msg) => { msg.hashes.push(*pooled_tx.hash()); msg.sizes.push(pooled_tx.encoded_length()); msg.types.push(pooled_tx.transaction.tx_type()); @@ -1448,8 +1448,8 @@ impl PooledTransactionsHashesBuilder { fn push(&mut self, tx: &PropagateTransaction) { match self { - PooledTransactionsHashesBuilder::Eth66(msg) => msg.0.push(tx.hash()), - PooledTransactionsHashesBuilder::Eth68(msg) => { + Self::Eth66(msg) => msg.0.push(tx.hash()), + Self::Eth68(msg) => { msg.hashes.push(tx.hash()); msg.sizes.push(tx.size); msg.types.push(tx.transaction.tx_type().into()); @@ -1460,17 +1460,15 @@ impl PooledTransactionsHashesBuilder { /// Create a builder for the negotiated version of the peer's session fn new(version: EthVersion) -> Self { match version { - EthVersion::Eth66 | EthVersion::Eth67 => { - PooledTransactionsHashesBuilder::Eth66(Default::default()) - } - EthVersion::Eth68 => PooledTransactionsHashesBuilder::Eth68(Default::default()), + EthVersion::Eth66 | EthVersion::Eth67 => Self::Eth66(Default::default()), + EthVersion::Eth68 => Self::Eth68(Default::default()), } } fn build(self) -> NewPooledTransactionHashes { match self { - PooledTransactionsHashesBuilder::Eth66(msg) => msg.into(), - PooledTransactionsHashesBuilder::Eth68(msg) => msg.into(), + Self::Eth66(msg) => msg.into(), + Self::Eth68(msg) => msg.into(), } } } @@ -1488,7 +1486,7 @@ enum TransactionSource { impl TransactionSource { /// Whether the transaction were sent as broadcast. 
fn is_broadcast(&self) -> bool { - matches!(self, TransactionSource::Broadcast) + matches!(self, Self::Broadcast) } } diff --git a/crates/net/p2p/src/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs index 2b32b70097f67..741745a43f510 100644 --- a/crates/net/p2p/src/bodies/response.rs +++ b/crates/net/p2p/src/bodies/response.rs @@ -13,8 +13,8 @@ impl BlockResponse { /// Return the reference to the response header pub fn header(&self) -> &SealedHeader { match self { - BlockResponse::Full(block) => &block.header, - BlockResponse::Empty(header) => header, + Self::Full(block) => &block.header, + Self::Empty(header) => header, } } @@ -22,8 +22,8 @@ impl BlockResponse { #[inline] pub fn size(&self) -> usize { match self { - BlockResponse::Full(block) => SealedBlock::size(block), - BlockResponse::Empty(header) => SealedHeader::size(header), + Self::Full(block) => SealedBlock::size(block), + Self::Empty(header) => SealedHeader::size(header), } } @@ -35,8 +35,8 @@ impl BlockResponse { /// Return the reference to the response header pub fn difficulty(&self) -> U256 { match self { - BlockResponse::Full(block) => block.difficulty, - BlockResponse::Empty(header) => header.difficulty, + Self::Full(block) => block.difficulty, + Self::Empty(header) => header.difficulty, } } } diff --git a/crates/net/p2p/src/either.rs b/crates/net/p2p/src/either.rs index 36e95d487a6d8..e8017e880feb6 100644 --- a/crates/net/p2p/src/either.rs +++ b/crates/net/p2p/src/either.rs @@ -17,14 +17,14 @@ where { fn report_bad_message(&self, peer_id: reth_network_types::PeerId) { match self { - Either::Left(a) => a.report_bad_message(peer_id), - Either::Right(b) => b.report_bad_message(peer_id), + Self::Left(a) => a.report_bad_message(peer_id), + Self::Right(b) => b.report_bad_message(peer_id), } } fn num_connected_peers(&self) -> usize { match self { - Either::Left(a) => a.num_connected_peers(), - Either::Right(b) => b.num_connected_peers(), + Self::Left(a) => a.num_connected_peers(), + 
Self::Right(b) => b.num_connected_peers(), } } } @@ -42,8 +42,8 @@ where priority: Priority, ) -> Self::Output { match self { - Either::Left(a) => Either::Left(a.get_block_bodies_with_priority(hashes, priority)), - Either::Right(b) => Either::Right(b.get_block_bodies_with_priority(hashes, priority)), + Self::Left(a) => Either::Left(a.get_block_bodies_with_priority(hashes, priority)), + Self::Right(b) => Either::Right(b.get_block_bodies_with_priority(hashes, priority)), } } } @@ -61,8 +61,8 @@ where priority: Priority, ) -> Self::Output { match self { - Either::Left(a) => Either::Left(a.get_headers_with_priority(request, priority)), - Either::Right(b) => Either::Right(b.get_headers_with_priority(request, priority)), + Self::Left(a) => Either::Left(a.get_headers_with_priority(request, priority)), + Self::Right(b) => Either::Right(b.get_headers_with_priority(request, priority)), } } } diff --git a/crates/net/p2p/src/error.rs b/crates/net/p2p/src/error.rs index 3bd469e605649..450d8ce97e4c8 100644 --- a/crates/net/p2p/src/error.rs +++ b/crates/net/p2p/src/error.rs @@ -105,24 +105,24 @@ pub enum RequestError { impl RequestError { /// Indicates whether this error is retryable or fatal. pub fn is_retryable(&self) -> bool { - matches!(self, RequestError::Timeout | RequestError::ConnectionDropped) + matches!(self, Self::Timeout | Self::ConnectionDropped) } /// Whether the error happened because the channel was closed. 
pub fn is_channel_closed(&self) -> bool { - matches!(self, RequestError::ChannelClosed) + matches!(self, Self::ChannelClosed) } } impl From> for RequestError { fn from(_: mpsc::error::SendError) -> Self { - RequestError::ChannelClosed + Self::ChannelClosed } } impl From for RequestError { fn from(_: oneshot::error::RecvError) -> Self { - RequestError::ChannelClosed + Self::ChannelClosed } } diff --git a/crates/net/p2p/src/headers/downloader.rs b/crates/net/p2p/src/headers/downloader.rs index b52a8487710fa..16d5c1af858d6 100644 --- a/crates/net/p2p/src/headers/downloader.rs +++ b/crates/net/p2p/src/headers/downloader.rs @@ -61,9 +61,9 @@ impl SyncTarget { /// header in [SyncTarget::Gap] pub fn tip(&self) -> BlockHashOrNumber { match self { - SyncTarget::Tip(tip) => (*tip).into(), - SyncTarget::Gap(gap) => gap.parent_hash.into(), - SyncTarget::TipNum(num) => (*num).into(), + Self::Tip(tip) => (*tip).into(), + Self::Gap(gap) => gap.parent_hash.into(), + Self::TipNum(num) => (*num).into(), } } } diff --git a/crates/net/p2p/src/priority.rs b/crates/net/p2p/src/priority.rs index 5932b4496b694..38be9ead4ec3f 100644 --- a/crates/net/p2p/src/priority.rs +++ b/crates/net/p2p/src/priority.rs @@ -12,11 +12,11 @@ pub enum Priority { impl Priority { /// Returns `true` if this is [Priority::High] pub fn is_high(&self) -> bool { - matches!(self, Priority::High) + matches!(self, Self::High) } /// Returns `true` if this is [Priority::Normal] pub fn is_normal(&self) -> bool { - matches!(self, Priority::Normal) + matches!(self, Self::Normal) } } diff --git a/crates/net/p2p/src/sync.rs b/crates/net/p2p/src/sync.rs index 729271b578586..5b3dd62e3302a 100644 --- a/crates/net/p2p/src/sync.rs +++ b/crates/net/p2p/src/sync.rs @@ -45,7 +45,7 @@ impl SyncState { /// /// Note: this does not include keep-up sync when the state is idle. 
pub fn is_syncing(&self) -> bool { - !matches!(self, SyncState::Idle) + !matches!(self, Self::Idle) } } diff --git a/crates/net/types/src/lib.rs b/crates/net/types/src/lib.rs index e4b9f28a4fdcf..dfcd4a5c21832 100644 --- a/crates/net/types/src/lib.rs +++ b/crates/net/types/src/lib.rs @@ -68,17 +68,17 @@ impl AnyNode { /// Returns the peer id of the node. pub fn peer_id(&self) -> PeerId { match self { - AnyNode::NodeRecord(record) => record.id, - AnyNode::Enr(enr) => pk2id(&enr.public_key()), - AnyNode::PeerId(peer_id) => *peer_id, + Self::NodeRecord(record) => record.id, + Self::Enr(enr) => pk2id(&enr.public_key()), + Self::PeerId(peer_id) => *peer_id, } } /// Returns the full node record if available. pub fn node_record(&self) -> Option { match self { - AnyNode::NodeRecord(record) => Some(*record), - AnyNode::Enr(enr) => { + Self::NodeRecord(record) => Some(*record), + Self::Enr(enr) => { let node_record = NodeRecord { address: enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from))?, tcp_port: enr.tcp4().or_else(|| enr.tcp6())?, @@ -111,11 +111,11 @@ impl FromStr for AnyNode { fn from_str(s: &str) -> Result { if let Some(rem) = s.strip_prefix("enode://") { if let Ok(record) = NodeRecord::from_str(s) { - return Ok(AnyNode::NodeRecord(record)) + return Ok(Self::NodeRecord(record)) } // incomplete enode if let Ok(peer_id) = PeerId::from_str(rem) { - return Ok(AnyNode::PeerId(peer_id)) + return Ok(Self::PeerId(peer_id)) } return Err(format!("invalid public key: {rem}")) } @@ -129,9 +129,9 @@ impl FromStr for AnyNode { impl std::fmt::Display for AnyNode { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - AnyNode::NodeRecord(record) => write!(f, "{record}"), - AnyNode::Enr(enr) => write!(f, "{enr}"), - AnyNode::PeerId(peer_id) => { + Self::NodeRecord(record) => write!(f, "{record}"), + Self::Enr(enr) => write!(f, "{enr}"), + Self::PeerId(peer_id) => { write!(f, "enode://{}", 
alloy_primitives::hex::encode(peer_id.as_slice())) } } diff --git a/crates/net/types/src/node_record.rs b/crates/net/types/src/node_record.rs index 5a6706201a73b..f16d43dd1c310 100644 --- a/crates/net/types/src/node_record.rs +++ b/crates/net/types/src/node_record.rs @@ -189,7 +189,7 @@ impl TryFrom<&Enr> for NodeRecord { let id = pk2id(&enr.public_key()); - Ok(NodeRecord { address, tcp_port, udp_port, id }.into_ipv4_mapped()) + Ok(Self { address, tcp_port, udp_port, id }.into_ipv4_mapped()) } } diff --git a/crates/node-core/src/args/log.rs b/crates/node-core/src/args/log.rs index e5475e2697a85..b7e69e663be08 100644 --- a/crates/node-core/src/args/log.rs +++ b/crates/node-core/src/args/log.rs @@ -127,9 +127,9 @@ pub enum ColorMode { impl Display for ColorMode { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - ColorMode::Always => write!(f, "always"), - ColorMode::Auto => write!(f, "auto"), - ColorMode::Never => write!(f, "never"), + Self::Always => write!(f, "always"), + Self::Auto => write!(f, "auto"), + Self::Never => write!(f, "never"), } } } diff --git a/crates/node-core/src/cli/config.rs b/crates/node-core/src/cli/config.rs index 6e5d1f6a2a8b6..0fb8fbf4182dd 100644 --- a/crates/node-core/src/cli/config.rs +++ b/crates/node-core/src/cli/config.rs @@ -122,7 +122,7 @@ pub trait RethNetworkConfig { impl RethNetworkConfig for reth_network::NetworkManager { fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) { - reth_network::NetworkManager::add_rlpx_sub_protocol(self, protocol); + Self::add_rlpx_sub_protocol(self, protocol); } fn secret_key(&self) -> secp256k1::SecretKey { diff --git a/crates/node-core/src/dirs.rs b/crates/node-core/src/dirs.rs index b33df18f26f34..86f6a3fd5f376 100644 --- a/crates/node-core/src/dirs.rs +++ b/crates/node-core/src/dirs.rs @@ -148,8 +148,8 @@ impl From> for PathBuf { impl PlatformPath { /// Returns the path joined with another path - pub fn join>(&self, path: P) -> PlatformPath { - 
PlatformPath::(self.0.join(path), std::marker::PhantomData) + pub fn join>(&self, path: P) -> Self { + Self(self.0.join(path), std::marker::PhantomData) } } @@ -161,7 +161,7 @@ impl PlatformPath { let path = self.0.join(chain_name); - let platform_path = PlatformPath::(path, std::marker::PhantomData); + let platform_path = Self(path, std::marker::PhantomData); ChainPath::new(platform_path, chain) } diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index c28e435798497..3fd29085ec717 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -164,7 +164,7 @@ impl NodeBuilder { self, task_executor: TaskExecutor, data_dir: ChainPath, - ) -> WithLaunchContext> { + ) -> WithLaunchContext { WithLaunchContext { builder: self, task_executor, data_dir } } diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 8c01c0a737a76..87da96a31d9f8 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -398,38 +398,38 @@ pub enum NodeEvent { } impl From for NodeEvent { - fn from(event: NetworkEvent) -> NodeEvent { - NodeEvent::Network(event) + fn from(event: NetworkEvent) -> Self { + Self::Network(event) } } impl From for NodeEvent { - fn from(event: PipelineEvent) -> NodeEvent { - NodeEvent::Pipeline(event) + fn from(event: PipelineEvent) -> Self { + Self::Pipeline(event) } } impl From for NodeEvent { fn from(event: BeaconConsensusEngineEvent) -> Self { - NodeEvent::ConsensusEngine(event) + Self::ConsensusEngine(event) } } impl From for NodeEvent { fn from(event: ConsensusLayerHealthEvent) -> Self { - NodeEvent::ConsensusLayerHealth(event) + Self::ConsensusLayerHealth(event) } } impl From for NodeEvent { fn from(event: PrunerEvent) -> Self { - NodeEvent::Pruner(event) + Self::Pruner(event) } } impl From for NodeEvent { fn from(event: StaticFileProducerEvent) -> Self { - NodeEvent::StaticFileProducer(event) + Self::StaticFileProducer(event) } } 
diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs index 1041f30c8112d..57958b5ec52aa 100644 --- a/crates/optimism/evm/src/error.rs +++ b/crates/optimism/evm/src/error.rs @@ -24,6 +24,6 @@ pub enum OptimismBlockExecutionError { impl From for BlockExecutionError { fn from(err: OptimismBlockExecutionError) -> Self { - BlockExecutionError::other(err) + Self::other(err) } } diff --git a/crates/optimism/node/src/rpc.rs b/crates/optimism/node/src/rpc.rs index 515e1d8eb5732..d3d004a193333 100644 --- a/crates/optimism/node/src/rpc.rs +++ b/crates/optimism/node/src/rpc.rs @@ -35,7 +35,7 @@ impl ToRpcError for SequencerRpcError { impl From for EthApiError { fn from(err: SequencerRpcError) -> Self { - EthApiError::other(err) + Self::other(err) } } @@ -108,7 +108,7 @@ impl SequencerClient { #[async_trait::async_trait] impl RawTransactionForwarder for SequencerClient { async fn forward_raw_transaction(&self, tx: &[u8]) -> EthResult<()> { - SequencerClient::forward_raw_transaction(self, tx).await?; + Self::forward_raw_transaction(self, tx).await?; Ok(()) } } diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 738246bf8c4ce..0314bcc847234 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -239,10 +239,7 @@ impl From for ExecutionPayloadEnvelopeV2 { fn from(value: OptimismBuiltPayload) -> Self { let OptimismBuiltPayload { block, fees, .. 
} = value; - ExecutionPayloadEnvelopeV2 { - block_value: fees, - execution_payload: convert_block_to_payload_field_v2(block), - } + Self { block_value: fees, execution_payload: convert_block_to_payload_field_v2(block) } } } @@ -256,7 +253,7 @@ impl From for OptimismExecutionPayloadEnvelopeV3 { } else { B256::ZERO }; - OptimismExecutionPayloadEnvelopeV3 { + Self { execution_payload: block_to_payload_v3(block).0, block_value: fees, // From the engine API spec: @@ -283,7 +280,7 @@ impl From for OptimismExecutionPayloadEnvelopeV4 { } else { B256::ZERO }; - OptimismExecutionPayloadEnvelopeV4 { + Self { execution_payload: block_to_payload_v4(block), block_value: fees, // From the engine API spec: diff --git a/crates/payload/builder/src/error.rs b/crates/payload/builder/src/error.rs index af95b279b56de..a7aa7e88be9a4 100644 --- a/crates/payload/builder/src/error.rs +++ b/crates/payload/builder/src/error.rs @@ -37,18 +37,18 @@ impl PayloadBuilderError { where E: std::error::Error + Send + Sync + 'static, { - PayloadBuilderError::Other(Box::new(error)) + Self::Other(Box::new(error)) } } impl From for PayloadBuilderError { fn from(error: ProviderError) -> Self { - PayloadBuilderError::Internal(RethError::Provider(error)) + Self::Internal(RethError::Provider(error)) } } impl From for PayloadBuilderError { fn from(_: oneshot::error::RecvError) -> Self { - PayloadBuilderError::ChannelClosed + Self::ChannelClosed } } diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index 81d3445b02c82..5228083ab7fc2 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -479,17 +479,17 @@ where { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - PayloadServiceCommand::BuildNewPayload(f0, f1) => { + Self::BuildNewPayload(f0, f1) => { f.debug_tuple("BuildNewPayload").field(&f0).field(&f1).finish() } - PayloadServiceCommand::BestPayload(f0, f1) => { + Self::BestPayload(f0, f1) => { 
f.debug_tuple("BestPayload").field(&f0).field(&f1).finish() } - PayloadServiceCommand::PayloadAttributes(f0, f1) => { + Self::PayloadAttributes(f0, f1) => { f.debug_tuple("PayloadAttributes").field(&f0).field(&f1).finish() } - PayloadServiceCommand::Resolve(f0, _f1) => f.debug_tuple("Resolve").field(&f0).finish(), - PayloadServiceCommand::Subscribe(f0) => f.debug_tuple("Subscribe").field(&f0).finish(), + Self::Resolve(f0, _f1) => f.debug_tuple("Resolve").field(&f0).finish(), + Self::Subscribe(f0) => f.debug_tuple("Subscribe").field(&f0).finish(), } } } diff --git a/crates/primitives/benches/integer_list.rs b/crates/primitives/benches/integer_list.rs index 56b0e9e383d00..8ce985d13010c 100644 --- a/crates/primitives/benches/integer_list.rs +++ b/crates/primitives/benches/integer_list.rs @@ -212,7 +212,7 @@ mod elias_fano { } impl<'de> Deserialize<'de> for IntegerList { - fn deserialize(deserializer: D) -> Result + fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { diff --git a/crates/primitives/src/account.rs b/crates/primitives/src/account.rs index 78e796147f1f1..1bc691ec1549a 100644 --- a/crates/primitives/src/account.rs +++ b/crates/primitives/src/account.rs @@ -38,7 +38,7 @@ impl Account { /// Makes an [Account] from [GenesisAccount] type pub fn from_genesis_account(value: &GenesisAccount) -> Self { - Account { + Self { // nonce must exist, so we default to zero when converting a genesis account nonce: value.nonce.unwrap_or_default(), balance: value.balance, @@ -115,9 +115,9 @@ impl Compact for Bytecode { let bytes = Bytes::from(buf.copy_to_bytes(len as usize)); let variant = buf.read_u8().expect("could not read bytecode variant"); let decoded = match variant { - 0 => Bytecode(RevmBytecode::new_raw(bytes)), + 0 => Self(RevmBytecode::new_raw(bytes)), 1 => unreachable!("Junk data in database: checked Bytecode variant was removed"), - 2 => Bytecode(unsafe { + 2 => Self(unsafe { RevmBytecode::new_analyzed( bytes, buf.read_u64::().unwrap() as 
usize, diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index bd68860d26e5e..b1b75f4db5723 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -122,7 +122,7 @@ impl TryFrom for Transaction { .into(), )) } - Ok(Transaction::Legacy(TxLegacy { + Ok(Self::Legacy(TxLegacy { chain_id: tx.chain_id, nonce: tx.nonce, gas_price: tx.gas_price.ok_or(ConversionError::MissingGasPrice)?, @@ -137,7 +137,7 @@ impl TryFrom for Transaction { } Some(TxType::Eip2930) => { // eip2930 - Ok(Transaction::Eip2930(TxEip2930 { + Ok(Self::Eip2930(TxEip2930 { chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, nonce: tx.nonce, gas_limit: tx @@ -153,7 +153,7 @@ impl TryFrom for Transaction { } Some(TxType::Eip1559) => { // EIP-1559 - Ok(Transaction::Eip1559(TxEip1559 { + Ok(Self::Eip1559(TxEip1559 { chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, nonce: tx.nonce, max_priority_fee_per_gas: tx @@ -174,7 +174,7 @@ impl TryFrom for Transaction { } Some(TxType::Eip4844) => { // EIP-4844 - Ok(Transaction::Eip4844(TxEip4844 { + Ok(Self::Eip4844(TxEip4844 { chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, nonce: tx.nonce, max_priority_fee_per_gas: tx @@ -215,7 +215,7 @@ impl TryFrom for TransactionSigned { let signature = tx.signature.ok_or(ConversionError::MissingSignature)?; let transaction: Transaction = tx.try_into()?; - Ok(TransactionSigned::from_transaction_and_signature( + Ok(Self::from_transaction_and_signature( transaction.clone(), Signature { r: signature.r, diff --git a/crates/primitives/src/chain/info.rs b/crates/primitives/src/chain/info.rs index fb954345b79ba..38b73e2768ae5 100644 --- a/crates/primitives/src/chain/info.rs +++ b/crates/primitives/src/chain/info.rs @@ -11,6 +11,6 @@ pub struct ChainInfo { impl From for BlockNumHash { fn from(value: ChainInfo) -> Self { - BlockNumHash { number: value.best_number, hash: value.best_hash } + Self { number: 
value.best_number, hash: value.best_hash } } } diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 3c166b2c7206e..e90b314e41ea6 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -468,13 +468,13 @@ pub enum BaseFeeParamsKind { impl From for BaseFeeParamsKind { fn from(params: BaseFeeParams) -> Self { - BaseFeeParamsKind::Constant(params) + Self::Constant(params) } } impl From for BaseFeeParamsKind { fn from(params: ForkBaseFeeParams) -> Self { - BaseFeeParamsKind::Variable(params) + Self::Variable(params) } } @@ -485,7 +485,7 @@ pub struct ForkBaseFeeParams(Vec<(Hardfork, BaseFeeParams)>); impl From> for ForkBaseFeeParams { fn from(params: Vec<(Hardfork, BaseFeeParams)>) -> Self { - ForkBaseFeeParams(params) + Self(params) } } @@ -533,8 +533,8 @@ pub struct ChainSpec { } impl Default for ChainSpec { - fn default() -> ChainSpec { - ChainSpec { + fn default() -> Self { + Self { chain: Default::default(), genesis_hash: Default::default(), genesis: Default::default(), @@ -1347,7 +1347,7 @@ pub enum ForkCondition { impl ForkCondition { /// Returns true if the fork condition is timestamp based. pub fn is_timestamp(&self) -> bool { - matches!(self, ForkCondition::Timestamp(_)) + matches!(self, Self::Timestamp(_)) } /// Checks whether the fork condition is satisfied at the given block. @@ -1356,15 +1356,15 @@ impl ForkCondition { /// /// For timestamp conditions, this will always return false. pub fn active_at_block(&self, current_block: BlockNumber) -> bool { - matches!(self, ForkCondition::Block(block) - | ForkCondition::TTD { fork_block: Some(block), .. } if current_block >= *block) + matches!(self, Self::Block(block) + | Self::TTD { fork_block: Some(block), .. } if current_block >= *block) } /// Checks if the given block is the first block that satisfies the fork condition. /// /// This will return false for any condition that is not block based. 
pub fn transitions_at_block(&self, current_block: BlockNumber) -> bool { - matches!(self, ForkCondition::Block(block) if current_block == *block) + matches!(self, Self::Block(block) if current_block == *block) } /// Checks whether the fork condition is satisfied at the given total difficulty and difficulty @@ -1377,7 +1377,7 @@ impl ForkCondition { /// /// This will return false for any condition that is not TTD-based. pub fn active_at_ttd(&self, ttd: U256, difficulty: U256) -> bool { - matches!(self, ForkCondition::TTD { total_difficulty, .. } + matches!(self, Self::TTD { total_difficulty, .. } if ttd.saturating_sub(difficulty) >= *total_difficulty) } @@ -1385,7 +1385,7 @@ impl ForkCondition { /// /// This will return false for any condition that is not timestamp-based. pub fn active_at_timestamp(&self, timestamp: u64) -> bool { - matches!(self, ForkCondition::Timestamp(time) if timestamp >= *time) + matches!(self, Self::Timestamp(time) if timestamp >= *time) } /// Checks whether the fork condition is satisfied at the given head block. @@ -1406,7 +1406,7 @@ impl ForkCondition { /// Returns `None` for fork conditions that are not TTD based. pub fn ttd(&self) -> Option { match self { - ForkCondition::TTD { total_difficulty, .. } => Some(*total_difficulty), + Self::TTD { total_difficulty, .. } => Some(*total_difficulty), _ => None, } } @@ -1414,7 +1414,7 @@ impl ForkCondition { /// Returns the timestamp of the fork condition, if it is timestamp based. 
pub fn as_timestamp(&self) -> Option { match self { - ForkCondition::Timestamp(timestamp) => Some(*timestamp), + Self::Timestamp(timestamp) => Some(*timestamp), _ => None, } } @@ -1602,7 +1602,7 @@ pub struct DepositContract { impl DepositContract { fn new(address: Address, block: BlockNumber, topic: B256) -> Self { - DepositContract { address, block, topic } + Self { address, block, topic } } } diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index b9574785af3df..7f48921fbdee1 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -111,7 +111,7 @@ pub struct Header { impl Default for Header { fn default() -> Self { - Header { + Self { parent_hash: Default::default(), ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: Default::default(), @@ -674,7 +674,7 @@ impl SealedHeader { #[inline(always)] fn validate_gas_limit( &self, - parent: &SealedHeader, + parent: &Self, chain_spec: &ChainSpec, ) -> Result<(), HeaderValidationError> { // Determine the parent gas limit, considering elasticity multiplier on the London fork. @@ -739,7 +739,7 @@ impl SealedHeader { /// of certain features (e.g., Optimism feature) or the activation of specific hardforks. pub fn validate_against_parent( &self, - parent: &SealedHeader, + parent: &Self, chain_spec: &ChainSpec, ) -> Result<(), HeaderValidationError> { // Parent number is consistent. @@ -826,7 +826,7 @@ impl SealedHeader { /// parent header fields. 
pub fn validate_4844_header_against_parent( &self, - parent: &SealedHeader, + parent: &Self, ) -> Result<(), HeaderValidationError> { // From [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#header-extension): // @@ -885,7 +885,7 @@ impl proptest::arbitrary::Arbitrary for SealedHeader { // map valid header strategy by sealing valid_header_strategy().prop_map(|header| header.seal_slow()).boxed() } - type Strategy = proptest::strategy::BoxedStrategy; + type Strategy = proptest::strategy::BoxedStrategy; } #[cfg(any(test, feature = "arbitrary"))] @@ -971,12 +971,12 @@ pub enum HeadersDirection { impl HeadersDirection { /// Returns true for rising block numbers pub fn is_rising(&self) -> bool { - matches!(self, HeadersDirection::Rising) + matches!(self, Self::Rising) } /// Returns true for falling block numbers pub fn is_falling(&self) -> bool { - matches!(self, HeadersDirection::Falling) + matches!(self, Self::Falling) } /// Converts the bool into a direction. @@ -987,9 +987,9 @@ impl HeadersDirection { /// [`HeadersDirection::Falling`] block numbers for `reverse == 1 == true` pub fn new(reverse: bool) -> Self { if reverse { - HeadersDirection::Falling + Self::Falling } else { - HeadersDirection::Rising + Self::Rising } } } diff --git a/crates/primitives/src/integer_list.rs b/crates/primitives/src/integer_list.rs index edcf310860a7e..33ddae4524a34 100644 --- a/crates/primitives/src/integer_list.rs +++ b/crates/primitives/src/integer_list.rs @@ -123,7 +123,7 @@ impl<'de> Visitor<'de> for IntegerListVisitor { } impl<'de> Deserialize<'de> for IntegerList { - fn deserialize(deserializer: D) -> Result + fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { diff --git a/crates/primitives/src/log.rs b/crates/primitives/src/log.rs index 628a20f831c2f..b2b6b8a4852ce 100644 --- a/crates/primitives/src/log.rs +++ b/crates/primitives/src/log.rs @@ -51,8 +51,8 @@ mod tests { } impl From for AlloyLog { - fn from(log: Log) -> AlloyLog { - 
AlloyLog::new_unchecked(log.address, log.topics, log.data) + fn from(log: Log) -> Self { + Self::new_unchecked(log.address, log.topics, log.data) } } diff --git a/crates/primitives/src/prune/mode.rs b/crates/primitives/src/prune/mode.rs index 3454573b9469e..46d38aa7103b4 100644 --- a/crates/primitives/src/prune/mode.rs +++ b/crates/primitives/src/prune/mode.rs @@ -29,16 +29,16 @@ impl PruneMode { tip: BlockNumber, segment: PruneSegment, purpose: PrunePurpose, - ) -> Result, PruneSegmentError> { + ) -> Result, PruneSegmentError> { let result = match self { - PruneMode::Full if segment.min_blocks(purpose) == 0 => Some((tip, *self)), - PruneMode::Distance(distance) if *distance > tip => None, // Nothing to prune yet - PruneMode::Distance(distance) if *distance >= segment.min_blocks(purpose) => { + Self::Full if segment.min_blocks(purpose) == 0 => Some((tip, *self)), + Self::Distance(distance) if *distance > tip => None, // Nothing to prune yet + Self::Distance(distance) if *distance >= segment.min_blocks(purpose) => { Some((tip - distance, *self)) } - PruneMode::Before(n) if *n == tip + 1 && purpose.is_static_file() => Some((tip, *self)), - PruneMode::Before(n) if *n > tip => None, // Nothing to prune yet - PruneMode::Before(n) if tip - n >= segment.min_blocks(purpose) => Some((n - 1, *self)), + Self::Before(n) if *n == tip + 1 && purpose.is_static_file() => Some((tip, *self)), + Self::Before(n) if *n > tip => None, // Nothing to prune yet + Self::Before(n) if tip - n >= segment.min_blocks(purpose) => Some((n - 1, *self)), _ => return Err(PruneSegmentError::Configuration(segment)), }; Ok(result) @@ -47,14 +47,14 @@ impl PruneMode { /// Check if target block should be pruned according to the provided prune mode and tip. 
pub fn should_prune(&self, block: BlockNumber, tip: BlockNumber) -> bool { match self { - PruneMode::Full => true, - PruneMode::Distance(distance) => { + Self::Full => true, + Self::Distance(distance) => { if *distance > tip { return false } block < tip - *distance } - PruneMode::Before(n) => *n > block, + Self::Before(n) => *n > block, } } diff --git a/crates/primitives/src/prune/target.rs b/crates/primitives/src/prune/target.rs index 7f39c8d74ce3f..255976531fb79 100644 --- a/crates/primitives/src/prune/target.rs +++ b/crates/primitives/src/prune/target.rs @@ -48,7 +48,7 @@ pub struct PruneModes { impl PruneModes { /// Sets pruning to no target. pub fn none() -> Self { - PruneModes::default() + Self::default() } /// Sets pruning to all targets. diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 85470cb2e81f3..1e9a2ac1fcaa2 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -157,7 +157,7 @@ impl FromIterator>> for Receipts { impl From for ReceiptWithBloom { fn from(receipt: Receipt) -> Self { let bloom = receipt.bloom_slow(); - ReceiptWithBloom { receipt, bloom } + Self { receipt, bloom } } } @@ -245,7 +245,7 @@ impl proptest::arbitrary::Arbitrary for Receipt { arbitrary_receipt().boxed() } - type Strategy = proptest::strategy::BoxedStrategy; + type Strategy = proptest::strategy::BoxedStrategy; } #[cfg(any(test, feature = "arbitrary"))] diff --git a/crates/primitives/src/stage/checkpoints.rs b/crates/primitives/src/stage/checkpoints.rs index d9c10605c174d..a82a77dee8bd7 100644 --- a/crates/primitives/src/stage/checkpoints.rs +++ b/crates/primitives/src/stage/checkpoints.rs @@ -71,7 +71,7 @@ impl Compact for MerkleCheckpoint { } let (state, buf) = HashBuilderState::from_compact(buf, 0); - (MerkleCheckpoint { target_block, last_account_key, walker_stack, state }, buf) + (Self { target_block, last_account_key, walker_stack, state }, buf) } } diff --git a/crates/primitives/src/stage/id.rs 
b/crates/primitives/src/stage/id.rs index 2779c260801c7..f6b8508e03cfe 100644 --- a/crates/primitives/src/stage/id.rs +++ b/crates/primitives/src/stage/id.rs @@ -38,66 +38,66 @@ pub enum StageId { impl StageId { /// All supported Stages - pub const ALL: [StageId; 12] = [ - StageId::Headers, - StageId::Bodies, - StageId::SenderRecovery, - StageId::Execution, - StageId::MerkleUnwind, - StageId::AccountHashing, - StageId::StorageHashing, - StageId::MerkleExecute, - StageId::TransactionLookup, - StageId::IndexStorageHistory, - StageId::IndexAccountHistory, - StageId::Finish, + pub const ALL: [Self; 12] = [ + Self::Headers, + Self::Bodies, + Self::SenderRecovery, + Self::Execution, + Self::MerkleUnwind, + Self::AccountHashing, + Self::StorageHashing, + Self::MerkleExecute, + Self::TransactionLookup, + Self::IndexStorageHistory, + Self::IndexAccountHistory, + Self::Finish, ]; /// Stages that require state. - pub const STATE_REQUIRED: [StageId; 7] = [ - StageId::Execution, - StageId::MerkleUnwind, - StageId::AccountHashing, - StageId::StorageHashing, - StageId::MerkleExecute, - StageId::IndexStorageHistory, - StageId::IndexAccountHistory, + pub const STATE_REQUIRED: [Self; 7] = [ + Self::Execution, + Self::MerkleUnwind, + Self::AccountHashing, + Self::StorageHashing, + Self::MerkleExecute, + Self::IndexStorageHistory, + Self::IndexAccountHistory, ]; /// Return stage id formatted as string. 
pub fn as_str(&self) -> &str { match self { #[allow(deprecated)] - StageId::StaticFile => "StaticFile", - StageId::Headers => "Headers", - StageId::Bodies => "Bodies", - StageId::SenderRecovery => "SenderRecovery", - StageId::Execution => "Execution", - StageId::MerkleUnwind => "MerkleUnwind", - StageId::AccountHashing => "AccountHashing", - StageId::StorageHashing => "StorageHashing", - StageId::MerkleExecute => "MerkleExecute", - StageId::TransactionLookup => "TransactionLookup", - StageId::IndexAccountHistory => "IndexAccountHistory", - StageId::IndexStorageHistory => "IndexStorageHistory", - StageId::Finish => "Finish", - StageId::Other(s) => s, + Self::StaticFile => "StaticFile", + Self::Headers => "Headers", + Self::Bodies => "Bodies", + Self::SenderRecovery => "SenderRecovery", + Self::Execution => "Execution", + Self::MerkleUnwind => "MerkleUnwind", + Self::AccountHashing => "AccountHashing", + Self::StorageHashing => "StorageHashing", + Self::MerkleExecute => "MerkleExecute", + Self::TransactionLookup => "TransactionLookup", + Self::IndexAccountHistory => "IndexAccountHistory", + Self::IndexStorageHistory => "IndexStorageHistory", + Self::Finish => "Finish", + Self::Other(s) => s, } } /// Returns true if it's a downloading stage [StageId::Headers] or [StageId::Bodies] pub fn is_downloading_stage(&self) -> bool { - matches!(self, StageId::Headers | StageId::Bodies) + matches!(self, Self::Headers | Self::Bodies) } /// Returns `true` if it's [TransactionLookup](StageId::TransactionLookup) stage. 
pub fn is_tx_lookup(&self) -> bool { - matches!(self, StageId::TransactionLookup) + matches!(self, Self::TransactionLookup) } /// Returns true indicating if it's the finish stage [StageId::Finish] pub fn is_finish(&self) -> bool { - matches!(self, StageId::Finish) + matches!(self, Self::Finish) } } diff --git a/crates/primitives/src/stage/mod.rs b/crates/primitives/src/stage/mod.rs index 3c7c972bcf6fe..9976cf575315c 100644 --- a/crates/primitives/src/stage/mod.rs +++ b/crates/primitives/src/stage/mod.rs @@ -29,8 +29,8 @@ impl PipelineTarget { /// - `None`: If the target is for backward unwinding. pub fn sync_target(self) -> Option { match self { - PipelineTarget::Sync(hash) => Some(hash), - PipelineTarget::Unwind(_) => None, + Self::Sync(hash) => Some(hash), + Self::Unwind(_) => None, } } @@ -42,8 +42,8 @@ impl PipelineTarget { /// - `None`: If the target is for forward synchronization. pub fn unwind_target(self) -> Option { match self { - PipelineTarget::Sync(_) => None, - PipelineTarget::Unwind(number) => Some(number), + Self::Sync(_) => None, + Self::Unwind(number) => Some(number), } } } diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index f53405d08a45a..68c87a034f2b6 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -141,36 +141,36 @@ impl Transaction { /// It is only for signature signing or signer recovery. pub fn signature_hash(&self) -> B256 { match self { - Transaction::Legacy(tx) => tx.signature_hash(), - Transaction::Eip2930(tx) => tx.signature_hash(), - Transaction::Eip1559(tx) => tx.signature_hash(), - Transaction::Eip4844(tx) => tx.signature_hash(), + Self::Legacy(tx) => tx.signature_hash(), + Self::Eip2930(tx) => tx.signature_hash(), + Self::Eip1559(tx) => tx.signature_hash(), + Self::Eip4844(tx) => tx.signature_hash(), #[cfg(feature = "optimism")] - Transaction::Deposit(_) => B256::ZERO, + Self::Deposit(_) => B256::ZERO, } } /// Get chain_id. 
pub fn chain_id(&self) -> Option { match self { - Transaction::Legacy(TxLegacy { chain_id, .. }) => *chain_id, - Transaction::Eip2930(TxEip2930 { chain_id, .. }) | - Transaction::Eip1559(TxEip1559 { chain_id, .. }) | - Transaction::Eip4844(TxEip4844 { chain_id, .. }) => Some(*chain_id), + Self::Legacy(TxLegacy { chain_id, .. }) => *chain_id, + Self::Eip2930(TxEip2930 { chain_id, .. }) | + Self::Eip1559(TxEip1559 { chain_id, .. }) | + Self::Eip4844(TxEip4844 { chain_id, .. }) => Some(*chain_id), #[cfg(feature = "optimism")] - Transaction::Deposit(_) => None, + Self::Deposit(_) => None, } } /// Sets the transaction's chain id to the provided value. pub fn set_chain_id(&mut self, chain_id: u64) { match self { - Transaction::Legacy(TxLegacy { chain_id: ref mut c, .. }) => *c = Some(chain_id), - Transaction::Eip2930(TxEip2930 { chain_id: ref mut c, .. }) | - Transaction::Eip1559(TxEip1559 { chain_id: ref mut c, .. }) | - Transaction::Eip4844(TxEip4844 { chain_id: ref mut c, .. }) => *c = chain_id, + Self::Legacy(TxLegacy { chain_id: ref mut c, .. }) => *c = Some(chain_id), + Self::Eip2930(TxEip2930 { chain_id: ref mut c, .. }) | + Self::Eip1559(TxEip1559 { chain_id: ref mut c, .. }) | + Self::Eip4844(TxEip4844 { chain_id: ref mut c, .. }) => *c = chain_id, #[cfg(feature = "optimism")] - Transaction::Deposit(_) => { /* noop */ } + Self::Deposit(_) => { /* noop */ } } } @@ -178,12 +178,12 @@ impl Transaction { /// [`TxKind::Create`] if the transaction is a contract creation. pub fn kind(&self) -> TxKind { match self { - Transaction::Legacy(TxLegacy { to, .. }) | - Transaction::Eip2930(TxEip2930 { to, .. }) | - Transaction::Eip1559(TxEip1559 { to, .. }) => *to, - Transaction::Eip4844(TxEip4844 { to, .. }) => TxKind::Call(*to), + Self::Legacy(TxLegacy { to, .. }) | + Self::Eip2930(TxEip2930 { to, .. }) | + Self::Eip1559(TxEip1559 { to, .. }) => *to, + Self::Eip4844(TxEip4844 { to, .. 
}) => TxKind::Call(*to), #[cfg(feature = "optimism")] - Transaction::Deposit(TxDeposit { to, .. }) => *to, + Self::Deposit(TxDeposit { to, .. }) => *to, } } @@ -198,37 +198,37 @@ impl Transaction { /// Get the transaction's type pub fn tx_type(&self) -> TxType { match self { - Transaction::Legacy(legacy_tx) => legacy_tx.tx_type(), - Transaction::Eip2930(access_list_tx) => access_list_tx.tx_type(), - Transaction::Eip1559(dynamic_fee_tx) => dynamic_fee_tx.tx_type(), - Transaction::Eip4844(blob_tx) => blob_tx.tx_type(), + Self::Legacy(legacy_tx) => legacy_tx.tx_type(), + Self::Eip2930(access_list_tx) => access_list_tx.tx_type(), + Self::Eip1559(dynamic_fee_tx) => dynamic_fee_tx.tx_type(), + Self::Eip4844(blob_tx) => blob_tx.tx_type(), #[cfg(feature = "optimism")] - Transaction::Deposit(deposit_tx) => deposit_tx.tx_type(), + Self::Deposit(deposit_tx) => deposit_tx.tx_type(), } } /// Gets the transaction's value field. pub fn value(&self) -> U256 { *match self { - Transaction::Legacy(TxLegacy { value, .. }) | - Transaction::Eip2930(TxEip2930 { value, .. }) | - Transaction::Eip1559(TxEip1559 { value, .. }) | - Transaction::Eip4844(TxEip4844 { value, .. }) => value, + Self::Legacy(TxLegacy { value, .. }) | + Self::Eip2930(TxEip2930 { value, .. }) | + Self::Eip1559(TxEip1559 { value, .. }) | + Self::Eip4844(TxEip4844 { value, .. }) => value, #[cfg(feature = "optimism")] - Transaction::Deposit(TxDeposit { value, .. }) => value, + Self::Deposit(TxDeposit { value, .. }) => value, } } /// Get the transaction's nonce. pub fn nonce(&self) -> u64 { match self { - Transaction::Legacy(TxLegacy { nonce, .. }) | - Transaction::Eip2930(TxEip2930 { nonce, .. }) | - Transaction::Eip1559(TxEip1559 { nonce, .. }) | - Transaction::Eip4844(TxEip4844 { nonce, .. }) => *nonce, + Self::Legacy(TxLegacy { nonce, .. }) | + Self::Eip2930(TxEip2930 { nonce, .. }) | + Self::Eip1559(TxEip1559 { nonce, .. }) | + Self::Eip4844(TxEip4844 { nonce, .. 
}) => *nonce, // Deposit transactions do not have nonces. #[cfg(feature = "optimism")] - Transaction::Deposit(_) => 0, + Self::Deposit(_) => 0, } } @@ -237,34 +237,34 @@ impl Transaction { /// Returns `None` for legacy transactions. pub fn access_list(&self) -> Option<&AccessList> { match self { - Transaction::Legacy(_) => None, - Transaction::Eip2930(tx) => Some(&tx.access_list), - Transaction::Eip1559(tx) => Some(&tx.access_list), - Transaction::Eip4844(tx) => Some(&tx.access_list), + Self::Legacy(_) => None, + Self::Eip2930(tx) => Some(&tx.access_list), + Self::Eip1559(tx) => Some(&tx.access_list), + Self::Eip4844(tx) => Some(&tx.access_list), #[cfg(feature = "optimism")] - Transaction::Deposit(_) => None, + Self::Deposit(_) => None, } } /// Get the gas limit of the transaction. pub fn gas_limit(&self) -> u64 { match self { - Transaction::Legacy(TxLegacy { gas_limit, .. }) | - Transaction::Eip2930(TxEip2930 { gas_limit, .. }) | - Transaction::Eip1559(TxEip1559 { gas_limit, .. }) | - Transaction::Eip4844(TxEip4844 { gas_limit, .. }) => *gas_limit, + Self::Legacy(TxLegacy { gas_limit, .. }) | + Self::Eip2930(TxEip2930 { gas_limit, .. }) | + Self::Eip1559(TxEip1559 { gas_limit, .. }) | + Self::Eip4844(TxEip4844 { gas_limit, .. }) => *gas_limit, #[cfg(feature = "optimism")] - Transaction::Deposit(TxDeposit { gas_limit, .. }) => *gas_limit, + Self::Deposit(TxDeposit { gas_limit, .. }) => *gas_limit, } } /// Returns true if the tx supports dynamic fees pub fn is_dynamic_fee(&self) -> bool { match self { - Transaction::Legacy(_) | Transaction::Eip2930(_) => false, - Transaction::Eip1559(_) | Transaction::Eip4844(_) => true, + Self::Legacy(_) | Self::Eip2930(_) => false, + Self::Eip1559(_) | Self::Eip4844(_) => true, #[cfg(feature = "optimism")] - Transaction::Deposit(_) => false, + Self::Deposit(_) => false, } } @@ -273,14 +273,14 @@ impl Transaction { /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). 
pub fn max_fee_per_gas(&self) -> u128 { match self { - Transaction::Legacy(TxLegacy { gas_price, .. }) | - Transaction::Eip2930(TxEip2930 { gas_price, .. }) => *gas_price, - Transaction::Eip1559(TxEip1559 { max_fee_per_gas, .. }) | - Transaction::Eip4844(TxEip4844 { max_fee_per_gas, .. }) => *max_fee_per_gas, + Self::Legacy(TxLegacy { gas_price, .. }) | + Self::Eip2930(TxEip2930 { gas_price, .. }) => *gas_price, + Self::Eip1559(TxEip1559 { max_fee_per_gas, .. }) | + Self::Eip4844(TxEip4844 { max_fee_per_gas, .. }) => *max_fee_per_gas, // Deposit transactions buy their L2 gas on L1 and, as such, the L2 gas is not // refundable. #[cfg(feature = "optimism")] - Transaction::Deposit(_) => 0, + Self::Deposit(_) => 0, } } @@ -290,13 +290,13 @@ impl Transaction { /// This is also commonly referred to as the "Gas Tip Cap" (`GasTipCap`). pub fn max_priority_fee_per_gas(&self) -> Option { match self { - Transaction::Legacy(_) | Transaction::Eip2930(_) => None, - Transaction::Eip1559(TxEip1559 { max_priority_fee_per_gas, .. }) | - Transaction::Eip4844(TxEip4844 { max_priority_fee_per_gas, .. }) => { + Self::Legacy(_) | Self::Eip2930(_) => None, + Self::Eip1559(TxEip1559 { max_priority_fee_per_gas, .. }) | + Self::Eip4844(TxEip4844 { max_priority_fee_per_gas, .. }) => { Some(*max_priority_fee_per_gas) } #[cfg(feature = "optimism")] - Transaction::Deposit(_) => None, + Self::Deposit(_) => None, } } @@ -306,12 +306,12 @@ impl Transaction { /// This is also commonly referred to as the "blob versioned hashes" (`BlobVersionedHashes`). pub fn blob_versioned_hashes(&self) -> Option> { match self { - Transaction::Legacy(_) | Transaction::Eip2930(_) | Transaction::Eip1559(_) => None, - Transaction::Eip4844(TxEip4844 { blob_versioned_hashes, .. }) => { + Self::Legacy(_) | Self::Eip2930(_) | Self::Eip1559(_) => None, + Self::Eip4844(TxEip4844 { blob_versioned_hashes, .. 
}) => { Some(blob_versioned_hashes.to_vec()) } #[cfg(feature = "optimism")] - Transaction::Deposit(_) => None, + Self::Deposit(_) => None, } } @@ -322,9 +322,7 @@ impl Transaction { /// This is also commonly referred to as the "Blob Gas Fee Cap" (`BlobGasFeeCap`). pub fn max_fee_per_blob_gas(&self) -> Option { match self { - Transaction::Eip4844(TxEip4844 { max_fee_per_blob_gas, .. }) => { - Some(*max_fee_per_blob_gas) - } + Self::Eip4844(TxEip4844 { max_fee_per_blob_gas, .. }) => Some(*max_fee_per_blob_gas), _ => None, } } @@ -347,14 +345,12 @@ impl Transaction { /// non-EIP-1559 transactions. pub fn priority_fee_or_price(&self) -> u128 { match self { - Transaction::Legacy(TxLegacy { gas_price, .. }) | - Transaction::Eip2930(TxEip2930 { gas_price, .. }) => *gas_price, - Transaction::Eip1559(TxEip1559 { max_priority_fee_per_gas, .. }) | - Transaction::Eip4844(TxEip4844 { max_priority_fee_per_gas, .. }) => { - *max_priority_fee_per_gas - } + Self::Legacy(TxLegacy { gas_price, .. }) | + Self::Eip2930(TxEip2930 { gas_price, .. }) => *gas_price, + Self::Eip1559(TxEip1559 { max_priority_fee_per_gas, .. }) | + Self::Eip4844(TxEip4844 { max_priority_fee_per_gas, .. }) => *max_priority_fee_per_gas, #[cfg(feature = "optimism")] - Transaction::Deposit(_) => 0, + Self::Deposit(_) => 0, } } @@ -363,12 +359,12 @@ impl Transaction { /// If the transaction is a legacy or EIP2930 transaction, the gas price is returned. 
pub fn effective_gas_price(&self, base_fee: Option) -> u128 { match self { - Transaction::Legacy(tx) => tx.gas_price, - Transaction::Eip2930(tx) => tx.gas_price, - Transaction::Eip1559(dynamic_tx) => dynamic_tx.effective_gas_price(base_fee), - Transaction::Eip4844(dynamic_tx) => dynamic_tx.effective_gas_price(base_fee), + Self::Legacy(tx) => tx.gas_price, + Self::Eip2930(tx) => tx.gas_price, + Self::Eip1559(dynamic_tx) => dynamic_tx.effective_gas_price(base_fee), + Self::Eip4844(dynamic_tx) => dynamic_tx.effective_gas_price(base_fee), #[cfg(feature = "optimism")] - Transaction::Deposit(_) => 0, + Self::Deposit(_) => 0, } } @@ -406,12 +402,12 @@ impl Transaction { /// Get the transaction's input field. pub fn input(&self) -> &Bytes { match self { - Transaction::Legacy(TxLegacy { input, .. }) | - Transaction::Eip2930(TxEip2930 { input, .. }) | - Transaction::Eip1559(TxEip1559 { input, .. }) | - Transaction::Eip4844(TxEip4844 { input, .. }) => input, + Self::Legacy(TxLegacy { input, .. }) | + Self::Eip2930(TxEip2930 { input, .. }) | + Self::Eip1559(TxEip1559 { input, .. }) | + Self::Eip4844(TxEip4844 { input, .. }) => input, #[cfg(feature = "optimism")] - Transaction::Deposit(TxDeposit { input, .. }) => input, + Self::Deposit(TxDeposit { input, .. }) => input, } } @@ -420,7 +416,7 @@ impl Transaction { #[cfg(feature = "optimism")] pub fn source_hash(&self) -> Option { match self { - Transaction::Deposit(TxDeposit { source_hash, .. }) => Some(*source_hash), + Self::Deposit(TxDeposit { source_hash, .. }) => Some(*source_hash), _ => None, } } @@ -430,7 +426,7 @@ impl Transaction { #[cfg(feature = "optimism")] pub fn mint(&self) -> Option { match self { - Transaction::Deposit(TxDeposit { mint, .. }) => *mint, + Self::Deposit(TxDeposit { mint, .. }) => *mint, _ => None, } } @@ -440,7 +436,7 @@ impl Transaction { #[cfg(feature = "optimism")] pub fn is_system_transaction(&self) -> bool { match self { - Transaction::Deposit(TxDeposit { is_system_transaction, .. 
}) => *is_system_transaction, + Self::Deposit(TxDeposit { is_system_transaction, .. }) => *is_system_transaction, _ => false, } } @@ -448,7 +444,7 @@ impl Transaction { /// Returns whether or not the transaction is an Optimism Deposited transaction. #[cfg(feature = "optimism")] pub fn is_deposit(&self) -> bool { - matches!(self, Transaction::Deposit(_)) + matches!(self, Self::Deposit(_)) } /// This encodes the transaction _without_ the signature, and is only suitable for creating a @@ -466,57 +462,55 @@ impl Transaction { with_header: bool, ) { match self { - Transaction::Legacy(legacy_tx) => { + Self::Legacy(legacy_tx) => { // do nothing w/ with_header legacy_tx.encode_with_signature(signature, out) } - Transaction::Eip2930(access_list_tx) => { + Self::Eip2930(access_list_tx) => { access_list_tx.encode_with_signature(signature, out, with_header) } - Transaction::Eip1559(dynamic_fee_tx) => { + Self::Eip1559(dynamic_fee_tx) => { dynamic_fee_tx.encode_with_signature(signature, out, with_header) } - Transaction::Eip4844(blob_tx) => { - blob_tx.encode_with_signature(signature, out, with_header) - } + Self::Eip4844(blob_tx) => blob_tx.encode_with_signature(signature, out, with_header), #[cfg(feature = "optimism")] - Transaction::Deposit(deposit_tx) => deposit_tx.encode(out, with_header), + Self::Deposit(deposit_tx) => deposit_tx.encode(out, with_header), } } /// This sets the transaction's nonce. pub fn set_nonce(&mut self, nonce: u64) { match self { - Transaction::Legacy(tx) => tx.nonce = nonce, - Transaction::Eip2930(tx) => tx.nonce = nonce, - Transaction::Eip1559(tx) => tx.nonce = nonce, - Transaction::Eip4844(tx) => tx.nonce = nonce, + Self::Legacy(tx) => tx.nonce = nonce, + Self::Eip2930(tx) => tx.nonce = nonce, + Self::Eip1559(tx) => tx.nonce = nonce, + Self::Eip4844(tx) => tx.nonce = nonce, #[cfg(feature = "optimism")] - Transaction::Deposit(_) => { /* noop */ } + Self::Deposit(_) => { /* noop */ } } } /// This sets the transaction's value. 
pub fn set_value(&mut self, value: U256) { match self { - Transaction::Legacy(tx) => tx.value = value, - Transaction::Eip2930(tx) => tx.value = value, - Transaction::Eip1559(tx) => tx.value = value, - Transaction::Eip4844(tx) => tx.value = value, + Self::Legacy(tx) => tx.value = value, + Self::Eip2930(tx) => tx.value = value, + Self::Eip1559(tx) => tx.value = value, + Self::Eip4844(tx) => tx.value = value, #[cfg(feature = "optimism")] - Transaction::Deposit(tx) => tx.value = value, + Self::Deposit(tx) => tx.value = value, } } /// This sets the transaction's input field. pub fn set_input(&mut self, input: Bytes) { match self { - Transaction::Legacy(tx) => tx.input = input, - Transaction::Eip2930(tx) => tx.input = input, - Transaction::Eip1559(tx) => tx.input = input, - Transaction::Eip4844(tx) => tx.input = input, + Self::Legacy(tx) => tx.input = input, + Self::Eip2930(tx) => tx.input = input, + Self::Eip1559(tx) => tx.input = input, + Self::Eip4844(tx) => tx.input = input, #[cfg(feature = "optimism")] - Transaction::Deposit(tx) => tx.input = input, + Self::Deposit(tx) => tx.input = input, } } @@ -524,43 +518,43 @@ impl Transaction { #[inline] pub fn size(&self) -> usize { match self { - Transaction::Legacy(tx) => tx.size(), - Transaction::Eip2930(tx) => tx.size(), - Transaction::Eip1559(tx) => tx.size(), - Transaction::Eip4844(tx) => tx.size(), + Self::Legacy(tx) => tx.size(), + Self::Eip2930(tx) => tx.size(), + Self::Eip1559(tx) => tx.size(), + Self::Eip4844(tx) => tx.size(), #[cfg(feature = "optimism")] - Transaction::Deposit(tx) => tx.size(), + Self::Deposit(tx) => tx.size(), } } /// Returns true if the transaction is a legacy transaction. #[inline] pub const fn is_legacy(&self) -> bool { - matches!(self, Transaction::Legacy(_)) + matches!(self, Self::Legacy(_)) } /// Returns true if the transaction is an EIP-2930 transaction. 
#[inline] pub const fn is_eip2930(&self) -> bool { - matches!(self, Transaction::Eip2930(_)) + matches!(self, Self::Eip2930(_)) } /// Returns true if the transaction is an EIP-1559 transaction. #[inline] pub const fn is_eip1559(&self) -> bool { - matches!(self, Transaction::Eip1559(_)) + matches!(self, Self::Eip1559(_)) } /// Returns true if the transaction is an EIP-4844 transaction. #[inline] pub const fn is_eip4844(&self) -> bool { - matches!(self, Transaction::Eip4844(_)) + matches!(self, Self::Eip4844(_)) } /// Returns the [TxLegacy] variant if the transaction is a legacy transaction. pub fn as_legacy(&self) -> Option<&TxLegacy> { match self { - Transaction::Legacy(tx) => Some(tx), + Self::Legacy(tx) => Some(tx), _ => None, } } @@ -568,7 +562,7 @@ impl Transaction { /// Returns the [TxEip2930] variant if the transaction is an EIP-2930 transaction. pub fn as_eip2930(&self) -> Option<&TxEip2930> { match self { - Transaction::Eip2930(tx) => Some(tx), + Self::Eip2930(tx) => Some(tx), _ => None, } } @@ -576,7 +570,7 @@ impl Transaction { /// Returns the [TxEip1559] variant if the transaction is an EIP-1559 transaction. pub fn as_eip1559(&self) -> Option<&TxEip1559> { match self { - Transaction::Eip1559(tx) => Some(tx), + Self::Eip1559(tx) => Some(tx), _ => None, } } @@ -584,7 +578,7 @@ impl Transaction { /// Returns the [TxEip4844] variant if the transaction is an EIP-4844 transaction. 
pub fn as_eip4844(&self) -> Option<&TxEip4844> { match self { - Transaction::Eip4844(tx) => Some(tx), + Self::Eip4844(tx) => Some(tx), _ => None, } } @@ -592,25 +586,25 @@ impl Transaction { impl From for Transaction { fn from(tx: TxLegacy) -> Self { - Transaction::Legacy(tx) + Self::Legacy(tx) } } impl From for Transaction { fn from(tx: TxEip2930) -> Self { - Transaction::Eip2930(tx) + Self::Eip2930(tx) } } impl From for Transaction { fn from(tx: TxEip1559) -> Self { - Transaction::Eip1559(tx) + Self::Eip1559(tx) } } impl From for Transaction { fn from(tx: TxEip4844) -> Self { - Transaction::Eip4844(tx) + Self::Eip4844(tx) } } @@ -623,20 +617,20 @@ impl Compact for Transaction { { let identifier = self.tx_type().to_compact(buf); match self { - Transaction::Legacy(tx) => { + Self::Legacy(tx) => { tx.to_compact(buf); } - Transaction::Eip2930(tx) => { + Self::Eip2930(tx) => { tx.to_compact(buf); } - Transaction::Eip1559(tx) => { + Self::Eip1559(tx) => { tx.to_compact(buf); } - Transaction::Eip4844(tx) => { + Self::Eip4844(tx) => { tx.to_compact(buf); } #[cfg(feature = "optimism")] - Transaction::Deposit(tx) => { + Self::Deposit(tx) => { tx.to_compact(buf); } } @@ -655,15 +649,15 @@ impl Compact for Transaction { match identifier { 0 => { let (tx, buf) = TxLegacy::from_compact(buf, buf.len()); - (Transaction::Legacy(tx), buf) + (Self::Legacy(tx), buf) } 1 => { let (tx, buf) = TxEip2930::from_compact(buf, buf.len()); - (Transaction::Eip2930(tx), buf) + (Self::Eip2930(tx), buf) } 2 => { let (tx, buf) = TxEip1559::from_compact(buf, buf.len()); - (Transaction::Eip1559(tx), buf) + (Self::Eip1559(tx), buf) } 3 => { // An identifier of 3 indicates that the transaction type did not fit into @@ -675,12 +669,12 @@ impl Compact for Transaction { match identifier { 3 => { let (tx, buf) = TxEip4844::from_compact(buf, buf.len()); - (Transaction::Eip4844(tx), buf) + (Self::Eip4844(tx), buf) } #[cfg(feature = "optimism")] 126 => { let (tx, buf) = TxDeposit::from_compact(buf, 
buf.len()); - (Transaction::Deposit(tx), buf) + (Self::Deposit(tx), buf) } _ => unreachable!("Junk data in database: unknown Transaction variant"), } @@ -701,20 +695,20 @@ impl Encodable for Transaction { /// hash intended for signing. fn encode(&self, out: &mut dyn bytes::BufMut) { match self { - Transaction::Legacy(legacy_tx) => { + Self::Legacy(legacy_tx) => { legacy_tx.encode_for_signing(out); } - Transaction::Eip2930(access_list_tx) => { + Self::Eip2930(access_list_tx) => { access_list_tx.encode_for_signing(out); } - Transaction::Eip1559(dynamic_fee_tx) => { + Self::Eip1559(dynamic_fee_tx) => { dynamic_fee_tx.encode_for_signing(out); } - Transaction::Eip4844(blob_tx) => { + Self::Eip4844(blob_tx) => { blob_tx.encode_for_signing(out); } #[cfg(feature = "optimism")] - Transaction::Deposit(deposit_tx) => { + Self::Deposit(deposit_tx) => { deposit_tx.encode(out, true); } } @@ -722,12 +716,12 @@ impl Encodable for Transaction { fn length(&self) -> usize { match self { - Transaction::Legacy(legacy_tx) => legacy_tx.payload_len_for_signature(), - Transaction::Eip2930(access_list_tx) => access_list_tx.payload_len_for_signature(), - Transaction::Eip1559(dynamic_fee_tx) => dynamic_fee_tx.payload_len_for_signature(), - Transaction::Eip4844(blob_tx) => blob_tx.payload_len_for_signature(), + Self::Legacy(legacy_tx) => legacy_tx.payload_len_for_signature(), + Self::Eip2930(access_list_tx) => access_list_tx.payload_len_for_signature(), + Self::Eip1559(dynamic_fee_tx) => dynamic_fee_tx.payload_len_for_signature(), + Self::Eip4844(blob_tx) => blob_tx.payload_len_for_signature(), #[cfg(feature = "optimism")] - Transaction::Deposit(deposit_tx) => deposit_tx.payload_len(), + Self::Deposit(deposit_tx) => deposit_tx.payload_len(), } } } @@ -892,7 +886,7 @@ impl Compact for TransactionSignedNoHash { Transaction::from_compact(buf, transaction_type) }; - (TransactionSignedNoHash { signature, transaction }, buf) + (Self { signature, transaction }, buf) } } @@ -962,7 +956,7 @@ impl From 
for TransactionSigned { impl From for TransactionSignedNoHash { fn from(tx: TransactionSigned) -> Self { - TransactionSignedNoHash { signature: tx.signature, transaction: tx.transaction } + Self { signature: tx.signature, transaction: tx.transaction } } } @@ -1245,11 +1239,9 @@ impl TransactionSigned { /// This expects `rlp(legacy_tx)` // TODO: make buf advancement semantics consistent with `decode_enveloped_typed_transaction`, // so decoding methods do not need to manually advance the buffer - pub fn decode_rlp_legacy_transaction(data: &mut &[u8]) -> alloy_rlp::Result { - let (transaction, hash, signature) = - TransactionSigned::decode_rlp_legacy_transaction_tuple(data)?; - let signed = - TransactionSigned { transaction: Transaction::Legacy(transaction), hash, signature }; + pub fn decode_rlp_legacy_transaction(data: &mut &[u8]) -> alloy_rlp::Result { + let (transaction, hash, signature) = Self::decode_rlp_legacy_transaction_tuple(data)?; + let signed = Self { transaction: Transaction::Legacy(transaction), hash, signature }; Ok(signed) } @@ -1263,9 +1255,7 @@ impl TransactionSigned { /// byte indicating the transaction type. /// /// CAUTION: this expects that `data` is `tx-type || rlp(tx-data)` - pub fn decode_enveloped_typed_transaction( - data: &mut &[u8], - ) -> alloy_rlp::Result { + pub fn decode_enveloped_typed_transaction(data: &mut &[u8]) -> alloy_rlp::Result { // keep this around so we can use it to calculate the hash let original_encoding_without_header = *data; @@ -1313,7 +1303,7 @@ impl TransactionSigned { } let hash = keccak256(&original_encoding_without_header[..tx_length]); - let signed = TransactionSigned { transaction, hash, signature }; + let signed = Self { transaction, hash, signature }; Ok(signed) } @@ -1342,9 +1332,9 @@ impl TransactionSigned { // Check if the tx is a list let output_data = if input_data[0] >= EMPTY_LIST_CODE { // decode as legacy transaction - TransactionSigned::decode_rlp_legacy_transaction(input_data)? 
+ Self::decode_rlp_legacy_transaction(input_data)? } else { - TransactionSigned::decode_enveloped_typed_transaction(input_data)? + Self::decode_enveloped_typed_transaction(input_data)? }; if !input_data.is_empty() { @@ -1437,7 +1427,7 @@ impl Decodable for TransactionSigned { // if the transaction is encoded as a string then it is a typed transaction if !header.list { - let tx = TransactionSigned::decode_enveloped_typed_transaction(buf)?; + let tx = Self::decode_enveloped_typed_transaction(buf)?; let bytes_consumed = remaining_len - buf.len(); // because Header::decode works for single bytes (including the tx type), returning a @@ -1449,7 +1439,7 @@ impl Decodable for TransactionSigned { Ok(tx) } else { - let tx = TransactionSigned::decode_rlp_legacy_transaction(&mut original_encoding)?; + let tx = Self::decode_rlp_legacy_transaction(&mut original_encoding)?; // advance the buffer based on how far `decode_rlp_legacy_transaction` advanced the // buffer @@ -1483,15 +1473,14 @@ impl proptest::arbitrary::Arbitrary for TransactionSigned { if tx_eip_4844.to != Address::default() { Some(()) } else { None }; } - let mut tx = - TransactionSigned { hash: Default::default(), signature: sig, transaction }; + let mut tx = Self { hash: Default::default(), signature: sig, transaction }; tx.hash = tx.recalculate_hash(); tx }) .boxed() } - type Strategy = proptest::strategy::BoxedStrategy; + type Strategy = proptest::strategy::BoxedStrategy; } #[cfg(any(test, feature = "arbitrary"))] @@ -1512,7 +1501,7 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { signature }; - Ok(TransactionSigned::from_transaction_and_signature(transaction, signature)) + Ok(Self::from_transaction_and_signature(transaction, signature)) } } @@ -1575,7 +1564,7 @@ impl Decodable for TransactionSignedEcRecovered { let signer = signed_transaction .recover_signer() .ok_or(RlpError::Custom("Unable to recover decoded transaction signer."))?; - Ok(TransactionSignedEcRecovered { signer, signed_transaction }) 
+ Ok(Self { signer, signed_transaction }) } } diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 8323de4705b5f..0c39c5251266d 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -58,13 +58,13 @@ impl PooledTransactionsElement { pub fn try_from_broadcast(tx: TransactionSigned) -> Result { match tx { TransactionSigned { transaction: Transaction::Legacy(tx), signature, hash } => { - Ok(PooledTransactionsElement::Legacy { transaction: tx, signature, hash }) + Ok(Self::Legacy { transaction: tx, signature, hash }) } TransactionSigned { transaction: Transaction::Eip2930(tx), signature, hash } => { - Ok(PooledTransactionsElement::Eip2930 { transaction: tx, signature, hash }) + Ok(Self::Eip2930 { transaction: tx, signature, hash }) } TransactionSigned { transaction: Transaction::Eip1559(tx), signature, hash } => { - Ok(PooledTransactionsElement::Eip1559 { transaction: tx, signature, hash }) + Ok(Self::Eip1559 { transaction: tx, signature, hash }) } // Not supported because missing blob sidecar tx @ TransactionSigned { transaction: Transaction::Eip4844(_), .. } => Err(tx), @@ -87,12 +87,7 @@ impl PooledTransactionsElement { // If the transaction is an EIP-4844 transaction... TransactionSigned { transaction: Transaction::Eip4844(tx), signature, hash } => { // Construct a `PooledTransactionsElement::BlobTransaction` with provided sidecar. - PooledTransactionsElement::BlobTransaction(BlobTransaction { - transaction: tx, - signature, - hash, - sidecar, - }) + Self::BlobTransaction(BlobTransaction { transaction: tx, signature, hash, sidecar }) } // If the transaction is not EIP-4844, return an error with the original // transaction. @@ -114,10 +109,10 @@ impl PooledTransactionsElement { /// Reference to transaction hash. Used to identify transaction. pub fn hash(&self) -> &TxHash { match self { - PooledTransactionsElement::Legacy { hash, .. 
} | - PooledTransactionsElement::Eip2930 { hash, .. } | - PooledTransactionsElement::Eip1559 { hash, .. } => hash, - PooledTransactionsElement::BlobTransaction(tx) => &tx.hash, + Self::Legacy { hash, .. } | Self::Eip2930 { hash, .. } | Self::Eip1559 { hash, .. } => { + hash + } + Self::BlobTransaction(tx) => &tx.hash, } } @@ -215,7 +210,7 @@ impl PooledTransactionsElement { // Now, we decode the inner blob transaction: // `rlp([[chain_id, nonce, ...], blobs, commitments, proofs])` let blob_tx = BlobTransaction::decode_inner(data)?; - Ok(PooledTransactionsElement::BlobTransaction(blob_tx)) + Ok(Self::BlobTransaction(blob_tx)) } else { // DO NOT advance the buffer for the type, since we want the enveloped decoding to // decode it again and advance the buffer on its own. @@ -230,12 +225,12 @@ impl PooledTransactionsElement { Transaction::Eip4844(_) => Err(RlpError::Custom( "EIP-4844 transactions can only be decoded with transaction type 0x03", )), - Transaction::Eip2930(tx) => Ok(PooledTransactionsElement::Eip2930 { + Transaction::Eip2930(tx) => Ok(Self::Eip2930 { transaction: tx, signature: typed_tx.signature, hash: typed_tx.hash, }), - Transaction::Eip1559(tx) => Ok(PooledTransactionsElement::Eip1559 { + Transaction::Eip1559(tx) => Ok(Self::Eip1559 { transaction: tx, signature: typed_tx.signature, hash: typed_tx.hash, @@ -544,7 +539,7 @@ impl Decodable for PooledTransactionsElement { return Err(RlpError::UnexpectedLength) } - Ok(PooledTransactionsElement::BlobTransaction(blob_tx)) + Ok(Self::BlobTransaction(blob_tx)) } else { // DO NOT advance the buffer for the type, since we want the enveloped decoding to // decode it again and advance the buffer on its own. 
@@ -565,12 +560,12 @@ impl Decodable for PooledTransactionsElement { Transaction::Eip4844(_) => Err(RlpError::Custom( "EIP-4844 transactions can only be decoded with transaction type 0x03", )), - Transaction::Eip2930(tx) => Ok(PooledTransactionsElement::Eip2930 { + Transaction::Eip2930(tx) => Ok(Self::Eip2930 { transaction: tx, signature: typed_tx.signature, hash: typed_tx.hash, }), - Transaction::Eip1559(tx) => Ok(PooledTransactionsElement::Eip1559 { + Transaction::Eip1559(tx) => Ok(Self::Eip1559 { transaction: tx, signature: typed_tx.signature, hash: typed_tx.hash, @@ -587,8 +582,7 @@ impl TryFrom for PooledTransactionsElement { type Error = TransactionConversionError; fn try_from(tx: TransactionSigned) -> Result { - PooledTransactionsElement::try_from_broadcast(tx) - .map_err(|_| TransactionConversionError::UnsupportedForP2P) + Self::try_from_broadcast(tx).map_err(|_| TransactionConversionError::UnsupportedForP2P) } } @@ -605,11 +599,11 @@ impl<'a> arbitrary::Arbitrary<'a> for PooledTransactionsElement { // Attempt to create a `TransactionSigned` with arbitrary data. let tx_signed = TransactionSigned::arbitrary(u)?; // Attempt to create a `PooledTransactionsElement` with arbitrary data, handling the Result. - match PooledTransactionsElement::try_from(tx_signed) { - Ok(PooledTransactionsElement::BlobTransaction(mut tx)) => { + match Self::try_from(tx_signed) { + Ok(Self::BlobTransaction(mut tx)) => { // Successfully converted to a BlobTransaction, now generate a sidecar. tx.sidecar = crate::BlobTransactionSidecar::arbitrary(u)?; - Ok(PooledTransactionsElement::BlobTransaction(tx)) + Ok(Self::BlobTransaction(tx)) } Ok(tx) => Ok(tx), // Successfully converted, but not a BlobTransaction. 
Err(_) => Err(arbitrary::Error::IncorrectFormat), /* Conversion failed, return an @@ -626,13 +620,13 @@ impl proptest::arbitrary::Arbitrary for PooledTransactionsElement { any::<(TransactionSigned, crate::BlobTransactionSidecar)>() .prop_map(move |(transaction, sidecar)| { - match PooledTransactionsElement::try_from(transaction) { - Ok(PooledTransactionsElement::BlobTransaction(mut tx)) => { + match Self::try_from(transaction) { + Ok(Self::BlobTransaction(mut tx)) => { tx.sidecar = sidecar; - PooledTransactionsElement::BlobTransaction(tx) + Self::BlobTransaction(tx) } Ok(tx) => tx, - Err(_) => PooledTransactionsElement::Eip1559 { + Err(_) => Self::Eip1559 { transaction: Default::default(), signature: Default::default(), hash: Default::default(), @@ -642,7 +636,7 @@ impl proptest::arbitrary::Arbitrary for PooledTransactionsElement { .boxed() } - type Strategy = proptest::strategy::BoxedStrategy; + type Strategy = proptest::strategy::BoxedStrategy; } /// A signed pooled transaction with recovered signer. diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index d203ecf773273..975f70317d17e 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -49,15 +49,15 @@ pub enum TxType { impl TxType { /// The max type reserved by an EIP. - pub const MAX_RESERVED_EIP: TxType = Self::Eip4844; + pub const MAX_RESERVED_EIP: Self = Self::Eip4844; /// Check if the transaction type has an access list. 
pub const fn has_access_list(&self) -> bool { match self { - TxType::Legacy => false, - TxType::Eip2930 | TxType::Eip1559 | TxType::Eip4844 => true, + Self::Legacy => false, + Self::Eip2930 | Self::Eip1559 | Self::Eip4844 => true, #[cfg(feature = "optimism")] - TxType::Deposit => false, + Self::Deposit => false, } } } @@ -77,7 +77,7 @@ impl From for u8 { impl From for U8 { fn from(value: TxType) -> Self { - U8::from(u8::from(value)) + Self::from(u8::from(value)) } } @@ -86,18 +86,18 @@ impl TryFrom for TxType { fn try_from(value: u8) -> Result { #[cfg(feature = "optimism")] - if value == TxType::Deposit { - return Ok(TxType::Deposit) + if value == Self::Deposit { + return Ok(Self::Deposit) } - if value == TxType::Legacy { - return Ok(TxType::Legacy) - } else if value == TxType::Eip2930 { - return Ok(TxType::Eip2930) - } else if value == TxType::Eip1559 { - return Ok(TxType::Eip1559) - } else if value == TxType::Eip4844 { - return Ok(TxType::Eip4844) + if value == Self::Legacy { + return Ok(Self::Legacy) + } else if value == Self::Eip2930 { + return Ok(Self::Eip2930) + } else if value == Self::Eip1559 { + return Ok(Self::Eip1559) + } else if value == Self::Eip4844 { + return Ok(Self::Eip4844) } Err("invalid tx type") @@ -127,10 +127,10 @@ impl Compact for TxType { B: bytes::BufMut + AsMut<[u8]>, { match self { - TxType::Legacy => 0, - TxType::Eip2930 => 1, - TxType::Eip1559 => 2, - TxType::Eip4844 => { + Self::Legacy => 0, + Self::Eip2930 => 1, + Self::Eip1559 => 2, + Self::Eip4844 => { // Write the full transaction type to the buffer when encoding > 3. // This allows compat decoding the [TyType] from a single byte as // opposed to 2 bits for the backwards-compatible encoding. 
@@ -138,7 +138,7 @@ impl Compact for TxType { 3 } #[cfg(feature = "optimism")] - TxType::Deposit => { + Self::Deposit => { buf.put_u8(self as u8); 3 } @@ -151,15 +151,15 @@ impl Compact for TxType { fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { ( match identifier { - 0 => TxType::Legacy, - 1 => TxType::Eip2930, - 2 => TxType::Eip1559, + 0 => Self::Legacy, + 1 => Self::Eip2930, + 2 => Self::Eip1559, 3 => { let extended_identifier = buf.get_u8(); match extended_identifier { - EIP4844_TX_TYPE_ID => TxType::Eip4844, + EIP4844_TX_TYPE_ID => Self::Eip4844, #[cfg(feature = "optimism")] - DEPOSIT_TX_TYPE_ID => TxType::Deposit, + DEPOSIT_TX_TYPE_ID => Self::Deposit, _ => panic!("Unsupported TxType identifier: {extended_identifier}"), } } @@ -178,7 +178,7 @@ impl PartialEq for TxType { impl PartialEq for u8 { fn eq(&self, other: &TxType) -> bool { - *self == *other as u8 + *self == *other as Self } } @@ -196,7 +196,7 @@ impl Decodable for TxType { fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { let ty = u8::decode(buf)?; - TxType::try_from(ty).map_err(alloy_rlp::Error::Custom) + Self::try_from(ty).map_err(alloy_rlp::Error::Custom) } } diff --git a/crates/primitives/src/transaction/variant.rs b/crates/primitives/src/transaction/variant.rs index c2818b754f896..a69356bb29075 100644 --- a/crates/primitives/src/transaction/variant.rs +++ b/crates/primitives/src/transaction/variant.rs @@ -26,18 +26,18 @@ impl TransactionSignedVariant { /// Returns the raw transaction object pub fn as_raw(&self) -> &Transaction { match self { - TransactionSignedVariant::SignedNoHash(tx) => &tx.transaction, - TransactionSignedVariant::Signed(tx) => &tx.transaction, - TransactionSignedVariant::SignedEcRecovered(tx) => &tx.signed_transaction.transaction, + Self::SignedNoHash(tx) => &tx.transaction, + Self::Signed(tx) => &tx.transaction, + Self::SignedEcRecovered(tx) => &tx.signed_transaction.transaction, } } /// Returns the hash of the transaction pub fn hash(&self) -> B256 { 
match self { - TransactionSignedVariant::SignedNoHash(tx) => tx.hash(), - TransactionSignedVariant::Signed(tx) => tx.hash, - TransactionSignedVariant::SignedEcRecovered(tx) => tx.hash, + Self::SignedNoHash(tx) => tx.hash(), + Self::Signed(tx) => tx.hash, + Self::SignedEcRecovered(tx) => tx.hash, } } @@ -46,9 +46,9 @@ impl TransactionSignedVariant { /// If the transaction is of not of [TransactionSignedEcRecovered] it will be recovered. pub fn signer(&self) -> Option
{ match self { - TransactionSignedVariant::SignedNoHash(tx) => tx.recover_signer(), - TransactionSignedVariant::Signed(tx) => tx.recover_signer(), - TransactionSignedVariant::SignedEcRecovered(tx) => Some(tx.signer), + Self::SignedNoHash(tx) => tx.recover_signer(), + Self::Signed(tx) => tx.recover_signer(), + Self::SignedEcRecovered(tx) => Some(tx.signer), } } @@ -56,7 +56,7 @@ impl TransactionSignedVariant { /// else None pub fn as_signed(&self) -> Option<&TransactionSigned> { match self { - TransactionSignedVariant::Signed(tx) => Some(tx), + Self::Signed(tx) => Some(tx), _ => None, } } @@ -65,41 +65,41 @@ impl TransactionSignedVariant { /// else None pub fn as_signed_ec_recovered(&self) -> Option<&TransactionSignedEcRecovered> { match self { - TransactionSignedVariant::SignedEcRecovered(tx) => Some(tx), + Self::SignedEcRecovered(tx) => Some(tx), _ => None, } } /// Returns true if the transaction is of [TransactionSigned] variant pub fn is_signed(&self) -> bool { - matches!(self, TransactionSignedVariant::Signed(_)) + matches!(self, Self::Signed(_)) } /// Returns true if the transaction is of [TransactionSignedNoHash] variant pub fn is_signed_no_hash(&self) -> bool { - matches!(self, TransactionSignedVariant::SignedNoHash(_)) + matches!(self, Self::SignedNoHash(_)) } /// Returns true if the transaction is of [TransactionSignedEcRecovered] variant pub fn is_signed_ec_recovered(&self) -> bool { - matches!(self, TransactionSignedVariant::SignedEcRecovered(_)) + matches!(self, Self::SignedEcRecovered(_)) } /// Consumes the [TransactionSignedVariant] and returns the consumed [Transaction] pub fn into_raw(self) -> Transaction { match self { - TransactionSignedVariant::SignedNoHash(tx) => tx.transaction, - TransactionSignedVariant::Signed(tx) => tx.transaction, - TransactionSignedVariant::SignedEcRecovered(tx) => tx.signed_transaction.transaction, + Self::SignedNoHash(tx) => tx.transaction, + Self::Signed(tx) => tx.transaction, + Self::SignedEcRecovered(tx) => 
tx.signed_transaction.transaction, } } /// Consumes the [TransactionSignedVariant] and returns the consumed [TransactionSigned] pub fn into_signed(self) -> TransactionSigned { match self { - TransactionSignedVariant::SignedNoHash(tx) => tx.with_hash(), - TransactionSignedVariant::Signed(tx) => tx, - TransactionSignedVariant::SignedEcRecovered(tx) => tx.signed_transaction, + Self::SignedNoHash(tx) => tx.with_hash(), + Self::Signed(tx) => tx, + Self::SignedEcRecovered(tx) => tx.signed_transaction, } } @@ -123,28 +123,28 @@ impl TransactionSignedVariant { self, ) -> Result { match self { - TransactionSignedVariant::SignedEcRecovered(tx) => Ok(tx), - TransactionSignedVariant::Signed(tx) => tx.try_into_ecrecovered(), - TransactionSignedVariant::SignedNoHash(tx) => tx.with_hash().try_into_ecrecovered(), + Self::SignedEcRecovered(tx) => Ok(tx), + Self::Signed(tx) => tx.try_into_ecrecovered(), + Self::SignedNoHash(tx) => tx.with_hash().try_into_ecrecovered(), } } } impl From for TransactionSignedVariant { fn from(tx: TransactionSignedNoHash) -> Self { - TransactionSignedVariant::SignedNoHash(tx) + Self::SignedNoHash(tx) } } impl From for TransactionSignedVariant { fn from(tx: TransactionSigned) -> Self { - TransactionSignedVariant::Signed(tx) + Self::Signed(tx) } } impl From for TransactionSignedVariant { fn from(tx: TransactionSignedEcRecovered) -> Self { - TransactionSignedVariant::SignedEcRecovered(tx) + Self::SignedEcRecovered(tx) } } diff --git a/crates/primitives/src/trie/subnode.rs b/crates/primitives/src/trie/subnode.rs index 1822396f62d86..d151c21ef3036 100644 --- a/crates/primitives/src/trie/subnode.rs +++ b/crates/primitives/src/trie/subnode.rs @@ -63,7 +63,7 @@ impl Compact for StoredSubNode { None }; - (StoredSubNode { key, nibble, node }, buf) + (Self { key, nibble, node }, buf) } } diff --git a/crates/prune/src/builder.rs b/crates/prune/src/builder.rs index 4e0ffd21a7960..290b5072455de 100644 --- a/crates/prune/src/builder.rs +++ 
b/crates/prune/src/builder.rs @@ -31,7 +31,7 @@ impl PrunerBuilder { /// Creates a new [PrunerBuilder] from the given [PruneConfig]. pub fn new(pruner_config: PruneConfig) -> Self { - PrunerBuilder::default() + Self::default() .block_interval(pruner_config.block_interval) .segments(pruner_config.segments) } diff --git a/crates/prune/src/error.rs b/crates/prune/src/error.rs index 348b12f674c20..b223bccb92f0d 100644 --- a/crates/prune/src/error.rs +++ b/crates/prune/src/error.rs @@ -23,11 +23,9 @@ pub enum PrunerError { impl From for RethError { fn from(err: PrunerError) -> Self { match err { - PrunerError::PruneSegment(_) | PrunerError::InconsistentData(_) => { - RethError::other(err) - } - PrunerError::Database(err) => RethError::Database(err), - PrunerError::Provider(err) => RethError::Provider(err), + PrunerError::PruneSegment(_) | PrunerError::InconsistentData(_) => Self::other(err), + PrunerError::Database(err) => Self::Database(err), + PrunerError::Provider(err) => Self::Provider(err), } } } diff --git a/crates/prune/src/segments/set.rs b/crates/prune/src/segments/set.rs index 7978bd4e583e1..0843589ec7df5 100644 --- a/crates/prune/src/segments/set.rs +++ b/crates/prune/src/segments/set.rs @@ -47,7 +47,7 @@ impl SegmentSet { receipts_log_filter, } = prune_modes; - SegmentSet::default() + Self::default() // Receipts .segment_opt(receipts.map(Receipts::new)) // Receipts by logs diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index 2bec090cc44a6..826f33ee7f73d 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -284,7 +284,7 @@ pub struct RpcServiceBuilder(tower::ServiceBuilder); impl Default for RpcServiceBuilder { fn default() -> Self { - RpcServiceBuilder(tower::ServiceBuilder::new()) + Self(tower::ServiceBuilder::new()) } } @@ -577,7 +577,7 @@ pub struct Builder { impl Default for Builder { fn default() -> Self { - Builder { + Self { settings: Settings::default(), id_provider: 
Arc::new(RandomIntegerIdProvider), rpc_middleware: RpcServiceBuilder::new(), diff --git a/crates/rpc/ipc/src/stream_codec.rs b/crates/rpc/ipc/src/stream_codec.rs index 3245d776cd324..de6c4bf2745f4 100644 --- a/crates/rpc/ipc/src/stream_codec.rs +++ b/crates/rpc/ipc/src/stream_codec.rs @@ -43,7 +43,7 @@ pub enum Separator { impl Default for Separator { fn default() -> Self { - Separator::Byte(b'\n') + Self::Byte(b'\n') } } @@ -57,12 +57,12 @@ pub struct StreamCodec { impl StreamCodec { /// Default codec with streaming input data. Input can be both enveloped and not. pub fn stream_incoming() -> Self { - StreamCodec::new(Separator::Empty, Default::default()) + Self::new(Separator::Empty, Default::default()) } /// New custom stream codec pub fn new(incoming_separator: Separator, outgoing_separator: Separator) -> Self { - StreamCodec { incoming_separator, outgoing_separator } + Self { incoming_separator, outgoing_separator } } } diff --git a/crates/rpc/rpc-builder/src/error.rs b/crates/rpc/rpc-builder/src/error.rs index 68a2183fe41ab..1753b1fc921b8 100644 --- a/crates/rpc/rpc-builder/src/error.rs +++ b/crates/rpc/rpc-builder/src/error.rs @@ -19,10 +19,10 @@ impl ServerKind { /// Returns the appropriate flags for each variant. 
pub fn flags(&self) -> &'static str { match self { - ServerKind::Http(_) => "--http.port", - ServerKind::WS(_) => "--ws.port", - ServerKind::WsHttp(_) => "--ws.port and --http.port", - ServerKind::Auth(_) => "--authrpc.port", + Self::Http(_) => "--http.port", + Self::WS(_) => "--ws.port", + Self::WsHttp(_) => "--ws.port and --http.port", + Self::Auth(_) => "--authrpc.port", } } } @@ -30,10 +30,10 @@ impl ServerKind { impl std::fmt::Display for ServerKind { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - ServerKind::Http(addr) => write!(f, "{addr} (HTTP-RPC server)"), - ServerKind::WS(addr) => write!(f, "{addr} (WS-RPC server)"), - ServerKind::WsHttp(addr) => write!(f, "{addr} (WS-HTTP-RPC server)"), - ServerKind::Auth(addr) => write!(f, "{addr} (AUTH server)"), + Self::Http(addr) => write!(f, "{addr} (HTTP-RPC server)"), + Self::WS(addr) => write!(f, "{addr} (WS-RPC server)"), + Self::WsHttp(addr) => write!(f, "{addr} (WS-HTTP-RPC server)"), + Self::Auth(addr) => write!(f, "{addr} (AUTH server)"), } } } @@ -73,11 +73,11 @@ pub enum RpcError { impl RpcError { /// Converts an [io::Error] to a more descriptive `RpcError`. 
- pub fn server_error(io_error: io::Error, kind: ServerKind) -> RpcError { + pub fn server_error(io_error: io::Error, kind: ServerKind) -> Self { if io_error.kind() == ErrorKind::AddrInUse { - return RpcError::AddressAlreadyInUse { kind, error: io_error } + return Self::AddressAlreadyInUse { kind, error: io_error } } - RpcError::ServerError { kind, error: io_error } + Self::ServerError { kind, error: io_error } } } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index dfc52b67e1a59..0545356410a35 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -564,7 +564,7 @@ where impl Default for RpcModuleBuilder<(), (), (), (), (), ()> { fn default() -> Self { - RpcModuleBuilder::new((), (), (), (), (), ()) + Self::new((), (), (), (), (), ()) } } @@ -605,7 +605,7 @@ impl RpcModuleConfigBuilder { /// Consumes the type and creates the [RpcModuleConfig] pub fn build(self) -> RpcModuleConfig { - let RpcModuleConfigBuilder { eth } = self; + let Self { eth } = self; RpcModuleConfig { eth: eth.unwrap_or_default() } } } @@ -640,14 +640,14 @@ impl RpcModuleSelection { /// Returns a selection of [RethRpcModule] with all [RethRpcModule::all_variants]. pub fn all_modules() -> Vec { - RpcModuleSelection::try_from_selection(RethRpcModule::all_variants().iter().copied()) + Self::try_from_selection(RethRpcModule::all_variants().iter().copied()) .expect("valid selection") .into_selection() } /// Returns the [RpcModuleSelection::STANDARD_MODULES] as a selection. 
pub fn standard_modules() -> Vec { - RpcModuleSelection::try_from_selection(RpcModuleSelection::STANDARD_MODULES.iter().copied()) + Self::try_from_selection(Self::STANDARD_MODULES.iter().copied()) .expect("valid selection") .into_selection() } @@ -704,13 +704,13 @@ impl RpcModuleSelection { s.push(item); } } - Ok(RpcModuleSelection::Selection(s)) + Ok(Self::Selection(s)) } /// Returns true if no selection is configured pub fn is_empty(&self) -> bool { match self { - RpcModuleSelection::Selection(sel) => sel.is_empty(), + Self::Selection(sel) => sel.is_empty(), _ => false, } } @@ -718,23 +718,23 @@ impl RpcModuleSelection { /// Returns an iterator over all configured [RethRpcModule] pub fn iter_selection(&self) -> Box + '_> { match self { - RpcModuleSelection::All => Box::new(Self::all_modules().into_iter()), - RpcModuleSelection::Standard => Box::new(Self::STANDARD_MODULES.iter().copied()), - RpcModuleSelection::Selection(s) => Box::new(s.iter().copied()), + Self::All => Box::new(Self::all_modules().into_iter()), + Self::Standard => Box::new(Self::STANDARD_MODULES.iter().copied()), + Self::Selection(s) => Box::new(s.iter().copied()), } } /// Returns the list of configured [RethRpcModule] pub fn into_selection(self) -> Vec { match self { - RpcModuleSelection::All => Self::all_modules(), - RpcModuleSelection::Selection(s) => s, - RpcModuleSelection::Standard => Self::STANDARD_MODULES.to_vec(), + Self::All => Self::all_modules(), + Self::Selection(s) => s, + Self::Standard => Self::STANDARD_MODULES.to_vec(), } } /// Returns true if both selections are identical. 
- fn are_identical(http: Option<&RpcModuleSelection>, ws: Option<&RpcModuleSelection>) -> bool { + fn are_identical(http: Option<&Self>, ws: Option<&Self>) -> bool { match (http, ws) { (Some(http), Some(ws)) => { let http = http.clone().iter_selection().collect::>(); @@ -755,7 +755,7 @@ where T: Into, { fn from(value: I) -> Self { - RpcModuleSelection::Selection(value.into_iter().map(Into::into).collect()) + Self::Selection(value.into_iter().map(Into::into).collect()) } } @@ -769,9 +769,9 @@ impl FromStr for RpcModuleSelection { let mut modules = s.split(',').map(str::trim).peekable(); let first = modules.peek().copied().ok_or(ParseError::VariantNotFound)?; match first { - "all" | "All" => Ok(RpcModuleSelection::All), + "all" | "All" => Ok(Self::All), "none" | "None" => Ok(Selection(vec![])), - _ => RpcModuleSelection::try_from_selection(modules), + _ => Self::try_from_selection(modules), } } } @@ -845,7 +845,7 @@ impl RethRpcModule { } /// Returns all variants of the enum - pub fn modules() -> impl IntoIterator { + pub fn modules() -> impl IntoIterator { use strum::IntoEnumIterator; Self::iter() } @@ -862,17 +862,17 @@ impl FromStr for RethRpcModule { fn from_str(s: &str) -> Result { Ok(match s { - "admin" => RethRpcModule::Admin, - "debug" => RethRpcModule::Debug, - "eth" => RethRpcModule::Eth, - "net" => RethRpcModule::Net, - "trace" => RethRpcModule::Trace, - "txpool" => RethRpcModule::Txpool, - "web3" => RethRpcModule::Web3, - "rpc" => RethRpcModule::Rpc, - "reth" => RethRpcModule::Reth, - "ots" => RethRpcModule::Ots, - "eth-call-bundle" | "eth_callBundle" => RethRpcModule::EthCallBundle, + "admin" => Self::Admin, + "debug" => Self::Debug, + "eth" => Self::Eth, + "net" => Self::Net, + "trace" => Self::Trace, + "txpool" => Self::Txpool, + "web3" => Self::Web3, + "rpc" => Self::Rpc, + "reth" => Self::Reth, + "ots" => Self::Ots, + "eth-call-bundle" | "eth_callBundle" => Self::EthCallBundle, _ => return Err(ParseError::VariantNotFound), }) } @@ -880,7 +880,7 @@ 
impl FromStr for RethRpcModule { impl TryFrom<&str> for RethRpcModule { type Error = ParseError; - fn try_from(s: &str) -> Result>::Error> { + fn try_from(s: &str) -> Result>::Error> { FromStr::from_str(s) } } @@ -1969,7 +1969,7 @@ impl WsHttpServers { let mut http_handle = None; let mut ws_handle = None; match self { - WsHttpServers::SamePort(server) => { + Self::SamePort(server) => { // Make sure http and ws modules are identical, since we currently can't run // different modules on same server config.ensure_ws_http_identical()?; @@ -1980,7 +1980,7 @@ impl WsHttpServers { ws_handle = Some(handle); } } - WsHttpServers::DifferentPort { http, ws } => { + Self::DifferentPort { http, ws } => { if let Some((server, module)) = http.and_then(|server| http_module.map(|module| (server, module))) { @@ -2015,8 +2015,8 @@ pub struct RpcServer { // === impl RpcServer === impl RpcServer { - fn empty() -> RpcServer { - RpcServer { ws_http: Default::default(), ipc: None } + fn empty() -> Self { + Self { ws_http: Default::default(), ipc: None } } /// Returns the [`SocketAddr`] of the http server if started. diff --git a/crates/rpc/rpc-builder/src/metrics.rs b/crates/rpc/rpc-builder/src/metrics.rs index a0285622beed9..ade36ed21654d 100644 --- a/crates/rpc/rpc-builder/src/metrics.rs +++ b/crates/rpc/rpc-builder/src/metrics.rs @@ -185,9 +185,9 @@ impl RpcTransport { /// Returns the string representation of the transport protocol. 
pub(crate) const fn as_str(&self) -> &'static str { match self { - RpcTransport::Http => "http", - RpcTransport::WebSocket => "ws", - RpcTransport::Ipc => "ipc", + Self::Http => "http", + Self::WebSocket => "ws", + Self::Ipc => "ipc", } } diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index c3713f731fad3..c09e996b28a99 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -564,7 +564,7 @@ where async fn new_payload_v1(&self, payload: ExecutionPayloadV1) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV1"); let start = Instant::now(); - let res = EngineApi::new_payload_v1(self, payload).await; + let res = Self::new_payload_v1(self, payload).await; self.inner.metrics.latency.new_payload_v1.record(start.elapsed()); self.inner.metrics.new_payload_response.update_response_metrics(&res); Ok(res?) @@ -575,7 +575,7 @@ where async fn new_payload_v2(&self, payload: ExecutionPayloadInputV2) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV2"); let start = Instant::now(); - let res = EngineApi::new_payload_v2(self, payload).await; + let res = Self::new_payload_v2(self, payload).await; self.inner.metrics.latency.new_payload_v2.record(start.elapsed()); self.inner.metrics.new_payload_response.update_response_metrics(&res); Ok(res?) @@ -592,8 +592,7 @@ where trace!(target: "rpc::engine", "Serving engine_newPayloadV3"); let start = Instant::now(); let res = - EngineApi::new_payload_v3(self, payload, versioned_hashes, parent_beacon_block_root) - .await; + Self::new_payload_v3(self, payload, versioned_hashes, parent_beacon_block_root).await; self.inner.metrics.latency.new_payload_v3.record(start.elapsed()); self.inner.metrics.new_payload_response.update_response_metrics(&res); Ok(res?) 
@@ -608,8 +607,7 @@ where trace!(target: "rpc::engine", "Serving engine_newPayloadV4"); let start = Instant::now(); let res = - EngineApi::new_payload_v4(self, payload, versioned_hashes, parent_beacon_block_root) - .await; + Self::new_payload_v4(self, payload, versioned_hashes, parent_beacon_block_root).await; self.inner.metrics.latency.new_payload_v4.record(start.elapsed()); self.inner.metrics.new_payload_response.update_response_metrics(&res); Ok(res?) @@ -626,8 +624,7 @@ where ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_forkchoiceUpdatedV1"); let start = Instant::now(); - let res = - EngineApi::fork_choice_updated_v1(self, fork_choice_state, payload_attributes).await; + let res = Self::fork_choice_updated_v1(self, fork_choice_state, payload_attributes).await; self.inner.metrics.latency.fork_choice_updated_v1.record(start.elapsed()); self.inner.metrics.fcu_response.update_response_metrics(&res); Ok(res?) @@ -642,8 +639,7 @@ where ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_forkchoiceUpdatedV2"); let start = Instant::now(); - let res = - EngineApi::fork_choice_updated_v2(self, fork_choice_state, payload_attributes).await; + let res = Self::fork_choice_updated_v2(self, fork_choice_state, payload_attributes).await; self.inner.metrics.latency.fork_choice_updated_v2.record(start.elapsed()); self.inner.metrics.fcu_response.update_response_metrics(&res); Ok(res?) @@ -659,8 +655,7 @@ where ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_forkchoiceUpdatedV3"); let start = Instant::now(); - let res = - EngineApi::fork_choice_updated_v3(self, fork_choice_state, payload_attributes).await; + let res = Self::fork_choice_updated_v3(self, fork_choice_state, payload_attributes).await; self.inner.metrics.latency.fork_choice_updated_v3.record(start.elapsed()); self.inner.metrics.fcu_response.update_response_metrics(&res); Ok(res?) 
@@ -683,7 +678,7 @@ where ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV1"); let start = Instant::now(); - let res = EngineApi::get_payload_v1(self, payload_id).await; + let res = Self::get_payload_v1(self, payload_id).await; self.inner.metrics.latency.get_payload_v1.record(start.elapsed()); Ok(res?) } @@ -703,7 +698,7 @@ where ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV2"); let start = Instant::now(); - let res = EngineApi::get_payload_v2(self, payload_id).await; + let res = Self::get_payload_v2(self, payload_id).await; self.inner.metrics.latency.get_payload_v2.record(start.elapsed()); Ok(res?) } @@ -723,7 +718,7 @@ where ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV3"); let start = Instant::now(); - let res = EngineApi::get_payload_v3(self, payload_id).await; + let res = Self::get_payload_v3(self, payload_id).await; self.inner.metrics.latency.get_payload_v3.record(start.elapsed()); Ok(res?) } @@ -743,7 +738,7 @@ where ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV4"); let start = Instant::now(); - let res = EngineApi::get_payload_v4(self, payload_id).await; + let res = Self::get_payload_v4(self, payload_id).await; self.inner.metrics.latency.get_payload_v4.record(start.elapsed()); Ok(res?) } @@ -756,7 +751,7 @@ where ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByHashV1"); let start = Instant::now(); - let res = EngineApi::get_payload_bodies_by_hash(self, block_hashes); + let res = Self::get_payload_bodies_by_hash(self, block_hashes); self.inner.metrics.latency.get_payload_bodies_by_hash_v1.record(start.elapsed()); Ok(res?) 
} @@ -784,7 +779,7 @@ where ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByRangeV1"); let start_time = Instant::now(); - let res = EngineApi::get_payload_bodies_by_range(self, start.to(), count.to()).await; + let res = Self::get_payload_bodies_by_range(self, start.to(), count.to()).await; self.inner.metrics.latency.get_payload_bodies_by_range_v1.record(start_time.elapsed()); Ok(res?) } @@ -797,7 +792,7 @@ where ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_exchangeTransitionConfigurationV1"); let start = Instant::now(); - let res = EngineApi::exchange_transition_configuration(self, config).await; + let res = Self::exchange_transition_configuration(self, config).await; self.inner.metrics.latency.exchange_transition_configuration.record(start.elapsed()); Ok(res?) } @@ -809,7 +804,7 @@ where client: ClientVersionV1, ) -> RpcResult> { trace!(target: "rpc::engine", "Serving engine_getClientVersionV1"); - let res = EngineApi::get_client_version_v1(self, client).await; + let res = Self::get_client_version_v1(self, client).await; Ok(res?) 
} diff --git a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs index f409c3ce3d971..de5a4ed2c7501 100644 --- a/crates/rpc/rpc-testing-util/src/debug.rs +++ b/crates/rpc/rpc-testing-util/src/debug.rs @@ -272,7 +272,7 @@ impl std::fmt::Display for JsTracerBuilder { impl From for GethDebugTracingOptions { fn from(b: JsTracerBuilder) -> Self { - GethDebugTracingOptions { + Self { tracer: Some(GethDebugTracerType::JsTracer(b.code())), tracer_config: serde_json::Value::Object(Default::default()).into(), ..Default::default() @@ -356,7 +356,7 @@ pub struct NoopJsTracer; impl From for GethDebugTracingOptions { fn from(_: NoopJsTracer) -> Self { - GethDebugTracingOptions { + Self { tracer: Some(GethDebugTracerType::JsTracer(NOOP_TRACER.to_string())), tracer_config: serde_json::Value::Object(Default::default()).into(), ..Default::default() diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs index df90e813b84e2..64c84c11b926c 100644 --- a/crates/rpc/rpc-testing-util/src/trace.rs +++ b/crates/rpc/rpc-testing-util/src/trace.rs @@ -439,7 +439,7 @@ where /// * `client1` - The first RPC client. /// * `client2` - The second RPC client. pub fn new(client1: C1, client2: C2) -> Self { - RpcComparer { client1, client2 } + Self { client1, client2 } } /// Compares the `trace_block` responses from the two RPC clients. 
diff --git a/crates/rpc/rpc-types/src/mev.rs b/crates/rpc/rpc-types/src/mev.rs index 5da5a5667daa3..61e79fab13d64 100644 --- a/crates/rpc/rpc-types/src/mev.rs +++ b/crates/rpc/rpc-types/src/mev.rs @@ -276,7 +276,7 @@ impl Serialize for PrivacyHint { impl<'de> Deserialize<'de> for PrivacyHint { fn deserialize>(deserializer: D) -> Result { let hints = Vec::::deserialize(deserializer)?; - let mut privacy_hint = PrivacyHint::default(); + let mut privacy_hint = Self::default(); for hint in hints { match hint.as_str() { "calldata" => privacy_hint.calldata = true, @@ -489,22 +489,22 @@ pub enum BundleStats { impl Serialize for BundleStats { fn serialize(&self, serializer: S) -> Result { match self { - BundleStats::Unknown => serde_json::json!({"isSimulated": false}).serialize(serializer), - BundleStats::Seen(stats) => stats.serialize(serializer), - BundleStats::Simulated(stats) => stats.serialize(serializer), + Self::Unknown => serde_json::json!({"isSimulated": false}).serialize(serializer), + Self::Seen(stats) => stats.serialize(serializer), + Self::Simulated(stats) => stats.serialize(serializer), } } } impl<'de> Deserialize<'de> for BundleStats { - fn deserialize(deserializer: D) -> Result + fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { let map = serde_json::Map::deserialize(deserializer)?; if map.get("receivedAt").is_none() { - Ok(BundleStats::Unknown) + Ok(Self::Unknown) } else if map["isSimulated"] == false { StatsSeen::deserialize(serde_json::Value::Object(map)) .map(BundleStats::Seen) diff --git a/crates/rpc/rpc/src/admin.rs b/crates/rpc/rpc/src/admin.rs index 6f3125e068f78..9e8a0b8aa2738 100644 --- a/crates/rpc/rpc/src/admin.rs +++ b/crates/rpc/rpc/src/admin.rs @@ -25,7 +25,7 @@ pub struct AdminApi { impl AdminApi { /// Creates a new instance of `AdminApi`. 
pub fn new(network: N, chain_spec: Arc) -> Self { - AdminApi { network, chain_spec } + Self { network, chain_spec } } } diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index f5c9ed2fe4d4e..01b3fb10108a2 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -710,7 +710,7 @@ where opts: Option, ) -> RpcResult> { let _permit = self.acquire_trace_permit().await; - Ok(DebugApi::debug_trace_raw_block(self, rlp_block, opts.unwrap_or_default()).await?) + Ok(Self::debug_trace_raw_block(self, rlp_block, opts.unwrap_or_default()).await?) } /// Handler for `debug_traceBlockByHash` @@ -720,7 +720,7 @@ where opts: Option, ) -> RpcResult> { let _permit = self.acquire_trace_permit().await; - Ok(DebugApi::debug_trace_block(self, block.into(), opts.unwrap_or_default()).await?) + Ok(Self::debug_trace_block(self, block.into(), opts.unwrap_or_default()).await?) } /// Handler for `debug_traceBlockByNumber` @@ -730,7 +730,7 @@ where opts: Option, ) -> RpcResult> { let _permit = self.acquire_trace_permit().await; - Ok(DebugApi::debug_trace_block(self, block.into(), opts.unwrap_or_default()).await?) + Ok(Self::debug_trace_block(self, block.into(), opts.unwrap_or_default()).await?) } /// Handler for `debug_traceTransaction` @@ -740,7 +740,7 @@ where opts: Option, ) -> RpcResult { let _permit = self.acquire_trace_permit().await; - Ok(DebugApi::debug_trace_transaction(self, tx_hash, opts.unwrap_or_default()).await?) + Ok(Self::debug_trace_transaction(self, tx_hash, opts.unwrap_or_default()).await?) } /// Handler for `debug_traceCall` @@ -751,8 +751,7 @@ where opts: Option, ) -> RpcResult { let _permit = self.acquire_trace_permit().await; - Ok(DebugApi::debug_trace_call(self, request, block_number, opts.unwrap_or_default()) - .await?) + Ok(Self::debug_trace_call(self, request, block_number, opts.unwrap_or_default()).await?) 
} async fn debug_trace_call_many( @@ -762,7 +761,7 @@ where opts: Option, ) -> RpcResult>> { let _permit = self.acquire_trace_permit().await; - Ok(DebugApi::debug_trace_call_many(self, bundles, state_context, opts).await?) + Ok(Self::debug_trace_call_many(self, bundles, state_context, opts).await?) } async fn debug_backtrace_at(&self, _location: &str) -> RpcResult<()> { diff --git a/crates/rpc/rpc/src/eth/api/fee_history.rs b/crates/rpc/rpc/src/eth/api/fee_history.rs index 487dade17859d..9184607bc08f7 100644 --- a/crates/rpc/rpc/src/eth/api/fee_history.rs +++ b/crates/rpc/rpc/src/eth/api/fee_history.rs @@ -178,7 +178,7 @@ pub struct FeeHistoryCacheConfig { impl Default for FeeHistoryCacheConfig { fn default() -> Self { - FeeHistoryCacheConfig { max_blocks: MAX_HEADER_HISTORY + 100, resolution: 4 } + Self { max_blocks: MAX_HEADER_HISTORY + 100, resolution: 4 } } } @@ -352,7 +352,7 @@ impl FeeHistoryEntry { /// /// Note: This does not calculate the rewards for the block. pub fn new(block: &SealedBlock) -> Self { - FeeHistoryEntry { + Self { base_fee_per_gas: block.base_fee_per_gas.unwrap_or_default(), gas_used_ratio: block.gas_used as f64 / block.gas_limit as f64, base_fee_per_blob_gas: block.blob_fee(), diff --git a/crates/rpc/rpc/src/eth/api/pending_block.rs b/crates/rpc/rpc/src/eth/api/pending_block.rs index b58949a67548b..a53542fa437ac 100644 --- a/crates/rpc/rpc/src/eth/api/pending_block.rs +++ b/crates/rpc/rpc/src/eth/api/pending_block.rs @@ -336,13 +336,13 @@ pub(crate) enum PendingBlockEnvOrigin { impl PendingBlockEnvOrigin { /// Returns true if the origin is the actual pending block as received from the CL. pub(crate) fn is_actual_pending(&self) -> bool { - matches!(self, PendingBlockEnvOrigin::ActualPending(_)) + matches!(self, Self::ActualPending(_)) } /// Consumes the type and returns the actual pending block. 
pub(crate) fn into_actual_pending(self) -> Option { match self { - PendingBlockEnvOrigin::ActualPending(block) => Some(block), + Self::ActualPending(block) => Some(block), _ => None, } } @@ -353,8 +353,8 @@ impl PendingBlockEnvOrigin { /// identify the block by its hash (latest block). pub(crate) fn state_block_id(&self) -> BlockId { match self { - PendingBlockEnvOrigin::ActualPending(_) => BlockNumberOrTag::Pending.into(), - PendingBlockEnvOrigin::DerivedFromLatest(header) => BlockId::Hash(header.hash().into()), + Self::ActualPending(_) => BlockNumberOrTag::Pending.into(), + Self::DerivedFromLatest(header) => BlockId::Hash(header.hash().into()), } } @@ -364,16 +364,16 @@ impl PendingBlockEnvOrigin { /// For the [PendingBlockEnvOrigin::DerivedFromLatest] this is the hash of the _latest_ header. fn build_target_hash(&self) -> B256 { match self { - PendingBlockEnvOrigin::ActualPending(block) => block.parent_hash, - PendingBlockEnvOrigin::DerivedFromLatest(header) => header.hash(), + Self::ActualPending(block) => block.parent_hash, + Self::DerivedFromLatest(header) => header.hash(), } } /// Returns the header this pending block is based on. pub(crate) fn header(&self) -> &SealedHeader { match self { - PendingBlockEnvOrigin::ActualPending(block) => &block.header, - PendingBlockEnvOrigin::DerivedFromLatest(header) => header, + Self::ActualPending(block) => &block.header, + Self::DerivedFromLatest(header) => header, } } } diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index 7fd358c1328cb..969cd01677175 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -86,7 +86,7 @@ where /// Handler for: `eth_getBlockByHash` async fn block_by_hash(&self, hash: B256, full: bool) -> Result> { trace!(target: "rpc::eth", ?hash, ?full, "Serving eth_getBlockByHash"); - Ok(EthApi::rpc_block(self, hash, full).await?) + Ok(Self::rpc_block(self, hash, full).await?) 
} /// Handler for: `eth_getBlockByNumber` @@ -96,13 +96,13 @@ where full: bool, ) -> Result> { trace!(target: "rpc::eth", ?number, ?full, "Serving eth_getBlockByNumber"); - Ok(EthApi::rpc_block(self, number, full).await?) + Ok(Self::rpc_block(self, number, full).await?) } /// Handler for: `eth_getBlockTransactionCountByHash` async fn block_transaction_count_by_hash(&self, hash: B256) -> Result> { trace!(target: "rpc::eth", ?hash, "Serving eth_getBlockTransactionCountByHash"); - Ok(EthApi::block_transaction_count(self, hash).await?.map(U256::from)) + Ok(Self::block_transaction_count(self, hash).await?.map(U256::from)) } /// Handler for: `eth_getBlockTransactionCountByNumber` @@ -111,19 +111,19 @@ where number: BlockNumberOrTag, ) -> Result> { trace!(target: "rpc::eth", ?number, "Serving eth_getBlockTransactionCountByNumber"); - Ok(EthApi::block_transaction_count(self, number).await?.map(U256::from)) + Ok(Self::block_transaction_count(self, number).await?.map(U256::from)) } /// Handler for: `eth_getUncleCountByBlockHash` async fn block_uncles_count_by_hash(&self, hash: B256) -> Result> { trace!(target: "rpc::eth", ?hash, "Serving eth_getUncleCountByBlockHash"); - Ok(EthApi::ommers(self, hash)?.map(|ommers| U256::from(ommers.len()))) + Ok(Self::ommers(self, hash)?.map(|ommers| U256::from(ommers.len()))) } /// Handler for: `eth_getUncleCountByBlockNumber` async fn block_uncles_count_by_number(&self, number: BlockNumberOrTag) -> Result> { trace!(target: "rpc::eth", ?number, "Serving eth_getUncleCountByBlockNumber"); - Ok(EthApi::ommers(self, number)?.map(|ommers| U256::from(ommers.len()))) + Ok(Self::ommers(self, number)?.map(|ommers| U256::from(ommers.len()))) } /// Handler for: `eth_getBlockReceipts` @@ -132,7 +132,7 @@ where block_id: BlockId, ) -> Result>> { trace!(target: "rpc::eth", ?block_id, "Serving eth_getBlockReceipts"); - Ok(EthApi::block_receipts(self, block_id).await?) + Ok(Self::block_receipts(self, block_id).await?) 
} /// Handler for: `eth_getUncleByBlockHashAndIndex` @@ -142,7 +142,7 @@ where index: Index, ) -> Result> { trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getUncleByBlockHashAndIndex"); - Ok(EthApi::ommer_by_block_and_index(self, hash, index).await?) + Ok(Self::ommer_by_block_and_index(self, hash, index).await?) } /// Handler for: `eth_getUncleByBlockNumberAndIndex` @@ -152,7 +152,7 @@ where index: Index, ) -> Result> { trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getUncleByBlockNumberAndIndex"); - Ok(EthApi::ommer_by_block_and_index(self, number, index).await?) + Ok(Self::ommer_by_block_and_index(self, number, index).await?) } /// Handler for: `eth_getRawTransactionByHash` @@ -174,7 +174,7 @@ where index: Index, ) -> Result> { trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getRawTransactionByBlockHashAndIndex"); - Ok(EthApi::raw_transaction_by_block_and_tx_index(self, hash, index).await?) + Ok(Self::raw_transaction_by_block_and_tx_index(self, hash, index).await?) } /// Handler for: `eth_getTransactionByBlockHashAndIndex` @@ -184,7 +184,7 @@ where index: Index, ) -> Result> { trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getTransactionByBlockHashAndIndex"); - Ok(EthApi::transaction_by_block_and_tx_index(self, hash, index).await?) + Ok(Self::transaction_by_block_and_tx_index(self, hash, index).await?) } /// Handler for: `eth_getRawTransactionByBlockNumberAndIndex` @@ -194,7 +194,7 @@ where index: Index, ) -> Result> { trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getRawTransactionByBlockNumberAndIndex"); - Ok(EthApi::raw_transaction_by_block_and_tx_index(self, number, index).await?) + Ok(Self::raw_transaction_by_block_and_tx_index(self, number, index).await?) 
} /// Handler for: `eth_getTransactionByBlockNumberAndIndex` @@ -204,7 +204,7 @@ where index: Index, ) -> Result> { trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getTransactionByBlockNumberAndIndex"); - Ok(EthApi::transaction_by_block_and_tx_index(self, number, index).await?) + Ok(Self::transaction_by_block_and_tx_index(self, number, index).await?) } /// Handler for: `eth_getTransactionReceipt` @@ -257,13 +257,13 @@ where /// Handler for: `eth_getHeaderByNumber` async fn header_by_number(&self, block_number: BlockNumberOrTag) -> Result> { trace!(target: "rpc::eth", ?block_number, "Serving eth_getHeaderByNumber"); - Ok(EthApi::rpc_block_header(self, block_number).await?) + Ok(Self::rpc_block_header(self, block_number).await?) } /// Handler for: `eth_getHeaderByHash` async fn header_by_hash(&self, hash: B256) -> Result> { trace!(target: "rpc::eth", ?hash, "Serving eth_getHeaderByHash"); - Ok(EthApi::rpc_block_header(self, hash).await?) + Ok(Self::rpc_block_header(self, hash).await?) } /// Handler for: `eth_call` @@ -288,7 +288,7 @@ where state_override: Option, ) -> Result> { trace!(target: "rpc::eth", ?bundle, ?state_context, ?state_override, "Serving eth_callMany"); - Ok(EthApi::call_many(self, bundle, state_context, state_override).await?) + Ok(Self::call_many(self, bundle, state_context, state_override).await?) } /// Handler for: `eth_createAccessList` @@ -317,19 +317,19 @@ where /// Handler for: `eth_gasPrice` async fn gas_price(&self) -> Result { trace!(target: "rpc::eth", "Serving eth_gasPrice"); - return Ok(EthApi::gas_price(self).await?) + return Ok(Self::gas_price(self).await?) } /// Handler for: `eth_maxPriorityFeePerGas` async fn max_priority_fee_per_gas(&self) -> Result { trace!(target: "rpc::eth", "Serving eth_maxPriorityFeePerGas"); - return Ok(EthApi::suggested_priority_fee(self).await?) + return Ok(Self::suggested_priority_fee(self).await?) 
} /// Handler for: `eth_blobBaseFee` async fn blob_base_fee(&self) -> Result { trace!(target: "rpc::eth", "Serving eth_blobBaseFee"); - return Ok(EthApi::blob_base_fee(self).await?) + return Ok(Self::blob_base_fee(self).await?) } // FeeHistory is calculated based on lazy evaluation of fees for historical blocks, and further @@ -348,7 +348,7 @@ where reward_percentiles: Option>, ) -> Result { trace!(target: "rpc::eth", ?block_count, ?newest_block, ?reward_percentiles, "Serving eth_feeHistory"); - return Ok(EthApi::fee_history(self, block_count, newest_block, reward_percentiles).await?) + return Ok(Self::fee_history(self, block_count, newest_block, reward_percentiles).await?) } /// Handler for: `eth_mining` @@ -391,7 +391,7 @@ where /// Handler for: `eth_sign` async fn sign(&self, address: Address, message: Bytes) -> Result { trace!(target: "rpc::eth", ?address, ?message, "Serving eth_sign"); - Ok(EthApi::sign(self, address, message).await?) + Ok(Self::sign(self, address, message).await?) } /// Handler for: `eth_signTransaction` @@ -402,7 +402,7 @@ where /// Handler for: `eth_signTypedData` async fn sign_typed_data(&self, address: Address, data: Value) -> Result { trace!(target: "rpc::eth", ?address, ?data, "Serving eth_signTypedData"); - Ok(EthApi::sign_typed_data(self, data, address)?) + Ok(Self::sign_typed_data(self, data, address)?) 
} /// Handler for: `eth_getProof` @@ -413,7 +413,7 @@ where block_number: Option, ) -> Result { trace!(target: "rpc::eth", ?address, ?keys, ?block_number, "Serving eth_getProof"); - let res = EthApi::get_proof(self, address, keys, block_number).await; + let res = Self::get_proof(self, address, keys, block_number).await; Ok(res.map_err(|e| match e { EthApiError::InvalidBlockRange => { diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 6e01ce3a2c57b..d9be1e91e5176 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -1623,7 +1623,7 @@ impl TransactionSource { /// Returns the transaction and block related info, if not pending pub fn split(self) -> (TransactionSignedEcRecovered, TransactionInfo) { match self { - TransactionSource::Pool(tx) => { + Self::Pool(tx) => { let hash = tx.hash(); ( tx, @@ -1636,7 +1636,7 @@ impl TransactionSource { }, ) } - TransactionSource::Block { transaction, index, block_hash, block_number, base_fee } => { + Self::Block { transaction, index, block_hash, block_number, base_fee } => { let hash = transaction.hash(); ( transaction, diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 0523141eb2488..2135b43a68154 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -216,7 +216,7 @@ where Eth: EthTransactions + 'static, { async fn call_bundle(&self, request: EthCallBundle) -> RpcResult { - Ok(EthBundle::call_bundle(self, request).await?) + Ok(Self::call_bundle(self, request).await?) 
} } diff --git a/crates/rpc/rpc/src/eth/cache/mod.rs b/crates/rpc/rpc/src/eth/cache/mod.rs index a3e4011073183..4dbf302275e39 100644 --- a/crates/rpc/rpc/src/eth/cache/mod.rs +++ b/crates/rpc/rpc/src/eth/cache/mod.rs @@ -92,7 +92,7 @@ impl EthStateCache { rate_limiter: Arc::new(Semaphore::new(max_concurrent_db_operations)), evm_config, }; - let cache = EthStateCache { to_service }; + let cache = Self { to_service }; (cache, service) } diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index ff12130b19f82..42a8d3624f004 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -127,7 +127,7 @@ pub enum EthApiError { impl EthApiError { /// crates a new [EthApiError::Other] variant. pub fn other(err: E) -> Self { - EthApiError::Other(Box::new(err)) + Self::Other(Box::new(err)) } } @@ -176,10 +176,8 @@ impl From for ErrorObject<'static> { impl From for EthApiError { fn from(error: JsInspectorError) -> Self { match error { - err @ JsInspectorError::JsError(_) => { - EthApiError::InternalJsTracerError(err.to_string()) - } - err => EthApiError::InvalidParams(err.to_string()), + err @ JsInspectorError::JsError(_) => Self::InternalJsTracerError(err.to_string()), + err => Self::InvalidParams(err.to_string()), } } } @@ -188,7 +186,7 @@ impl From for EthApiError { fn from(error: RethError) -> Self { match error { RethError::Provider(err) => err.into(), - err => EthApiError::Internal(err), + err => Self::Internal(err), } } } @@ -202,28 +200,26 @@ impl From for EthApiError { ProviderError::BestBlockNotFound | ProviderError::BlockNumberForTransactionIndexNotFound | ProviderError::TotalDifficultyNotFound { .. 
} | - ProviderError::UnknownBlockHash(_) => EthApiError::UnknownBlockNumber, + ProviderError::UnknownBlockHash(_) => Self::UnknownBlockNumber, ProviderError::FinalizedBlockNotFound | ProviderError::SafeBlockNotFound => { - EthApiError::UnknownSafeOrFinalizedBlock + Self::UnknownSafeOrFinalizedBlock } - err => EthApiError::Internal(err.into()), + err => Self::Internal(err.into()), } } } impl From> for EthApiError where - T: Into, + T: Into, { fn from(err: EVMError) -> Self { match err { EVMError::Transaction(err) => RpcInvalidTransactionError::from(err).into(), - EVMError::Header(InvalidHeader::PrevrandaoNotSet) => EthApiError::PrevrandaoNotSet, - EVMError::Header(InvalidHeader::ExcessBlobGasNotSet) => { - EthApiError::ExcessBlobGasNotSet - } + EVMError::Header(InvalidHeader::PrevrandaoNotSet) => Self::PrevrandaoNotSet, + EVMError::Header(InvalidHeader::ExcessBlobGasNotSet) => Self::ExcessBlobGasNotSet, EVMError::Database(err) => err.into(), - EVMError::Custom(err) => EthApiError::EvmCustom(err), + EVMError::Custom(err) => Self::EvmCustom(err), } } } @@ -373,10 +369,10 @@ impl RpcInvalidTransactionError { /// Returns the rpc error code for this error. 
fn error_code(&self) -> i32 { match self { - RpcInvalidTransactionError::InvalidChainId | - RpcInvalidTransactionError::GasTooLow | - RpcInvalidTransactionError::GasTooHigh => EthRpcErrorCode::InvalidInput.code(), - RpcInvalidTransactionError::Revert(_) => EthRpcErrorCode::ExecutionError.code(), + Self::InvalidChainId | Self::GasTooLow | Self::GasTooHigh => { + EthRpcErrorCode::InvalidInput.code() + } + Self::Revert(_) => EthRpcErrorCode::ExecutionError.code(), _ => EthRpcErrorCode::TransactionRejected.code(), } } @@ -386,22 +382,20 @@ impl RpcInvalidTransactionError { /// Takes the configured gas limit of the transaction which is attached to the error pub(crate) fn halt(reason: HaltReason, gas_limit: u64) -> Self { match reason { - HaltReason::OutOfGas(err) => RpcInvalidTransactionError::out_of_gas(err, gas_limit), - HaltReason::NonceOverflow => RpcInvalidTransactionError::NonceMaxValue, - err => RpcInvalidTransactionError::EvmHalt(err), + HaltReason::OutOfGas(err) => Self::out_of_gas(err, gas_limit), + HaltReason::NonceOverflow => Self::NonceMaxValue, + err => Self::EvmHalt(err), } } /// Converts the out of gas error pub(crate) fn out_of_gas(reason: OutOfGasError, gas_limit: u64) -> Self { match reason { - OutOfGasError::Basic => RpcInvalidTransactionError::BasicOutOfGas(gas_limit), - OutOfGasError::Memory => RpcInvalidTransactionError::MemoryOutOfGas(gas_limit), - OutOfGasError::Precompile => RpcInvalidTransactionError::PrecompileOutOfGas(gas_limit), - OutOfGasError::InvalidOperand => { - RpcInvalidTransactionError::InvalidOperandOutOfGas(gas_limit) - } - OutOfGasError::MemoryLimit => RpcInvalidTransactionError::MemoryOutOfGas(gas_limit), + OutOfGasError::Basic => Self::BasicOutOfGas(gas_limit), + OutOfGasError::Memory => Self::MemoryOutOfGas(gas_limit), + OutOfGasError::Precompile => Self::PrecompileOutOfGas(gas_limit), + OutOfGasError::InvalidOperand => Self::InvalidOperandOutOfGas(gas_limit), + OutOfGasError::MemoryLimit => Self::MemoryOutOfGas(gas_limit), } 
} } @@ -426,66 +420,36 @@ impl From for RpcInvalidTransactionError { fn from(err: revm::primitives::InvalidTransaction) -> Self { use revm::primitives::InvalidTransaction; match err { - InvalidTransaction::InvalidChainId => RpcInvalidTransactionError::InvalidChainId, - InvalidTransaction::PriorityFeeGreaterThanMaxFee => { - RpcInvalidTransactionError::TipAboveFeeCap - } - InvalidTransaction::GasPriceLessThanBasefee => RpcInvalidTransactionError::FeeCapTooLow, - InvalidTransaction::CallerGasLimitMoreThanBlock => { - RpcInvalidTransactionError::GasTooHigh - } - InvalidTransaction::CallGasCostMoreThanGasLimit => { - RpcInvalidTransactionError::GasTooHigh - } - InvalidTransaction::RejectCallerWithCode => RpcInvalidTransactionError::SenderNoEOA, - InvalidTransaction::LackOfFundForMaxFee { .. } => { - RpcInvalidTransactionError::InsufficientFunds - } - InvalidTransaction::OverflowPaymentInTransaction => { - RpcInvalidTransactionError::GasUintOverflow - } - InvalidTransaction::NonceOverflowInTransaction => { - RpcInvalidTransactionError::NonceMaxValue - } - InvalidTransaction::CreateInitCodeSizeLimit => { - RpcInvalidTransactionError::MaxInitCodeSizeExceeded - } - InvalidTransaction::NonceTooHigh { .. } => RpcInvalidTransactionError::NonceTooHigh, - InvalidTransaction::NonceTooLow { .. 
} => RpcInvalidTransactionError::NonceTooLow, - InvalidTransaction::AccessListNotSupported => { - RpcInvalidTransactionError::AccessListNotSupported - } - InvalidTransaction::MaxFeePerBlobGasNotSupported => { - RpcInvalidTransactionError::MaxFeePerBlobGasNotSupported - } + InvalidTransaction::InvalidChainId => Self::InvalidChainId, + InvalidTransaction::PriorityFeeGreaterThanMaxFee => Self::TipAboveFeeCap, + InvalidTransaction::GasPriceLessThanBasefee => Self::FeeCapTooLow, + InvalidTransaction::CallerGasLimitMoreThanBlock => Self::GasTooHigh, + InvalidTransaction::CallGasCostMoreThanGasLimit => Self::GasTooHigh, + InvalidTransaction::RejectCallerWithCode => Self::SenderNoEOA, + InvalidTransaction::LackOfFundForMaxFee { .. } => Self::InsufficientFunds, + InvalidTransaction::OverflowPaymentInTransaction => Self::GasUintOverflow, + InvalidTransaction::NonceOverflowInTransaction => Self::NonceMaxValue, + InvalidTransaction::CreateInitCodeSizeLimit => Self::MaxInitCodeSizeExceeded, + InvalidTransaction::NonceTooHigh { .. } => Self::NonceTooHigh, + InvalidTransaction::NonceTooLow { .. 
} => Self::NonceTooLow, + InvalidTransaction::AccessListNotSupported => Self::AccessListNotSupported, + InvalidTransaction::MaxFeePerBlobGasNotSupported => Self::MaxFeePerBlobGasNotSupported, InvalidTransaction::BlobVersionedHashesNotSupported => { - RpcInvalidTransactionError::BlobVersionedHashesNotSupported - } - InvalidTransaction::BlobGasPriceGreaterThanMax => { - RpcInvalidTransactionError::BlobFeeCapTooLow - } - InvalidTransaction::EmptyBlobs => { - RpcInvalidTransactionError::BlobTransactionMissingBlobHashes - } - InvalidTransaction::BlobVersionNotSupported => { - RpcInvalidTransactionError::BlobHashVersionMismatch - } - InvalidTransaction::TooManyBlobs { max, have } => { - RpcInvalidTransactionError::TooManyBlobs { max, have } - } - InvalidTransaction::BlobCreateTransaction => { - RpcInvalidTransactionError::BlobTransactionIsCreate + Self::BlobVersionedHashesNotSupported } + InvalidTransaction::BlobGasPriceGreaterThanMax => Self::BlobFeeCapTooLow, + InvalidTransaction::EmptyBlobs => Self::BlobTransactionMissingBlobHashes, + InvalidTransaction::BlobVersionNotSupported => Self::BlobHashVersionMismatch, + InvalidTransaction::TooManyBlobs { max, have } => Self::TooManyBlobs { max, have }, + InvalidTransaction::BlobCreateTransaction => Self::BlobTransactionIsCreate, #[cfg(feature = "optimism")] InvalidTransaction::DepositSystemTxPostRegolith => { - RpcInvalidTransactionError::Optimism( - OptimismInvalidTransactionError::DepositSystemTxPostRegolith, - ) + Self::Optimism(OptimismInvalidTransactionError::DepositSystemTxPostRegolith) } #[cfg(feature = "optimism")] - InvalidTransaction::HaltedDepositPostRegolith => RpcInvalidTransactionError::Optimism( - OptimismInvalidTransactionError::HaltedDepositPostRegolith, - ), + InvalidTransaction::HaltedDepositPostRegolith => { + Self::Optimism(OptimismInvalidTransactionError::HaltedDepositPostRegolith) + } // TODO(EOF) InvalidTransaction::EofCrateShouldHaveToAddress => todo!("EOF"), } @@ -498,31 +462,23 @@ impl From for 
RpcInvalidTransactionErr // This conversion is used to convert any transaction errors that could occur inside the // txpool (e.g. `eth_sendRawTransaction`) to their corresponding RPC match err { - InvalidTransactionError::InsufficientFunds { .. } => { - RpcInvalidTransactionError::InsufficientFunds - } - InvalidTransactionError::NonceNotConsistent => RpcInvalidTransactionError::NonceTooLow, + InvalidTransactionError::InsufficientFunds { .. } => Self::InsufficientFunds, + InvalidTransactionError::NonceNotConsistent => Self::NonceTooLow, InvalidTransactionError::OldLegacyChainId => { // Note: this should be unreachable since Spurious Dragon now enabled - RpcInvalidTransactionError::OldLegacyChainId + Self::OldLegacyChainId } - InvalidTransactionError::ChainIdMismatch => RpcInvalidTransactionError::InvalidChainId, + InvalidTransactionError::ChainIdMismatch => Self::InvalidChainId, InvalidTransactionError::Eip2930Disabled | InvalidTransactionError::Eip1559Disabled | - InvalidTransactionError::Eip4844Disabled => { - RpcInvalidTransactionError::TxTypeNotSupported - } - InvalidTransactionError::TxTypeNotSupported => { - RpcInvalidTransactionError::TxTypeNotSupported - } - InvalidTransactionError::GasUintOverflow => RpcInvalidTransactionError::GasUintOverflow, - InvalidTransactionError::GasTooLow => RpcInvalidTransactionError::GasTooLow, - InvalidTransactionError::GasTooHigh => RpcInvalidTransactionError::GasTooHigh, - InvalidTransactionError::TipAboveFeeCap => RpcInvalidTransactionError::TipAboveFeeCap, - InvalidTransactionError::FeeCapTooLow => RpcInvalidTransactionError::FeeCapTooLow, - InvalidTransactionError::SignerAccountHasBytecode => { - RpcInvalidTransactionError::SenderNoEOA - } + InvalidTransactionError::Eip4844Disabled => Self::TxTypeNotSupported, + InvalidTransactionError::TxTypeNotSupported => Self::TxTypeNotSupported, + InvalidTransactionError::GasUintOverflow => Self::GasUintOverflow, + InvalidTransactionError::GasTooLow => Self::GasTooLow, + 
InvalidTransactionError::GasTooHigh => Self::GasTooHigh, + InvalidTransactionError::TipAboveFeeCap => Self::TipAboveFeeCap, + InvalidTransactionError::FeeCapTooLow => Self::FeeCapTooLow, + InvalidTransactionError::SignerAccountHasBytecode => Self::SenderNoEOA, } } } @@ -629,39 +585,37 @@ impl From for ErrorObject<'static> { } impl From for RpcPoolError { - fn from(err: PoolError) -> RpcPoolError { + fn from(err: PoolError) -> Self { match err.kind { - PoolErrorKind::ReplacementUnderpriced => RpcPoolError::ReplaceUnderpriced, - PoolErrorKind::FeeCapBelowMinimumProtocolFeeCap(_) => RpcPoolError::Underpriced, - PoolErrorKind::SpammerExceededCapacity(_) => RpcPoolError::TxPoolOverflow, - PoolErrorKind::DiscardedOnInsert => RpcPoolError::TxPoolOverflow, + PoolErrorKind::ReplacementUnderpriced => Self::ReplaceUnderpriced, + PoolErrorKind::FeeCapBelowMinimumProtocolFeeCap(_) => Self::Underpriced, + PoolErrorKind::SpammerExceededCapacity(_) => Self::TxPoolOverflow, + PoolErrorKind::DiscardedOnInsert => Self::TxPoolOverflow, PoolErrorKind::InvalidTransaction(err) => err.into(), - PoolErrorKind::Other(err) => RpcPoolError::Other(err), - PoolErrorKind::AlreadyImported => RpcPoolError::AlreadyKnown, - PoolErrorKind::ExistingConflictingTransactionType(_, _) => { - RpcPoolError::AddressAlreadyReserved - } + PoolErrorKind::Other(err) => Self::Other(err), + PoolErrorKind::AlreadyImported => Self::AlreadyKnown, + PoolErrorKind::ExistingConflictingTransactionType(_, _) => Self::AddressAlreadyReserved, } } } impl From for RpcPoolError { - fn from(err: InvalidPoolTransactionError) -> RpcPoolError { + fn from(err: InvalidPoolTransactionError) -> Self { match err { - InvalidPoolTransactionError::Consensus(err) => RpcPoolError::Invalid(err.into()), - InvalidPoolTransactionError::ExceedsGasLimit(_, _) => RpcPoolError::ExceedsGasLimit, + InvalidPoolTransactionError::Consensus(err) => Self::Invalid(err.into()), + InvalidPoolTransactionError::ExceedsGasLimit(_, _) => Self::ExceedsGasLimit, 
InvalidPoolTransactionError::ExceedsMaxInitCodeSize(_, _) => { - RpcPoolError::ExceedsMaxInitCodeSize + Self::ExceedsMaxInitCodeSize } InvalidPoolTransactionError::IntrinsicGasTooLow => { - RpcPoolError::Invalid(RpcInvalidTransactionError::GasTooLow) + Self::Invalid(RpcInvalidTransactionError::GasTooLow) } - InvalidPoolTransactionError::OversizedData(_, _) => RpcPoolError::OversizedData, - InvalidPoolTransactionError::Underpriced => RpcPoolError::Underpriced, - InvalidPoolTransactionError::Other(err) => RpcPoolError::PoolTransactionError(err), - InvalidPoolTransactionError::Eip4844(err) => RpcPoolError::Eip4844(err), + InvalidPoolTransactionError::OversizedData(_, _) => Self::OversizedData, + InvalidPoolTransactionError::Underpriced => Self::Underpriced, + InvalidPoolTransactionError::Other(err) => Self::PoolTransactionError(err), + InvalidPoolTransactionError::Eip4844(err) => Self::Eip4844(err), InvalidPoolTransactionError::Overdraft => { - RpcPoolError::Invalid(RpcInvalidTransactionError::InsufficientFunds) + Self::Invalid(RpcInvalidTransactionError::InsufficientFunds) } } } @@ -669,7 +623,7 @@ impl From for RpcPoolError { impl From for EthApiError { fn from(err: PoolError) -> Self { - EthApiError::PoolError(RpcPoolError::from(err)) + Self::PoolError(RpcPoolError::from(err)) } } diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index b14c20451c7ec..d0d7077e85ebf 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -269,7 +269,7 @@ where /// Handler for `eth_getFilterChanges` async fn filter_changes(&self, id: FilterId) -> RpcResult { trace!(target: "rpc::eth", "Serving eth_getFilterChanges"); - Ok(EthFilter::filter_changes(self, id).await?) + Ok(Self::filter_changes(self, id).await?) } /// Returns an array of all logs matching filter with given id. 
@@ -279,7 +279,7 @@ where /// Handler for `eth_getFilterLogs` async fn filter_logs(&self, id: FilterId) -> RpcResult> { trace!(target: "rpc::eth", "Serving eth_getFilterLogs"); - Ok(EthFilter::filter_logs(self, id).await?) + Ok(Self::filter_logs(self, id).await?) } /// Handler for `eth_uninstallFilter` @@ -588,7 +588,7 @@ struct PendingTransactionsReceiver { impl PendingTransactionsReceiver { fn new(receiver: Receiver) -> Self { - PendingTransactionsReceiver { txs_receiver: Arc::new(Mutex::new(receiver)) } + Self { txs_receiver: Arc::new(Mutex::new(receiver)) } } /// Returns all new pending transactions received since the last poll. @@ -617,7 +617,7 @@ where { /// Creates a new `FullTransactionsReceiver` encapsulating the provided transaction stream. fn new(stream: NewSubpoolTransactionStream) -> Self { - FullTransactionsReceiver { txs_stream: Arc::new(Mutex::new(stream)) } + Self { txs_stream: Arc::new(Mutex::new(stream)) } } /// Returns all new pending transactions received since the last poll. 
@@ -646,7 +646,7 @@ where T: PoolTransaction + 'static, { async fn drain(&self) -> FilterChanges { - FullTransactionsReceiver::drain(self).await + Self::drain(self).await } } @@ -664,8 +664,8 @@ enum PendingTransactionKind { impl PendingTransactionKind { async fn drain(&self) -> FilterChanges { match self { - PendingTransactionKind::Hashes(receiver) => receiver.drain().await, - PendingTransactionKind::FullTransaction(receiver) => receiver.drain().await, + Self::Hashes(receiver) => receiver.drain().await, + Self::FullTransaction(receiver) => receiver.drain().await, } } } @@ -717,7 +717,7 @@ impl From for jsonrpsee::types::error::ErrorObject<'static> { impl From for FilterError { fn from(err: ProviderError) -> Self { - FilterError::EthAPIError(err.into()) + Self::EthAPIError(err.into()) } } diff --git a/crates/rpc/rpc/src/eth/gas_oracle.rs b/crates/rpc/rpc/src/eth/gas_oracle.rs index e9d04d67ee3bf..592c858d8d3cc 100644 --- a/crates/rpc/rpc/src/eth/gas_oracle.rs +++ b/crates/rpc/rpc/src/eth/gas_oracle.rs @@ -61,7 +61,7 @@ pub struct GasPriceOracleConfig { impl Default for GasPriceOracleConfig { fn default() -> Self { - GasPriceOracleConfig { + Self { blocks: DEFAULT_GAS_PRICE_BLOCKS, percentile: DEFAULT_GAS_PRICE_PERCENTILE, max_header_history: MAX_HEADER_HISTORY, diff --git a/crates/rpc/rpc/src/eth/optimism.rs b/crates/rpc/rpc/src/eth/optimism.rs index 24f6f36ff4635..fb1665b957854 100644 --- a/crates/rpc/rpc/src/eth/optimism.rs +++ b/crates/rpc/rpc/src/eth/optimism.rs @@ -20,15 +20,13 @@ pub enum OptimismEthApiError { impl ToRpcError for OptimismEthApiError { fn to_rpc_error(&self) -> ErrorObject<'static> { match self { - OptimismEthApiError::L1BlockFeeError | OptimismEthApiError::L1BlockGasError => { - internal_rpc_err(self.to_string()) - } + Self::L1BlockFeeError | Self::L1BlockGasError => internal_rpc_err(self.to_string()), } } } impl From for EthApiError { fn from(err: OptimismEthApiError) -> Self { - EthApiError::other(err) + Self::other(err) } } diff --git 
a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index fcd132bfec1af..f5b6545a00723 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -356,7 +356,7 @@ impl CallFees { blob_versioned_hashes: Option<&[B256]>, max_fee_per_blob_gas: Option, block_blob_fee: Option, - ) -> EthResult { + ) -> EthResult { /// Get the effective gas price of a transaction as specfified in EIP-1559 with relevant /// checks. fn get_effective_gas_price( @@ -399,7 +399,7 @@ impl CallFees { // either legacy transaction or no fee fields are specified // when no fields are specified, set gas price to zero let gas_price = gas_price.unwrap_or(U256::ZERO); - Ok(CallFees { + Ok(Self { gas_price, max_priority_fee_per_gas: None, max_fee_per_blob_gas: has_blob_hashes.then_some(block_blob_fee).flatten(), @@ -414,7 +414,7 @@ impl CallFees { )?; let max_fee_per_blob_gas = has_blob_hashes.then_some(block_blob_fee).flatten(); - Ok(CallFees { + Ok(Self { gas_price: effective_gas_price, max_priority_fee_per_gas, max_fee_per_blob_gas, @@ -433,7 +433,7 @@ impl CallFees { return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into()) } - Ok(CallFees { + Ok(Self { gas_price: effective_gas_price, max_priority_fee_per_gas, max_fee_per_blob_gas: Some(max_fee_per_blob_gas), diff --git a/crates/rpc/rpc/src/eth/signer.rs b/crates/rpc/rpc/src/eth/signer.rs index 578907604ea48..9999a7dac81eb 100644 --- a/crates/rpc/rpc/src/eth/signer.rs +++ b/crates/rpc/rpc/src/eth/signer.rs @@ -66,7 +66,7 @@ impl DevSigner { let address = reth_primitives::public_key_to_address(pk); let addresses = vec![address]; let accounts = HashMap::from([(address, sk)]); - signers.push(Box::new(DevSigner { addresses, accounts }) as Box); + signers.push(Box::new(Self { addresses, accounts }) as Box); } signers } diff --git a/crates/rpc/rpc/src/reth.rs b/crates/rpc/rpc/src/reth.rs index 43890bea06ae8..87865ef31fd46 100644 --- a/crates/rpc/rpc/src/reth.rs +++ 
b/crates/rpc/rpc/src/reth.rs @@ -93,7 +93,7 @@ where &self, block_id: BlockId, ) -> RpcResult> { - Ok(RethApi::balance_changes_in_block(self, block_id).await?) + Ok(Self::balance_changes_in_block(self, block_id).await?) } } diff --git a/crates/rpc/rpc/src/rpc.rs b/crates/rpc/rpc/src/rpc.rs index 97a49ddb3609a..771987a88a707 100644 --- a/crates/rpc/rpc/src/rpc.rs +++ b/crates/rpc/rpc/src/rpc.rs @@ -15,7 +15,7 @@ pub struct RPCApi { impl RPCApi { /// Return a new RPCApi struct, with given module_map pub fn new(module_map: HashMap) -> Self { - RPCApi { rpc_modules: Arc::new(RpcModules::new(module_map)) } + Self { rpc_modules: Arc::new(RpcModules::new(module_map)) } } } diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index b2104ff47a5bf..9c7dac6a8d304 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -501,7 +501,7 @@ where let _permit = self.acquire_trace_permit().await; let request = TraceCallRequest { call, trace_types, block_id, state_overrides, block_overrides }; - Ok(TraceApi::trace_call(self, request).await?) + Ok(Self::trace_call(self, request).await?) } /// Handler for `trace_callMany` @@ -511,7 +511,7 @@ where block_id: Option, ) -> Result> { let _permit = self.acquire_trace_permit().await; - Ok(TraceApi::trace_call_many(self, calls, block_id).await?) + Ok(Self::trace_call_many(self, calls, block_id).await?) } /// Handler for `trace_rawTransaction` @@ -522,7 +522,7 @@ where block_id: Option, ) -> Result { let _permit = self.acquire_trace_permit().await; - Ok(TraceApi::trace_raw_transaction(self, data, trace_types, block_id).await?) + Ok(Self::trace_raw_transaction(self, data, trace_types, block_id).await?) } /// Handler for `trace_replayBlockTransactions` @@ -532,7 +532,7 @@ where trace_types: HashSet, ) -> Result>> { let _permit = self.acquire_trace_permit().await; - Ok(TraceApi::replay_block_transactions(self, block_id, trace_types).await?) 
+ Ok(Self::replay_block_transactions(self, block_id, trace_types).await?) } /// Handler for `trace_replayTransaction` @@ -542,7 +542,7 @@ where trace_types: HashSet, ) -> Result { let _permit = self.acquire_trace_permit().await; - Ok(TraceApi::replay_transaction(self, transaction, trace_types).await?) + Ok(Self::replay_transaction(self, transaction, trace_types).await?) } /// Handler for `trace_block` @@ -551,7 +551,7 @@ where block_id: BlockId, ) -> Result>> { let _permit = self.acquire_trace_permit().await; - Ok(TraceApi::trace_block(self, block_id).await?) + Ok(Self::trace_block(self, block_id).await?) } /// Handler for `trace_filter` @@ -561,7 +561,7 @@ where /// # Limitations /// This currently requires block filter fields, since reth does not have address indices yet. async fn trace_filter(&self, filter: TraceFilter) -> Result> { - Ok(TraceApi::trace_filter(self, filter).await?) + Ok(Self::trace_filter(self, filter).await?) } /// Returns transaction trace at given index. @@ -572,7 +572,7 @@ where indices: Vec, ) -> Result> { let _permit = self.acquire_trace_permit().await; - Ok(TraceApi::trace_get(self, hash, indices.into_iter().map(Into::into).collect()).await?) + Ok(Self::trace_get(self, hash, indices.into_iter().map(Into::into).collect()).await?) } /// Handler for `trace_transaction` @@ -581,7 +581,7 @@ where hash: B256, ) -> Result>> { let _permit = self.acquire_trace_permit().await; - Ok(TraceApi::trace_transaction(self, hash).await?) + Ok(Self::trace_transaction(self, hash).await?) } /// Handler for `trace_transactionOpcodeGas` @@ -590,13 +590,13 @@ where tx_hash: B256, ) -> Result> { let _permit = self.acquire_trace_permit().await; - Ok(TraceApi::trace_transaction_opcode_gas(self, tx_hash).await?) + Ok(Self::trace_transaction_opcode_gas(self, tx_hash).await?) 
} /// Handler for `trace_blockOpcodeGas` async fn trace_block_opcode_gas(&self, block_id: BlockId) -> Result> { let _permit = self.acquire_trace_permit().await; - Ok(TraceApi::trace_block_opcode_gas(self, block_id).await?) + Ok(Self::trace_block_opcode_gas(self, block_id).await?) } } diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index 202d69b575a7f..aaef62f70aa10 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -22,7 +22,7 @@ pub struct TxPoolApi { impl TxPoolApi { /// Creates a new instance of `TxpoolApi`. pub fn new(pool: Pool) -> Self { - TxPoolApi { pool } + Self { pool } } } diff --git a/crates/rpc/rpc/src/web3.rs b/crates/rpc/rpc/src/web3.rs index 604987e167c33..e7dc9e6be9c8a 100644 --- a/crates/rpc/rpc/src/web3.rs +++ b/crates/rpc/rpc/src/web3.rs @@ -16,7 +16,7 @@ pub struct Web3Api { impl Web3Api { /// Creates a new instance of `Web3Api`. pub fn new(network: N) -> Self { - Web3Api { network } + Self { network } } } diff --git a/crates/stages-api/src/error.rs b/crates/stages-api/src/error.rs index f6e528ca75426..40b998bc1fc0a 100644 --- a/crates/stages-api/src/error.rs +++ b/crates/stages-api/src/error.rs @@ -23,8 +23,8 @@ impl BlockErrorKind { /// Returns `true` if the error is a state root error. pub fn is_state_root_error(&self) -> bool { match self { - BlockErrorKind::Validation(err) => err.is_state_root_error(), - BlockErrorKind::Execution(err) => err.is_state_root_error(), + Self::Validation(err) => err.is_state_root_error(), + Self::Execution(err) => err.is_state_root_error(), } } } @@ -138,24 +138,24 @@ impl StageError { pub fn is_fatal(&self) -> bool { matches!( self, - StageError::Database(_) | - StageError::Download(_) | - StageError::DatabaseIntegrity(_) | - StageError::StageCheckpoint(_) | - StageError::MissingDownloadBuffer | - StageError::MissingSyncGap | - StageError::ChannelClosed | - StageError::InconsistentBlockNumber { .. } | - StageError::InconsistentTxNumber { .. 
} | - StageError::Internal(_) | - StageError::Fatal(_) + Self::Database(_) | + Self::Download(_) | + Self::DatabaseIntegrity(_) | + Self::StageCheckpoint(_) | + Self::MissingDownloadBuffer | + Self::MissingSyncGap | + Self::ChannelClosed | + Self::InconsistentBlockNumber { .. } | + Self::InconsistentTxNumber { .. } | + Self::Internal(_) | + Self::Fatal(_) ) } } impl From for StageError { fn from(source: std::io::Error) -> Self { - StageError::Fatal(Box::new(source)) + Self::Fatal(Box::new(source)) } } diff --git a/crates/stages-api/src/pipeline/ctrl.rs b/crates/stages-api/src/pipeline/ctrl.rs index eb40bc7d4bf5e..dc19672bc9307 100644 --- a/crates/stages-api/src/pipeline/ctrl.rs +++ b/crates/stages-api/src/pipeline/ctrl.rs @@ -27,20 +27,20 @@ pub enum ControlFlow { impl ControlFlow { /// Whether the pipeline should continue executing stages. pub fn should_continue(&self) -> bool { - matches!(self, ControlFlow::Continue { .. } | ControlFlow::NoProgress { .. }) + matches!(self, Self::Continue { .. } | Self::NoProgress { .. }) } /// Returns true if the control flow is unwind. pub fn is_unwind(&self) -> bool { - matches!(self, ControlFlow::Unwind { .. }) + matches!(self, Self::Unwind { .. }) } /// Returns the pipeline block number the stage reached, if the state is not `Unwind`. pub fn block_number(&self) -> Option { match self { - ControlFlow::Unwind { .. } => None, - ControlFlow::Continue { block_number } => Some(*block_number), - ControlFlow::NoProgress { block_number } => *block_number, + Self::Unwind { .. 
} => None, + Self::Continue { block_number } => Some(*block_number), + Self::NoProgress { block_number } => *block_number, } } } diff --git a/crates/stages-api/src/pipeline/set.rs b/crates/stages-api/src/pipeline/set.rs index ede824359e076..01613f36c0a15 100644 --- a/crates/stages-api/src/pipeline/set.rs +++ b/crates/stages-api/src/pipeline/set.rs @@ -240,7 +240,7 @@ where } impl StageSet for StageSetBuilder { - fn builder(self) -> StageSetBuilder { + fn builder(self) -> Self { self } } diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index b7057ab501619..cee62b3b3d80a 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -561,7 +561,7 @@ impl ExecutionStageThresholds { impl From for ExecutionStageThresholds { fn from(config: ExecutionConfig) -> Self { - ExecutionStageThresholds { + Self { max_blocks: config.max_blocks, max_changes: config.max_changes, max_cumulative_gas: config.max_cumulative_gas, diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index cb50dd39bd816..b4b7aec48bc1e 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -138,10 +138,10 @@ impl Stage for MerkleStage { /// Return the id of the stage fn id(&self) -> StageId { match self { - MerkleStage::Execution { .. } => StageId::MerkleExecute, - MerkleStage::Unwind => StageId::MerkleUnwind, + Self::Execution { .. } => StageId::MerkleExecute, + Self::Unwind => StageId::MerkleUnwind, #[cfg(any(test, feature = "test-utils"))] - MerkleStage::Both { .. } => StageId::Other("MerkleBoth"), + Self::Both { .. 
} => StageId::Other("MerkleBoth"), } } @@ -152,13 +152,13 @@ impl Stage for MerkleStage { input: ExecInput, ) -> Result { let threshold = match self { - MerkleStage::Unwind => { + Self::Unwind => { info!(target: "sync::stages::merkle::unwind", "Stage is always skipped"); return Ok(ExecOutput::done(StageCheckpoint::new(input.target()))) } - MerkleStage::Execution { clean_threshold } => *clean_threshold, + Self::Execution { clean_threshold } => *clean_threshold, #[cfg(any(test, feature = "test-utils"))] - MerkleStage::Both { clean_threshold } => *clean_threshold, + Self::Both { clean_threshold } => *clean_threshold, }; let range = input.next_block_range(); @@ -292,7 +292,7 @@ impl Stage for MerkleStage { ) -> Result { let tx = provider.tx_ref(); let range = input.unwind_block_range(); - if matches!(self, MerkleStage::Execution { .. }) { + if matches!(self, Self::Execution { .. }) { info!(target: "sync::stages::merkle::unwind", "Stage is always skipped"); return Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) }) } diff --git a/crates/static-file-types/src/segment.rs b/crates/static-file-types/src/segment.rs index 82b937f29442c..d85d33dc27e85 100644 --- a/crates/static-file-types/src/segment.rs +++ b/crates/static-file-types/src/segment.rs @@ -40,9 +40,9 @@ impl StaticFileSegment { /// Returns the segment as a string. 
pub const fn as_str(&self) -> &'static str { match self { - StaticFileSegment::Headers => "headers", - StaticFileSegment::Transactions => "transactions", - StaticFileSegment::Receipts => "receipts", + Self::Headers => "headers", + Self::Transactions => "transactions", + Self::Receipts => "receipts", } } @@ -57,18 +57,18 @@ impl StaticFileSegment { }; match self { - StaticFileSegment::Headers => default_config, - StaticFileSegment::Transactions => default_config, - StaticFileSegment::Receipts => default_config, + Self::Headers => default_config, + Self::Transactions => default_config, + Self::Receipts => default_config, } } /// Returns the number of columns for the segment pub const fn columns(&self) -> usize { match self { - StaticFileSegment::Headers => 3, - StaticFileSegment::Transactions => 1, - StaticFileSegment::Receipts => 1, + Self::Headers => 3, + Self::Transactions => 1, + Self::Receipts => 1, } } @@ -134,7 +134,7 @@ impl StaticFileSegment { /// Returns `true` if the segment is `StaticFileSegment::Headers`. 
pub fn is_headers(&self) -> bool { - matches!(self, StaticFileSegment::Headers) + matches!(self, Self::Headers) } } @@ -344,7 +344,7 @@ impl std::fmt::Display for SegmentRangeInclusive { impl From> for SegmentRangeInclusive { fn from(value: RangeInclusive) -> Self { - SegmentRangeInclusive { start: *value.start(), end: *value.end() } + Self { start: *value.start(), end: *value.end() } } } diff --git a/crates/storage/codecs/src/alloy/access_list.rs b/crates/storage/codecs/src/alloy/access_list.rs index 22439f827a7bb..d3f906318848d 100644 --- a/crates/storage/codecs/src/alloy/access_list.rs +++ b/crates/storage/codecs/src/alloy/access_list.rs @@ -21,7 +21,7 @@ impl Compact for AccessListItem { buf = new_buf; let (storage_keys, new_buf) = Vec::specialized_from_compact(buf, buf.len()); buf = new_buf; - let access_list_item = AccessListItem { address, storage_keys }; + let access_list_item = Self { address, storage_keys }; (access_list_item, buf) } } @@ -41,7 +41,7 @@ impl Compact for AccessList { fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { let (access_list_items, new_buf) = Vec::from_compact(buf, buf.len()); buf = new_buf; - let access_list = AccessList(access_list_items); + let access_list = Self(access_list_items); (access_list, buf) } } diff --git a/crates/storage/codecs/src/alloy/genesis_account.rs b/crates/storage/codecs/src/alloy/genesis_account.rs index 619d9db517cce..bc3843ab7a6d1 100644 --- a/crates/storage/codecs/src/alloy/genesis_account.rs +++ b/crates/storage/codecs/src/alloy/genesis_account.rs @@ -53,7 +53,7 @@ impl Compact for AlloyGenesisAccount { fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { let (account, _) = GenesisAccount::from_compact(buf, len); - let alloy_account = AlloyGenesisAccount { + let alloy_account = Self { nonce: account.nonce, balance: account.balance, code: account.code, diff --git a/crates/storage/codecs/src/alloy/log.rs b/crates/storage/codecs/src/alloy/log.rs index a374b3680c351..8d5c30e0a0b3a 100644 
--- a/crates/storage/codecs/src/alloy/log.rs +++ b/crates/storage/codecs/src/alloy/log.rs @@ -23,7 +23,7 @@ impl Compact for LogData { let (topics, new_buf) = Vec::specialized_from_compact(buf, buf.len()); buf = new_buf; let (data, buf) = Bytes::from_compact(buf, buf.len()); - let log_data = LogData::new_unchecked(topics, data); + let log_data = Self::new_unchecked(topics, data); (log_data, buf) } } @@ -46,7 +46,7 @@ impl Compact for Log { buf = new_buf; let (log_data, new_buf) = LogData::from_compact(buf, buf.len()); buf = new_buf; - let log = Log { address, data: log_data }; + let log = Self { address, data: log_data }; (log, buf) } } diff --git a/crates/storage/codecs/src/alloy/request.rs b/crates/storage/codecs/src/alloy/request.rs index d5d4daa4af94c..c732e30b2bbaf 100644 --- a/crates/storage/codecs/src/alloy/request.rs +++ b/crates/storage/codecs/src/alloy/request.rs @@ -18,7 +18,7 @@ impl Compact for Request { fn from_compact(buf: &[u8], _: usize) -> (Self, &[u8]) { let (raw, buf) = Bytes::from_compact(buf, buf.len()); - (Request::decode_7685(&mut raw.as_ref()).expect("invalid eip-7685 request in db"), buf) + (Self::decode_7685(&mut raw.as_ref()).expect("invalid eip-7685 request in db"), buf) } } diff --git a/crates/storage/codecs/src/alloy/txkind.rs b/crates/storage/codecs/src/alloy/txkind.rs index e1dffa15be55a..14ab5123671e7 100644 --- a/crates/storage/codecs/src/alloy/txkind.rs +++ b/crates/storage/codecs/src/alloy/txkind.rs @@ -9,8 +9,8 @@ impl Compact for TxKind { B: bytes::BufMut + AsMut<[u8]>, { match self { - TxKind::Create => 0, - TxKind::Call(address) => { + Self::Create => 0, + Self::Call(address) => { address.to_compact(buf); 1 } @@ -18,7 +18,7 @@ impl Compact for TxKind { } fn from_compact(buf: &[u8], identifier: usize) -> (Self, &[u8]) { match identifier { - 0 => (TxKind::Create, buf), + 0 => (Self::Create, buf), 1 => { let (addr, buf) = Address::from_compact(buf, buf.len()); (addr.into(), buf) diff --git 
a/crates/storage/codecs/src/alloy/withdrawal.rs b/crates/storage/codecs/src/alloy/withdrawal.rs index 5cdc1a6675c3a..3238048ad098e 100644 --- a/crates/storage/codecs/src/alloy/withdrawal.rs +++ b/crates/storage/codecs/src/alloy/withdrawal.rs @@ -35,7 +35,7 @@ impl Compact for AlloyWithdrawal { fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { let (withdrawal, _) = Withdrawal::from_compact(buf, len); - let alloy_withdrawal = AlloyWithdrawal { + let alloy_withdrawal = Self { index: withdrawal.index, validator_index: withdrawal.validator_index, address: withdrawal.address, diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 79f57991906f3..b0927a1481cb2 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -153,7 +153,7 @@ where #[inline] fn from_compact(buf: &[u8], _: usize) -> (Self, &[u8]) { let (length, mut buf) = decode_varuint(buf); - let mut list = Vec::with_capacity(length); + let mut list = Self::with_capacity(length); for _ in 0..length { let len; (len, buf) = decode_varuint(buf); @@ -184,7 +184,7 @@ where #[inline] fn specialized_from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { let (length, mut buf) = decode_varuint(buf); - let mut list = Vec::with_capacity(length); + let mut list = Self::with_capacity(length); for _ in 0..length { let element; @@ -274,13 +274,13 @@ impl Compact for U256 { #[inline] fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) { if len == 0 { - return (U256::ZERO, buf) + return (Self::ZERO, buf) } let mut arr = [0; 32]; arr[(32 - len)..].copy_from_slice(&buf[..len]); buf.advance(len); - (U256::from_be_bytes(arr), buf) + (Self::from_be_bytes(arr), buf) } } @@ -573,7 +573,7 @@ mod tests { impl Default for TestStruct { fn default() -> Self { - TestStruct { + Self { f_u64: 1u64, // 4 bits | 1 byte f_u256: U256::from(1u64), // 6 bits | 1 byte f_bool_f: false, // 1 bit | 0 bytes diff --git a/crates/storage/db/src/abstraction/mock.rs 
b/crates/storage/db/src/abstraction/mock.rs index cb6717036ec60..f62746cca167c 100644 --- a/crates/storage/db/src/abstraction/mock.rs +++ b/crates/storage/db/src/abstraction/mock.rs @@ -138,8 +138,8 @@ impl DbCursorRO for CursorMock { fn walk(&mut self, start_key: Option) -> Result, DatabaseError> { let start: IterPairResult = match start_key { - Some(key) => >::seek(self, key).transpose(), - None => >::first(self).transpose(), + Some(key) => >::seek(self, key).transpose(), + None => >::first(self).transpose(), }; Ok(Walker::new(self, start)) @@ -160,8 +160,8 @@ impl DbCursorRO for CursorMock { }; let start: IterPairResult = match start_key { - Some(key) => >::seek(self, key).transpose(), - None => >::first(self).transpose(), + Some(key) => >::seek(self, key).transpose(), + None => >::first(self).transpose(), }; Ok(RangeWalker::new(self, start, end_key)) @@ -172,8 +172,8 @@ impl DbCursorRO for CursorMock { start_key: Option, ) -> Result, DatabaseError> { let start: IterPairResult = match start_key { - Some(key) => >::seek(self, key).transpose(), - None => >::last(self).transpose(), + Some(key) => >::seek(self, key).transpose(), + None => >::last(self).transpose(), }; Ok(ReverseWalker::new(self, start)) } diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 58977811f2387..6f19bd911c7db 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -250,7 +250,7 @@ impl DatabaseEnv { path: &Path, kind: DatabaseEnvKind, args: DatabaseArguments, - ) -> Result { + ) -> Result { let mut inner_env = Environment::builder(); let mode = match kind { @@ -379,7 +379,7 @@ impl DatabaseEnv { inner_env.set_max_read_transaction_duration(max_read_transaction_duration); } - let env = DatabaseEnv { + let env = Self { inner: inner_env.open(path).map_err(|e| DatabaseError::Open(e.into()))?, metrics: None, }; diff --git a/crates/storage/db/src/metrics.rs 
b/crates/storage/db/src/metrics.rs index a37ca123fd3fe..16d19beb5b4f8 100644 --- a/crates/storage/db/src/metrics.rs +++ b/crates/storage/db/src/metrics.rs @@ -156,14 +156,14 @@ impl TransactionMode { /// Returns the transaction mode as a string. pub(crate) const fn as_str(&self) -> &'static str { match self { - TransactionMode::ReadOnly => "read-only", - TransactionMode::ReadWrite => "read-write", + Self::ReadOnly => "read-only", + Self::ReadWrite => "read-write", } } /// Returns `true` if the transaction mode is read-only. pub(crate) const fn is_read_only(&self) -> bool { - matches!(self, TransactionMode::ReadOnly) + matches!(self, Self::ReadOnly) } } @@ -182,15 +182,15 @@ impl TransactionOutcome { /// Returns the transaction outcome as a string. pub(crate) const fn as_str(&self) -> &'static str { match self { - TransactionOutcome::Commit => "commit", - TransactionOutcome::Abort => "abort", - TransactionOutcome::Drop => "drop", + Self::Commit => "commit", + Self::Abort => "abort", + Self::Drop => "drop", } } /// Returns `true` if the transaction outcome is a commit. pub(crate) const fn is_commit(&self) -> bool { - matches!(self, TransactionOutcome::Commit) + matches!(self, Self::Commit) } } @@ -221,15 +221,15 @@ impl Operation { /// Returns the operation as a string. 
pub(crate) const fn as_str(&self) -> &'static str { match self { - Operation::Get => "get", - Operation::Put => "put", - Operation::Delete => "delete", - Operation::CursorUpsert => "cursor-upsert", - Operation::CursorInsert => "cursor-insert", - Operation::CursorAppend => "cursor-append", - Operation::CursorAppendDup => "cursor-append-dup", - Operation::CursorDeleteCurrent => "cursor-delete-current", - Operation::CursorDeleteCurrentDuplicates => "cursor-delete-current-duplicates", + Self::Get => "get", + Self::Put => "put", + Self::Delete => "delete", + Self::CursorUpsert => "cursor-upsert", + Self::CursorInsert => "cursor-insert", + Self::CursorAppend => "cursor-append", + Self::CursorAppendDup => "cursor-append-dup", + Self::CursorDeleteCurrent => "cursor-delete-current", + Self::CursorDeleteCurrentDuplicates => "cursor-delete-current-duplicates", } } } @@ -250,10 +250,10 @@ impl Labels { /// Converts each label variant into its corresponding string representation. pub(crate) fn as_str(&self) -> &'static str { match self { - Labels::Table => "table", - Labels::TransactionMode => "mode", - Labels::TransactionOutcome => "outcome", - Labels::Operation => "operation", + Self::Table => "table", + Self::TransactionMode => "mode", + Self::TransactionOutcome => "outcome", + Self::Operation => "operation", } } } diff --git a/crates/storage/db/src/tables/codecs/fuzz/inputs.rs b/crates/storage/db/src/tables/codecs/fuzz/inputs.rs index ff8d8b39c03bc..533b6b6926dd8 100644 --- a/crates/storage/db/src/tables/codecs/fuzz/inputs.rs +++ b/crates/storage/db/src/tables/codecs/fuzz/inputs.rs @@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize}; pub struct IntegerListInput(pub Vec); impl From for IntegerList { - fn from(list: IntegerListInput) -> IntegerList { + fn from(list: IntegerListInput) -> Self { let mut v = list.0; // Empty lists are not supported by `IntegerList`, so we want to skip these cases. 
diff --git a/crates/storage/db/src/tables/models/accounts.rs b/crates/storage/db/src/tables/models/accounts.rs index 7b0770da278c6..9549aa8e63286 100644 --- a/crates/storage/db/src/tables/models/accounts.rs +++ b/crates/storage/db/src/tables/models/accounts.rs @@ -92,7 +92,7 @@ impl BlockNumberAddress { impl From<(BlockNumber, Address)> for BlockNumberAddress { fn from(tpl: (u64, Address)) -> Self { - BlockNumberAddress(tpl) + Self(tpl) } } @@ -117,7 +117,7 @@ impl Decode for BlockNumberAddress { let num = u64::from_be_bytes(value[..8].try_into().map_err(|_| DatabaseError::Decode)?); let hash = Address::from_slice(&value[8..]); - Ok(BlockNumberAddress((num, hash))) + Ok(Self((num, hash))) } } @@ -150,7 +150,7 @@ impl Decode for AddressStorageKey { let address = Address::from_slice(&value[..20]); let storage_key = StorageKey::from_slice(&value[20..]); - Ok(AddressStorageKey((address, storage_key))) + Ok(Self((address, storage_key))) } } diff --git a/crates/storage/db/src/tables/models/integer_list.rs b/crates/storage/db/src/tables/models/integer_list.rs index 94746a12111e8..e419a9435129f 100644 --- a/crates/storage/db/src/tables/models/integer_list.rs +++ b/crates/storage/db/src/tables/models/integer_list.rs @@ -19,6 +19,6 @@ impl Compress for IntegerList { impl Decompress for IntegerList { fn decompress>(value: B) -> Result { - IntegerList::from_bytes(value.as_ref()).map_err(|_| DatabaseError::Decode) + Self::from_bytes(value.as_ref()).map_err(|_| DatabaseError::Decode) } } diff --git a/crates/storage/db/src/tables/models/mod.rs b/crates/storage/db/src/tables/models/mod.rs index f1270cec3b80f..4a15a2d555217 100644 --- a/crates/storage/db/src/tables/models/mod.rs +++ b/crates/storage/db/src/tables/models/mod.rs @@ -51,7 +51,7 @@ macro_rules! 
impl_uints { impl_uints!(u64, u32, u16, u8); impl Encode for Vec { - type Encoded = Vec; + type Encoded = Self; fn encode(self) -> Self::Encoded { self @@ -74,7 +74,7 @@ impl Encode for Address { impl Decode for Address { fn decode>(value: B) -> Result { - Ok(Address::from_slice(value.as_ref())) + Ok(Self::from_slice(value.as_ref())) } } @@ -88,7 +88,7 @@ impl Encode for B256 { impl Decode for B256 { fn decode>(value: B) -> Result { - Ok(B256::new(value.as_ref().try_into().map_err(|_| DatabaseError::Decode)?)) + Ok(Self::new(value.as_ref().try_into().map_err(|_| DatabaseError::Decode)?)) } } @@ -102,7 +102,7 @@ impl Encode for String { impl Decode for String { fn decode>(value: B) -> Result { - String::from_utf8(value.as_ref().to_vec()).map_err(|_| DatabaseError::Decode) + Self::from_utf8(value.as_ref().to_vec()).map_err(|_| DatabaseError::Decode) } } diff --git a/crates/storage/db/src/tables/models/sharded_key.rs b/crates/storage/db/src/tables/models/sharded_key.rs index 9c0664da44f20..3a55baeda9181 100644 --- a/crates/storage/db/src/tables/models/sharded_key.rs +++ b/crates/storage/db/src/tables/models/sharded_key.rs @@ -26,8 +26,8 @@ pub struct ShardedKey { pub highest_block_number: BlockNumber, } -impl AsRef> for ShardedKey { - fn as_ref(&self) -> &ShardedKey { +impl AsRef for ShardedKey { + fn as_ref(&self) -> &Self { self } } @@ -35,7 +35,7 @@ impl AsRef> for ShardedKey { impl ShardedKey { /// Creates a new `ShardedKey`. pub fn new(key: T, highest_block_number: BlockNumber) -> Self { - ShardedKey { key, highest_block_number } + Self { key, highest_block_number } } /// Creates a new key with the highest block number set to maximum. 
@@ -73,7 +73,7 @@ where ); let key = T::decode(&value[..tx_num_index])?; - Ok(ShardedKey::new(key, highest_tx_number)) + Ok(Self::new(key, highest_tx_number)) } } diff --git a/crates/storage/db/src/tables/raw.rs b/crates/storage/db/src/tables/raw.rs index 90d4b96aec1f4..21bcc281cc1f2 100644 --- a/crates/storage/db/src/tables/raw.rs +++ b/crates/storage/db/src/tables/raw.rs @@ -77,7 +77,7 @@ impl RawKey { impl From for RawKey { fn from(key: K) -> Self { - RawKey::new(key) + Self::new(key) } } @@ -142,7 +142,7 @@ impl RawValue { impl From for RawValue { fn from(value: V) -> Self { - RawValue::new(value) + Self::new(value) } } diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index 6ae7aad8ed30c..aeecf64a7b414 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -132,7 +132,7 @@ pub enum ProviderError { impl From for ProviderError { fn from(err: reth_fs_util::FsPathError) -> Self { - ProviderError::FsPathError(err.to_string()) + Self::FsPathError(err.to_string()) } } diff --git a/crates/storage/libmdbx-rs/src/database.rs b/crates/storage/libmdbx-rs/src/database.rs index 55eb7e0bbf573..4e3a6c06280cf 100644 --- a/crates/storage/libmdbx-rs/src/database.rs +++ b/crates/storage/libmdbx-rs/src/database.rs @@ -42,7 +42,7 @@ impl Database { /// Opens the freelist database with DBI `0`. pub fn freelist_db() -> Self { - Database { dbi: 0, _env: None } + Self { dbi: 0, _env: None } } /// Returns the underlying MDBX database handle. diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index ba7385b949a89..c6d206435a8b7 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -281,14 +281,14 @@ impl EnvironmentKind { /// Returns true if the environment was opened as WRITEMAP. 
#[inline] pub const fn is_write_map(&self) -> bool { - matches!(self, EnvironmentKind::WriteMap) + matches!(self, Self::WriteMap) } /// Additional flags required when opening the environment. pub(crate) fn extra_flags(&self) -> ffi::MDBX_env_flags_t { match self { - EnvironmentKind::Default => ffi::MDBX_ENV_DEFAULTS, - EnvironmentKind::WriteMap => ffi::MDBX_WRITEMAP, + Self::Default => ffi::MDBX_ENV_DEFAULTS, + Self::WriteMap => ffi::MDBX_WRITEMAP, } } } @@ -307,8 +307,8 @@ pub struct Stat(ffi::MDBX_stat); impl Stat { /// Create a new Stat with zero'd inner struct `ffi::MDB_stat`. - pub(crate) fn new() -> Stat { - unsafe { Stat(mem::zeroed()) } + pub(crate) fn new() -> Self { + unsafe { Self(mem::zeroed()) } } /// Returns a mut pointer to `ffi::MDB_stat`. @@ -859,8 +859,8 @@ pub(crate) mod read_transactions { impl MaxReadTransactionDuration { pub fn as_duration(&self) -> Option { match self { - MaxReadTransactionDuration::Unbounded => None, - MaxReadTransactionDuration::Set(duration) => Some(*duration), + Self::Unbounded => None, + Self::Set(duration) => Some(*duration), } } } diff --git a/crates/storage/libmdbx-rs/src/error.rs b/crates/storage/libmdbx-rs/src/error.rs index 84a6ef3617473..4776cc4ae838c 100644 --- a/crates/storage/libmdbx-rs/src/error.rs +++ b/crates/storage/libmdbx-rs/src/error.rs @@ -127,77 +127,77 @@ pub enum Error { impl Error { /// Converts a raw error code to an [Error]. 
- pub fn from_err_code(err_code: c_int) -> Error { + pub fn from_err_code(err_code: c_int) -> Self { match err_code { - ffi::MDBX_KEYEXIST => Error::KeyExist, - ffi::MDBX_NOTFOUND => Error::NotFound, - ffi::MDBX_ENODATA => Error::NoData, - ffi::MDBX_PAGE_NOTFOUND => Error::PageNotFound, - ffi::MDBX_CORRUPTED => Error::Corrupted, - ffi::MDBX_PANIC => Error::Panic, - ffi::MDBX_VERSION_MISMATCH => Error::VersionMismatch, - ffi::MDBX_INVALID => Error::Invalid, - ffi::MDBX_MAP_FULL => Error::MapFull, - ffi::MDBX_DBS_FULL => Error::DbsFull, - ffi::MDBX_READERS_FULL => Error::ReadersFull, - ffi::MDBX_TXN_FULL => Error::TxnFull, - ffi::MDBX_CURSOR_FULL => Error::CursorFull, - ffi::MDBX_PAGE_FULL => Error::PageFull, - ffi::MDBX_UNABLE_EXTEND_MAPSIZE => Error::UnableExtendMapSize, - ffi::MDBX_INCOMPATIBLE => Error::Incompatible, - ffi::MDBX_BAD_RSLOT => Error::BadRslot, - ffi::MDBX_BAD_TXN => Error::BadTxn, - ffi::MDBX_BAD_VALSIZE => Error::BadValSize, - ffi::MDBX_BAD_DBI => Error::BadDbi, - ffi::MDBX_PROBLEM => Error::Problem, - ffi::MDBX_BUSY => Error::Busy, - ffi::MDBX_EMULTIVAL => Error::Multival, - ffi::MDBX_WANNA_RECOVERY => Error::WannaRecovery, - ffi::MDBX_EKEYMISMATCH => Error::KeyMismatch, - ffi::MDBX_EINVAL => Error::DecodeError, - ffi::MDBX_EACCESS => Error::Access, - ffi::MDBX_TOO_LARGE => Error::TooLarge, - ffi::MDBX_EBADSIGN => Error::BadSignature, - other => Error::Other(other), + ffi::MDBX_KEYEXIST => Self::KeyExist, + ffi::MDBX_NOTFOUND => Self::NotFound, + ffi::MDBX_ENODATA => Self::NoData, + ffi::MDBX_PAGE_NOTFOUND => Self::PageNotFound, + ffi::MDBX_CORRUPTED => Self::Corrupted, + ffi::MDBX_PANIC => Self::Panic, + ffi::MDBX_VERSION_MISMATCH => Self::VersionMismatch, + ffi::MDBX_INVALID => Self::Invalid, + ffi::MDBX_MAP_FULL => Self::MapFull, + ffi::MDBX_DBS_FULL => Self::DbsFull, + ffi::MDBX_READERS_FULL => Self::ReadersFull, + ffi::MDBX_TXN_FULL => Self::TxnFull, + ffi::MDBX_CURSOR_FULL => Self::CursorFull, + ffi::MDBX_PAGE_FULL => Self::PageFull, + 
ffi::MDBX_UNABLE_EXTEND_MAPSIZE => Self::UnableExtendMapSize, + ffi::MDBX_INCOMPATIBLE => Self::Incompatible, + ffi::MDBX_BAD_RSLOT => Self::BadRslot, + ffi::MDBX_BAD_TXN => Self::BadTxn, + ffi::MDBX_BAD_VALSIZE => Self::BadValSize, + ffi::MDBX_BAD_DBI => Self::BadDbi, + ffi::MDBX_PROBLEM => Self::Problem, + ffi::MDBX_BUSY => Self::Busy, + ffi::MDBX_EMULTIVAL => Self::Multival, + ffi::MDBX_WANNA_RECOVERY => Self::WannaRecovery, + ffi::MDBX_EKEYMISMATCH => Self::KeyMismatch, + ffi::MDBX_EINVAL => Self::DecodeError, + ffi::MDBX_EACCESS => Self::Access, + ffi::MDBX_TOO_LARGE => Self::TooLarge, + ffi::MDBX_EBADSIGN => Self::BadSignature, + other => Self::Other(other), } } /// Converts an [Error] to the raw error code. pub fn to_err_code(&self) -> i32 { match self { - Error::KeyExist => ffi::MDBX_KEYEXIST, - Error::NotFound => ffi::MDBX_NOTFOUND, - Error::NoData => ffi::MDBX_ENODATA, - Error::PageNotFound => ffi::MDBX_PAGE_NOTFOUND, - Error::Corrupted => ffi::MDBX_CORRUPTED, - Error::Panic => ffi::MDBX_PANIC, - Error::VersionMismatch => ffi::MDBX_VERSION_MISMATCH, - Error::Invalid => ffi::MDBX_INVALID, - Error::MapFull => ffi::MDBX_MAP_FULL, - Error::DbsFull => ffi::MDBX_DBS_FULL, - Error::ReadersFull => ffi::MDBX_READERS_FULL, - Error::TxnFull => ffi::MDBX_TXN_FULL, - Error::CursorFull => ffi::MDBX_CURSOR_FULL, - Error::PageFull => ffi::MDBX_PAGE_FULL, - Error::UnableExtendMapSize => ffi::MDBX_UNABLE_EXTEND_MAPSIZE, - Error::Incompatible => ffi::MDBX_INCOMPATIBLE, - Error::BadRslot => ffi::MDBX_BAD_RSLOT, - Error::BadTxn => ffi::MDBX_BAD_TXN, - Error::BadValSize => ffi::MDBX_BAD_VALSIZE, - Error::BadDbi => ffi::MDBX_BAD_DBI, - Error::Problem => ffi::MDBX_PROBLEM, - Error::Busy => ffi::MDBX_BUSY, - Error::Multival => ffi::MDBX_EMULTIVAL, - Error::WannaRecovery => ffi::MDBX_WANNA_RECOVERY, - Error::KeyMismatch => ffi::MDBX_EKEYMISMATCH, - Error::DecodeErrorLenDiff | Error::DecodeError => ffi::MDBX_EINVAL, - Error::Access => ffi::MDBX_EACCESS, - Error::TooLarge => 
ffi::MDBX_TOO_LARGE, - Error::BadSignature => ffi::MDBX_EBADSIGN, - Error::WriteTransactionUnsupportedInReadOnlyMode => ffi::MDBX_EACCESS, - Error::NestedTransactionsUnsupportedWithWriteMap => ffi::MDBX_EACCESS, - Error::ReadTransactionTimeout => -96000, // Custom non-MDBX error code - Error::Other(err_code) => *err_code, + Self::KeyExist => ffi::MDBX_KEYEXIST, + Self::NotFound => ffi::MDBX_NOTFOUND, + Self::NoData => ffi::MDBX_ENODATA, + Self::PageNotFound => ffi::MDBX_PAGE_NOTFOUND, + Self::Corrupted => ffi::MDBX_CORRUPTED, + Self::Panic => ffi::MDBX_PANIC, + Self::VersionMismatch => ffi::MDBX_VERSION_MISMATCH, + Self::Invalid => ffi::MDBX_INVALID, + Self::MapFull => ffi::MDBX_MAP_FULL, + Self::DbsFull => ffi::MDBX_DBS_FULL, + Self::ReadersFull => ffi::MDBX_READERS_FULL, + Self::TxnFull => ffi::MDBX_TXN_FULL, + Self::CursorFull => ffi::MDBX_CURSOR_FULL, + Self::PageFull => ffi::MDBX_PAGE_FULL, + Self::UnableExtendMapSize => ffi::MDBX_UNABLE_EXTEND_MAPSIZE, + Self::Incompatible => ffi::MDBX_INCOMPATIBLE, + Self::BadRslot => ffi::MDBX_BAD_RSLOT, + Self::BadTxn => ffi::MDBX_BAD_TXN, + Self::BadValSize => ffi::MDBX_BAD_VALSIZE, + Self::BadDbi => ffi::MDBX_BAD_DBI, + Self::Problem => ffi::MDBX_PROBLEM, + Self::Busy => ffi::MDBX_BUSY, + Self::Multival => ffi::MDBX_EMULTIVAL, + Self::WannaRecovery => ffi::MDBX_WANNA_RECOVERY, + Self::KeyMismatch => ffi::MDBX_EKEYMISMATCH, + Self::DecodeErrorLenDiff | Self::DecodeError => ffi::MDBX_EINVAL, + Self::Access => ffi::MDBX_EACCESS, + Self::TooLarge => ffi::MDBX_TOO_LARGE, + Self::BadSignature => ffi::MDBX_EBADSIGN, + Self::WriteTransactionUnsupportedInReadOnlyMode => ffi::MDBX_EACCESS, + Self::NestedTransactionsUnsupportedWithWriteMap => ffi::MDBX_EACCESS, + Self::ReadTransactionTimeout => -96000, // Custom non-MDBX error code + Self::Other(err_code) => *err_code, } } } diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index 45259ba7b4269..20fcddead7c7d 100644 --- 
a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -502,7 +502,7 @@ impl Transaction { impl Transaction { /// Begins a new nested transaction inside of this transaction. - pub fn begin_nested_txn(&mut self) -> Result> { + pub fn begin_nested_txn(&mut self) -> Result { if self.inner.env.is_write_map() { return Err(Error::NestedTransactionsUnsupportedWithWriteMap) } @@ -514,7 +514,7 @@ impl Transaction { sender: tx, }); - rx.recv().unwrap().map(|ptr| Transaction::new_from_ptr(self.env().clone(), ptr.0)) + rx.recv().unwrap().map(|ptr| Self::new_from_ptr(self.env().clone(), ptr.0)) })? } } diff --git a/crates/storage/nippy-jar/src/compression/mod.rs b/crates/storage/nippy-jar/src/compression/mod.rs index a8f99fa539249..76e8c6d16b693 100644 --- a/crates/storage/nippy-jar/src/compression/mod.rs +++ b/crates/storage/nippy-jar/src/compression/mod.rs @@ -50,14 +50,14 @@ pub enum Compressors { impl Compression for Compressors { fn decompress_to(&self, value: &[u8], dest: &mut Vec) -> Result<(), NippyJarError> { match self { - Compressors::Zstd(zstd) => zstd.decompress_to(value, dest), - Compressors::Lz4(lz4) => lz4.decompress_to(value, dest), + Self::Zstd(zstd) => zstd.decompress_to(value, dest), + Self::Lz4(lz4) => lz4.decompress_to(value, dest), } } fn decompress(&self, value: &[u8]) -> Result, NippyJarError> { match self { - Compressors::Zstd(zstd) => zstd.decompress(value), - Compressors::Lz4(lz4) => lz4.decompress(value), + Self::Zstd(zstd) => zstd.decompress(value), + Self::Lz4(lz4) => lz4.decompress(value), } } @@ -65,8 +65,8 @@ impl Compression for Compressors { let initial_capacity = dest.capacity(); loop { let result = match self { - Compressors::Zstd(zstd) => zstd.compress_to(src, dest), - Compressors::Lz4(lz4) => lz4.compress_to(src, dest), + Self::Zstd(zstd) => zstd.compress_to(src, dest), + Self::Lz4(lz4) => lz4.compress_to(src, dest), }; match result { @@ -83,15 +83,15 @@ impl Compression for Compressors { fn 
compress(&self, src: &[u8]) -> Result, NippyJarError> { match self { - Compressors::Zstd(zstd) => zstd.compress(src), - Compressors::Lz4(lz4) => lz4.compress(src), + Self::Zstd(zstd) => zstd.compress(src), + Self::Lz4(lz4) => lz4.compress(src), } } fn is_ready(&self) -> bool { match self { - Compressors::Zstd(zstd) => zstd.is_ready(), - Compressors::Lz4(lz4) => lz4.is_ready(), + Self::Zstd(zstd) => zstd.is_ready(), + Self::Lz4(lz4) => lz4.is_ready(), } } @@ -100,8 +100,8 @@ impl Compression for Compressors { columns: Vec>>, ) -> Result<(), NippyJarError> { match self { - Compressors::Zstd(zstd) => zstd.prepare_compression(columns), - Compressors::Lz4(lz4) => lz4.prepare_compression(columns), + Self::Zstd(zstd) => zstd.prepare_compression(columns), + Self::Lz4(lz4) => lz4.prepare_compression(columns), } } } diff --git a/crates/storage/nippy-jar/src/filter/cuckoo.rs b/crates/storage/nippy-jar/src/filter/cuckoo.rs index 7c05a8f5816e5..0e338a0327c2f 100644 --- a/crates/storage/nippy-jar/src/filter/cuckoo.rs +++ b/crates/storage/nippy-jar/src/filter/cuckoo.rs @@ -21,7 +21,7 @@ impl Cuckoo { // close to capacity. Therefore, we increase it. 
let max_capacity = max_capacity + 100 + max_capacity / 3; - Cuckoo { remaining: max_capacity, filter: CuckooFilter::with_capacity(max_capacity) } + Self { remaining: max_capacity, filter: CuckooFilter::with_capacity(max_capacity) } } } @@ -73,7 +73,7 @@ impl<'de> Deserialize<'de> for Cuckoo { let (remaining, exported): (usize, ExportedCuckooFilter) = Deserialize::deserialize(deserializer)?; - Ok(Cuckoo { remaining, filter: exported.into() }) + Ok(Self { remaining, filter: exported.into() }) } } diff --git a/crates/storage/nippy-jar/src/filter/mod.rs b/crates/storage/nippy-jar/src/filter/mod.rs index dd3e7804905ac..3ddae0148cd71 100644 --- a/crates/storage/nippy-jar/src/filter/mod.rs +++ b/crates/storage/nippy-jar/src/filter/mod.rs @@ -27,22 +27,22 @@ pub enum InclusionFilters { impl InclusionFilter for InclusionFilters { fn add(&mut self, element: &[u8]) -> Result<(), NippyJarError> { match self { - InclusionFilters::Cuckoo(c) => c.add(element), - InclusionFilters::Unused => todo!(), + Self::Cuckoo(c) => c.add(element), + Self::Unused => todo!(), } } fn contains(&self, element: &[u8]) -> Result { match self { - InclusionFilters::Cuckoo(c) => c.contains(element), - InclusionFilters::Unused => todo!(), + Self::Cuckoo(c) => c.contains(element), + Self::Unused => todo!(), } } fn size(&self) -> usize { match self { - InclusionFilters::Cuckoo(c) => c.size(), - InclusionFilters::Unused => 0, + Self::Cuckoo(c) => c.size(), + Self::Unused => 0, } } } diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index 2eafe68c409e8..6a629702b7394 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -144,12 +144,12 @@ impl std::fmt::Debug for NippyJar { impl NippyJar<()> { /// Creates a new [`NippyJar`] without an user-defined header data. 
pub fn new_without_header(columns: usize, path: &Path) -> Self { - NippyJar::<()>::new(columns, path, ()) + Self::new(columns, path, ()) } /// Loads the file configuration and returns [`Self`] on a jar without user-defined header data. pub fn load_without_header(path: &Path) -> Result { - NippyJar::<()>::load(path) + Self::load(path) } /// Whether this [`NippyJar`] uses a [`InclusionFilters`] and [`Functions`]. @@ -161,7 +161,7 @@ impl NippyJar<()> { impl NippyJar { /// Creates a new [`NippyJar`] with a user-defined header data. pub fn new(columns: usize, path: &Path, user_header: H) -> Self { - NippyJar { + Self { version: NIPPY_JAR_VERSION, user_header, columns, diff --git a/crates/storage/nippy-jar/src/phf/fmph.rs b/crates/storage/nippy-jar/src/phf/fmph.rs index 8753334b5133c..7a67ecd3be41a 100644 --- a/crates/storage/nippy-jar/src/phf/fmph.rs +++ b/crates/storage/nippy-jar/src/phf/fmph.rs @@ -88,12 +88,12 @@ impl<'de> Deserialize<'de> for Fmph { D: Deserializer<'de>, { if let Some(buffer) = >>::deserialize(deserializer)? { - return Ok(Fmph { + return Ok(Self { function: Some( Function::read(&mut std::io::Cursor::new(buffer)).map_err(D::Error::custom)?, ), }) } - Ok(Fmph { function: None }) + Ok(Self { function: None }) } } diff --git a/crates/storage/nippy-jar/src/phf/go_fmph.rs b/crates/storage/nippy-jar/src/phf/go_fmph.rs index b2ed28f685fcf..f0a6507b4f435 100644 --- a/crates/storage/nippy-jar/src/phf/go_fmph.rs +++ b/crates/storage/nippy-jar/src/phf/go_fmph.rs @@ -88,13 +88,13 @@ impl<'de> Deserialize<'de> for GoFmph { D: Deserializer<'de>, { if let Some(buffer) = >>::deserialize(deserializer)? 
{ - return Ok(GoFmph { + return Ok(Self { function: Some( GOFunction::read(&mut std::io::Cursor::new(buffer)) .map_err(D::Error::custom)?, ), }) } - Ok(GoFmph { function: None }) + Ok(Self { function: None }) } } diff --git a/crates/storage/nippy-jar/src/phf/mod.rs b/crates/storage/nippy-jar/src/phf/mod.rs index 5ac5d516997f7..ade48b60a33df 100644 --- a/crates/storage/nippy-jar/src/phf/mod.rs +++ b/crates/storage/nippy-jar/src/phf/mod.rs @@ -32,15 +32,15 @@ pub enum Functions { impl PerfectHashingFunction for Functions { fn set_keys(&mut self, keys: &[T]) -> Result<(), NippyJarError> { match self { - Functions::Fmph(f) => f.set_keys(keys), - Functions::GoFmph(f) => f.set_keys(keys), + Self::Fmph(f) => f.set_keys(keys), + Self::GoFmph(f) => f.set_keys(keys), } } fn get_index(&self, key: &[u8]) -> Result, NippyJarError> { match self { - Functions::Fmph(f) => f.get_index(key), - Functions::GoFmph(f) => f.get_index(key), + Self::Fmph(f) => f.get_index(key), + Self::GoFmph(f) => f.get_index(key), } } } diff --git a/crates/storage/provider/src/providers/database/metrics.rs b/crates/storage/provider/src/providers/database/metrics.rs index 0eb21aed8a141..57f67847d1885 100644 --- a/crates/storage/provider/src/providers/database/metrics.rs +++ b/crates/storage/provider/src/providers/database/metrics.rs @@ -67,28 +67,28 @@ pub(crate) enum Action { impl Action { fn as_str(&self) -> &'static str { match self { - Action::InsertStorageHashing => "insert storage hashing", - Action::InsertAccountHashing => "insert account hashing", - Action::InsertMerkleTree => "insert merkle tree", - Action::InsertBlock => "insert block", - Action::InsertState => "insert state", - Action::InsertHashes => "insert hashes", - Action::InsertHistoryIndices => "insert history indices", - Action::UpdatePipelineStages => "update pipeline stages", - Action::InsertCanonicalHeaders => "insert canonical headers", - Action::InsertHeaders => "insert headers", - Action::InsertHeaderNumbers => "insert header 
numbers", - Action::InsertHeaderTerminalDifficulties => "insert header TD", - Action::InsertBlockOmmers => "insert block ommers", - Action::InsertTransactionSenders => "insert tx senders", - Action::InsertTransactions => "insert transactions", - Action::InsertTransactionHashNumbers => "insert transaction hash numbers", - Action::InsertBlockWithdrawals => "insert block withdrawals", - Action::InsertBlockRequests => "insert block withdrawals", - Action::InsertBlockBodyIndices => "insert block body indices", - Action::InsertTransactionBlocks => "insert transaction blocks", - Action::GetNextTxNum => "get next tx num", - Action::GetParentTD => "get parent TD", + Self::InsertStorageHashing => "insert storage hashing", + Self::InsertAccountHashing => "insert account hashing", + Self::InsertMerkleTree => "insert merkle tree", + Self::InsertBlock => "insert block", + Self::InsertState => "insert state", + Self::InsertHashes => "insert hashes", + Self::InsertHistoryIndices => "insert history indices", + Self::UpdatePipelineStages => "update pipeline stages", + Self::InsertCanonicalHeaders => "insert canonical headers", + Self::InsertHeaders => "insert headers", + Self::InsertHeaderNumbers => "insert header numbers", + Self::InsertHeaderTerminalDifficulties => "insert header TD", + Self::InsertBlockOmmers => "insert block ommers", + Self::InsertTransactionSenders => "insert tx senders", + Self::InsertTransactions => "insert transactions", + Self::InsertTransactionHashNumbers => "insert transaction hash numbers", + Self::InsertBlockWithdrawals => "insert block withdrawals", + Self::InsertBlockRequests => "insert block withdrawals", + Self::InsertBlockBodyIndices => "insert block body indices", + Self::InsertTransactionBlocks => "insert transaction blocks", + Self::GetNextTxNum => "get next tx num", + Self::GetParentTD => "get parent TD", } } } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 
f7ab2e574928f..8b83cc26a44a9 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -54,7 +54,7 @@ impl ProviderFactory { db: DB, chain_spec: Arc, static_files_path: PathBuf, - ) -> ProviderResult> { + ) -> ProviderResult { Ok(Self { db: Arc::new(db), chain_spec, @@ -89,7 +89,7 @@ impl ProviderFactory { args: DatabaseArguments, static_files_path: PathBuf, ) -> RethResult { - Ok(ProviderFactory:: { + Ok(Self { db: Arc::new(init_db(path, args).map_err(RethError::msg)?), chain_spec, static_file_provider: StaticFileProvider::new(static_files_path)?, @@ -577,7 +577,7 @@ impl PruneCheckpointReader for ProviderFactory { impl Clone for ProviderFactory { fn clone(&self) -> Self { - ProviderFactory { + Self { db: Arc::clone(&self.db), chain_spec: self.chain_spec.clone(), static_file_provider: self.static_file_provider.clone(), diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 7f4b14fee37b4..e296e1d0fb497 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -63,7 +63,7 @@ impl<'a> StaticFileJarProvider<'a> { } /// Adds a new auxiliary static file to help query data from the main one - pub fn with_auxiliary(mut self, auxiliary_jar: StaticFileJarProvider<'a>) -> Self { + pub fn with_auxiliary(mut self, auxiliary_jar: Self) -> Self { self.auxiliary_jar = Some(Box::new(auxiliary_jar)); self } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 1dfd15cd77783..fd877a4470ebc 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -622,7 +622,7 @@ impl StaticFileProvider { fetch_from_database: FD, ) -> ProviderResult> where - FS: Fn(&StaticFileProvider) -> 
ProviderResult>, + FS: Fn(&Self) -> ProviderResult>, FD: Fn() -> ProviderResult>, { // If there is, check the maximum block or transaction number of the segment. @@ -661,7 +661,7 @@ impl StaticFileProvider { mut predicate: P, ) -> ProviderResult> where - FS: Fn(&StaticFileProvider, Range, &mut P) -> ProviderResult>, + FS: Fn(&Self, Range, &mut P) -> ProviderResult>, FD: FnMut(Range, P) -> ProviderResult>, P: FnMut(&T) -> bool, { diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 3498677c0f340..0a8ce79f0cc59 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -42,8 +42,8 @@ pub struct MockEthProvider { } impl Default for MockEthProvider { - fn default() -> MockEthProvider { - MockEthProvider { + fn default() -> Self { + Self { blocks: Default::default(), headers: Default::default(), accounts: Default::default(), diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index c85b15e39bd7a..8b8ec58350c2b 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -32,12 +32,12 @@ pub enum BlockSource { impl BlockSource { /// Returns `true` if the block source is `Pending` or `Any`. pub fn is_pending(&self) -> bool { - matches!(self, BlockSource::Pending | BlockSource::Any) + matches!(self, Self::Pending | Self::Any) } /// Returns `true` if the block source is `Database` or `Any`. 
pub fn is_database(&self) -> bool { - matches!(self, BlockSource::Database | BlockSource::Any) + matches!(self, Self::Database | Self::Any) } } diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs index 3e526a344f8c5..362a692d1ef07 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -560,7 +560,7 @@ impl TaskSpawner for TaskExecutor { fn spawn_critical(&self, name: &'static str, fut: BoxFuture<'static, ()>) -> JoinHandle<()> { self.metrics.inc_critical_tasks(); - TaskExecutor::spawn_critical(self, name, fut) + Self::spawn_critical(self, name, fut) } fn spawn_blocking(&self, fut: BoxFuture<'static, ()>) -> JoinHandle<()> { @@ -572,7 +572,7 @@ impl TaskSpawner for TaskExecutor { name: &'static str, fut: BoxFuture<'static, ()>, ) -> JoinHandle<()> { - TaskExecutor::spawn_critical_blocking(self, name, fut) + Self::spawn_critical_blocking(self, name, fut) } } @@ -610,7 +610,7 @@ impl TaskSpawnerExt for TaskExecutor { where F: Future + Send + 'static, { - TaskExecutor::spawn_critical_with_graceful_shutdown_signal(self, name, f) + Self::spawn_critical_with_graceful_shutdown_signal(self, name, f) } fn spawn_with_graceful_shutdown_signal( @@ -620,7 +620,7 @@ impl TaskSpawnerExt for TaskExecutor { where F: Future + Send + 'static, { - TaskExecutor::spawn_with_graceful_shutdown_signal(self, f) + Self::spawn_with_graceful_shutdown_signal(self, f) } } diff --git a/crates/tasks/src/metrics.rs b/crates/tasks/src/metrics.rs index 127783cf0bd22..a8397b7fe415d 100644 --- a/crates/tasks/src/metrics.rs +++ b/crates/tasks/src/metrics.rs @@ -42,7 +42,7 @@ impl fmt::Debug for IncCounterOnDrop { impl IncCounterOnDrop { /// Creates a new instance of `IncCounterOnDrop` with the given counter. 
pub fn new(counter: Counter) -> Self { - IncCounterOnDrop(counter) + Self(counter) } } diff --git a/crates/tokio-util/src/event_stream.rs b/crates/tokio-util/src/event_stream.rs index 67bc72a97d07d..3faaece6dc216 100644 --- a/crates/tokio-util/src/event_stream.rs +++ b/crates/tokio-util/src/event_stream.rs @@ -20,7 +20,7 @@ where /// Creates a new `EventStream`. pub fn new(receiver: tokio::sync::broadcast::Receiver) -> Self { let inner = tokio_stream::wrappers::BroadcastStream::new(receiver); - EventStream { inner } + Self { inner } } } diff --git a/crates/tracing/src/formatter.rs b/crates/tracing/src/formatter.rs index 872a0b5c821c4..1322377f1c9fa 100644 --- a/crates/tracing/src/formatter.rs +++ b/crates/tracing/src/formatter.rs @@ -57,7 +57,7 @@ impl LogFormat { filter.max_level_hint().map_or(true, |max_level| max_level > tracing::Level::INFO)); match self { - LogFormat::Json => { + Self::Json => { let layer = tracing_subscriber::fmt::layer().json().with_ansi(ansi).with_target(target); @@ -67,8 +67,8 @@ impl LogFormat { layer.with_filter(filter).boxed() } } - LogFormat::LogFmt => tracing_logfmt::layer().with_filter(filter).boxed(), - LogFormat::Terminal => { + Self::LogFmt => tracing_logfmt::layer().with_filter(filter).boxed(), + Self::Terminal => { let layer = tracing_subscriber::fmt::layer().with_ansi(ansi).with_target(target); if let Some(writer) = file_writer { @@ -84,9 +84,9 @@ impl LogFormat { impl Display for LogFormat { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - LogFormat::Json => write!(f, "json"), - LogFormat::LogFmt => write!(f, "logfmt"), - LogFormat::Terminal => write!(f, "terminal"), + Self::Json => write!(f, "json"), + Self::LogFmt => write!(f, "logfmt"), + Self::Terminal => write!(f, "terminal"), } } } diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index 5f44c87f5a3ad..862c4923dbc26 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ 
b/crates/transaction-pool/src/blobstore/disk.rs @@ -405,7 +405,7 @@ pub enum DiskFileBlobStoreError { impl From for BlobStoreError { fn from(value: DiskFileBlobStoreError) -> Self { - BlobStoreError::Other(Box::new(value)) + Self::Other(Box::new(value)) } } diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index 8334c0f372a35..e20624801be18 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -216,7 +216,7 @@ impl InvalidPoolTransactionError { #[inline] fn is_bad_transaction(&self) -> bool { match self { - InvalidPoolTransactionError::Consensus(err) => { + Self::Consensus(err) => { // transaction considered invalid by the consensus rules // We do not consider the following errors to be erroneous transactions, since they // depend on dynamic environmental conditions and should not be assumed to have been @@ -250,17 +250,17 @@ impl InvalidPoolTransactionError { InvalidTransactionError::SignerAccountHasBytecode => true, } } - InvalidPoolTransactionError::ExceedsGasLimit(_, _) => true, - InvalidPoolTransactionError::ExceedsMaxInitCodeSize(_, _) => true, - InvalidPoolTransactionError::OversizedData(_, _) => true, - InvalidPoolTransactionError::Underpriced => { + Self::ExceedsGasLimit(_, _) => true, + Self::ExceedsMaxInitCodeSize(_, _) => true, + Self::OversizedData(_, _) => true, + Self::Underpriced => { // local setting false } - InvalidPoolTransactionError::IntrinsicGasTooLow => true, - InvalidPoolTransactionError::Overdraft => false, - InvalidPoolTransactionError::Other(err) => err.is_bad_transaction(), - InvalidPoolTransactionError::Eip4844(eip4844_err) => { + Self::IntrinsicGasTooLow => true, + Self::Overdraft => false, + Self::Other(err) => err.is_bad_transaction(), + Self::Eip4844(eip4844_err) => { match eip4844_err { Eip4844PoolTransactionError::MissingEip4844BlobSidecar => { // this is only reachable when blob transactions are reinjected and we're @@ -291,12 +291,7 @@ impl 
InvalidPoolTransactionError { /// Returns `true` if an import failed due to nonce gap. pub const fn is_nonce_gap(&self) -> bool { - matches!( - self, - InvalidPoolTransactionError::Consensus(InvalidTransactionError::NonceNotConsistent) - ) || matches!( - self, - InvalidPoolTransactionError::Eip4844(Eip4844PoolTransactionError::Eip4844NonceGap) - ) + matches!(self, Self::Consensus(InvalidTransactionError::NonceNotConsistent)) || + matches!(self, Self::Eip4844(Eip4844PoolTransactionError::Eip4844NonceGap)) } } diff --git a/crates/transaction-pool/src/identifier.rs b/crates/transaction-pool/src/identifier.rs index ecc46cae8adfd..f6c50f8d98b32 100644 --- a/crates/transaction-pool/src/identifier.rs +++ b/crates/transaction-pool/src/identifier.rs @@ -65,7 +65,7 @@ impl SenderId { impl From for SenderId { fn from(value: u64) -> Self { - SenderId(value) + Self(value) } } @@ -93,11 +93,7 @@ impl TransactionId { /// /// This returns `transaction_nonce - 1` if `transaction_nonce` is higher than the /// `on_chain_nonce` - pub fn ancestor( - transaction_nonce: u64, - on_chain_nonce: u64, - sender: SenderId, - ) -> Option { + pub fn ancestor(transaction_nonce: u64, on_chain_nonce: u64, sender: SenderId) -> Option { if transaction_nonce == on_chain_nonce { return None } @@ -106,13 +102,13 @@ impl TransactionId { } /// Returns the `TransactionId` that would come before this transaction. - pub(crate) fn unchecked_ancestor(&self) -> Option { - (self.nonce != 0).then(|| TransactionId::new(self.sender, self.nonce - 1)) + pub(crate) fn unchecked_ancestor(&self) -> Option { + (self.nonce != 0).then(|| Self::new(self.sender, self.nonce - 1)) } /// Returns the `TransactionId` that directly follows this transaction: `self.nonce + 1` - pub const fn descendant(&self) -> TransactionId { - TransactionId::new(self.sender, self.nonce + 1) + pub const fn descendant(&self) -> Self { + Self::new(self.sender, self.nonce + 1) } /// Returns the nonce that follows immediately after this one. 
diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 923683d8a9bec..5da2e4e762fe1 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -485,7 +485,7 @@ impl MaintainedPoolState { /// Returns `true` if the pool is assumed to be out of sync with the current state. #[inline] const fn is_drifted(&self) -> bool { - matches!(self, MaintainedPoolState::Drifted) + matches!(self, Self::Drifted) } } diff --git a/crates/transaction-pool/src/ordering.rs b/crates/transaction-pool/src/ordering.rs index 8187520ee089c..e4928710b600d 100644 --- a/crates/transaction-pool/src/ordering.rs +++ b/crates/transaction-pool/src/ordering.rs @@ -15,7 +15,7 @@ pub enum Priority { impl From> for Priority { fn from(value: Option) -> Self { - value.map_or(Priority::None, Priority::Value) + value.map_or(Self::None, Priority::Value) } } diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 5e870de2b81e3..35ab9e5cc24e1 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -148,7 +148,7 @@ impl BestTransactions { impl crate::traits::BestTransactions for BestTransactions { fn mark_invalid(&mut self, tx: &Self::Item) { - BestTransactions::mark_invalid(self, tx) + Self::mark_invalid(self, tx) } fn no_updates(&mut self) { diff --git a/crates/transaction-pool/src/pool/events.rs b/crates/transaction-pool/src/pool/events.rs index 58578d08f5f37..7b17dcec50ccd 100644 --- a/crates/transaction-pool/src/pool/events.rs +++ b/crates/transaction-pool/src/pool/events.rs @@ -80,11 +80,6 @@ impl TransactionEvent { /// Returns `true` if the event is final and no more events are expected for this transaction /// hash. 
pub const fn is_final(&self) -> bool { - matches!( - self, - TransactionEvent::Replaced(_) | - TransactionEvent::Mined(_) | - TransactionEvent::Discarded - ) + matches!(self, Self::Replaced(_) | Self::Mined(_) | Self::Discarded) } } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index fe84e6e90b0b0..a23db39648968 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -1023,7 +1023,7 @@ impl AddedTransaction { /// Returns whether the transaction has been added to the pending pool. pub(crate) const fn as_pending(&self) -> Option<&AddedPendingTransaction> { match self { - AddedTransaction::Pending(tx) => Some(tx), + Self::Pending(tx) => Some(tx), _ => None, } } @@ -1031,16 +1031,16 @@ impl AddedTransaction { /// Returns the replaced transaction if there was one pub(crate) const fn replaced(&self) -> Option<&Arc>> { match self { - AddedTransaction::Pending(tx) => tx.replaced.as_ref(), - AddedTransaction::Parked { replaced, .. } => replaced.as_ref(), + Self::Pending(tx) => tx.replaced.as_ref(), + Self::Parked { replaced, .. } => replaced.as_ref(), } } /// Returns the discarded transactions if there were any pub(crate) fn discarded_transactions(&self) -> Option<&[Arc>]> { match self { - AddedTransaction::Pending(tx) => Some(&tx.discarded), - AddedTransaction::Parked { .. } => None, + Self::Pending(tx) => Some(&tx.discarded), + Self::Parked { .. } => None, } } @@ -1052,18 +1052,18 @@ impl AddedTransaction { /// Returns the hash of the transaction pub(crate) fn hash(&self) -> &TxHash { match self { - AddedTransaction::Pending(tx) => tx.transaction.hash(), - AddedTransaction::Parked { transaction, .. } => transaction.hash(), + Self::Pending(tx) => tx.transaction.hash(), + Self::Parked { transaction, .. 
} => transaction.hash(), } } /// Converts this type into the event type for listeners pub(crate) fn into_new_transaction_event(self) -> NewTransactionEvent { match self { - AddedTransaction::Pending(tx) => { + Self::Pending(tx) => { NewTransactionEvent { subpool: SubPool::Pending, transaction: tx.transaction } } - AddedTransaction::Parked { transaction, subpool, .. } => { + Self::Parked { transaction, subpool, .. } => { NewTransactionEvent { transaction, subpool } } } @@ -1073,8 +1073,8 @@ impl AddedTransaction { #[cfg(test)] pub(crate) const fn subpool(&self) -> SubPool { match self { - AddedTransaction::Pending(_) => SubPool::Pending, - AddedTransaction::Parked { subpool, .. } => *subpool, + Self::Pending(_) => SubPool::Pending, + Self::Parked { subpool, .. } => *subpool, } } @@ -1082,8 +1082,8 @@ impl AddedTransaction { #[cfg(test)] pub(crate) fn id(&self) -> &TransactionId { match self { - AddedTransaction::Pending(added) => added.transaction.id(), - AddedTransaction::Parked { transaction, .. } => transaction.id(), + Self::Pending(added) => added.transaction.id(), + Self::Parked { transaction, .. 
} => transaction.id(), } } } diff --git a/crates/transaction-pool/src/pool/size.rs b/crates/transaction-pool/src/pool/size.rs index 93dfb9bc0bd72..5e5a0aceac499 100644 --- a/crates/transaction-pool/src/pool/size.rs +++ b/crates/transaction-pool/src/pool/size.rs @@ -31,6 +31,6 @@ impl SubAssign for SizeTracker { impl From for usize { fn from(value: SizeTracker) -> Self { - value.0 as usize + value.0 as Self } } diff --git a/crates/transaction-pool/src/pool/state.rs b/crates/transaction-pool/src/pool/state.rs index a71c8bbfc0775..d0a3b10f8cb99 100644 --- a/crates/transaction-pool/src/pool/state.rs +++ b/crates/transaction-pool/src/pool/state.rs @@ -56,19 +56,19 @@ impl TxState { /// - enough blob fee cap #[inline] pub(crate) const fn is_pending(&self) -> bool { - self.bits() >= TxState::PENDING_POOL_BITS.bits() + self.bits() >= Self::PENDING_POOL_BITS.bits() } /// Whether this transaction is a blob transaction. #[inline] pub(crate) const fn is_blob(&self) -> bool { - self.contains(TxState::BLOB_TRANSACTION) + self.contains(Self::BLOB_TRANSACTION) } /// Returns `true` if the transaction has a nonce gap. #[inline] pub(crate) const fn has_nonce_gap(&self) -> bool { - !self.intersects(TxState::NO_NONCE_GAPS) + !self.intersects(Self::NO_NONCE_GAPS) } } @@ -95,30 +95,30 @@ impl SubPool { /// Whether this transaction is to be moved to the pending sub-pool. #[inline] pub const fn is_pending(&self) -> bool { - matches!(self, SubPool::Pending) + matches!(self, Self::Pending) } /// Whether this transaction is in the queued pool. #[inline] pub const fn is_queued(&self) -> bool { - matches!(self, SubPool::Queued) + matches!(self, Self::Queued) } /// Whether this transaction is in the base fee pool. #[inline] pub const fn is_base_fee(&self) -> bool { - matches!(self, SubPool::BaseFee) + matches!(self, Self::BaseFee) } /// Whether this transaction is in the blob pool. 
#[inline] pub const fn is_blob(&self) -> bool { - matches!(self, SubPool::Blob) + matches!(self, Self::Blob) } /// Returns whether this is a promotion depending on the current sub-pool location. #[inline] - pub fn is_promoted(&self, other: SubPool) -> bool { + pub fn is_promoted(&self, other: Self) -> bool { self > &other } } @@ -126,16 +126,16 @@ impl SubPool { impl From for SubPool { fn from(value: TxState) -> Self { if value.is_pending() { - return SubPool::Pending + return Self::Pending } if value.is_blob() { // all _non-pending_ blob transactions are in the blob sub-pool - return SubPool::Blob + return Self::Blob } if value.bits() < TxState::BASE_FEE_POOL_BITS.bits() { - return SubPool::Queued + return Self::Queued } - SubPool::BaseFee + Self::BaseFee } } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 4e35733d4bc63..28d0df38a1cfb 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1704,7 +1704,7 @@ pub(crate) struct PendingFees { impl Default for PendingFees { fn default() -> Self { - PendingFees { base_fee: Default::default(), blob_fee: BLOB_TX_MIN_BLOB_GASPRICE } + Self { base_fee: Default::default(), blob_fee: BLOB_TX_MIN_BLOB_GASPRICE } } } diff --git a/crates/transaction-pool/src/test_utils/gen.rs b/crates/transaction-pool/src/test_utils/gen.rs index e5c99ec03ea32..0b981ea155d6a 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -142,7 +142,7 @@ pub struct TransactionBuilder { impl TransactionBuilder { /// Converts the transaction builder into a legacy transaction format. pub fn into_legacy(self) -> TransactionSigned { - TransactionBuilder::signed( + Self::signed( TxLegacy { chain_id: Some(self.chain_id), nonce: self.nonce, @@ -159,7 +159,7 @@ impl TransactionBuilder { /// Converts the transaction builder into a transaction format using EIP-1559. 
pub fn into_eip1559(self) -> TransactionSigned { - TransactionBuilder::signed( + Self::signed( TxEip1559 { chain_id: self.chain_id, nonce: self.nonce, @@ -177,7 +177,7 @@ impl TransactionBuilder { } /// Converts the transaction builder into a transaction format using EIP-4844. pub fn into_eip4844(self) -> TransactionSigned { - TransactionBuilder::signed( + Self::signed( TxEip4844 { chain_id: self.chain_id, nonce: self.nonce, diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 15755b4e602f4..171c29d913e0e 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -218,7 +218,7 @@ impl MockTransaction { /// Returns a new legacy transaction with random address and hash and empty values pub fn legacy() -> Self { - MockTransaction::Legacy { + Self::Legacy { chain_id: Some(1), hash: B256::random(), sender: Address::random(), @@ -234,7 +234,7 @@ impl MockTransaction { /// Returns a new EIP2930 transaction with random address and hash and empty values pub fn eip2930() -> Self { - MockTransaction::Eip2930 { + Self::Eip2930 { chain_id: 1, hash: B256::random(), sender: Address::random(), @@ -251,7 +251,7 @@ impl MockTransaction { /// Returns a new EIP1559 transaction with random address and hash and empty values pub fn eip1559() -> Self { - MockTransaction::Eip1559 { + Self::Eip1559 { chain_id: 1, hash: B256::random(), sender: Address::random(), @@ -269,7 +269,7 @@ impl MockTransaction { /// Returns a new EIP4844 transaction with random address and hash and empty values pub fn eip4844() -> Self { - MockTransaction::Eip4844 { + Self::Eip4844 { chain_id: 1, hash: B256::random(), sender: Address::random(), @@ -291,8 +291,7 @@ impl MockTransaction { /// Returns a new EIP4844 transaction with a provided sidecar pub fn eip4844_with_sidecar(sidecar: BlobTransactionSidecar) -> Self { let mut transaction = Self::eip4844(); - if let MockTransaction::Eip4844 { sidecar: 
ref mut existing_sidecar, .. } = &mut transaction - { + if let Self::Eip4844 { sidecar: ref mut existing_sidecar, .. } = &mut transaction { *existing_sidecar = sidecar; } transaction @@ -326,7 +325,7 @@ impl MockTransaction { /// Sets the max fee per blob gas for EIP-4844 transactions, pub fn set_blob_fee(&mut self, val: u128) -> &mut Self { - if let MockTransaction::Eip4844 { max_fee_per_blob_gas, .. } = self { + if let Self::Eip4844 { max_fee_per_blob_gas, .. } = self { *max_fee_per_blob_gas = val; } self @@ -334,8 +333,8 @@ impl MockTransaction { /// Sets the priority fee for dynamic fee transactions (EIP-1559 and EIP-4844) pub fn set_priority_fee(&mut self, val: u128) -> &mut Self { - if let MockTransaction::Eip1559 { max_priority_fee_per_gas, .. } | - MockTransaction::Eip4844 { max_priority_fee_per_gas, .. } = self + if let Self::Eip1559 { max_priority_fee_per_gas, .. } | + Self::Eip4844 { max_priority_fee_per_gas, .. } = self { *max_priority_fee_per_gas = val; } @@ -351,18 +350,15 @@ impl MockTransaction { /// Gets the priority fee for dynamic fee transactions (EIP-1559 and EIP-4844) pub const fn get_priority_fee(&self) -> Option { match self { - MockTransaction::Eip1559 { max_priority_fee_per_gas, .. } | - MockTransaction::Eip4844 { max_priority_fee_per_gas, .. } => { - Some(*max_priority_fee_per_gas) - } + Self::Eip1559 { max_priority_fee_per_gas, .. } | + Self::Eip4844 { max_priority_fee_per_gas, .. } => Some(*max_priority_fee_per_gas), _ => None, } } /// Sets the max fee for dynamic fee transactions (EIP-1559 and EIP-4844) pub fn set_max_fee(&mut self, val: u128) -> &mut Self { - if let MockTransaction::Eip1559 { max_fee_per_gas, .. } | - MockTransaction::Eip4844 { max_fee_per_gas, .. } = self + if let Self::Eip1559 { max_fee_per_gas, .. } | Self::Eip4844 { max_fee_per_gas, .. 
} = self { *max_fee_per_gas = val; } @@ -378,8 +374,9 @@ impl MockTransaction { /// Gets the max fee for dynamic fee transactions (EIP-1559 and EIP-4844) pub const fn get_max_fee(&self) -> Option { match self { - MockTransaction::Eip1559 { max_fee_per_gas, .. } | - MockTransaction::Eip4844 { max_fee_per_gas, .. } => Some(*max_fee_per_gas), + Self::Eip1559 { max_fee_per_gas, .. } | Self::Eip4844 { max_fee_per_gas, .. } => { + Some(*max_fee_per_gas) + } _ => None, } } @@ -387,10 +384,10 @@ impl MockTransaction { /// Sets the access list for transactions supporting EIP-1559, EIP-4844, and EIP-2930. pub fn set_accesslist(&mut self, list: AccessList) -> &mut Self { match self { - MockTransaction::Legacy { .. } => {} - MockTransaction::Eip1559 { access_list: accesslist, .. } | - MockTransaction::Eip4844 { access_list: accesslist, .. } | - MockTransaction::Eip2930 { access_list: accesslist, .. } => { + Self::Legacy { .. } => {} + Self::Eip1559 { access_list: accesslist, .. } | + Self::Eip4844 { access_list: accesslist, .. } | + Self::Eip2930 { access_list: accesslist, .. } => { *accesslist = list; } } @@ -400,12 +397,11 @@ impl MockTransaction { /// Sets the gas price for the transaction. pub fn set_gas_price(&mut self, val: u128) -> &mut Self { match self { - MockTransaction::Legacy { gas_price, .. } | - MockTransaction::Eip2930 { gas_price, .. } => { + Self::Legacy { gas_price, .. } | Self::Eip2930 { gas_price, .. } => { *gas_price = val; } - MockTransaction::Eip1559 { max_fee_per_gas, max_priority_fee_per_gas, .. } | - MockTransaction::Eip4844 { max_fee_per_gas, max_priority_fee_per_gas, .. } => { + Self::Eip1559 { max_fee_per_gas, max_priority_fee_per_gas, .. } | + Self::Eip4844 { max_fee_per_gas, max_priority_fee_per_gas, .. } => { *max_fee_per_gas = val; *max_priority_fee_per_gas = val; } @@ -416,20 +412,11 @@ impl MockTransaction { /// Sets the gas price for the transaction. 
pub fn with_gas_price(mut self, val: u128) -> Self { match self { - MockTransaction::Legacy { ref mut gas_price, .. } | - MockTransaction::Eip2930 { ref mut gas_price, .. } => { + Self::Legacy { ref mut gas_price, .. } | Self::Eip2930 { ref mut gas_price, .. } => { *gas_price = val; } - MockTransaction::Eip1559 { - ref mut max_fee_per_gas, - ref mut max_priority_fee_per_gas, - .. - } | - MockTransaction::Eip4844 { - ref mut max_fee_per_gas, - ref mut max_priority_fee_per_gas, - .. - } => { + Self::Eip1559 { ref mut max_fee_per_gas, ref mut max_priority_fee_per_gas, .. } | + Self::Eip4844 { ref mut max_fee_per_gas, ref mut max_priority_fee_per_gas, .. } => { *max_fee_per_gas = val; *max_priority_fee_per_gas = val; } @@ -440,10 +427,10 @@ impl MockTransaction { /// Gets the gas price for the transaction. pub const fn get_gas_price(&self) -> u128 { match self { - MockTransaction::Legacy { gas_price, .. } | - MockTransaction::Eip2930 { gas_price, .. } => *gas_price, - MockTransaction::Eip1559 { max_fee_per_gas, .. } | - MockTransaction::Eip4844 { max_fee_per_gas, .. } => *max_fee_per_gas, + Self::Legacy { gas_price, .. } | Self::Eip2930 { gas_price, .. } => *gas_price, + Self::Eip1559 { max_fee_per_gas, .. } | Self::Eip4844 { max_fee_per_gas, .. } => { + *max_fee_per_gas + } } } @@ -515,7 +502,7 @@ impl MockTransaction { /// If it's an EIP-4844 transaction. pub fn inc_blob_fee_by(&self, value: u128) -> Self { let mut this = self.clone(); - if let MockTransaction::Eip4844 { max_fee_per_blob_gas, .. } = &mut this { + if let Self::Eip4844 { max_fee_per_blob_gas, .. } = &mut this { *max_fee_per_blob_gas = max_fee_per_blob_gas.checked_add(value).unwrap(); } this @@ -533,7 +520,7 @@ impl MockTransaction { /// If it's an EIP-4844 transaction. pub fn decr_blob_fee_by(&self, value: u128) -> Self { let mut this = self.clone(); - if let MockTransaction::Eip4844 { max_fee_per_blob_gas, .. } = &mut this { + if let Self::Eip4844 { max_fee_per_blob_gas, .. 
} = &mut this { *max_fee_per_blob_gas = max_fee_per_blob_gas.checked_sub(value).unwrap(); } this @@ -551,61 +538,61 @@ impl MockTransaction { /// Checks if the transaction is of the legacy type. pub const fn is_legacy(&self) -> bool { - matches!(self, MockTransaction::Legacy { .. }) + matches!(self, Self::Legacy { .. }) } /// Checks if the transaction is of the EIP-1559 type. pub const fn is_eip1559(&self) -> bool { - matches!(self, MockTransaction::Eip1559 { .. }) + matches!(self, Self::Eip1559 { .. }) } /// Checks if the transaction is of the EIP-4844 type. pub const fn is_eip4844(&self) -> bool { - matches!(self, MockTransaction::Eip4844 { .. }) + matches!(self, Self::Eip4844 { .. }) } /// Checks if the transaction is of the EIP-2930 type. pub const fn is_eip2930(&self) -> bool { - matches!(self, MockTransaction::Eip2930 { .. }) + matches!(self, Self::Eip2930 { .. }) } } impl PoolTransaction for MockTransaction { fn hash(&self) -> &TxHash { match self { - MockTransaction::Legacy { hash, .. } | - MockTransaction::Eip1559 { hash, .. } | - MockTransaction::Eip4844 { hash, .. } | - MockTransaction::Eip2930 { hash, .. } => hash, + Self::Legacy { hash, .. } | + Self::Eip1559 { hash, .. } | + Self::Eip4844 { hash, .. } | + Self::Eip2930 { hash, .. } => hash, } } fn sender(&self) -> Address { match self { - MockTransaction::Legacy { sender, .. } | - MockTransaction::Eip1559 { sender, .. } | - MockTransaction::Eip4844 { sender, .. } | - MockTransaction::Eip2930 { sender, .. } => *sender, + Self::Legacy { sender, .. } | + Self::Eip1559 { sender, .. } | + Self::Eip4844 { sender, .. } | + Self::Eip2930 { sender, .. } => *sender, } } fn nonce(&self) -> u64 { match self { - MockTransaction::Legacy { nonce, .. } | - MockTransaction::Eip1559 { nonce, .. } | - MockTransaction::Eip4844 { nonce, .. } | - MockTransaction::Eip2930 { nonce, .. } => *nonce, + Self::Legacy { nonce, .. } | + Self::Eip1559 { nonce, .. } | + Self::Eip4844 { nonce, .. } | + Self::Eip2930 { nonce, .. 
} => *nonce, } } fn cost(&self) -> U256 { match self { - MockTransaction::Legacy { gas_price, value, gas_limit, .. } | - MockTransaction::Eip2930 { gas_limit, gas_price, value, .. } => { + Self::Legacy { gas_price, value, gas_limit, .. } | + Self::Eip2930 { gas_limit, gas_price, value, .. } => { U256::from(*gas_limit) * U256::from(*gas_price) + *value } - MockTransaction::Eip1559 { max_fee_per_gas, value, gas_limit, .. } | - MockTransaction::Eip4844 { max_fee_per_gas, value, gas_limit, .. } => { + Self::Eip1559 { max_fee_per_gas, value, gas_limit, .. } | + Self::Eip4844 { max_fee_per_gas, value, gas_limit, .. } => { U256::from(*gas_limit) * U256::from(*max_fee_per_gas) + *value } } @@ -617,35 +604,33 @@ impl PoolTransaction for MockTransaction { fn max_fee_per_gas(&self) -> u128 { match self { - MockTransaction::Legacy { gas_price, .. } | - MockTransaction::Eip2930 { gas_price, .. } => *gas_price, - MockTransaction::Eip1559 { max_fee_per_gas, .. } | - MockTransaction::Eip4844 { max_fee_per_gas, .. } => *max_fee_per_gas, + Self::Legacy { gas_price, .. } | Self::Eip2930 { gas_price, .. } => *gas_price, + Self::Eip1559 { max_fee_per_gas, .. } | Self::Eip4844 { max_fee_per_gas, .. } => { + *max_fee_per_gas + } } } fn access_list(&self) -> Option<&AccessList> { match self { - MockTransaction::Legacy { .. } => None, - MockTransaction::Eip1559 { access_list: accesslist, .. } | - MockTransaction::Eip4844 { access_list: accesslist, .. } | - MockTransaction::Eip2930 { access_list: accesslist, .. } => Some(accesslist), + Self::Legacy { .. } => None, + Self::Eip1559 { access_list: accesslist, .. } | + Self::Eip4844 { access_list: accesslist, .. } | + Self::Eip2930 { access_list: accesslist, .. } => Some(accesslist), } } fn max_priority_fee_per_gas(&self) -> Option { match self { - MockTransaction::Legacy { .. } | MockTransaction::Eip2930 { .. } => None, - MockTransaction::Eip1559 { max_priority_fee_per_gas, .. } | - MockTransaction::Eip4844 { max_priority_fee_per_gas, .. 
} => { - Some(*max_priority_fee_per_gas) - } + Self::Legacy { .. } | Self::Eip2930 { .. } => None, + Self::Eip1559 { max_priority_fee_per_gas, .. } | + Self::Eip4844 { max_priority_fee_per_gas, .. } => Some(*max_priority_fee_per_gas), } } fn max_fee_per_blob_gas(&self) -> Option { match self { - MockTransaction::Eip4844 { max_fee_per_blob_gas, .. } => Some(*max_fee_per_blob_gas), + Self::Eip4844 { max_fee_per_blob_gas, .. } => Some(*max_fee_per_blob_gas), _ => None, } } @@ -679,50 +664,47 @@ impl PoolTransaction for MockTransaction { /// Returns the priority fee or gas price based on the transaction type. fn priority_fee_or_price(&self) -> u128 { match self { - MockTransaction::Legacy { gas_price, .. } | - MockTransaction::Eip2930 { gas_price, .. } => *gas_price, - MockTransaction::Eip1559 { max_priority_fee_per_gas, .. } | - MockTransaction::Eip4844 { max_priority_fee_per_gas, .. } => *max_priority_fee_per_gas, + Self::Legacy { gas_price, .. } | Self::Eip2930 { gas_price, .. } => *gas_price, + Self::Eip1559 { max_priority_fee_per_gas, .. } | + Self::Eip4844 { max_priority_fee_per_gas, .. } => *max_priority_fee_per_gas, } } /// Returns the transaction kind associated with the transaction. fn kind(&self) -> TxKind { match self { - MockTransaction::Legacy { to, .. } | - MockTransaction::Eip1559 { to, .. } | - MockTransaction::Eip2930 { to, .. } => *to, - MockTransaction::Eip4844 { to, .. } => TxKind::Call(*to), + Self::Legacy { to, .. } | Self::Eip1559 { to, .. } | Self::Eip2930 { to, .. } => *to, + Self::Eip4844 { to, .. } => TxKind::Call(*to), } } /// Returns the input data associated with the transaction. fn input(&self) -> &[u8] { match self { - MockTransaction::Legacy { .. } => &[], - MockTransaction::Eip1559 { input, .. } | - MockTransaction::Eip4844 { input, .. } | - MockTransaction::Eip2930 { input, .. } => input, + Self::Legacy { .. } => &[], + Self::Eip1559 { input, .. } | + Self::Eip4844 { input, .. } | + Self::Eip2930 { input, .. 
} => input, } } /// Returns the size of the transaction. fn size(&self) -> usize { match self { - MockTransaction::Legacy { size, .. } | - MockTransaction::Eip1559 { size, .. } | - MockTransaction::Eip4844 { size, .. } | - MockTransaction::Eip2930 { size, .. } => *size, + Self::Legacy { size, .. } | + Self::Eip1559 { size, .. } | + Self::Eip4844 { size, .. } | + Self::Eip2930 { size, .. } => *size, } } /// Returns the transaction type as a byte identifier. fn tx_type(&self) -> u8 { match self { - MockTransaction::Legacy { .. } => TxType::Legacy.into(), - MockTransaction::Eip1559 { .. } => TxType::Eip1559.into(), - MockTransaction::Eip4844 { .. } => TxType::Eip4844.into(), - MockTransaction::Eip2930 { .. } => TxType::Eip2930.into(), + Self::Legacy { .. } => TxType::Legacy.into(), + Self::Eip1559 { .. } => TxType::Eip1559.into(), + Self::Eip4844 { .. } => TxType::Eip4844.into(), + Self::Eip2930 { .. } => TxType::Eip2930.into(), } } @@ -734,10 +716,10 @@ impl PoolTransaction for MockTransaction { /// Returns the chain ID associated with the transaction. fn chain_id(&self) -> Option { match self { - MockTransaction::Legacy { chain_id, .. } => *chain_id, - MockTransaction::Eip1559 { chain_id, .. } | - MockTransaction::Eip4844 { chain_id, .. } | - MockTransaction::Eip2930 { chain_id, .. } => Some(*chain_id), + Self::Legacy { chain_id, .. } => *chain_id, + Self::Eip1559 { chain_id, .. } | + Self::Eip4844 { chain_id, .. } | + Self::Eip2930 { chain_id, .. 
} => Some(*chain_id), } } } @@ -790,7 +772,7 @@ impl TryFromRecoveredTransaction for MockTransaction { to, value, input, - }) => Ok(MockTransaction::Legacy { + }) => Ok(Self::Legacy { chain_id, hash, sender, @@ -811,7 +793,7 @@ impl TryFromRecoveredTransaction for MockTransaction { value, input, access_list, - }) => Ok(MockTransaction::Eip2930 { + }) => Ok(Self::Eip2930 { chain_id, hash, sender, @@ -834,7 +816,7 @@ impl TryFromRecoveredTransaction for MockTransaction { value, input, access_list, - }) => Ok(MockTransaction::Eip1559 { + }) => Ok(Self::Eip1559 { chain_id, hash, sender, @@ -861,7 +843,7 @@ impl TryFromRecoveredTransaction for MockTransaction { access_list, blob_versioned_hashes: _, max_fee_per_blob_gas, - }) => Ok(MockTransaction::Eip4844 { + }) => Ok(Self::Eip4844 { chain_id, hash, sender, @@ -1017,7 +999,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { to, value, input, - }) => MockTransaction::Legacy { + }) => Self::Legacy { chain_id: *chain_id, sender, hash: tx_hash, @@ -1039,7 +1021,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { value, access_list, input, - }) => MockTransaction::Eip2930 { + }) => Self::Eip2930 { chain_id: *chain_id, sender, hash: tx_hash, @@ -1062,7 +1044,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { value, input, access_list, - }) => MockTransaction::Eip1559 { + }) => Self::Eip1559 { chain_id: *chain_id, sender, hash: tx_hash, @@ -1089,7 +1071,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { access_list, blob_versioned_hashes: _, placeholder, - }) => MockTransaction::Eip4844 { + }) => Self::Eip4844 { chain_id: *chain_id, sender, hash: tx_hash, @@ -1114,7 +1096,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { .boxed() } - type Strategy = proptest::strategy::BoxedStrategy; + type Strategy = proptest::strategy::BoxedStrategy; } /// A factory for creating and managing various types of mock transactions. 
@@ -1433,7 +1415,7 @@ impl NonConflictingSetOutcome { /// Returns the inner [MockTransactionSet] pub fn into_inner(self) -> MockTransactionSet { match self { - NonConflictingSetOutcome::BlobsOnly(set) | NonConflictingSetOutcome::Mixed(set) => set, + Self::BlobsOnly(set) | Self::Mixed(set) => set, } } @@ -1451,8 +1433,8 @@ impl NonConflictingSetOutcome { rng: &mut impl rand::Rng, ) { match self { - NonConflictingSetOutcome::BlobsOnly(_) => {} - NonConflictingSetOutcome::Mixed(set) => set.with_nonce_gaps(gap_pct, gap_range, rng), + Self::BlobsOnly(_) => {} + Self::Mixed(set) => set.with_nonce_gaps(gap_pct, gap_range, rng), } } } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index dd465a77b9fff..58377a19e0d61 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -497,7 +497,7 @@ impl PropagateKind { /// Returns the peer the transaction was sent to pub const fn peer(&self) -> &PeerId { match self { - PropagateKind::Full(peer) | PropagateKind::Hash(peer) => peer, + Self::Full(peer) | Self::Hash(peer) => peer, } } } @@ -561,16 +561,16 @@ pub enum TransactionOrigin { impl TransactionOrigin { /// Whether the transaction originates from a local source. pub const fn is_local(&self) -> bool { - matches!(self, TransactionOrigin::Local) + matches!(self, Self::Local) } /// Whether the transaction originates from an external source. pub const fn is_external(&self) -> bool { - matches!(self, TransactionOrigin::External) + matches!(self, Self::External) } /// Whether the transaction originates from a private source. 
pub const fn is_private(&self) -> bool { - matches!(self, TransactionOrigin::Private) + matches!(self, Self::Private) } } @@ -909,7 +909,7 @@ impl EthBlobTransactionSidecar { /// Returns the blob sidecar if it is present pub const fn maybe_sidecar(&self) -> Option<&BlobTransactionSidecar> { match self { - EthBlobTransactionSidecar::Present(sidecar) => Some(sidecar), + Self::Present(sidecar) => Some(sidecar), _ => None, } } @@ -969,13 +969,13 @@ impl From for EthPooledTransaction { // include the blob sidecar let (tx, blob) = tx.into_parts(); let tx = TransactionSignedEcRecovered::from_signed_transaction(tx, signer); - let mut pooled = EthPooledTransaction::new(tx, encoded_length); + let mut pooled = Self::new(tx, encoded_length); pooled.blob_sidecar = EthBlobTransactionSidecar::Present(blob); pooled } tx => { // no blob sidecar - EthPooledTransaction::new(tx.into_ecrecovered_transaction(signer), encoded_length) + Self::new(tx.into_ecrecovered_transaction(signer), encoded_length) } } } @@ -1146,14 +1146,14 @@ impl TryFromRecoveredTransaction for EthPooledTransaction { }; let encoded_length = tx.length_without_header(); - let transaction = EthPooledTransaction::new(tx, encoded_length); + let transaction = Self::new(tx, encoded_length); Ok(transaction) } } impl FromRecoveredPooledTransaction for EthPooledTransaction { fn from_recovered_pooled_transaction(tx: PooledTransactionsElementEcRecovered) -> Self { - EthPooledTransaction::from(tx) + Self::from(tx) } } @@ -1231,8 +1231,8 @@ impl GetPooledTransactionLimit { #[inline] pub const fn exceeds(&self, size: usize) -> bool { match self { - GetPooledTransactionLimit::None => false, - GetPooledTransactionLimit::ResponseSizeSoftLimit(limit) => size > *limit, + Self::None => false, + Self::ResponseSizeSoftLimit(limit) => size > *limit, } } } diff --git a/crates/transaction-pool/src/validate/task.rs b/crates/transaction-pool/src/validate/task.rs index 018b3aab0b541..a18cdad845d66 100644 --- 
a/crates/transaction-pool/src/validate/task.rs +++ b/crates/transaction-pool/src/validate/task.rs @@ -42,7 +42,7 @@ impl ValidationTask { /// Creates a new task with the given receiver. pub fn with_receiver(jobs: mpsc::Receiver + Send>>>) -> Self { - ValidationTask { validation_jobs: Arc::new(Mutex::new(ReceiverStream::new(jobs))) } + Self { validation_jobs: Arc::new(Mutex::new(ReceiverStream::new(jobs))) } } /// Executes all new validation jobs that come in. diff --git a/crates/trie-parallel/src/parallel_root.rs b/crates/trie-parallel/src/parallel_root.rs index 04417360567ef..513ae15775007 100644 --- a/crates/trie-parallel/src/parallel_root.rs +++ b/crates/trie-parallel/src/parallel_root.rs @@ -206,7 +206,7 @@ impl From for ProviderError { match error { ParallelStateRootError::Provider(error) => error, ParallelStateRootError::StorageRoot(StorageRootError::DB(error)) => { - ProviderError::Database(error) + Self::Database(error) } } } diff --git a/crates/trie/benches/prefix_set.rs b/crates/trie/benches/prefix_set.rs index c45c1f2b2986b..bd199c2cde835 100644 --- a/crates/trie/benches/prefix_set.rs +++ b/crates/trie/benches/prefix_set.rs @@ -19,11 +19,11 @@ pub trait PrefixSetAbstraction: Default { impl PrefixSetAbstraction for PrefixSetMut { fn insert(&mut self, key: Nibbles) { - PrefixSetMut::insert(self, key) + Self::insert(self, key) } fn contains(&mut self, key: Nibbles) -> bool { - PrefixSetMut::contains(self, &key) + Self::contains(self, &key) } } diff --git a/crates/trie/src/prefix_set/mod.rs b/crates/trie/src/prefix_set/mod.rs index 32fdc68c812d7..c37b90d4a740c 100644 --- a/crates/trie/src/prefix_set/mod.rs +++ b/crates/trie/src/prefix_set/mod.rs @@ -58,7 +58,7 @@ where I: IntoIterator, { fn from(value: I) -> Self { - PrefixSetMut { keys: value.into_iter().collect(), ..Default::default() } + Self { keys: value.into_iter().collect(), ..Default::default() } } } diff --git a/crates/trie/src/trie_cursor/subnode.rs b/crates/trie/src/trie_cursor/subnode.rs index 
c3eca176a9de8..c8b77daea58e0 100644 --- a/crates/trie/src/trie_cursor/subnode.rs +++ b/crates/trie/src/trie_cursor/subnode.rs @@ -64,7 +64,7 @@ impl CursorSubNode { CHILD_INDEX_RANGE.clone().find(|i| n.state_mask.is_bit_set(*i)).unwrap() as i8 }); let full_key = full_key(key.clone(), nibble); - CursorSubNode { key, node, nibble, full_key } + Self { key, node, nibble, full_key } } /// Returns the full key of the current node. diff --git a/crates/trie/src/updates.rs b/crates/trie/src/updates.rs index 1d31ee31fea4e..91fc63b101c11 100644 --- a/crates/trie/src/updates.rs +++ b/crates/trie/src/updates.rs @@ -37,7 +37,7 @@ pub enum TrieOp { impl TrieOp { /// Returns `true` if the operation is an update. pub fn is_update(&self) -> bool { - matches!(self, TrieOp::Update(..)) + matches!(self, Self::Update(..)) } } diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index 6ea0fad702f91..5c1746045ed66 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -42,7 +42,7 @@ pub struct BlockchainTestCase { impl Case for BlockchainTestCase { fn load(path: &Path) -> Result { - Ok(BlockchainTestCase { + Ok(Self { tests: { let s = fs::read_to_string(path) .map_err(|error| Error::Io { path: path.into(), error })?; diff --git a/testing/ef-tests/src/result.rs b/testing/ef-tests/src/result.rs index c7a893c4cbdec..409b273fd5de6 100644 --- a/testing/ef-tests/src/result.rs +++ b/testing/ef-tests/src/result.rs @@ -66,7 +66,7 @@ pub struct CaseResult { impl CaseResult { /// Create a new test result. 
pub fn new(path: &Path, case: &impl Case, result: Result<(), Error>) -> Self { - CaseResult { desc: case.description(), path: path.into(), result } + Self { desc: case.description(), path: path.into(), result } } } From 69c65302bf006f8e80708df5555c3663c872969e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 May 2024 15:18:01 +0200 Subject: [PATCH 686/700] chore: rm reth-interfaces from prune (#8469) --- Cargo.lock | 3 ++- crates/prune/Cargo.toml | 3 ++- crates/prune/src/error.rs | 2 +- crates/prune/src/segments/account_history.rs | 8 ++++---- crates/prune/src/segments/headers.rs | 2 +- crates/prune/src/segments/history.rs | 3 +-- crates/prune/src/segments/mod.rs | 7 ++++--- crates/prune/src/segments/receipts.rs | 14 ++++++++------ crates/prune/src/segments/receipts_by_logs.rs | 8 ++++---- crates/prune/src/segments/sender_recovery.rs | 2 +- crates/prune/src/segments/storage_history.rs | 8 ++++---- crates/prune/src/segments/transaction_lookup.rs | 2 +- crates/prune/src/segments/transactions.rs | 2 +- 13 files changed, 34 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f764515ec5cb..1ba0e79e23b0d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7711,11 +7711,12 @@ dependencies = [ "rayon", "reth-config", "reth-db", - "reth-interfaces", + "reth-errors", "reth-metrics", "reth-primitives", "reth-provider", "reth-stages", + "reth-testing-utils", "reth-tokio-util", "reth-tracing", "thiserror", diff --git a/crates/prune/Cargo.toml b/crates/prune/Cargo.toml index a645dd40a44a7..33083e89c6421 100644 --- a/crates/prune/Cargo.toml +++ b/crates/prune/Cargo.toml @@ -15,8 +15,8 @@ workspace = true # reth reth-primitives.workspace = true reth-db.workspace = true +reth-errors.workspace = true reth-provider.workspace = true -reth-interfaces.workspace = true reth-tokio-util.workspace = true reth-config.workspace = true @@ -35,6 +35,7 @@ tokio.workspace = true # reth reth-db = { workspace = true, features = ["test-utils"] } reth-stages = { workspace = 
true, features = ["test-utils"] } +reth-testing-utils.workspace = true reth-tracing.workspace = true assert_matches.workspace = true diff --git a/crates/prune/src/error.rs b/crates/prune/src/error.rs index b223bccb92f0d..6f37fff9f315a 100644 --- a/crates/prune/src/error.rs +++ b/crates/prune/src/error.rs @@ -1,5 +1,5 @@ use reth_db::DatabaseError; -use reth_interfaces::RethError; +use reth_errors::RethError; use reth_primitives::PruneSegmentError; use reth_provider::ProviderError; use thiserror::Error; diff --git a/crates/prune/src/segments/account_history.rs b/crates/prune/src/segments/account_history.rs index c0d92929801d6..5811f3b11379b 100644 --- a/crates/prune/src/segments/account_history.rs +++ b/crates/prune/src/segments/account_history.rs @@ -107,16 +107,16 @@ mod tests { }; use assert_matches::assert_matches; use reth_db::{tables, BlockNumberList}; - use reth_interfaces::test_utils::{ - generators, - generators::{random_block_range, random_changeset_range, random_eoa_accounts}, - }; use reth_primitives::{ BlockNumber, PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, B256, }; use reth_provider::PruneCheckpointReader; use reth_stages::test_utils::{StorageKind, TestStageDB}; + use reth_testing_utils::{ + generators, + generators::{random_block_range, random_changeset_range, random_eoa_accounts}, + }; use std::{collections::BTreeMap, ops::AddAssign}; #[test] diff --git a/crates/prune/src/segments/headers.rs b/crates/prune/src/segments/headers.rs index 64263c88156d4..c2c17914bffc9 100644 --- a/crates/prune/src/segments/headers.rs +++ b/crates/prune/src/segments/headers.rs @@ -189,13 +189,13 @@ where mod tests { use assert_matches::assert_matches; use reth_db::{tables, transaction::DbTx}; - use reth_interfaces::test_utils::{generators, generators::random_header_range}; use reth_primitives::{ BlockNumber, PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, B256, U256, }; use 
reth_provider::PruneCheckpointReader; use reth_stages::test_utils::TestStageDB; + use reth_testing_utils::{generators, generators::random_header_range}; use tracing::trace; use crate::segments::{ diff --git a/crates/prune/src/segments/history.rs b/crates/prune/src/segments/history.rs index 2d8159a733d7c..d402db2b466e3 100644 --- a/crates/prune/src/segments/history.rs +++ b/crates/prune/src/segments/history.rs @@ -4,9 +4,8 @@ use reth_db::{ models::ShardedKey, table::Table, transaction::DbTxMut, - BlockNumberList, + BlockNumberList, DatabaseError, }; -use reth_interfaces::db::DatabaseError; use reth_primitives::BlockNumber; use reth_provider::DatabaseProviderRW; diff --git a/crates/prune/src/segments/mod.rs b/crates/prune/src/segments/mod.rs index d0d66e817600d..bfd3b5a8d9bfe 100644 --- a/crates/prune/src/segments/mod.rs +++ b/crates/prune/src/segments/mod.rs @@ -15,12 +15,13 @@ pub use headers::Headers; pub use receipts::Receipts; pub use receipts_by_logs::ReceiptsByLogs; use reth_db::database::Database; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{ BlockNumber, PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, TxNumber, }; -use reth_provider::{BlockReader, DatabaseProviderRW, PruneCheckpointWriter}; +use reth_provider::{ + errors::provider::ProviderResult, BlockReader, DatabaseProviderRW, PruneCheckpointWriter, +}; pub use sender_recovery::SenderRecovery; pub use set::SegmentSet; use std::{fmt::Debug, ops::RangeInclusive}; @@ -101,7 +102,7 @@ impl PruneInput { let last_tx = body.last_tx_num(); if last_tx + body.tx_count() == 0 { // Prevents a scenario where the pruner correctly starts at a finalized block, - // but the first transaction (tx_num = 0) only appears on an unfinalized one. + // but the first transaction (tx_num = 0) only appears on an non-finalized one. // Should only happen on a test/hive scenario. 
return Ok(None) } diff --git a/crates/prune/src/segments/receipts.rs b/crates/prune/src/segments/receipts.rs index 4ae58db3ecdff..485b7dd68bb0b 100644 --- a/crates/prune/src/segments/receipts.rs +++ b/crates/prune/src/segments/receipts.rs @@ -3,9 +3,11 @@ use crate::{ PrunerError, }; use reth_db::{database::Database, tables}; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{PruneCheckpoint, PruneMode, PruneProgress, PruneSegment}; -use reth_provider::{DatabaseProviderRW, PruneCheckpointWriter, TransactionsProvider}; +use reth_provider::{ + errors::provider::ProviderResult, DatabaseProviderRW, PruneCheckpointWriter, + TransactionsProvider, +}; use tracing::{instrument, trace}; #[derive(Debug)] @@ -97,16 +99,16 @@ mod tests { Itertools, }; use reth_db::tables; - use reth_interfaces::test_utils::{ - generators, - generators::{random_block_range, random_receipt}, - }; use reth_primitives::{ BlockNumber, PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, TxNumber, B256, }; use reth_provider::PruneCheckpointReader; use reth_stages::test_utils::{StorageKind, TestStageDB}; + use reth_testing_utils::{ + generators, + generators::{random_block_range, random_receipt}, + }; use std::ops::Sub; #[test] diff --git a/crates/prune/src/segments/receipts_by_logs.rs b/crates/prune/src/segments/receipts_by_logs.rs index c9bf4a799c3c0..211434bc067c5 100644 --- a/crates/prune/src/segments/receipts_by_logs.rs +++ b/crates/prune/src/segments/receipts_by_logs.rs @@ -217,13 +217,13 @@ mod tests { use crate::segments::{receipts_by_logs::ReceiptsByLogs, PruneInput, Segment}; use assert_matches::assert_matches; use reth_db::{cursor::DbCursorRO, tables, transaction::DbTx}; - use reth_interfaces::test_utils::{ - generators, - generators::{random_block_range, random_eoa_account, random_log, random_receipt}, - }; use reth_primitives::{PruneLimiter, PruneMode, PruneSegment, ReceiptsLogPruneConfig, B256}; use 
reth_provider::{PruneCheckpointReader, TransactionsProvider}; use reth_stages::test_utils::{StorageKind, TestStageDB}; + use reth_testing_utils::{ + generators, + generators::{random_block_range, random_eoa_account, random_log, random_receipt}, + }; use std::collections::BTreeMap; #[test] diff --git a/crates/prune/src/segments/sender_recovery.rs b/crates/prune/src/segments/sender_recovery.rs index 355d82f4576c7..9dfcc85bb4a80 100644 --- a/crates/prune/src/segments/sender_recovery.rs +++ b/crates/prune/src/segments/sender_recovery.rs @@ -82,13 +82,13 @@ mod tests { Itertools, }; use reth_db::tables; - use reth_interfaces::test_utils::{generators, generators::random_block_range}; use reth_primitives::{ BlockNumber, PruneCheckpoint, PruneLimiter, PruneMode, PruneProgress, PruneSegment, TxNumber, B256, }; use reth_provider::PruneCheckpointReader; use reth_stages::test_utils::{StorageKind, TestStageDB}; + use reth_testing_utils::{generators, generators::random_block_range}; use std::ops::Sub; #[test] diff --git a/crates/prune/src/segments/storage_history.rs b/crates/prune/src/segments/storage_history.rs index 1ee8d20f4bf82..6be6a24ff171f 100644 --- a/crates/prune/src/segments/storage_history.rs +++ b/crates/prune/src/segments/storage_history.rs @@ -111,15 +111,15 @@ mod tests { }; use assert_matches::assert_matches; use reth_db::{tables, BlockNumberList}; - use reth_interfaces::test_utils::{ - generators, - generators::{random_block_range, random_changeset_range, random_eoa_accounts}, - }; use reth_primitives::{ BlockNumber, PruneCheckpoint, PruneLimiter, PruneMode, PruneProgress, PruneSegment, B256, }; use reth_provider::PruneCheckpointReader; use reth_stages::test_utils::{StorageKind, TestStageDB}; + use reth_testing_utils::{ + generators, + generators::{random_block_range, random_changeset_range, random_eoa_accounts}, + }; use std::{collections::BTreeMap, ops::AddAssign}; #[test] diff --git a/crates/prune/src/segments/transaction_lookup.rs 
b/crates/prune/src/segments/transaction_lookup.rs index 3203552ba8432..898b30206d568 100644 --- a/crates/prune/src/segments/transaction_lookup.rs +++ b/crates/prune/src/segments/transaction_lookup.rs @@ -109,13 +109,13 @@ mod tests { Itertools, }; use reth_db::tables; - use reth_interfaces::test_utils::{generators, generators::random_block_range}; use reth_primitives::{ BlockNumber, PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, TxNumber, B256, }; use reth_provider::PruneCheckpointReader; use reth_stages::test_utils::{StorageKind, TestStageDB}; + use reth_testing_utils::{generators, generators::random_block_range}; use std::ops::Sub; #[test] diff --git a/crates/prune/src/segments/transactions.rs b/crates/prune/src/segments/transactions.rs index ed53c0306ff92..3f881e06fb4da 100644 --- a/crates/prune/src/segments/transactions.rs +++ b/crates/prune/src/segments/transactions.rs @@ -81,13 +81,13 @@ mod tests { Itertools, }; use reth_db::tables; - use reth_interfaces::test_utils::{generators, generators::random_block_range}; use reth_primitives::{ BlockNumber, PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, TxNumber, B256, }; use reth_provider::PruneCheckpointReader; use reth_stages::test_utils::{StorageKind, TestStageDB}; + use reth_testing_utils::{generators, generators::random_block_range}; use std::ops::Sub; #[test] From 269e3a9c42e90403ded9d887730b832f8a176537 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 May 2024 15:27:49 +0200 Subject: [PATCH 687/700] chore: rm reth-interfaces from engine api (#8468) --- Cargo.lock | 4 +++- crates/rpc/rpc-engine-api/Cargo.toml | 7 ++++--- crates/rpc/rpc-engine-api/src/engine_api.rs | 9 +++++---- crates/rpc/rpc-engine-api/tests/it/payload.rs | 4 +--- 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1ba0e79e23b0d..0218339c96266 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7875,7 
+7875,7 @@ dependencies = [ "reth-beacon-consensus", "reth-engine-primitives", "reth-ethereum-engine-primitives", - "reth-interfaces", + "reth-evm", "reth-metrics", "reth-payload-builder", "reth-primitives", @@ -7883,7 +7883,9 @@ dependencies = [ "reth-rpc-api", "reth-rpc-types", "reth-rpc-types-compat", + "reth-storage-api", "reth-tasks", + "reth-testing-utils", "reth-tokio-util", "serde", "thiserror", diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 83a5f85fcfaec..bd68a77d12b8a 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -14,14 +14,15 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true -reth-provider.workspace = true -reth-rpc-types.workspace = true reth-rpc-api.workspace = true +reth-rpc-types.workspace = true +reth-storage-api.workspace = true reth-beacon-consensus.workspace = true reth-payload-builder.workspace = true reth-tasks.workspace = true reth-rpc-types-compat.workspace = true reth-engine-primitives.workspace = true +reth-evm.workspace = true # async tokio = { workspace = true, features = ["sync"] } @@ -40,10 +41,10 @@ serde.workspace = true [dev-dependencies] reth-ethereum-engine-primitives.workspace = true -reth-interfaces = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-tokio-util.workspace = true +reth-testing-utils.workspace = true alloy-rlp.workspace = true diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index c09e996b28a99..21bd5e0614031 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -6,9 +6,9 @@ use reth_engine_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, EngineTypes, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, }; +use 
reth_evm::provider::EvmEnvProvider; use reth_payload_builder::PayloadStore; use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Hardfork, B256, U64}; -use reth_provider::{BlockReader, EvmEnvProvider, HeaderProvider, StateProviderFactory}; use reth_rpc_api::EngineApiServer; use reth_rpc_types::engine::{ CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, @@ -19,6 +19,7 @@ use reth_rpc_types::engine::{ use reth_rpc_types_compat::engine::payload::{ convert_payload_input_v2_to_payload, convert_to_payload_body_v1, }; +use reth_storage_api::{BlockReader, HeaderProvider, StateProviderFactory}; use reth_tasks::TaskSpawner; use std::{sync::Arc, time::Instant}; use tokio::sync::oneshot; @@ -831,7 +832,7 @@ mod tests { use assert_matches::assert_matches; use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage}; use reth_ethereum_engine_primitives::EthEngineTypes; - use reth_interfaces::test_utils::generators::random_block; + use reth_testing_utils::generators::random_block; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::{SealedBlock, B256, MAINNET}; @@ -903,7 +904,7 @@ mod tests { // tests covering `engine_getPayloadBodiesByRange` and `engine_getPayloadBodiesByHash` mod get_payload_bodies { use super::*; - use reth_interfaces::test_utils::{generators, generators::random_block_range}; + use reth_testing_utils::{generators, generators::random_block_range}; #[tokio::test] async fn invalid_params() { @@ -1017,8 +1018,8 @@ mod tests { // https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-3 mod exchange_transition_configuration { use super::*; - use reth_interfaces::test_utils::generators; use reth_primitives::U256; + use reth_testing_utils::generators; #[tokio::test] async fn terminal_td_mismatch() { diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 
a68eef6393bc7..94f011c02db30 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -2,9 +2,6 @@ use alloy_rlp::{Decodable, Error as RlpError}; use assert_matches::assert_matches; -use reth_interfaces::test_utils::generators::{ - self, random_block, random_block_range, random_header, Rng, -}; use reth_primitives::{ bytes::{Bytes, BytesMut}, proofs, Block, SealedBlock, TransactionSigned, Withdrawals, B256, U256, @@ -16,6 +13,7 @@ use reth_rpc_types_compat::engine::payload::{ block_to_payload, block_to_payload_v1, convert_to_payload_body_v1, try_into_sealed_block, try_payload_v1_to_block, }; +use reth_testing_utils::generators::{self, random_block, random_block_range, random_header, Rng}; fn transform_block Block>(src: SealedBlock, f: F) -> ExecutionPayload { let unsealed = src.unseal(); From 0d205039054c2ce7d62d338bcc76e4eb9a886644 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 May 2024 15:28:15 +0200 Subject: [PATCH 688/700] chore: rm reth-interfaces from db-common (#8467) --- Cargo.lock | 1 - crates/storage/db-common/Cargo.toml | 1 - crates/storage/db-common/src/init.rs | 4 ++-- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0218339c96266..eb66ba34ab6ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6695,7 +6695,6 @@ dependencies = [ "reth-config", "reth-db", "reth-etl", - "reth-interfaces", "reth-primitives", "reth-provider", "reth-trie", diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index 675dde4ba5916..0d04f569ec33b 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -11,7 +11,6 @@ repository.workspace = true # reth reth-primitives.workspace = true reth-db = { workspace = true, features = ["mdbx"] } -reth-interfaces = { workspace = true, features = ["clap"] } reth-provider.workspace = true reth-config.workspace = true reth-trie.workspace = true diff --git 
a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 05435ce37e989..27728313b1059 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -2,9 +2,8 @@ use reth_codecs::Compact; use reth_config::config::EtlConfig; -use reth_db::{database::Database, tables, transaction::DbTxMut}; +use reth_db::{database::Database, tables, transaction::DbTxMut, DatabaseError}; use reth_etl::Collector; -use reth_interfaces::{db::DatabaseError, provider::ProviderResult}; use reth_primitives::{ stage::{StageCheckpoint, StageId}, Account, Address, Bytecode, ChainSpec, GenesisAccount, Receipts, StaticFileSegment, @@ -12,6 +11,7 @@ use reth_primitives::{ }; use reth_provider::{ bundle_state::{BundleStateInit, RevertsInit}, + errors::provider::ProviderResult, providers::{StaticFileProvider, StaticFileWriter}, BlockHashReader, BlockNumReader, BundleStateWithReceipts, ChainSpecProvider, DatabaseProviderRW, HashingWriter, HistoryWriter, OriginalValuesKnown, ProviderError, From 53bcb2c9ee04d64abdf2933480f7f60662c270a9 Mon Sep 17 00:00:00 2001 From: James Prestwich Date: Wed, 29 May 2024 15:12:06 +0100 Subject: [PATCH 689/700] Fix: manually impl Clone for BlockchainProvider (#8474) --- crates/storage/provider/src/providers/mod.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index eda49830dd78c..9bbdcb26f1d14 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -65,7 +65,6 @@ pub use consistent_view::{ConsistentDbView, ConsistentViewError}; /// This type serves as the main entry point for interacting with the blockchain and provides data /// from database storage and from the blockchain tree (pending state etc.) It is a simple wrapper /// type that holds an instance of the database and the blockchain tree. 
-#[derive(Clone)] #[allow(missing_debug_implementations)] pub struct BlockchainProvider { /// Provider type used to access the database. @@ -76,6 +75,16 @@ pub struct BlockchainProvider { chain_info: ChainInfoTracker, } +impl Clone for BlockchainProvider { + fn clone(&self) -> Self { + Self { + database: self.database.clone(), + tree: self.tree.clone(), + chain_info: self.chain_info.clone(), + } + } +} + impl BlockchainProvider { /// Create new provider instance that wraps the database and the blockchain tree, using the /// provided latest header to initialize the chain info tracker. From bed246c0ac92ce5774998f1809df713a5d3701b0 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Wed, 29 May 2024 17:16:12 +0300 Subject: [PATCH 690/700] chore: name rayon threads (#8471) --- crates/node/builder/src/launch/common.rs | 25 ++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 5b66362878dd3..36cf479907753 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -18,8 +18,8 @@ use reth_prune::PrunerBuilder; use reth_rpc_layer::JwtSecret; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; -use reth_tracing::tracing::{error, info, warn}; -use std::{cmp::max, sync::Arc, thread::available_parallelism}; +use reth_tracing::tracing::{debug, error, info, warn}; +use std::{sync::Arc, thread::available_parallelism}; use tokio::sync::mpsc::Receiver; /// Reusable setup for launching a node. @@ -112,15 +112,24 @@ impl LaunchContext { pub fn configure_globals(&self) { // Raise the fd limit of the process. // Does not do anything on windows. 
- let _ = fdlimit::raise_fd_limit(); + match fdlimit::raise_fd_limit() { + Ok(fdlimit::Outcome::LimitRaised { from, to }) => { + debug!(from, to, "Raised file descriptor limit"); + } + Ok(fdlimit::Outcome::Unsupported) => {} + Err(err) => warn!(%err, "Failed to raise file descriptor limit"), + } // Limit the global rayon thread pool, reserving 2 cores for the rest of the system - let _ = ThreadPoolBuilder::new() - .num_threads( - available_parallelism().map_or(25, |cpus| max(cpus.get().saturating_sub(2), 2)), - ) + let num_threads = + available_parallelism().map_or(0, |num| num.get().saturating_sub(2).max(2)); + if let Err(err) = ThreadPoolBuilder::new() + .num_threads(num_threads) + .thread_name(|i| format!("reth-rayon-{i}")) .build_global() - .map_err(|e| error!("Failed to build global thread pool: {:?}", e)); + { + error!(%err, "Failed to build global thread pool") + } } } From bd59c1badd80f57384fd508e58b6095e148a2d5b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 May 2024 16:23:28 +0200 Subject: [PATCH 691/700] chore: rm reth-interfaces from stages-api (#8472) --- Cargo.lock | 4 +++- crates/stages-api/Cargo.toml | 5 +++-- crates/stages-api/src/error.rs | 11 +++++------ crates/stages-api/src/pipeline/mod.rs | 8 +++----- crates/storage/provider/src/lib.rs | 2 +- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eb66ba34ab6ea..3efd3c97fd417 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7994,12 +7994,14 @@ dependencies = [ "metrics", "reth-consensus", "reth-db", - "reth-interfaces", + "reth-errors", "reth-metrics", + "reth-network-p2p", "reth-primitives", "reth-provider", "reth-prune", "reth-static-file", + "reth-testing-utils", "reth-tokio-util", "thiserror", "tokio", diff --git a/crates/stages-api/Cargo.toml b/crates/stages-api/Cargo.toml index 32c4258538ad1..51c1bc54af6f9 100644 --- a/crates/stages-api/Cargo.toml +++ b/crates/stages-api/Cargo.toml @@ -15,11 +15,12 @@ workspace = true 
reth-primitives.workspace = true reth-provider.workspace = true reth-db.workspace = true -reth-interfaces.workspace = true reth-static-file.workspace = true +reth-network-p2p.workspace = true reth-tokio-util.workspace = true reth-consensus.workspace = true reth-prune.workspace = true +reth-errors.workspace = true # metrics reth-metrics.workspace = true @@ -38,8 +39,8 @@ auto_impl.workspace = true [dev-dependencies] assert_matches.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } -reth-interfaces = { workspace = true, features = ["test-utils"] } tokio-stream.workspace = true +reth-testing-utils.workspace = true [features] test-utils = [] diff --git a/crates/stages-api/src/error.rs b/crates/stages-api/src/error.rs index 40b998bc1fc0a..9b75653559fdd 100644 --- a/crates/stages-api/src/error.rs +++ b/crates/stages-api/src/error.rs @@ -1,8 +1,7 @@ use crate::PipelineEvent; use reth_consensus::ConsensusError; -use reth_interfaces::{ - db::DatabaseError as DbError, executor, p2p::error::DownloadError, RethError, -}; +use reth_errors::{BlockExecutionError, DatabaseError, RethError}; +use reth_network_p2p::error::DownloadError; use reth_primitives::{BlockNumber, SealedHeader, StaticFileSegment, TxNumber}; use reth_provider::ProviderError; use thiserror::Error; @@ -16,7 +15,7 @@ pub enum BlockErrorKind { Validation(#[from] ConsensusError), /// The block encountered an execution error. #[error("execution error: {0}")] - Execution(#[from] executor::BlockExecutionError), + Execution(#[from] BlockExecutionError), } impl BlockErrorKind { @@ -66,7 +65,7 @@ pub enum StageError { MissingSyncGap, /// The stage encountered a database error. 
#[error("internal database error occurred: {0}")] - Database(#[from] DbError), + Database(#[from] DatabaseError), /// Invalid pruning configuration #[error(transparent)] PruningConfiguration(#[from] reth_primitives::PruneSegmentError), @@ -167,7 +166,7 @@ pub enum PipelineError { Stage(#[from] StageError), /// The pipeline encountered a database error. #[error(transparent)] - Database(#[from] DbError), + Database(#[from] DatabaseError), /// Provider error. #[error(transparent)] Provider(#[from] ProviderError), diff --git a/crates/stages-api/src/pipeline/mod.rs b/crates/stages-api/src/pipeline/mod.rs index 66a87a0f8a457..c223c7192c7f7 100644 --- a/crates/stages-api/src/pipeline/mod.rs +++ b/crates/stages-api/src/pipeline/mod.rs @@ -4,7 +4,6 @@ pub use crate::pipeline::ctrl::ControlFlow; pub use event::*; use futures_util::Future; use reth_db::database::Database; -use reth_interfaces::RethResult; use reth_primitives::{ constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH, stage::{PipelineTarget, StageCheckpoint, StageId}, @@ -32,6 +31,7 @@ use crate::{ }; pub use builder::*; use progress::*; +use reth_errors::RethResult; pub use set::*; /// A container for a queued stage. @@ -589,12 +589,10 @@ mod tests { use crate::{test_utils::TestStage, UnwindOutput}; use assert_matches::assert_matches; use reth_consensus::ConsensusError; - use reth_interfaces::{ - provider::ProviderError, - test_utils::{generators, generators::random_header}, - }; + use reth_errors::ProviderError; use reth_primitives::PruneModes; use reth_provider::test_utils::create_test_provider_factory; + use reth_testing_utils::{generators, generators::random_header}; use tokio_stream::StreamExt; #[test] diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 0d232531889bd..021372bb647e9 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -29,7 +29,7 @@ pub use providers::{ pub mod test_utils; /// Re-export provider error. 
-pub use reth_storage_errors::provider::ProviderError; +pub use reth_storage_errors::provider::{ProviderError, ProviderResult}; pub use reth_execution_types::*; From 13c914598eb0ff4d8821297a453dc7608b90a21d Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Wed, 29 May 2024 17:44:58 +0300 Subject: [PATCH 692/700] chore: name std threads (#8475) --- crates/cli/runner/src/lib.rs | 10 +++++++-- crates/storage/libmdbx-rs/src/txn_manager.rs | 22 ++++++++++++-------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/crates/cli/runner/src/lib.rs b/crates/cli/runner/src/lib.rs index 94536d0cb91b6..da1c5841d7882 100644 --- a/crates/cli/runner/src/lib.rs +++ b/crates/cli/runner/src/lib.rs @@ -58,7 +58,10 @@ impl CliRunner { // drop the tokio runtime on a separate thread because drop blocks until its pools // (including blocking pool) are shutdown. In other words `drop(tokio_runtime)` would block // the current thread but we want to exit right away. - std::thread::spawn(move || drop(tokio_runtime)); + std::thread::Builder::new() + .name("tokio-runtime-shutdown".to_string()) + .spawn(move || drop(tokio_runtime)) + .unwrap(); command_res } @@ -92,7 +95,10 @@ impl CliRunner { // drop the tokio runtime on a separate thread because drop blocks until its pools // (including blocking pool) are shutdown. In other words `drop(tokio_runtime)` would block // the current thread but we want to exit right away. 
- std::thread::spawn(move || drop(tokio_runtime)); + std::thread::Builder::new() + .name("tokio-runtime-shutdown".to_string()) + .spawn(move || drop(tokio_runtime)) + .unwrap(); Ok(()) } diff --git a/crates/storage/libmdbx-rs/src/txn_manager.rs b/crates/storage/libmdbx-rs/src/txn_manager.rs index 869eefa7873cd..38952c2f1df91 100644 --- a/crates/storage/libmdbx-rs/src/txn_manager.rs +++ b/crates/storage/libmdbx-rs/src/txn_manager.rs @@ -45,14 +45,14 @@ impl TxnManager { txn_manager } - /// Spawns a new thread with [std::thread::spawn] that listens to incoming [TxnManagerMessage] - /// messages, executes an FFI function, and returns the result on the provided channel. + /// Spawns a new [std::thread] that listens to incoming [TxnManagerMessage] messages, executes + /// an FFI function, and returns the result on the provided channel. /// /// - [TxnManagerMessage::Begin] opens a new transaction with [ffi::mdbx_txn_begin_ex] /// - [TxnManagerMessage::Abort] aborts a transaction with [ffi::mdbx_txn_abort] /// - [TxnManagerMessage::Commit] commits a transaction with [ffi::mdbx_txn_commit_ex] fn start_message_listener(&self, env: EnvPtr, rx: Receiver) { - std::thread::spawn(move || { + let task = move || { #[allow(clippy::redundant_locals)] let env = env; loop { @@ -90,7 +90,8 @@ impl TxnManager { Err(_) => return, } } - }); + }; + std::thread::Builder::new().name("mbdx-rs-txn-manager".to_string()).spawn(task).unwrap(); } pub(crate) fn send_message(&self, message: TxnManagerMessage) { @@ -198,11 +199,10 @@ mod read_transactions { self.timed_out_not_aborted.len() } - /// Spawns a new thread with [std::thread::spawn] that monitors the list of active read - /// transactions and timeouts those that are open for longer than - /// `ReadTransactions.max_duration`. + /// Spawns a new [std::thread] that monitors the list of active read transactions and + /// timeouts those that are open for longer than `ReadTransactions.max_duration`. 
pub(super) fn start_monitor(self: Arc) { - std::thread::spawn(move || { + let task = move || { let mut timed_out_active = Vec::new(); loop { @@ -295,7 +295,11 @@ mod read_transactions { READ_TRANSACTIONS_CHECK_INTERVAL.min(duration_until_closest_deadline), ); } - }); + }; + std::thread::Builder::new() + .name("mdbx-rs-read-tx-timeouts".to_string()) + .spawn(task) + .unwrap(); } } From 1d428cbba6dc004a6b92a8d0030ae6b4a4609984 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 May 2024 16:52:45 +0200 Subject: [PATCH 693/700] feat: add blockexecution msg fn (#8478) --- crates/evm/execution-errors/src/lib.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/crates/evm/execution-errors/src/lib.rs b/crates/evm/execution-errors/src/lib.rs index 5013e5387595e..f08fcc0180396 100644 --- a/crates/evm/execution-errors/src/lib.rs +++ b/crates/evm/execution-errors/src/lib.rs @@ -11,6 +11,7 @@ use reth_consensus::ConsensusError; use reth_primitives::{revm_primitives::EVMError, BlockNumHash, PruneSegmentError, B256}; use reth_storage_errors::provider::ProviderError; +use std::fmt::Display; use thiserror::Error; pub mod trie; @@ -118,7 +119,7 @@ pub enum BlockExecutionError { /// Error when fetching latest block state. #[error(transparent)] LatestBlock(#[from] ProviderError), - /// Optimism Block Executor Errors + /// Arbitrary Block Executor Errors #[error(transparent)] Other(Box), } @@ -132,6 +133,11 @@ impl BlockExecutionError { Self::Other(Box::new(error)) } + /// Create a new [BlockExecutionError::Other] from a given message. + pub fn msg(msg: impl Display) -> Self { + Self::Other(msg.to_string().into()) + } + /// Returns the inner `BlockValidationError` if the error is a validation error. 
pub const fn as_validation(&self) -> Option<&BlockValidationError> { match self { From 09f1ee1600c07118085465ba1a11c871d93454b0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 May 2024 16:57:50 +0200 Subject: [PATCH 694/700] chore: rm reth-interfaces from op e2e (#8480) --- Cargo.lock | 1 - crates/optimism/node/Cargo.toml | 1 - crates/optimism/node/tests/e2e/p2p.rs | 2 +- 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3efd3c97fd417..8df6440c4cd92 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7525,7 +7525,6 @@ dependencies = [ "reth-e2e-test-utils", "reth-evm", "reth-evm-optimism", - "reth-interfaces", "reth-network", "reth-node-api", "reth-node-builder", diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index ddd2fb283896d..5cd32f8f489fa 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -25,7 +25,6 @@ reth-tracing.workspace = true reth-provider.workspace = true reth-transaction-pool.workspace = true reth-network.workspace = true -reth-interfaces.workspace = true reth-evm.workspace = true reth-revm.workspace = true reth-evm-optimism.workspace = true diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index 9e3741055ab67..e4993e8f3138c 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -1,5 +1,5 @@ use crate::utils::{advance_chain, setup}; -use reth_interfaces::blockchain_tree::error::BlockchainTreeError; +use reth::blockchain_tree::error::BlockchainTreeError; use reth_rpc_types::engine::PayloadStatusEnum; use std::sync::Arc; use tokio::sync::Mutex; From 43bfa2ecb03701b8e15876e1c9c1c253a67608da Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 May 2024 16:58:18 +0200 Subject: [PATCH 695/700] chore: rm reth-interfaces from payload builder (#8479) --- Cargo.lock | 2 +- crates/payload/builder/Cargo.toml | 2 +- crates/payload/builder/src/error.rs | 2 +- 3 
files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8df6440c4cd92..68e0437635d79 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7588,8 +7588,8 @@ dependencies = [ "futures-util", "metrics", "reth-engine-primitives", + "reth-errors", "reth-ethereum-engine-primitives", - "reth-interfaces", "reth-metrics", "reth-primitives", "reth-provider", diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index aa4b785cd370e..c95b22c3fdabc 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -16,7 +16,7 @@ workspace = true reth-primitives.workspace = true reth-rpc-types.workspace = true reth-transaction-pool.workspace = true -reth-interfaces.workspace = true +reth-errors.workspace = true reth-provider.workspace = true reth-engine-primitives.workspace = true reth-ethereum-engine-primitives.workspace = true diff --git a/crates/payload/builder/src/error.rs b/crates/payload/builder/src/error.rs index a7aa7e88be9a4..eee8899cf1d93 100644 --- a/crates/payload/builder/src/error.rs +++ b/crates/payload/builder/src/error.rs @@ -1,6 +1,6 @@ //! Error types emitted by types or implementations of this crate. 
-use reth_interfaces::{provider::ProviderError, RethError}; +use reth_errors::{ProviderError, RethError}; use reth_primitives::{revm_primitives::EVMError, B256}; use reth_transaction_pool::BlobStoreError; use tokio::sync::oneshot; From 77b872c3dcb1b006ff06a9657d5757095d96fcfb Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 May 2024 16:59:34 +0200 Subject: [PATCH 696/700] chore: rm reth-interfaces from op blockchain tree (#8481) --- Cargo.lock | 2 +- crates/blockchain-tree/Cargo.toml | 4 ++-- crates/blockchain-tree/src/block_buffer.rs | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 68e0437635d79..161f886c348a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6550,7 +6550,6 @@ dependencies = [ "reth-evm", "reth-evm-ethereum", "reth-execution-errors", - "reth-interfaces", "reth-metrics", "reth-network", "reth-primitives", @@ -6558,6 +6557,7 @@ dependencies = [ "reth-revm", "reth-stages-api", "reth-storage-errors", + "reth-testing-utils", "reth-trie", "reth-trie-parallel", "tokio", diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 58ee1cda5ac67..9ba786a894344 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -23,7 +23,7 @@ reth-provider.workspace = true reth-stages-api.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-trie-parallel = { workspace = true, features = ["parallel"] } -reth-network = { workspace = true } +reth-network.workspace = true reth-consensus.workspace = true # common @@ -41,10 +41,10 @@ linked_hash_set = "0.1.4" [dev-dependencies] reth-db = { workspace = true, features = ["test-utils"] } -reth-interfaces = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true , features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-evm = { workspace = true, features = ["test-utils"] } +reth-testing-utils.workspace = true 
reth-revm.workspace = true reth-evm-ethereum.workspace = true parking_lot.workspace = true diff --git a/crates/blockchain-tree/src/block_buffer.rs b/crates/blockchain-tree/src/block_buffer.rs index 19ff9368b695b..25e3f32ad6c32 100644 --- a/crates/blockchain-tree/src/block_buffer.rs +++ b/crates/blockchain-tree/src/block_buffer.rs @@ -182,11 +182,11 @@ impl BlockBuffer { #[cfg(test)] mod tests { use crate::BlockBuffer; - use reth_interfaces::test_utils::{ + use reth_primitives::{BlockHash, BlockNumHash, SealedBlockWithSenders}; + use reth_testing_utils::{ generators, generators::{random_block, Rng}, }; - use reth_primitives::{BlockHash, BlockNumHash, SealedBlockWithSenders}; use std::collections::HashMap; /// Create random block with specified number and parent hash. From d7e822f9d64af4a3f1974edb88fa168b48a57d6e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 May 2024 17:05:18 +0200 Subject: [PATCH 697/700] chore: rm reth-interfaces from bin reth (#8483) --- Cargo.lock | 3 ++- bin/reth/Cargo.toml | 3 ++- bin/reth/src/commands/debug_cmd/build_block.rs | 2 +- bin/reth/src/commands/debug_cmd/execution.rs | 2 +- bin/reth/src/commands/debug_cmd/in_memory_merkle.rs | 2 +- bin/reth/src/commands/debug_cmd/merkle.rs | 2 +- bin/reth/src/commands/import.rs | 2 +- bin/reth/src/commands/p2p/mod.rs | 2 +- 8 files changed, 10 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 161f886c348a6..d8ac329843691 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6405,13 +6405,14 @@ dependencies = [ "reth-discv4", "reth-discv5", "reth-downloaders", + "reth-errors", "reth-ethereum-payload-builder", "reth-evm", "reth-exex", "reth-fs-util", - "reth-interfaces", "reth-network", "reth-network-api", + "reth-network-p2p", "reth-nippy-jar", "reth-node-api", "reth-node-builder", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 35c5c814aa55e..4b6657c947ba0 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -23,7 +23,7 @@ reth-provider = { workspace 
= true } reth-evm.workspace = true reth-revm.workspace = true reth-stages.workspace = true -reth-interfaces = { workspace = true, features = ["clap"] } +reth-errors.workspace = true reth-transaction-pool.workspace = true reth-beacon-consensus.workspace = true reth-cli-runner.workspace = true @@ -35,6 +35,7 @@ reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true reth-rpc-api = { workspace = true, features = ["client"] } reth-network = { workspace = true, features = ["serde"] } +reth-network-p2p.workspace = true reth-network-api.workspace = true reth-downloaders.workspace = true reth-tracing.workspace = true diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 7914ec7829dee..7760f3a92d69d 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -21,9 +21,9 @@ use reth_blockchain_tree::{ use reth_cli_runner::CliContext; use reth_consensus::Consensus; use reth_db::{init_db, DatabaseEnv}; +use reth_errors::RethResult; use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; use reth_fs_util as fs; -use reth_interfaces::RethResult; use reth_node_api::PayloadBuilderAttributes; use reth_payload_builder::database::CachedReads; use reth_primitives::{ diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 628a6cd2621c1..87e122b4c0615 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -24,9 +24,9 @@ use reth_downloaders::{ }; use reth_exex::ExExManagerHandle; use reth_fs_util as fs; -use reth_interfaces::p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}; use reth_network::{NetworkEvents, NetworkHandle}; use reth_network_api::NetworkInfo; +use reth_network_p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}; use reth_primitives::{ stage::StageId, BlockHashOrNumber, 
BlockNumber, ChainSpec, PruneModes, B256, }; diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 6f7a580a4b703..4a38bec01f8ac 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -15,9 +15,9 @@ use clap::Parser; use reth_cli_runner::CliContext; use reth_config::Config; use reth_db::{init_db, DatabaseEnv}; +use reth_errors::BlockValidationError; use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; use reth_fs_util as fs; -use reth_interfaces::executor::BlockValidationError; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; use reth_primitives::{stage::StageId, BlockHashOrNumber, ChainSpec, Receipts}; diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index bfbca46f4ff4a..83562d9671572 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -19,9 +19,9 @@ use reth_consensus::Consensus; use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx, DatabaseEnv}; use reth_evm::execute::{BatchBlockExecutionOutput, BatchExecutor, BlockExecutorProvider}; use reth_fs_util as fs; -use reth_interfaces::p2p::full_block::FullBlockClient; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; +use reth_network_p2p::full_block::FullBlockClient; use reth_primitives::{stage::StageCheckpoint, BlockHashOrNumber, ChainSpec, PruneModes}; use reth_provider::{ BlockNumReader, BlockWriter, BundleStateWithReceipts, HeaderProvider, LatestStateProviderRef, diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 71268fa8e1bbb..869bcefbd6431 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -22,7 +22,7 @@ use reth_downloaders::{ file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, 
headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; -use reth_interfaces::p2p::{ +use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::{HeaderDownloader, SyncTarget}, }; diff --git a/bin/reth/src/commands/p2p/mod.rs b/bin/reth/src/commands/p2p/mod.rs index b6710a363a991..8764d448631e7 100644 --- a/bin/reth/src/commands/p2p/mod.rs +++ b/bin/reth/src/commands/p2p/mod.rs @@ -14,8 +14,8 @@ use clap::{Parser, Subcommand}; use discv5::ListenConfig; use reth_config::Config; use reth_db::create_db; -use reth_interfaces::p2p::bodies::client::BodiesClient; use reth_network::NetworkConfigBuilder; +use reth_network_p2p::bodies::client::BodiesClient; use reth_primitives::{BlockHashOrNumber, ChainSpec}; use reth_provider::ProviderFactory; use std::{ From 944180348fd43df164d59df5af949757a7b12753 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5vard=20Anda=20Estensen?= Date: Wed, 29 May 2024 17:05:31 +0200 Subject: [PATCH 698/700] chore: add _total suffix to counter metrics (#8263) Co-authored-by: Matthias Seitz --- .../beacon/src/engine/hooks/prune.rs | 4 +- crates/metrics/src/common/mpsc.rs | 38 +++++++++---------- crates/rpc/rpc-builder/src/metrics.rs | 28 +++++++------- crates/rpc/rpc/src/eth/cache/metrics.rs | 4 +- .../rpc/rpc/src/eth/cache/multi_consumer.rs | 4 +- crates/tasks/src/lib.rs | 11 ++++-- crates/tasks/src/metrics.rs | 12 +++--- etc/grafana/dashboards/overview.json | 4 +- etc/grafana/dashboards/reth-mempool.json | 8 ++-- 9 files changed, 58 insertions(+), 55 deletions(-) diff --git a/crates/consensus/beacon/src/engine/hooks/prune.rs b/crates/consensus/beacon/src/engine/hooks/prune.rs index b70bd6d18a3d5..29675806cbafe 100644 --- a/crates/consensus/beacon/src/engine/hooks/prune.rs +++ b/crates/consensus/beacon/src/engine/hooks/prune.rs @@ -99,7 +99,7 @@ impl PruneHook { let _ = tx.send((pruner, result)); }), ); - self.metrics.runs.increment(1); + self.metrics.runs_total.increment(1); self.pruner_state = PrunerState::Running(rx); 
Some(EngineHookEvent::Started) @@ -160,7 +160,7 @@ enum PrunerState { #[metrics(scope = "consensus.engine.prune")] struct Metrics { /// The number of times the pruner was run. - runs: Counter, + runs_total: Counter, } impl From for EngineHookError { diff --git a/crates/metrics/src/common/mpsc.rs b/crates/metrics/src/common/mpsc.rs index 98c670ef79905..40c6c23592823 100644 --- a/crates/metrics/src/common/mpsc.rs +++ b/crates/metrics/src/common/mpsc.rs @@ -55,11 +55,11 @@ impl UnboundedMeteredSender { pub fn send(&self, message: T) -> Result<(), SendError> { match self.sender.send(message) { Ok(()) => { - self.metrics.messages_sent.increment(1); + self.metrics.messages_sent_total.increment(1); Ok(()) } Err(error) => { - self.metrics.send_errors.increment(1); + self.metrics.send_errors_total.increment(1); Err(error) } } @@ -94,7 +94,7 @@ impl UnboundedMeteredReceiver { pub async fn recv(&mut self) -> Option { let msg = self.receiver.recv().await; if msg.is_some() { - self.metrics.messages_received.increment(1); + self.metrics.messages_received_total.increment(1); } msg } @@ -102,7 +102,7 @@ impl UnboundedMeteredReceiver { /// Tries to receive the next value for this receiver. 
pub fn try_recv(&mut self) -> Result { let msg = self.receiver.try_recv()?; - self.metrics.messages_received.increment(1); + self.metrics.messages_received_total.increment(1); Ok(msg) } @@ -115,7 +115,7 @@ impl UnboundedMeteredReceiver { pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { let msg = ready!(self.receiver.poll_recv(cx)); if msg.is_some() { - self.metrics.messages_received.increment(1); + self.metrics.messages_received_total.increment(1); } Poll::Ready(msg) } @@ -161,11 +161,11 @@ impl MeteredSender { pub fn try_send(&self, message: T) -> Result<(), TrySendError> { match self.sender.try_send(message) { Ok(()) => { - self.metrics.messages_sent.increment(1); + self.metrics.messages_sent_total.increment(1); Ok(()) } Err(error) => { - self.metrics.send_errors.increment(1); + self.metrics.send_errors_total.increment(1); Err(error) } } @@ -176,11 +176,11 @@ impl MeteredSender { pub async fn send(&self, value: T) -> Result<(), SendError> { match self.sender.send(value).await { Ok(()) => { - self.metrics.messages_sent.increment(1); + self.metrics.messages_sent_total.increment(1); Ok(()) } Err(error) => { - self.metrics.send_errors.increment(1); + self.metrics.send_errors_total.increment(1); Err(error) } } @@ -214,7 +214,7 @@ impl MeteredReceiver { pub async fn recv(&mut self) -> Option { let msg = self.receiver.recv().await; if msg.is_some() { - self.metrics.messages_received.increment(1); + self.metrics.messages_received_total.increment(1); } msg } @@ -222,7 +222,7 @@ impl MeteredReceiver { /// Tries to receive the next value for this receiver. 
pub fn try_recv(&mut self) -> Result { let msg = self.receiver.try_recv()?; - self.metrics.messages_received.increment(1); + self.metrics.messages_received_total.increment(1); Ok(msg) } @@ -235,7 +235,7 @@ impl MeteredReceiver { pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { let msg = ready!(self.receiver.poll_recv(cx)); if msg.is_some() { - self.metrics.messages_received.increment(1); + self.metrics.messages_received_total.increment(1); } Poll::Ready(msg) } @@ -254,9 +254,9 @@ impl Stream for MeteredReceiver { #[metrics(dynamic = true)] struct MeteredSenderMetrics { /// Number of messages sent - messages_sent: Counter, + messages_sent_total: Counter, /// Number of failed message deliveries - send_errors: Counter, + send_errors_total: Counter, } /// Throughput metrics for [MeteredReceiver] @@ -264,7 +264,7 @@ struct MeteredSenderMetrics { #[metrics(dynamic = true)] struct MeteredReceiverMetrics { /// Number of messages received - messages_received: Counter, + messages_received_total: Counter, } /// A wrapper type around [PollSender] that updates metrics on send. 
@@ -294,7 +294,7 @@ impl MeteredPollSender { Poll::Ready(Ok(permit)) => Poll::Ready(Ok(permit)), Poll::Ready(Err(error)) => Poll::Ready(Err(error)), Poll::Pending => { - self.metrics.back_pressure.increment(1); + self.metrics.back_pressure_total.increment(1); Poll::Pending } } @@ -305,7 +305,7 @@ impl MeteredPollSender { pub fn send_item(&mut self, item: T) -> Result<(), PollSendError> { match self.sender.send_item(item) { Ok(()) => { - self.metrics.messages_sent.increment(1); + self.metrics.messages_sent_total.increment(1); Ok(()) } Err(error) => Err(error), @@ -324,7 +324,7 @@ impl Clone for MeteredPollSender { #[metrics(dynamic = true)] struct MeteredPollSenderMetrics { /// Number of messages sent - messages_sent: Counter, + messages_sent_total: Counter, /// Number of delayed message deliveries caused by a full channel - back_pressure: Counter, + back_pressure_total: Counter, } diff --git a/crates/rpc/rpc-builder/src/metrics.rs b/crates/rpc/rpc-builder/src/metrics.rs index ade36ed21654d..ae13ea26a55ce 100644 --- a/crates/rpc/rpc-builder/src/metrics.rs +++ b/crates/rpc/rpc-builder/src/metrics.rs @@ -90,7 +90,7 @@ pub(crate) struct RpcRequestMetricsService { impl RpcRequestMetricsService { pub(crate) fn new(service: S, metrics: RpcRequestMetrics) -> Self { // this instance is kept alive for the duration of the connection - metrics.inner.connection_metrics.connections_opened.increment(1); + metrics.inner.connection_metrics.connections_opened_total.increment(1); Self { inner: service, metrics } } } @@ -102,10 +102,10 @@ where type Future = MeteredRequestFuture; fn call(&self, req: Request<'a>) -> Self::Future { - self.metrics.inner.connection_metrics.requests_started.increment(1); + self.metrics.inner.connection_metrics.requests_started_total.increment(1); let call_metrics = self.metrics.inner.call_metrics.get_key_value(req.method.as_ref()); if let Some((_, call_metrics)) = &call_metrics { - call_metrics.started.increment(1); + 
call_metrics.started_total.increment(1); } MeteredRequestFuture { fut: self.inner.call(req), @@ -119,7 +119,7 @@ where impl Drop for RpcRequestMetricsService { fn drop(&mut self) { // update connection metrics, connection closed - self.metrics.inner.connection_metrics.connections_closed.increment(1); + self.metrics.inner.connection_metrics.connections_closed_total.increment(1); } } @@ -153,7 +153,7 @@ impl> Future for MeteredRequestFuture { let elapsed = this.started_at.elapsed().as_secs_f64(); // update transport metrics - this.metrics.inner.connection_metrics.requests_finished.increment(1); + this.metrics.inner.connection_metrics.requests_finished_total.increment(1); this.metrics.inner.connection_metrics.request_time_seconds.record(elapsed); // update call metrics @@ -162,9 +162,9 @@ impl> Future for MeteredRequestFuture { { call_metrics.time_seconds.record(elapsed); if resp.is_success() { - call_metrics.successful.increment(1); + call_metrics.successful_total.increment(1); } else { - call_metrics.failed.increment(1); + call_metrics.failed_total.increment(1); } } } @@ -202,13 +202,13 @@ impl RpcTransport { #[metrics(scope = "rpc_server.connections")] struct RpcServerConnectionMetrics { /// The number of connections opened - connections_opened: Counter, + connections_opened_total: Counter, /// The number of connections closed - connections_closed: Counter, + connections_closed_total: Counter, /// The number of requests started - requests_started: Counter, + requests_started_total: Counter, /// The number of requests finished - requests_finished: Counter, + requests_finished_total: Counter, /// Response for a single request/response pair request_time_seconds: Histogram, } @@ -218,11 +218,11 @@ struct RpcServerConnectionMetrics { #[metrics(scope = "rpc_server.calls")] struct RpcServerCallMetrics { /// The number of calls started - started: Counter, + started_total: Counter, /// The number of successful calls - successful: Counter, + successful_total: Counter, /// 
The number of failed calls - failed: Counter, + failed_total: Counter, /// Response for a single call time_seconds: Histogram, } diff --git a/crates/rpc/rpc/src/eth/cache/metrics.rs b/crates/rpc/rpc/src/eth/cache/metrics.rs index eb8058f3969f7..c9b18a299da37 100644 --- a/crates/rpc/rpc/src/eth/cache/metrics.rs +++ b/crates/rpc/rpc/src/eth/cache/metrics.rs @@ -9,7 +9,7 @@ pub(crate) struct CacheMetrics { /// The number of queued consumers. pub(crate) queued_consumers_count: Gauge, /// The number of cache hits. - pub(crate) hits: Counter, + pub(crate) hits_total: Counter, /// The number of cache misses. - pub(crate) misses: Counter, + pub(crate) misses_total: Counter, } diff --git a/crates/rpc/rpc/src/eth/cache/multi_consumer.rs b/crates/rpc/rpc/src/eth/cache/multi_consumer.rs index d64eeb0a0cf12..0293840f24a09 100644 --- a/crates/rpc/rpc/src/eth/cache/multi_consumer.rs +++ b/crates/rpc/rpc/src/eth/cache/multi_consumer.rs @@ -72,9 +72,9 @@ where pub fn get(&mut self, key: &K) -> Option<&mut V> { let entry = self.cache.get(key); if entry.is_some() { - self.metrics.hits.increment(1); + self.metrics.hits_total.increment(1); } else { - self.metrics.misses.increment(1); + self.metrics.misses_total.increment(1); } entry } diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs index 362a692d1ef07..f6a853986ff0a 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -328,12 +328,14 @@ impl TaskExecutor { let on_shutdown = self.on_shutdown.clone(); // Clone only the specific counter that we need. 
- let finished_regular_tasks_metrics = self.metrics.finished_regular_tasks.clone(); + let finished_regular_tasks_total_metrics = + self.metrics.finished_regular_tasks_total.clone(); // Wrap the original future to increment the finished tasks counter upon completion let task = { async move { // Create an instance of IncCounterOnDrop with the counter to increment - let _inc_counter_on_drop = IncCounterOnDrop::new(finished_regular_tasks_metrics); + let _inc_counter_on_drop = + IncCounterOnDrop::new(finished_regular_tasks_total_metrics); let fut = pin!(fut); let _ = select(on_shutdown, fut).await; } @@ -405,10 +407,11 @@ impl TaskExecutor { .in_current_span(); // Clone only the specific counter that we need. - let finished_critical_tasks_metrics = self.metrics.finished_critical_tasks.clone(); + let finished_critical_tasks_total_metrics = + self.metrics.finished_critical_tasks_total.clone(); let task = async move { // Create an instance of IncCounterOnDrop with the counter to increment - let _inc_counter_on_drop = IncCounterOnDrop::new(finished_critical_tasks_metrics); + let _inc_counter_on_drop = IncCounterOnDrop::new(finished_critical_tasks_total_metrics); let task = pin!(task); let _ = select(on_shutdown, task).await; }; diff --git a/crates/tasks/src/metrics.rs b/crates/tasks/src/metrics.rs index a8397b7fe415d..6d414a77431dc 100644 --- a/crates/tasks/src/metrics.rs +++ b/crates/tasks/src/metrics.rs @@ -9,24 +9,24 @@ use reth_metrics::{metrics::Counter, Metrics}; #[metrics(scope = "executor.spawn")] pub struct TaskExecutorMetrics { /// Number of spawned critical tasks - pub(crate) critical_tasks: Counter, + pub(crate) critical_tasks_total: Counter, /// Number of finished spawned critical tasks - pub(crate) finished_critical_tasks: Counter, + pub(crate) finished_critical_tasks_total: Counter, /// Number of spawned regular tasks - pub(crate) regular_tasks: Counter, + pub(crate) regular_tasks_total: Counter, /// Number of finished spawned regular tasks - pub(crate) 
finished_regular_tasks: Counter, + pub(crate) finished_regular_tasks_total: Counter, } impl TaskExecutorMetrics { /// Increments the counter for spawned critical tasks. pub(crate) fn inc_critical_tasks(&self) { - self.critical_tasks.increment(1); + self.critical_tasks_total.increment(1); } /// Increments the counter for spawned regular tasks. pub(crate) fn inc_regular_tasks(&self) { - self.regular_tasks.increment(1); + self.regular_tasks_total.increment(1); } } diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index eacc3a25c8e54..e1bc6ddbc9239 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -7342,7 +7342,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "sum(reth_rpc_server_connections_connections_opened{instance=~\"$instance\"} - reth_rpc_server_connections_connections_closed{instance=~\"$instance\"}) by (transport)", + "expr": "sum(reth_rpc_server_connections_connections_opened_total{instance=~\"$instance\"} - reth_rpc_server_connections_connections_closed_total{instance=~\"$instance\"}) by (transport)", "format": "time_series", "fullMetaSearch": false, "includeNullMetadata": true, @@ -7933,7 +7933,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "sum(rate(reth_rpc_server_calls_successful{instance =~ \"$instance\"}[$__rate_interval])) by (method) > 0", + "expr": "sum(rate(reth_rpc_server_calls_successful_total{instance =~ \"$instance\"}[$__rate_interval])) by (method) > 0", "instant": false, "legendFormat": "{{method}}", "range": true, diff --git a/etc/grafana/dashboards/reth-mempool.json b/etc/grafana/dashboards/reth-mempool.json index 3ba499a9a4fb0..ff45a5bf03a86 100644 --- a/etc/grafana/dashboards/reth-mempool.json +++ b/etc/grafana/dashboards/reth-mempool.json @@ -1458,7 +1458,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "rate(reth_network_pool_transactions_messages_sent{instance=~\"$instance\"}[$__rate_interval])", 
+ "expr": "rate(reth_network_pool_transactions_messages_sent_total{instance=~\"$instance\"}[$__rate_interval])", "hide": false, "instant": false, "legendFormat": "Tx", @@ -1471,7 +1471,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "rate(reth_network_pool_transactions_messages_received{instance=~\"$instance\"}[$__rate_interval])", + "expr": "rate(reth_network_pool_transactions_messages_received_total{instance=~\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Rx", "range": true, @@ -1483,7 +1483,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_network_pool_transactions_messages_sent{instance=~\"$instance\"} - reth_network_pool_transactions_messages_received{instance=~\"$instance\"}", + "expr": "reth_network_pool_transactions_messages_sent_total{instance=~\"$instance\"} - reth_network_pool_transactions_messages_received_total{instance=~\"$instance\"}", "hide": false, "legendFormat": "Messages in Channel", "range": true, @@ -3087,7 +3087,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "rate(reth_network_invalid_messages_received{instance=~\"$instance\"}[$__rate_interval])", + "expr": "rate(reth_network_invalid_messages_received_total{instance=~\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Invalid Messages", "range": true, From d388fc0becbe372673f128740c85b3d3cce284b4 Mon Sep 17 00:00:00 2001 From: James Prestwich Date: Wed, 29 May 2024 16:15:11 +0100 Subject: [PATCH 699/700] feat: create_transport_rpc_modules (#8482) --- crates/rpc/rpc-builder/src/lib.rs | 45 +++++++++++++++++-------------- 1 file changed, 25 insertions(+), 20 deletions(-) diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 0545356410a35..a9dc85d46ee29 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -469,26 +469,14 @@ where EngineT: EngineTypes + 'static, EngineApi: EngineApiServer, { - let mut modules = 
TransportRpcModules::default(); - let Self { provider, pool, network, executor, events, evm_config } = self; - let TransportRpcModuleConfig { http, ws, ipc, config } = module_config.clone(); + let config = module_config.config.clone().unwrap_or_default(); - let mut registry = RethModuleRegistry::new( - provider, - pool, - network, - executor, - events, - config.unwrap_or_default(), - evm_config, - ); + let mut registry = + RethModuleRegistry::new(provider, pool, network, executor, events, config, evm_config); - modules.config = module_config; - modules.http = registry.maybe_module(http.as_ref()); - modules.ws = registry.maybe_module(ws.as_ref()); - modules.ipc = registry.maybe_module(ipc.as_ref()); + let modules = registry.create_transport_rpc_modules(module_config); let auth_module = registry.create_auth_module(engine); @@ -1138,11 +1126,28 @@ where self } - /// Helper function to create a [RpcModule] if it's not `None` + /// Helper function to create a [`RpcModule`] if it's not `None` fn maybe_module(&mut self, config: Option<&RpcModuleSelection>) -> Option> { - let config = config?; - let module = self.module_for(config); - Some(module) + config.map(|config| self.module_for(config)) + } + + /// Configure a [`TransportRpcModules`] using the current registry. This + /// creates [`RpcModule`] instances for the modules selected by the + /// `config`. 
+ pub fn create_transport_rpc_modules( + &mut self, + config: TransportRpcModuleConfig, + ) -> TransportRpcModules<()> { + let mut modules = TransportRpcModules::default(); + let http = self.maybe_module(config.http.as_ref()); + let ws = self.maybe_module(config.ws.as_ref()); + let ipc = self.maybe_module(config.ipc.as_ref()); + + modules.config = config; + modules.http = http; + modules.ws = ws; + modules.ipc = ipc; + modules } /// Populates a new [RpcModule] based on the selected [RethRpcModule]s in the given From bab8aaa29f82594ea83c0dc0f94bf3a4422f40ca Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 May 2024 17:35:47 +0200 Subject: [PATCH 700/700] chore: rm reth-interfaces from bin stages (#8484) --- Cargo.lock | 1 - crates/stages/Cargo.toml | 2 +- crates/stages/benches/setup/mod.rs | 12 ++++++------ 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d8ac329843691..90277e5b352e5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7965,7 +7965,6 @@ dependencies = [ "reth-evm-ethereum", "reth-execution-errors", "reth-exex", - "reth-interfaces", "reth-network-p2p", "reth-network-types", "reth-primitives", diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index c78d10488e93f..7f300fa5ef225 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -50,8 +50,8 @@ reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } reth-db = { workspace = true, features = ["test-utils", "mdbx"] } reth-evm-ethereum.workspace = true reth-execution-errors.workspace = true -reth-interfaces = { workspace = true, features = ["test-utils"] } reth-consensus = { workspace = true, features = ["test-utils"] } +reth-network-p2p = { workspace = true, features = ["test-utils"] } reth-downloaders.workspace = true reth-revm.workspace = true reth-static-file.workspace = true diff --git a/crates/stages/benches/setup/mod.rs b/crates/stages/benches/setup/mod.rs index 06e15192b159f..cbfd44c4d41d5 100644 
--- a/crates/stages/benches/setup/mod.rs +++ b/crates/stages/benches/setup/mod.rs @@ -7,18 +7,18 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, DatabaseEnv, }; -use reth_interfaces::test_utils::{ +use reth_primitives::{Account, Address, SealedBlock, B256, U256}; +use reth_stages::{ + stages::{AccountHashingStage, StorageHashingStage}, + test_utils::{StorageKind, TestStageDB}, +}; +use reth_testing_utils::{ generators, generators::{ random_block_range, random_changeset_range, random_contract_account_range, random_eoa_accounts, }, }; -use reth_primitives::{Account, Address, SealedBlock, B256, U256}; -use reth_stages::{ - stages::{AccountHashingStage, StorageHashingStage}, - test_utils::{StorageKind, TestStageDB}, -}; use reth_trie::StateRoot; use std::{collections::BTreeMap, fs, path::Path, sync::Arc}; use tokio::runtime::Handle;